pacemaker  2.1.9-49aab99839
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/xml.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

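/*
 * \internal
 * \brief Do a deferred parameter check for a resource history entry
 *
 * If the check finds that the action definition or the resource parameters
 * have changed, clear the resource's fail count on that node.
 */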
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check)
{
    const char *reason = NULL;
    pcmk__op_digest_t *digest_data = NULL;

    switch (check) {
        case pcmk__check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                    NULL)) {
                reason = "action definition changed";
            }
            break;

        case pcmk__check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->cluster);
            switch (digest_data->rc) {
                case pcmk__digest_unknown:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, pcmk__xe_id(rsc_op), node->details->id);
                    break;
                case pcmk__digest_match:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->cluster);
    }
}

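/*
 * \internal
 * \brief Check whether a clear-fail-count action is already scheduled
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if a clear_failcount action for \p rsc is scheduled on \p node
 */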
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
                              const pcmk_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                       TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

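/*
 * \internal
 * \brief Ban a resource from a node if its fail count there is too high
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check it on (pcmk_node_t *)
 */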
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, check_failure_threshold, user_data);
        return;
    }

    if (!failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        pcmk_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -PCMK_SCORE_INFINITY,
                              "__fail_limit__", rsc->cluster);
        }
    }
}

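/*
 * \internal
 * \brief Ban a resource from a node if exclusive discovery does not apply
 *
 * If a resource (or its top-level parent) uses exclusive discovery, ban it
 * from this node unless the node is marked for exclusive probing.
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check it on (pcmk_node_t *)
 */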
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    if (rsc->exclusive_discover
        || pe__const_top_resource(rsc, false)->exclusive_discover) {
        pcmk_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pcmk_probe_exclusive)) {
            match->weight = -PCMK_SCORE_INFINITY;
        }
    }
}

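/*
 * \internal
 * \brief Apply a resource's configured stickiness to its current node
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Ignored
 */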
static void
apply_stickiness(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    pcmk_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, apply_stickiness, NULL);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
        && (g_hash_table_lookup(rsc->allowed_nodes,
                                node->details->id) == NULL)) {
        pcmk__rsc_debug(rsc,
                        "Ignoring %s stickiness because the cluster is "
                        "asymmetric and %s is not explicitly allowed",
                        rsc->id, pcmk__node_name(node));
        return;
    }

    pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                    rsc->id, rsc->stickiness, pcmk__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}

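/*
 * \internal
 * \brief Apply shutdown locks for all resources, if enabled
 *
 * \param[in,out] scheduler  Scheduler data
 */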
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
        return;
    }
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

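/*
 * \internal
 * \brief Count available nodes (for API backward compatibility only)
 *
 * \param[in,out] scheduler  Scheduler data
 */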
static void
count_available_nodes(pcmk_scheduler_t *scheduler)
{
    if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            scheduler->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}

/*
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(scheduler);
    count_available_nodes(scheduler);
    pcmk__apply_locations(scheduler);
    g_list_foreach(scheduler->resources, apply_stickiness, NULL);

    for (GList *node_iter = scheduler->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            check_failure_threshold(rsc_iter->data, node_iter->data);
            apply_exclusive_discovery(rsc_iter->data, node_iter->data);
        }
    }
}

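/*
 * \internal
 * \brief Assign resources to nodes
 *
 * \param[in,out] scheduler  Scheduler data
 */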
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;

    crm_trace("Assigning resources to nodes");

    if (!pcmk__str_eq(scheduler->placement_strategy, PCMK_VALUE_DEFAULT,
                      pcmk__str_casei)) {
        pcmk__show_node_capacities("Original", scheduler);
    }
    pcmk__sort_resources(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        /* Assign remote connection resources first (which will also assign any
         * colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
                                rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
                            rsc->xml->name, rsc->id);
            rsc->cmds->assign(rsc, NULL, true);
        }
    }

    pcmk__show_node_capacities("Remaining", scheduler);
}

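/*
 * \internal
 * \brief Schedule fail count clearing on online nodes if a resource is orphaned
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Ignored
 */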
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unassigned clone instances.
     */

    for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                       rsc->cluster);

        /* We can't use order_action_then_stop() here because its
         * pcmk__ar_guest_allowed breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pcmk__ar_ordered, rsc->cluster);
    }
}

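/*
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */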
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
    // Process deferred action checks
    pe__foreach_param_check(scheduler, check_params);
    pe__free_param_checks(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(scheduler);
    }

    if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
        g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
                       NULL);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

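/*
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if the resource or any descendant is managed, otherwise false
 */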
static bool
is_managed(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

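/*
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if any resource is managed, otherwise false
 */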
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
    for (const GList *iter = scheduler->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

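/*
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in the cluster is managed
 *
 * \return true if \p node should be fenced, otherwise false
 */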
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
    return have_managed && node->details->unclean
           && pe_can_fence(node->details->data_set, node);
}

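/*
 * \internal
 * \brief Check whether a node requires a shutdown action
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node should be shut down, otherwise false
 */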
static bool
needs_shutdown(const pcmk_node_t *node)
{
    if (pcmk__is_pacemaker_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

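/*
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in,out] list       List of existing non-DC fencing actions
 * \param[in,out] action     Fencing action to prepend to \p list
 * \param[in]     scheduler  Scheduler data
 *
 * \return (Possibly new) head of \p list
 */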
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
                  const pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
    }
    return g_list_prepend(list, action);
}

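/*
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in,out] node  Node that requires fencing
 *
 * \return Scheduled fencing action
 */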
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
    pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                         FALSE, node->details->data_set);

    pcmk__sched_warn("Scheduling node %s for fencing", pcmk__node_name(node));
    pcmk__order_vs_fence(fencing, node->details->data_set);
    return fencing;
}

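/*
 * \internal
 * \brief Create and order node fencing and shutdown actions
 *
 * \param[in,out] scheduler  Scheduler data
 */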
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(scheduler);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources "
                   "to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pcmk__is_guest_or_bundle_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(scheduler, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed)) {
            fencing = schedule_fencing(node);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                scheduler);
            }

        } else if (needs_shutdown(node)) {
            pcmk_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pcmk__config_warn("Node %s is unclean but cannot be fenced",
                              pcmk__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
            pcmk__config_warn("Resource functionality and data integrity "
                              "cannot be guaranteed (configure, enable, "
                              "and test fencing to correct this)");

        } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
                       PCMK_VALUE_IGNORE);
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                         pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                          pcmk__ar_ordered);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = scheduler->priv;
    GList *all = NULL;

    /* Due to the `crm_mon --node=` feature, out->message() for all the
     * resource-related messages expects a list of nodes that we are allowed to
     * output information for. Here, we create a wildcard to match all nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = scheduler->resources; item != NULL; item = item->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
            || (rsc->role != pcmk_role_stopped)) {
            out->message(out, pcmk__map_element_name(rsc->xml), 0UL, rsc, all,
                         all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = scheduler->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    scheduler->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(scheduler);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    scheduler->priv = prev_out;
}

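/*
 * \internal
 * \brief Log any required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */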
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
    const uint64_t flags = pcmk_action_optional
                           |pcmk_action_runnable
                           |pcmk_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = scheduler->actions;
         iter != NULL; iter = iter->next) {

        const pcmk_action_t *action = (const pcmk_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

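/*
 * \internal
 * \brief Unpack the CIB, reusing any previously calculated cluster status
 *
 * \param[in,out] cib        CIB XML to unpack
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */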
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
    const char* localhost_save = NULL;

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pcmk__set_scheduler_flags(scheduler, flags);
        return;
    }

    if (scheduler->localhost) {
        localhost_save = scheduler->localhost;
    }

    pcmk__assert(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(scheduler);

    if (localhost_save) {
        scheduler->localhost = localhost_save;
    }

    pcmk__set_scheduler_flags(scheduler, flags);
    scheduler->input = cib;
    cluster_status(scheduler); // Sets pcmk_sched_have_status
}

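/*
 * \internal
 * \brief Run the scheduler for a given CIB, creating a transition graph
 *
 * \param[in,out] cib        CIB XML to use as scheduler input
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */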
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pcmk_scheduler_t *scheduler)
{
    unpack_cib(cib, flags, scheduler);
    pcmk__set_assignment_methods(scheduler);
    pcmk__apply_node_health(scheduler);
    pcmk__unpack_constraints(scheduler);
    if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
        return;
    }

    if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
        && pcmk__is_daemon) {
        log_resource_details(scheduler);
    }

    apply_node_criteria(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
        return;
    }

    pcmk__create_internal_constraints(scheduler);
    pcmk__handle_rsc_config_changes(scheduler);
    assign_resources(scheduler);
    schedule_resource_actions(scheduler);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(scheduler);

    schedule_fencing_and_shutdowns(scheduler);
    pcmk__apply_orderings(scheduler);
    log_all_actions(scheduler);
    pcmk__create_graph(scheduler);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(scheduler);
    }
}

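/*
 * \internal
 * \brief Create a new scheduler object and unpack a CIB into it
 *
 * \param[in,out] out        Output object
 * \param[in]     input      CIB XML to copy (or NULL to fetch the live CIB)
 * \param[in]     date       Effective date/time to use (or NULL for now)
 * \param[out]    scheduler  Where to store the newly created scheduler data
 *
 * \return Standard Pacemaker return code
 */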
int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
                     pcmk_scheduler_t **scheduler)
{
    // Allows for cleaner syntax than dereferencing the scheduler argument
    pcmk_scheduler_t *new_scheduler = NULL;

    new_scheduler = pe_new_working_set();
    if (new_scheduler == NULL) {
        return ENOMEM;
    }

    pcmk__set_scheduler_flags(new_scheduler,
                              pcmk_sched_no_counts|pcmk_sched_no_compat);

    // Populate the scheduler data

    // Make our own copy of the given input or fetch the CIB and use that
    if (input != NULL) {
        new_scheduler->input = pcmk__xml_copy(NULL, input);
        if (new_scheduler->input == NULL) {
            out->err(out, "Failed to copy input XML");
            pe_free_working_set(new_scheduler);
            return ENOMEM;
        }

    } else {
        int rc = cib__signon_query(out, NULL, &(new_scheduler->input));

        if (rc != pcmk_rc_ok) {
            pe_free_working_set(new_scheduler);
            return rc;
        }
    }

    // Make our own copy of the given crm_time_t object; otherwise
    // cluster_status() populates with the current time
    if (date != NULL) {
        // pcmk_copy_time() guarantees non-NULL
        new_scheduler->now = pcmk_copy_time(date);
    }

    // Unpack everything
    cluster_status(new_scheduler);
    *scheduler = new_scheduler;

    return pcmk_rc_ok;
}
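
/*
 * Illustrative sketch (not part of the original file): one way a caller might
 * combine pcmk__init_scheduler() with pe_free_working_set(), assuming it
 * already has a pcmk__output_t object (out) and optional CIB XML (cib_xml):
 *
 *     pcmk_scheduler_t *scheduler = NULL;
 *     int rc = pcmk__init_scheduler(out, cib_xml, NULL, &scheduler);
 *
 *     if (rc == pcmk_rc_ok) {
 *         // inspect scheduler->resources, scheduler->nodes, etc.
 *         pe_free_working_set(scheduler);
 *     }
 */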