pacemaker  3.0.0-d8340737c4
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

/*
 * \internal
 * \brief Do a deferred parameter check for a resource action, clearing the
 *        resource's fail count on the node if its configuration changed
 *
 * \param[in,out] rsc     Resource that action history entry is for
 * \param[in,out] node    Node that action history entry is for
 * \param[in]     rsc_op  Action history entry to check
 * \param[in]     check   Type of check to do
 */
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check)
{
    const char *reason = NULL;
    pcmk__op_digest_t *digest_data = NULL;

    switch (check) {
        case pcmk__check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                    NULL)) {
                reason = "action definition changed";
            }
            break;

        case pcmk__check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->priv->scheduler);
            switch (digest_data->rc) {
                case pcmk__digest_unknown:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, pcmk__xe_id(rsc_op), node->priv->id);
                    break;

                case pcmk__digest_match:
                    break;

                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }

    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->priv->scheduler);
    }
}

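/* For illustration (not part of the original file): the digest comparison
 * above works against parameter hashes that the controller records in each
 * operation history entry in the CIB status section. A history entry looks
 * roughly like this (IDs and digest values are made up):
 *
 *   <lrm_rsc_op id="rsc1_monitor_10000" operation="monitor"
 *               op-digest="873ed4f07792aa8ff18f3254244675ea"
 *               op-restart-digest="873ed4f07792aa8ff18f3254244675ea"/>
 *
 * If recomputing the digest from the resource's current parameters yields a
 * different value, check_params() treats the parameters as changed and
 * schedules the fail count to be cleared.
 */
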
/*
 * \internal
 * \brief Check whether fail count clearing is scheduled for a resource on a node
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if \p rsc has a fail count clearing action scheduled on \p node
 */
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
                              const pcmk_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                       TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

/*
 * \internal
 * \brief Ban a resource from a node if it reached its failure threshold there
 *
 * \param[in,out] data       Resource to check failure threshold for
 * \param[in]     user_data  Node to check resource on
 */
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->priv->children != NULL) {
        g_list_foreach(rsc->priv->children, check_failure_threshold,
                       user_data);
        return;
    }

    if (!failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        pcmk_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -PCMK_SCORE_INFINITY,
                              "__fail_limit__", rsc->priv->scheduler);
        }
    }
}

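/* For illustration (not part of the original file): the failure threshold
 * checked above comes from the resource's migration-threshold meta-attribute,
 * compared against fail counts recorded in transient node attributes such as
 * "fail-count-rsc1#monitor_10000". A minimal configuration sketch (IDs and
 * values are made up):
 *
 *   <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
 *     <meta_attributes id="rsc1-meta">
 *       <nvpair id="rsc1-migration-threshold"
 *               name="migration-threshold" value="3"/>
 *     </meta_attributes>
 *   </primitive>
 *
 * After three failures on a node, pcmk__threshold_reached() returns true and
 * the resource is banned from that node with a -INFINITY location score.
 */
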
/*
 * \internal
 * \brief Ban a resource from nodes where discovery is not exclusive
 *
 * If a resource (or its top-level parent) has exclusive discovery enabled,
 * ban it from any allowed node where probing is not exclusive.
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Node to check resource on
 */
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
        || pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                       pcmk__rsc_exclusive_probes)) {
        pcmk_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->priv->children, apply_exclusive_discovery,
                       user_data);

        match = g_hash_table_lookup(rsc->priv->allowed_nodes,
                                    node->priv->id);
        if ((match != NULL)
            && (match->assign->probe_mode != pcmk__probe_exclusive)) {
            match->assign->score = -PCMK_SCORE_INFINITY;
        }
    }
}

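/* For illustration (not part of the original file): exclusive discovery is
 * enabled by setting resource-discovery="exclusive" on a location constraint,
 * which limits probing (and hence placement) to the constrained nodes, e.g.:
 *
 *   <rsc_location id="loc-rsc1-node1" rsc="rsc1" node="node1" score="100"
 *                 resource-discovery="exclusive"/>
 *
 * apply_exclusive_discovery() then bans rsc1 from every other allowed node by
 * setting its score there to -INFINITY.
 */
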
/*
 * \internal
 * \brief Apply stickiness to a resource if appropriate
 *
 * \param[in,out] data       Resource to check for stickiness
 * \param[in]     user_data  Ignored
 */
static void
apply_stickiness(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    pcmk_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->priv->children != NULL) {
        g_list_foreach(rsc->priv->children, apply_stickiness, NULL);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)
        || (rsc->priv->stickiness < 1)
        || !pcmk__list_of_1(rsc->priv->active_nodes)) {
        return;
    }

    node = rsc->priv->active_nodes->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->priv->scheduler->flags,
                     pcmk__sched_symmetric_cluster)
        && (g_hash_table_lookup(rsc->priv->allowed_nodes,
                                node->priv->id) == NULL)) {
        pcmk__rsc_debug(rsc,
                        "Ignoring %s stickiness because the cluster is "
                        "asymmetric and %s is not explicitly allowed",
                        rsc->id, pcmk__node_name(node));
        return;
    }

    pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                    rsc->id, rsc->priv->stickiness, pcmk__node_name(node));
    resource_location(rsc, node, rsc->priv->stickiness, "stickiness",
                      rsc->priv->scheduler);
}

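/* For illustration (not part of the original file): stickiness is usually set
 * via the resource-stickiness meta-attribute, either per resource or as a
 * resource default (IDs are made up):
 *
 *   <rsc_defaults>
 *     <meta_attributes id="rsc-defaults-meta">
 *       <nvpair id="rsc-defaults-stickiness"
 *               name="resource-stickiness" value="100"/>
 *     </meta_attributes>
 *   </rsc_defaults>
 *
 * With the above, an active resource gets a score of 100 for the node it is
 * currently running on, making the scheduler prefer leaving it in place over
 * an otherwise equal-scoring move.
 */
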
/*
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
        return;
    }
    for (GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->priv->cmds->shutdown_lock(rsc);
    }
}

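/* For illustration (not part of the original file): shutdown locks are
 * enabled with the shutdown-lock cluster property (optionally bounded by
 * shutdown-lock-limit), e.g.:
 *
 *   <cluster_property_set id="cib-bootstrap-options">
 *     <nvpair id="opt-shutdown-lock" name="shutdown-lock" value="true"/>
 *   </cluster_property_set>
 *
 * When set, resources active on a cleanly shutting-down node are "locked" to
 * that node and will not be recovered elsewhere until it rejoins.
 */
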
/*
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(scheduler);
    pcmk__apply_locations(scheduler);
    g_list_foreach(scheduler->priv->resources, apply_stickiness, NULL);

    for (GList *node_iter = scheduler->nodes; node_iter != NULL;
         node_iter = node_iter->next) {

        for (GList *rsc_iter = scheduler->priv->resources;
             rsc_iter != NULL; rsc_iter = rsc_iter->next) {

            check_failure_threshold(rsc_iter->data, node_iter->data);
            apply_exclusive_discovery(rsc_iter->data, node_iter->data);
        }
    }
}

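/* For illustration (not part of the original file): the location constraints
 * applied above are ordinary rsc_location entries, e.g. a preference of 50
 * for running rsc1 on node1 (IDs are made up):
 *
 *   <rsc_location id="loc-prefer-node1" rsc="rsc1" node="node1" score="50"/>
 *
 * Each constraint contributes to the resource's score on the affected nodes,
 * alongside stickiness and failure-threshold bans.
 */
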
/*
 * \internal
 * \brief Assign resources to nodes
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;

    crm_trace("Assigning resources to nodes");

    if (!pcmk__str_eq(scheduler->priv->placement_strategy, PCMK_VALUE_DEFAULT,
                      pcmk__str_casei)) {
        pcmk__sort_resources(scheduler);
    }
    pcmk__show_node_capacities("Original", scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
        /* Assign remote connection resources first (which will also assign any
         * colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = scheduler->priv->resources;
             iter != NULL; iter = iter->next) {

            pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
            const pcmk_node_t *target = rsc->priv->partial_migration_target;

            if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
                pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
                                rsc->id);
                rsc->priv->cmds->assign(rsc, target, true);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (!pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
            pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
                            rsc->priv->xml->name, rsc->id);
            rsc->priv->cmds->assign(rsc, NULL, true);
        }
    }

    pcmk__show_node_capacities("Remaining", scheduler);
}

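/* For illustration (not part of the original file): the sort above only
 * happens when the placement-strategy cluster property is changed from its
 * default, e.g. to balance load by node utilization:
 *
 *   <nvpair id="opt-placement" name="placement-strategy" value="balanced"/>
 *
 * Valid values are "default", "utilization", "minimal", and "balanced"; with
 * a non-default strategy, resources are sorted so that higher-priority ones
 * are assigned first.
 */
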
/*
 * \internal
 * \brief Schedule fail count clearing on online nodes if resource is orphaned
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Ignored
 */
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->priv->children because those
     * should just be unassigned clone instances.
     */

    for (GList *iter = rsc->priv->scheduler->nodes;
         iter != NULL; iter = iter->next) {

        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                       rsc->priv->scheduler);

        /* We can't use order_action_then_stop() here because its
         * pcmk__ar_guest_allowed breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pcmk__ar_ordered, rsc->priv->scheduler);
    }
}

/*
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
    // Process deferred action checks
    pe__foreach_param_check(scheduler, check_params);
    pe__free_param_checks(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(scheduler);
    }

    if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
        g_list_foreach(scheduler->priv->resources, clear_failcounts_if_orphaned,
                       NULL);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->priv->cmds->create_actions(rsc);
    }
}

/*
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if resource or any descendant is managed, otherwise false
 */
static bool
is_managed(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->priv->children;
         iter != NULL; iter = iter->next) {

        if (is_managed((pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if any resource is managed, otherwise false
 */
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
    for (const GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in cluster is managed
 *
 * \return true if \p node should be fenced, otherwise false
 */
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
    return have_managed && node->details->unclean
           && pe_can_fence(node->priv->scheduler, node);
}

/*
 * \internal
 * \brief Check whether a node requires shutdown
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node should be shut down, otherwise false
 */
static bool
needs_shutdown(const pcmk_node_t *node)
{
    if (pcmk__is_pacemaker_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

/*
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in,out] list       List of non-DC fencing actions so far
 * \param[in,out] action     Fencing action to prepend to \p list
 * \param[in]     scheduler  Scheduler data
 *
 * \return (Possibly new) head of \p list
 */
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
                  const pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
    }
    return g_list_prepend(list, action);
}

/*
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in,out] node  Node that requires fencing
 *
 * \return Scheduled fencing action
 */
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
    pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                         FALSE, node->priv->scheduler);

    pcmk__sched_warn(node->priv->scheduler, "Scheduling node %s for fencing",
                     pcmk__node_name(node));
    pcmk__order_vs_fence(fencing, node->priv->scheduler);
    return fencing;
}

/*
 * \internal
 * \brief Create and order node fencing and shutdown actions as needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(scheduler);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources "
                   "to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *fencing = NULL;
        const bool is_dc = pcmk__same_node(node, scheduler->dc_node);

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pcmk__is_guest_or_bundle_node(node)) {
            if (pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)
                && have_managed && pe_can_fence(scheduler, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed)) {
            fencing = schedule_fencing(node);

            // Track DC and non-DC fence actions separately
            if (is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                scheduler);
            }

        } else if (needs_shutdown(node)) {
            pcmk_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pcmk__config_warn("Node %s is unclean but cannot be fenced",
                              pcmk__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
            pcmk__config_warn("Resource functionality and data integrity "
                              "cannot be guaranteed (configure, enable, "
                              "and test fencing to correct this)");

        } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
                       PCMK_VALUE_IGNORE);
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                         pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                          pcmk__ar_ordered);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

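/* For illustration (not part of the original file): the quorum behavior
 * referenced in the notice above is controlled by the no-quorum-policy
 * cluster property, e.g.:
 *
 *   <nvpair id="opt-nqp" name="no-quorum-policy" value="ignore"/>
 *
 * With "ignore", unclean nodes can be fenced even without quorum; the default
 * ("stop") keeps an inquorate partition from fencing its peers.
 */
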
static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = scheduler->priv->out;
    GList *all = NULL;

    /* Due to the `crm_mon --node=` feature, out->message() for all the
     * resource-related messages expects a list of nodes that we are allowed to
     * output information for. Here, we create a wildcard to match all nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = scheduler->priv->resources;
         item != NULL; item = item->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
            || (rsc->priv->orig_role != pcmk_role_stopped)) {
            out->message(out, (const char *) rsc->priv->xml->name, 0UL,
                         rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = scheduler->priv->out;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    scheduler->priv->out = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(scheduler);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    scheduler->priv->out = prev_out;
}

/*
 * \internal
 * \brief Log required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
    const uint64_t flags = pcmk__action_optional
                           |pcmk__action_runnable
                           |pcmk__action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = scheduler->priv->actions;
         iter != NULL; iter = iter->next) {

        const pcmk_action_t *action = (const pcmk_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

/*
 * \internal
 * \brief Unpack the CIB for scheduling
 *
 * \param[in,out] cib        CIB XML to unpack (may be NULL if previously unpacked)
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
    if (pcmk_is_set(scheduler->flags, pcmk__sched_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pcmk__set_scheduler_flags(scheduler, flags);
        return;
    }

    pcmk__assert(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pcmk__sched_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(scheduler);

    pcmk__set_scheduler_flags(scheduler, flags);
    scheduler->input = cib;
    cluster_status(scheduler); // Sets pcmk__sched_have_status
}

/*
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in,out] cib        CIB XML to use as scheduler input
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pcmk_scheduler_t *scheduler)
{
    unpack_cib(cib, flags, scheduler);
    pcmk__set_assignment_methods(scheduler);
    pcmk__apply_node_health(scheduler);
    pcmk__unpack_constraints(scheduler);
    if (pcmk_is_set(scheduler->flags, pcmk__sched_validate_only)) {
        return;
    }

    if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)
        && pcmk__is_daemon) {
        log_resource_details(scheduler);
    }

    apply_node_criteria(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
        return;
    }

    pcmk__create_internal_constraints(scheduler);
    pcmk__handle_rsc_config_changes(scheduler);
    assign_resources(scheduler);
    schedule_resource_actions(scheduler);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(scheduler);

    schedule_fencing_and_shutdowns(scheduler);
    pcmk__apply_orderings(scheduler);
    log_all_actions(scheduler);
    pcmk__create_graph(scheduler);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(scheduler);
    }
}

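/* For illustration (not part of the original file): a minimal caller sketch.
 * Passing 0 adds no scheduler flags beyond the defaults; cib_xml is assumed
 * to be a complete CIB, and the scheduler object freshly created:
 *
 *   pcmk_scheduler_t *scheduler = pe_new_working_set();
 *
 *   if (scheduler != NULL) {
 *       pcmk__schedule_actions(cib_xml, 0, scheduler);
 *       // The resulting transition graph is now stored in the scheduler data
 *       pe_free_working_set(scheduler);
 *   }
 */
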
/*
 * \internal
 * \brief Initialize scheduler data
 *
 * Make our own copies of the CIB XML and date/time object, if they're not
 * NULL. This way we don't have to take ownership of the objects passed via
 * the API.
 *
 * \param[in,out] out        Output object
 * \param[in]     input      The CIB XML to use (if \c NULL, use current CIB)
 * \param[in]     date       Date/time to use in the scheduler data (if
 *                           \c NULL, use current date/time)
 * \param[out]    scheduler  Where to store initialized scheduler data
 *
 * \return Standard Pacemaker return code
 */
int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
                     pcmk_scheduler_t **scheduler)
{
    // Allows for cleaner syntax than dereferencing the scheduler argument
    pcmk_scheduler_t *new_scheduler = NULL;

    new_scheduler = pe_new_working_set();
    if (new_scheduler == NULL) {
        return ENOMEM;
    }

    pcmk__set_scheduler_flags(new_scheduler, pcmk__sched_no_counts);

    // Populate the scheduler data

    // Make our own copy of the given input or fetch the CIB and use that
    if (input != NULL) {
        new_scheduler->input = pcmk__xml_copy(NULL, input);
        if (new_scheduler->input == NULL) {
            out->err(out, "Failed to copy input XML");
            pe_free_working_set(new_scheduler);
            return ENOMEM;
        }

    } else {
        int rc = cib__signon_query(out, NULL, &(new_scheduler->input));

        if (rc != pcmk_rc_ok) {
            pe_free_working_set(new_scheduler);
            return rc;
        }
    }

    // Make our own copy of the given crm_time_t object; otherwise
    // cluster_status() populates with the current time
    if (date != NULL) {
        // pcmk_copy_time() guarantees non-NULL
        new_scheduler->priv->now = pcmk_copy_time(date);
    }

    // Unpack everything
    cluster_status(new_scheduler);
    *scheduler = new_scheduler;

    return pcmk_rc_ok;
}
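
/* For illustration (not part of the original file): a minimal caller sketch,
 * assuming an already-created output object "out". With NULL input and date,
 * the live CIB and the current time are used:
 *
 *   pcmk_scheduler_t *scheduler = NULL;
 *   int rc = pcmk__init_scheduler(out, NULL, NULL, &scheduler);
 *
 *   if (rc == pcmk_rc_ok) {
 *       // ... use the unpacked scheduler data ...
 *       pe_free_working_set(scheduler);
 *   }
 */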
Definition: nodes.h:79