pacemaker  2.1.7-0f7f88312f
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

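/*!
 * \internal
 * \brief Do a deferred check of an action's configuration on a node
 *
 * Clear the resource's fail count on the node if the action definition or the
 * resource parameters recorded in the history entry have changed.
 *
 * \param[in,out] rsc     Resource that the action history entry is for
 * \param[in,out] node    Node that the action history entry is for
 * \param[in]     rsc_op  Action history entry to check
 * \param[in]     check   Type of deferred check to do
 */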
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pcmk__check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                    NULL)) {
                reason = "action definition changed";
            }
            break;

        case pcmk__check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->cluster);
            switch (digest_data->rc) {
                case pcmk__digest_unknown:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case pcmk__digest_match:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->cluster);
    }
}

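/*!
 * \internal
 * \brief Check whether fail count clearing is already scheduled for a resource
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if a clear-failcount action for \p rsc is scheduled on \p node,
 *         otherwise false
 */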
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
                              const pcmk_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                       TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

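/*!
 * \internal
 * \brief Ban a resource from a node if it reached its failure threshold there
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check the resource on (pcmk_node_t *)
 */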
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, check_failure_threshold, user_data);
        return;
    }

    if (!failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well
         * before then, so it cannot detect those, meaning we might check the
         * migration threshold when we shouldn't. Worst case, we stop or move
         * the resource, then move it back in the next transition.
         */
        pcmk_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

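/*!
 * \internal
 * \brief Ban a node from a resource that uses exclusive probing elsewhere
 *
 * If the resource (or its top-level parent) has exclusive discovery enabled,
 * and the node is not one where exclusive probing is allowed, ban the
 * resource from the node.
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Node to check the resource on (pcmk_node_t *)
 */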
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    if (rsc->exclusive_discover
        || pe__const_top_resource(rsc, false)->exclusive_discover) {
        pcmk_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pcmk_probe_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

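/*!
 * \internal
 * \brief Apply a resource's stickiness to the node where it is active
 *
 * \param[in,out] data       Resource to apply stickiness for (pcmk_resource_t *)
 * \param[in]     user_data  Ignored
 */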
static void
apply_stickiness(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    pcmk_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, apply_stickiness, NULL);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
        && (g_hash_table_lookup(rsc->allowed_nodes,
                                node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and %s is not explicitly allowed",
                     rsc->id, pe__node_name(node));
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                 rsc->id, rsc->stickiness, pe__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}

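/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in,out] scheduler  Scheduler data
 */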
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
        return;
    }
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

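/*!
 * \internal
 * \brief Calculate the number of available nodes in the cluster
 *
 * \param[in,out] scheduler  Scheduler data
 */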
static void
count_available_nodes(pcmk_scheduler_t *scheduler)
{
    if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            scheduler->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}

/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(scheduler);
    count_available_nodes(scheduler);
    pcmk__apply_locations(scheduler);
    g_list_foreach(scheduler->resources, apply_stickiness, NULL);

    for (GList *node_iter = scheduler->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            check_failure_threshold(rsc_iter->data, node_iter->data);
            apply_exclusive_discovery(rsc_iter->data, node_iter->data);
        }
    }
}

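/*!
 * \internal
 * \brief Assign resources to nodes
 *
 * \param[in,out] scheduler  Scheduler data
 */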
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;

    crm_trace("Assigning resources to nodes");

    if (!pcmk__str_eq(scheduler->placement_strategy, "default",
                      pcmk__str_casei)) {
        pcmk__sort_resources(scheduler);
    }
    pcmk__show_node_capacities("Original", scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        /* Assign remote connection resources first (which will also assign any
         * colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
            pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Assigning remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Assigning %s resource '%s'",
                         rsc->xml->name, rsc->id);
            rsc->cmds->assign(rsc, NULL, true);
        }
    }

    pcmk__show_node_capacities("Remaining", scheduler);
}

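/*!
 * \internal
 * \brief Schedule fail count clearing on online nodes if a resource is orphaned
 *
 * \param[in,out] data       Resource to check (pcmk_resource_t *)
 * \param[in]     user_data  Ignored
 */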
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unassigned clone instances.
     */

    for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                       rsc->cluster);

        /* We can't use order_action_then_stop() here because its
         * pcmk__ar_guest_allowed breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pcmk__ar_ordered, rsc->cluster);
    }
}

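/*!
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */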
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
    // Process deferred action checks
    pe__foreach_param_check(scheduler, check_params);
    pe__free_param_checks(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(scheduler);
    }

    if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
        g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
                       NULL);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

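/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if the resource or any descendant is managed, otherwise false
 */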
static bool
is_managed(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

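/*!
 * \internal
 * \brief Check whether any resources in the cluster are managed
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if any resource is managed, otherwise false
 */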
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
    for (const GList *iter = scheduler->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

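/*!
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in the cluster is managed
 *
 * \return true if \p node should be fenced, otherwise false
 */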
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
    return have_managed && node->details->unclean
           && pe_can_fence(node->details->data_set, node);
}

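/*!
 * \internal
 * \brief Check whether a node requires a shutdown action
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node needs a shutdown action, otherwise false
 */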
static bool
needs_shutdown(const pcmk_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

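/*!
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in,out] list       List of non-DC fencing actions scheduled so far
 * \param[in,out] action     Fencing action to prepend to \p list
 * \param[in]     scheduler  Scheduler data
 *
 * \return (Possibly new) head of \p list
 */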
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
                  const pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
    }
    return g_list_prepend(list, action);
}

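/*!
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in,out] node  Node that requires fencing
 *
 * \return Scheduled fencing action
 */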
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
    pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                         FALSE, node->details->data_set);

    pe_warn("Scheduling node %s for fencing", pe__node_name(node));
    pcmk__order_vs_fence(fencing, node->details->data_set);
    return fencing;
}

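/*!
 * \internal
 * \brief Create and order node fencing and shutdown actions
 *
 * \param[in,out] scheduler  Scheduler data
 */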
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(scheduler);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources "
                   "to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(scheduler, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed)) {
            fencing = schedule_fencing(node);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                scheduler);
            }

        } else if (needs_shutdown(node)) {
            pcmk_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    pe__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns
         * before a DC *fencing*, because even though we don't want a node
         * that's shutting down to become DC, the DC fencing could be ordered
         * before a clone stop that's also ordered before the shutdowns, thus
         * leading to a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                         pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                          pcmk__ar_ordered);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = scheduler->priv;
    GList *all = NULL;

    /* Due to the `crm_mon --node=` feature, out->message() for all the
     * resource-related messages expects a list of nodes that we are allowed to
     * output information for. Here, we create a wildcard to match all nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = scheduler->resources; item != NULL; item = item->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
            || (rsc->role != pcmk_role_stopped)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = scheduler->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    scheduler->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(scheduler);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    scheduler->priv = prev_out;
}

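/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */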
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
    const uint64_t flags = pcmk_action_optional
                           |pcmk_action_runnable
                           |pcmk_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = scheduler->actions;
         iter != NULL; iter = iter->next) {

        const pcmk_action_t *action = (const pcmk_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

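/*!
 * \internal
 * \brief Unpack the CIB for scheduling
 *
 * \param[in,out] cib        CIB XML to unpack
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */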
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
    const char* localhost_save = NULL;

    if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(scheduler, flags);
        return;
    }

    if (scheduler->localhost) {
        localhost_save = scheduler->localhost;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(scheduler);

    if (localhost_save) {
        scheduler->localhost = localhost_save;
    }

    pe__set_working_set_flags(scheduler, flags);
    scheduler->input = cib;
    cluster_status(scheduler); // Sets pcmk_sched_have_status
}

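/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in,out] cib        CIB XML to use as the scheduler input
 * \param[in]     flags      Scheduler flags to set in addition to defaults
 * \param[in,out] scheduler  Scheduler data
 */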
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pcmk_scheduler_t *scheduler)
{
    unpack_cib(cib, flags, scheduler);
    pcmk__set_assignment_methods(scheduler);
    pcmk__apply_node_health(scheduler);
    pcmk__unpack_constraints(scheduler);
    if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
        return;
    }

    if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
        && pcmk__is_daemon) {
        log_resource_details(scheduler);
    }

    apply_node_criteria(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
        return;
    }

    pcmk__create_internal_constraints(scheduler);
    pcmk__handle_rsc_config_changes(scheduler);
    assign_resources(scheduler);
    schedule_resource_actions(scheduler);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(scheduler);

    schedule_fencing_and_shutdowns(scheduler);
    pcmk__apply_orderings(scheduler);
    log_all_actions(scheduler);
    pcmk__create_graph(scheduler);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(scheduler);
    }
}