pacemaker  2.1.4-dc6eb4362
Scalable High-Availability cluster resource manager
pcmk_sched_allocate.c
/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

/*!
 * \internal
 * \brief Do deferred action checks after allocation
 */
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
             enum pe_check_parameters check, pe_working_set_t *data_set)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pe_check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                    data_set)) {
                reason = "action definition changed";
            }
            break;

        case pe_check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
            switch (digest_data->rc) {
                case RSC_DIGEST_UNKNOWN:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case RSC_DIGEST_MATCH:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, data_set);
    }
}

static bool
failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

static void
check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
{
    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                       node);
        return;

    } else if (failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        return;

    } else {
        pe_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

static void
apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
{
    if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
        pe_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pe_discover_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

/*!
 * \internal
 * \brief Apply stickiness to a resource if appropriate
 */
static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    pe_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
        && (pe_hash_table_lookup(rsc->allowed_nodes,
                                 node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and node %s is not explicitly allowed",
                     rsc->id, node->details->uname);
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
                 rsc->id, rsc->stickiness, node->details->uname);
    resource_location(rsc, node, rsc->stickiness, "stickiness",
                      rsc->cluster);
}

/*!
 * \internal
 * \brief Apply shutdown locks for all resources, if enabled
 */
static void
apply_shutdown_locks(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
        return;
    }
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

/*!
 * \internal
 * \brief Count available nodes (for API backward compatibility only)
 */
static void
count_available_nodes(pe_working_set_t *data_set)
{
    if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            data_set->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", data_set->max_valid_nodes);
}

/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pe_working_set_t *data_set)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(data_set);
    count_available_nodes(data_set);
    pcmk__apply_locations(data_set);
    g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);

    for (GList *node_iter = data_set->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            pe_node_t *node = (pe_node_t *) node_iter->data;
            pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;

            check_failure_threshold(rsc, node);
            apply_exclusive_discovery(rsc, node);
        }
    }
}

/*!
 * \internal
 * \brief Allocate resources to nodes
 */
static void
allocate_resources(pe_working_set_t *data_set)
{
    GList *iter = NULL;

    crm_trace("Allocating resources to nodes");

    if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
        pcmk__sort_resources(data_set);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        /* Allocate remote connection resources first (which will also allocate
         * any colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = data_set->resources; iter != NULL; iter = iter->next) {
            pe_resource_t *rsc = (pe_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->allocate(rsc, rsc->partial_migration_target,
                                    data_set);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                         crm_element_name(rsc->xml), rsc->id);
            rsc->cmds->allocate(rsc, NULL, data_set);
        }
    }

    pcmk__show_node_capacities("Remaining", data_set);
}

static void
clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unallocated clone instances.
     */

    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                             data_set) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);

        /* We can't use order_action_then_stop() here because its
         * pe_order_preserve breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pe_order_optional, data_set);
    }
}

/*!
 * \internal
 * \brief Schedule any resource actions needed
 */
static void
schedule_resource_actions(pe_working_set_t *data_set)
{
    // Process deferred action checks
    pe__foreach_param_check(data_set, check_params);
    pe__free_param_checks(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(data_set);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        g_list_foreach(data_set->resources,
                       (GFunc) clear_failcounts_if_orphaned, data_set);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc, data_set);
    }
}

static bool
is_managed(const pe_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

static bool
any_managed_resources(pe_working_set_t *data_set)
{
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

static bool
needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
{
    return have_managed && node->details->unclean
           && pe_can_fence(data_set, node);
}

static bool
needs_shutdown(pe_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

/*!
 * \internal
 * \brief Add a non-DC fencing action to a list, ordering it as needed
 */
static GList *
add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pe_action_t *) list->data, action, pe_order_optional);
    }
    return g_list_prepend(list, action);
}

static pe_action_t *
schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
{
    pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                       FALSE, data_set);

    pe_warn("Scheduling node %s for fencing", node->details->uname);
    pcmk__order_vs_fence(fencing, data_set);
    return fencing;
}

/*!
 * \internal
 * \brief Schedule fencing or shutdown actions for any nodes that need them
 */
static void
schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
{
    pe_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(data_set);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(data_set, node)) {
                pcmk__fence_guest(node, data_set);
            }
            continue;
        }

        if (needs_fencing(node, have_managed, data_set)) {
            fencing = schedule_fencing(node, data_set);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
            }

        } else if (needs_shutdown(node)) {
            pe_action_t *down_op = pcmk__new_shutdown_action(node, data_set);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    node->details->uname);
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pe_action_t *) fencing_ops->data, dc_down,
                          pe_order_optional);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

static void
log_resource_details(pe_working_set_t *data_set)
{
    pcmk__output_t *out = data_set->priv;
    GList *all = NULL;

    /* We need a list of nodes that we are allowed to output information for.
     * This is necessary because out->message for all the resource-related
     * messages expects such a list, due to the `crm_mon --node=` feature. Here,
     * we just make it a list of all the nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = data_set->resources; item != NULL; item = item->next) {
        pe_resource_t *rsc = (pe_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
            || (rsc->role != RSC_ROLE_STOPPED)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pe_working_set_t *data_set)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = data_set->priv;
    pcmk__output_t *out = pcmk__new_logger();

    if (out == NULL) {
        return;
    }

    pcmk__output_set_log_level(out, LOG_NOTICE);
    data_set->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(data_set);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    data_set->priv = prev_out;
}

/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 */
static void
log_unrunnable_actions(pe_working_set_t *data_set)
{
    const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
        pe_action_t *action = (pe_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

/*!
 * \internal
 * \brief Unpack the CIB for scheduling
 */
static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
    if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(data_set, flags);
        return;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pe_flag_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(data_set);

    pe__set_working_set_flags(data_set, flags);
    data_set->input = cib;
    cluster_status(data_set); // Sets pe_flag_have_status
}

/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in]     cib       CIB XML to use as scheduler input
 * \param[in]     flags     Working set flags to set in addition to defaults
 * \param[in,out] data_set  Cluster working set
 */
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pe_working_set_t *data_set)
{
    unpack_cib(cib, flags, data_set);
    pcmk__set_allocation_methods(data_set);
    pcmk__apply_node_health(data_set);
    pcmk__unpack_constraints(data_set);
    if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
        return;
    }

    if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
        pcmk__is_daemon) {
        log_resource_details(data_set);
    }

    apply_node_criteria(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        return;
    }

    pcmk__create_internal_constraints(data_set);
    pcmk__handle_rsc_config_changes(data_set);
    allocate_resources(data_set);
    schedule_resource_actions(data_set);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(data_set);

    schedule_fencing_and_shutdowns(data_set);
    pcmk__apply_orderings(data_set);
    log_all_actions(data_set);
    pcmk__create_graph(data_set);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(data_set);
    }
}
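
For orientation, a minimal sketch of how a library consumer (along the lines of crm_simulate) might drive pcmk__schedule_actions(). The input file name and the flag passed here are illustrative assumptions, not anything this file defines:

    #include <crm/common/xml.h>
    #include <crm/pengine/status.h>
    #include <pacemaker-internal.h>

    int
    main(void)
    {
        // Assumed input: a CIB previously saved to disk (e.g. with cibadmin -Q)
        xmlNode *cib = filename2xml("cib.xml");
        pe_working_set_t *data_set = pe_new_working_set();

        // One scheduling pass; pe_flag_no_compat skips deprecated bookkeeping
        pcmk__schedule_actions(cib, pe_flag_no_compat, data_set);

        /* The scheduled pe_action_t entries are now in data_set->actions and
         * the computed transition graph XML is in data_set->graph.
         */
        pe_free_working_set(data_set);
        return 0;
    }
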