pacemaker  2.1.5-b7adf64e51
Scalable High-Availability cluster resource manager
pcmk_sched_allocate.c
/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

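/* check_params() below is the callback that schedule_resource_actions()
 * later replays via pe__foreach_param_check(): action-configuration checks
 * that could not be completed while unpacking resource history are deferred
 * to a table and processed here once allocation has filled in the missing
 * information.
 */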
static void
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
             enum pe_check_parameters check, pe_working_set_t *data_set)
{
    const char *reason = NULL;
    op_digest_cache_t *digest_data = NULL;

    switch (check) {
        case pe_check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                    data_set)) {
                reason = "action definition changed";
            }
            break;

        case pe_check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
            switch (digest_data->rc) {
                case RSC_DIGEST_UNKNOWN:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, ID(rsc_op), node->details->id);
                    break;
                case RSC_DIGEST_MATCH:
                    break;
                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, data_set);
    }
}

static bool
failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

static void
check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
{
    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                       node);
        return;

    } else if (failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well
         * before then, so it cannot detect those, meaning we might check the
         * migration threshold when we shouldn't. Worst case, we stop or move
         * the resource, then move it back in the next transition.
         */
        return;

    } else {
        pe_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -INFINITY, "__fail_limit__",
                              rsc->cluster);
        }
    }
}

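/* Illustration (values assumed): with migration-threshold=3 configured for a
 * resource, a third failure on a node makes pcmk__threshold_reached() return
 * true there, and the -INFINITY "__fail_limit__" entry above bans the
 * resource from that node until its fail count is cleared or expires.
 */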
static void
apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
{
    if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
        pe_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);

        match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
        if ((match != NULL)
            && (match->rsc_discover_mode != pe_discover_exclusive)) {
            match->weight = -INFINITY;
        }
    }
}

static void
apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    pe_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->children != NULL) {
        g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
        || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
        return;
    }

    node = rsc->running_on->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
        && (pe_hash_table_lookup(rsc->allowed_nodes,
                                 node->details->id) == NULL)) {
        pe_rsc_debug(rsc,
                     "Ignoring %s stickiness because the cluster is "
                     "asymmetric and %s is not explicitly allowed",
                     rsc->id, pe__node_name(node));
        return;
    }

    pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                 rsc->id, rsc->stickiness, pe__node_name(node));
    resource_location(rsc, node, rsc->stickiness, "stickiness",
                      rsc->cluster);
}

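/* Illustration (values assumed): a managed resource with stickiness=100 that
 * is active only on node1 gets a +100 preference for node1 via
 * resource_location() above, so a competing location score would need to
 * exceed 100 to pull the resource elsewhere.
 */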
static void
apply_shutdown_locks(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
        return;
    }
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->shutdown_lock(rsc);
    }
}

static void
count_available_nodes(pe_working_set_t *data_set)
{
    if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
        return;
    }

    // @COMPAT for API backward compatibility only (cluster does not use value)
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;

        if ((node != NULL) && (node->weight >= 0) && node->details->online
            && (node->details->type != node_ping)) {
            data_set->max_valid_nodes++;
        }
    }
    crm_trace("Online node count: %d", data_set->max_valid_nodes);
}

/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pe_working_set_t *data_set)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(data_set);
    count_available_nodes(data_set);
    pcmk__apply_locations(data_set);
    g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);

    for (GList *node_iter = data_set->nodes; node_iter != NULL;
         node_iter = node_iter->next) {
        for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
             rsc_iter = rsc_iter->next) {
            pe_node_t *node = (pe_node_t *) node_iter->data;
            pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;

            check_failure_threshold(rsc, node);
            apply_exclusive_discovery(rsc, node);
        }
    }
}

static void
allocate_resources(pe_working_set_t *data_set)
{
    GList *iter = NULL;

    crm_trace("Allocating resources to nodes");

    if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
        pcmk__sort_resources(data_set);
    }
    pcmk__show_node_capacities("Original", data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        /* Allocate remote connection resources first (which will also allocate
         * any colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = data_set->resources; iter != NULL; iter = iter->next) {
            pe_resource_t *rsc = (pe_resource_t *) iter->data;

            if (rsc->is_remote_node) {
                pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                             rsc->id);
                rsc->cmds->assign(rsc, rsc->partial_migration_target);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        if (!rsc->is_remote_node) {
            pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                         crm_element_name(rsc->xml), rsc->id);
            rsc->cmds->assign(rsc, NULL);
        }
    }

    pcmk__show_node_capacities("Remaining", data_set);
}

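/* Note: rsc->cmds->assign() is the variant-specific allocation method
 * installed by pcmk__set_allocation_methods() (see pcmk__schedule_actions()
 * below). Its second argument is a preferred node, used above to keep a
 * migrating remote connection on its partial migration target, or NULL to
 * let scores alone decide.
 */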
static void
clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->children because those
     * should just be unallocated clone instances.
     */

    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                             data_set) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);

        /* We can't use order_action_then_stop() here because its
         * pe_order_preserve breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pe_order_optional, data_set);
    }
}

static void
schedule_resource_actions(pe_working_set_t *data_set)
{
    // Process deferred action checks
    pe__foreach_param_check(data_set, check_params);
    pe__free_param_checks(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(data_set);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        g_list_foreach(data_set->resources,
                       (GFunc) clear_failcounts_if_orphaned, data_set);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        pe_resource_t *rsc = (pe_resource_t *) iter->data;

        rsc->cmds->create_actions(rsc);
    }
}

static bool
is_managed(const pe_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

static bool
any_managed_resources(pe_working_set_t *data_set)
{
    for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
        if (is_managed((pe_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

static bool
needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
{
    return have_managed && node->details->unclean
           && pe_can_fence(data_set, node);
}

static bool
needs_shutdown(pe_node_t *node)
{
    if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

static GList *
add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pe_action_t *) list->data, action, pe_order_optional);
    }
    return g_list_prepend(list, action);
}

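/* Illustration: with concurrent fencing disabled, successive calls for
 * unclean nodes n1, n2, n3 create the chain fence(n1) -> fence(n2) ->
 * fence(n3) and return the list [fence(n3), fence(n2), fence(n1)], so the
 * head of the returned list is always the end of the chain.
 */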
static pe_action_t *
schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
{
    pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                       FALSE, data_set);

    pe_warn("Scheduling node %s for fencing", pe__node_name(node));
    pcmk__order_vs_fence(fencing, data_set);
    return fencing;
}

static void
schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
{
    pe_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(data_set);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
        pe_node_t *node = (pe_node_t *) iter->data;
        pe_action_t *fencing = NULL;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && have_managed
                && pe_can_fence(data_set, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed, data_set)) {
            fencing = schedule_fencing(node, data_set);

            // Track DC and non-DC fence actions separately
            if (node->details->is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
            }

        } else if (needs_shutdown(node)) {
            pe_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (node->details->is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pe_warn("Node %s is unclean but cannot be fenced",
                    pe__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            pe_warn("Resource functionality and data integrity cannot be "
                    "guaranteed (configure, enable, and test fencing to "
                    "correct this)");

        } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or no-quorum-policy is set to ignore");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pe_action_t *) fencing_ops->data, dc_down,
                          pe_order_optional);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

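/* Illustration: if the DC and two other nodes are all cleanly shutting down,
 * the two non-DC shutdowns are ordered before the DC's, so the DC leaves
 * last and the cluster avoids holding a new DC election for every departing
 * node.
 */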
static void
log_resource_details(pe_working_set_t *data_set)
{
    pcmk__output_t *out = data_set->priv;
    GList *all = NULL;

    /* We need a list of nodes that we are allowed to output information for.
     * This is necessary because out->message for all the resource-related
     * messages expects such a list, due to the `crm_mon --node=` feature. Here,
     * we just make it a list of all the nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = data_set->resources; item != NULL; item = item->next) {
        pe_resource_t *rsc = (pe_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
            || (rsc->role != RSC_ROLE_STOPPED)) {
            out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pe_working_set_t *data_set)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = data_set->priv;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    data_set->priv = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(data_set);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    data_set->priv = prev_out;
}

static void
log_unrunnable_actions(pe_working_set_t *data_set)
{
    const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
        pe_action_t *action = (pe_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

static void
unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
{
    const char *localhost_save = NULL;

    if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
        crm_trace("Reusing previously calculated cluster status");
        pe__set_working_set_flags(data_set, flags);
        return;
    }

    if (data_set->localhost) {
        localhost_save = data_set->localhost;
    }

    CRM_ASSERT(cib != NULL);
    crm_trace("Calculating cluster status");

    /* This will zero the entire struct without freeing anything first, so
     * callers should never call pcmk__schedule_actions() with a populated data
     * set unless pe_flag_have_status is set (i.e. cluster_status() was
     * previously called, whether directly or via pcmk__schedule_actions()).
     */
    set_working_set_defaults(data_set);

    if (localhost_save) {
        data_set->localhost = localhost_save;
    }

    pe__set_working_set_flags(data_set, flags);
    data_set->input = cib;
    cluster_status(data_set); // Sets pe_flag_have_status
}

void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                       pe_working_set_t *data_set)
{
    unpack_cib(cib, flags, data_set);
    pcmk__set_allocation_methods(data_set);
    pcmk__apply_node_health(data_set);
    pcmk__unpack_constraints(data_set);
    if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
        return;
    }

    if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
        pcmk__is_daemon) {
        log_resource_details(data_set);
    }

    apply_node_criteria(data_set);

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        return;
    }

    pcmk__create_internal_constraints(data_set);
    pcmk__handle_rsc_config_changes(data_set);
    allocate_resources(data_set);
    schedule_resource_actions(data_set);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(data_set);

    schedule_fencing_and_shutdowns(data_set);
    pcmk__apply_orderings(data_set);
    log_all_actions(data_set);
    pcmk__create_graph(data_set);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(data_set);
    }
}
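
/* Illustrative usage sketch (not part of the original file): one plausible
 * way a caller could run the scheduler against a saved CIB, assuming the XML
 * was dumped beforehand (for example, with `cibadmin --query`). The helper
 * name schedule_from_file is hypothetical; filename2xml(),
 * pe_new_working_set(), and pe_free_working_set() are existing libcrmcommon
 * and libpe APIs. Error handling is kept minimal.
 */
#if 0
static void
schedule_from_file(const char *cib_file)
{
    xmlNode *cib = filename2xml(cib_file);              // scheduler input
    pe_working_set_t *data_set = pe_new_working_set();

    if ((cib != NULL) && (data_set != NULL)) {
        // unpack_cib() asserts a non-NULL CIB, hence the check above
        pcmk__schedule_actions(cib, pe_flag_no_compat, data_set);

        // data_set->graph now holds the generated transition graph XML
    }
    pe_free_working_set(data_set);  // frees the working set, including the
                                    // input CIB once it has been attached
}
#endif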