pacemaker 3.0.1-16e74fc4da
Scalable High-Availability cluster resource manager
pcmk_scheduler.c
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/common/scheduler_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

CRM_TRACE_INIT_DATA(pacemaker);

/*!
 * \internal
 * \brief Do a deferred action-parameter check for a resource history entry
 *
 * \param[in,out] rsc     Resource that the history entry is for
 * \param[in,out] node    Node that the history entry is for
 * \param[in]     rsc_op  Action history entry to check
 * \param[in]     check   Type of deferred check to perform
 */
static void
check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check)
{
    const char *reason = NULL;
    pcmk__op_digest_t *digest_data = NULL;

    switch (check) {
        case pcmk__check_active:
            if (pcmk__check_action_config(rsc, node, rsc_op)
                && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                    NULL)) {
                reason = "action definition changed";
            }
            break;

        case pcmk__check_last_failure:
            digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                rsc->priv->scheduler);
            switch (digest_data->rc) {
                case pcmk__digest_unknown:
                    crm_trace("Resource %s history entry %s on %s has "
                              "no digest to compare",
                              rsc->id, pcmk__xe_id(rsc_op), node->priv->id);
                    break;

                case pcmk__digest_match:
                    break;

                default:
                    reason = "resource parameters have changed";
                    break;
            }
            break;
    }
    if (reason != NULL) {
        pe__clear_failcount(rsc, node, reason, rsc->priv->scheduler);
    }
}

/*!
 * \internal
 * \brief Check whether a clear-failcount action is scheduled for a resource
 *
 * \param[in] node  Node to check
 * \param[in] rsc   Resource to check
 *
 * \return true if a clear-failcount action for \p rsc on \p node exists
 */
static bool
failcount_clear_action_exists(const pcmk_node_t *node,
                              const pcmk_resource_t *rsc)
{
    GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                       TRUE);

    if (list != NULL) {
        g_list_free(list);
        return true;
    }
    return false;
}

/*!
 * \internal
 * \brief Ban a resource from a node if its failure threshold is reached there
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Node to check resource on
 */
static void
check_failure_threshold(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->priv->children != NULL) {
        g_list_foreach(rsc->priv->children, check_failure_threshold,
                       user_data);
        return;
    }

    if (!failcount_clear_action_exists(node, rsc)) {
        /* Don't force the resource away from this node due to a failcount
         * that's going to be cleared.
         *
         * @TODO Failcount clearing can be scheduled in
         * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
         * schedule_resource_actions() via check_params(). This runs well before
         * then, so it cannot detect those, meaning we might check the migration
         * threshold when we shouldn't. Worst case, we stop or move the
         * resource, then move it back in the next transition.
         */
        pcmk_resource_t *failed = NULL;

        if (pcmk__threshold_reached(rsc, node, &failed)) {
            resource_location(failed, node, -PCMK_SCORE_INFINITY,
                              "__fail_limit__", rsc->priv->scheduler);
        }
    }
}

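/* Illustration (not part of the original file): how the threshold check above
 * plays out for a hypothetical resource rsc1 configured with
 *
 *   <primitive id="rsc1" class="ocf" provider="heartbeat" type="Dummy">
 *     <meta_attributes id="rsc1-meta">
 *       <nvpair id="rsc1-threshold" name="migration-threshold" value="3"/>
 *     </meta_attributes>
 *   </primitive>
 *
 * Once rsc1's effective fail count on a node reaches 3,
 * pcmk__threshold_reached() returns true, and the -PCMK_SCORE_INFINITY
 * location added above keeps rsc1 off that node until the fail count is
 * cleared or expires.
 */
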
/*!
 * \internal
 * \brief Ban a resource from nodes where exclusive discovery does not allow it
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Node to check resource on
 */
static void
apply_exclusive_discovery(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    const pcmk_node_t *node = user_data;

    /* @TODO This checks rsc and the top rsc, but should probably check all
     * ancestors (a cloned group could have it set on the group)
     */
    if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
        || pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                       pcmk__rsc_exclusive_probes)) {
        pcmk_node_t *match = NULL;

        // If this is a collective resource, apply recursively to children
        g_list_foreach(rsc->priv->children, apply_exclusive_discovery,
                       user_data);

        match = g_hash_table_lookup(rsc->priv->allowed_nodes,
                                    node->priv->id);
        if ((match != NULL)
            && (match->assign->probe_mode != pcmk__probe_exclusive)) {
            match->assign->score = -PCMK_SCORE_INFINITY;
        }
    }
}

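/* Illustration (not part of the original file): exclusive discovery is
 * requested per location constraint, for example
 *
 *   <rsc_location id="loc1" rsc="rsc1" node="node1" score="100"
 *                 resource-discovery="exclusive"/>
 *
 * Once any constraint for rsc1 uses resource-discovery="exclusive", rsc1 may
 * be probed and run only on nodes whose allowed-node entry carries the
 * exclusive probe mode; the function above bans every other node with
 * -PCMK_SCORE_INFINITY.
 */
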
/*!
 * \internal
 * \brief Apply a resource's stickiness to its current node
 *
 * \param[in,out] data       Resource to apply stickiness for
 * \param[in]     user_data  Ignored
 */
static void
apply_stickiness(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;
    pcmk_node_t *node = NULL;

    // If this is a collective resource, apply recursively to children instead
    if (rsc->priv->children != NULL) {
        g_list_foreach(rsc->priv->children, apply_stickiness, NULL);
        return;
    }

    /* A resource is sticky if it is managed, has stickiness configured, and is
     * active on a single node.
     */
    if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)
        || (rsc->priv->stickiness < 1)
        || !pcmk__list_of_1(rsc->priv->active_nodes)) {
        return;
    }

    node = rsc->priv->active_nodes->data;

    /* In a symmetric cluster, stickiness can always be used. In an
     * asymmetric cluster, we have to check whether the resource is still
     * allowed on the node, so we don't keep the resource somewhere it is no
     * longer explicitly enabled.
     */
    if (!pcmk_is_set(rsc->priv->scheduler->flags,
                     pcmk__sched_symmetric_cluster)
        && (g_hash_table_lookup(rsc->priv->allowed_nodes,
                                node->priv->id) == NULL)) {
        pcmk__rsc_debug(rsc,
                        "Ignoring %s stickiness because the cluster is "
                        "asymmetric and %s is not explicitly allowed",
                        rsc->id, pcmk__node_name(node));
        return;
    }

    pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                    rsc->id, rsc->priv->stickiness, pcmk__node_name(node));
    resource_location(rsc, node, rsc->priv->stickiness, "stickiness",
                      rsc->priv->scheduler);
}

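/* Illustration (not part of the original file): stickiness is ordinary score
 * addition. If rsc1 has resource-stickiness=100 and is active on node1, the
 * resource_location() call above has the same effect as
 *
 *   <rsc_location id="stickiness-rsc1" rsc="rsc1" node="node1" score="100"/>
 *
 * so rsc1 moves only if some other node's total score exceeds node1's score
 * plus 100.
 */
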
/*!
 * \internal
 * \brief Apply shutdown locks for all resources as appropriate
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_shutdown_lock)) {
        return;
    }
    for (GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->priv->cmds->shutdown_lock(rsc);
    }
}

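/* Illustration (not part of the original file): shutdown locks correspond to
 * the shutdown-lock cluster option. With shutdown-lock=true, resources active
 * on a node that shuts down cleanly are "locked" to that node and are not
 * recovered elsewhere until the node rejoins (or shutdown-lock-limit, if set,
 * expires).
 */
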
/*!
 * \internal
 * \brief Apply node-specific scheduling criteria
 *
 * After the CIB has been unpacked, process node-specific scheduling criteria
 * including shutdown locks, location constraints, resource stickiness,
 * migration thresholds, and exclusive resource discovery.
 */
static void
apply_node_criteria(pcmk_scheduler_t *scheduler)
{
    crm_trace("Applying node-specific scheduling criteria");
    apply_shutdown_locks(scheduler);
    pcmk__apply_locations(scheduler);
    g_list_foreach(scheduler->priv->resources, apply_stickiness, NULL);

    for (GList *node_iter = scheduler->nodes; node_iter != NULL;
         node_iter = node_iter->next) {

        for (GList *rsc_iter = scheduler->priv->resources;
             rsc_iter != NULL; rsc_iter = rsc_iter->next) {

            check_failure_threshold(rsc_iter->data, node_iter->data);
            apply_exclusive_discovery(rsc_iter->data, node_iter->data);
        }
    }
}

/*!
 * \internal
 * \brief Assign resources to nodes
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
assign_resources(pcmk_scheduler_t *scheduler)
{
    GList *iter = NULL;

    crm_trace("Assigning resources to nodes");

    if (!pcmk__str_eq(scheduler->priv->placement_strategy, PCMK_VALUE_DEFAULT,
                      pcmk__str_casei)) {
        pcmk__sort_resources(scheduler);
    }

    pcmk__show_node_capacities("Original", scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
        /* Assign remote connection resources first (which will also assign any
         * colocation dependencies). If the connection is migrating, always
         * prefer the partial migration target.
         */
        for (iter = scheduler->priv->resources;
             iter != NULL; iter = iter->next) {

            pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
            pcmk_node_t *target = rsc->priv->partial_migration_target;

            if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
                pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
                                rsc->id);
                rsc->priv->cmds->assign(rsc, target, true);
            }
        }
    }

    /* now do the rest of the resources */
    for (iter = scheduler->priv->resources; iter != NULL; iter = iter->next) {
        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        if (!pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
            pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
                            rsc->priv->xml->name, rsc->id);
            rsc->priv->cmds->assign(rsc, NULL, true);
        }
    }

    pcmk__show_node_capacities("Remaining", scheduler);
}

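/* Illustration (not part of the original file): the pcmk__sort_resources()
 * call above takes effect when the placement-strategy cluster option is set
 * to something other than "default" (for example "utilization" or
 * "balanced"), in which case resource priority and utilization influence the
 * assignment order, and the node-capacity logs show utilization before and
 * after assignment.
 */
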
/*!
 * \internal
 * \brief Schedule fail count clearing on online nodes if resource is orphaned
 *
 * \param[in,out] data       Resource to check
 * \param[in]     user_data  Ignored
 */
static void
clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
    pcmk_resource_t *rsc = data;

    if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
        return;
    }
    crm_trace("Clear fail counts for orphaned resource %s", rsc->id);

    /* There's no need to recurse into rsc->priv->children because those
     * should just be unassigned clone instances.
     */

    for (GList *iter = rsc->priv->scheduler->nodes;
         iter != NULL; iter = iter->next) {

        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *clear_op = NULL;

        if (!node->details->online) {
            continue;
        }
        if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
            continue;
        }

        clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                       rsc->priv->scheduler);

        /* We can't use order_action_then_stop() here because its
         * pcmk__ar_guest_allowed breaks things
         */
        pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                           NULL, pcmk__ar_ordered, rsc->priv->scheduler);
    }
}

/*!
 * \internal
 * \brief Schedule any resource actions needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
    // Process deferred action checks
    pcmk__foreach_param_check(scheduler, check_params);
    pcmk__free_param_checks(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_probe_resources)) {
        crm_trace("Scheduling probes");
        pcmk__schedule_probes(scheduler);
    }

    if (pcmk_is_set(scheduler->flags, pcmk__sched_stop_removed_resources)) {
        g_list_foreach(scheduler->priv->resources, clear_failcounts_if_orphaned,
                       NULL);
    }

    crm_trace("Scheduling resource actions");
    for (GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;

        rsc->priv->cmds->create_actions(rsc);
    }
}

/*!
 * \internal
 * \brief Check whether a resource or any of its descendants are managed
 *
 * \param[in] rsc  Resource to check
 *
 * \return true if the resource or any descendant is managed, otherwise false
 */
static bool
is_managed(const pcmk_resource_t *rsc)
{
    if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
        return true;
    }
    for (GList *iter = rsc->priv->children;
         iter != NULL; iter = iter->next) {

        if (is_managed((pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether any resource in the cluster is managed
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if any resource is managed, otherwise false
 */
static bool
any_managed_resources(const pcmk_scheduler_t *scheduler)
{
    for (const GList *iter = scheduler->priv->resources;
         iter != NULL; iter = iter->next) {
        if (is_managed((const pcmk_resource_t *) iter->data)) {
            return true;
        }
    }
    return false;
}

/*!
 * \internal
 * \brief Check whether a node requires fencing
 *
 * \param[in] node          Node to check
 * \param[in] have_managed  Whether any resource in the cluster is managed
 *
 * \return true if \p node should be fenced, otherwise false
 */
static bool
needs_fencing(const pcmk_node_t *node, bool have_managed)
{
    return have_managed && node->details->unclean
           && pe_can_fence(node->priv->scheduler, node);
}

/*!
 * \internal
 * \brief Check whether a node requires shutdown
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node should be shut down, otherwise false
 */
static bool
needs_shutdown(const pcmk_node_t *node)
{
    if (pcmk__is_pacemaker_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
        return false;
    }
    return node->details->online && node->details->shutdown;
}

/*!
 * \internal
 * \brief Track and order non-DC fencing
 *
 * \param[in,out] list       List of existing non-DC fencing actions
 * \param[in,out] action     Fencing action to prepend to \p list
 * \param[in]     scheduler  Scheduler data
 *
 * \return (Possibly new) head of \p list
 */
static GList *
add_nondc_fencing(GList *list, pcmk_action_t *action,
                  const pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)
        && (list != NULL)) {
        /* Concurrent fencing is disabled, so order each non-DC
         * fencing in a chain. If there is any DC fencing or
         * shutdown, it will be ordered after the last action in the
         * chain later.
         */
        order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
    }
    return g_list_prepend(list, action);
}

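/* Illustration (not part of the original file): with concurrent fencing
 * disabled and unclean non-DC nodes n1, n2, and n3, successive calls here
 * produce the ordering chain fence(n1) -> fence(n2) -> fence(n3). Because
 * each new action is prepended, the list head is always the end of the chain,
 * which schedule_fencing_and_shutdowns() later orders before any DC fencing
 * or shutdown.
 */
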
/*!
 * \internal
 * \brief Schedule a node for fencing
 *
 * \param[in,out] node  Node that requires fencing
 *
 * \return Newly created fencing action
 */
static pcmk_action_t *
schedule_fencing(pcmk_node_t *node)
{
    pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                         FALSE, node->priv->scheduler);

    pcmk__sched_warn(node->priv->scheduler, "Scheduling node %s for fencing",
                     pcmk__node_name(node));
    pcmk__order_vs_fence(fencing, node->priv->scheduler);
    return fencing;
}

/*!
 * \internal
 * \brief Create and order node fencing and shutdown actions as needed
 *
 * \param[in,out] scheduler  Scheduler data
 */
static void
schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
    pcmk_action_t *dc_down = NULL;
    bool integrity_lost = false;
    bool have_managed = any_managed_resources(scheduler);
    GList *fencing_ops = NULL;
    GList *shutdown_ops = NULL;

    crm_trace("Scheduling fencing and shutdowns as needed");
    if (!have_managed) {
        crm_notice("No fencing will be done until there are resources "
                   "to manage");
    }

    // Check each node for whether it needs fencing or shutdown
    for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        pcmk_node_t *node = (pcmk_node_t *) iter->data;
        pcmk_action_t *fencing = NULL;
        const bool is_dc = pcmk__same_node(node, scheduler->dc_node);

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pcmk__is_guest_or_bundle_node(node)) {
            if (pcmk_is_set(node->priv->flags, pcmk__node_remote_reset)
                && have_managed && pe_can_fence(scheduler, node)) {
                pcmk__fence_guest(node);
            }
            continue;
        }

        if (needs_fencing(node, have_managed)) {
            fencing = schedule_fencing(node);

            // Track DC and non-DC fence actions separately
            if (is_dc) {
                dc_down = fencing;
            } else {
                fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                scheduler);
            }

        } else if (needs_shutdown(node)) {
            pcmk_action_t *down_op = pcmk__new_shutdown_action(node);

            // Track DC and non-DC shutdown actions separately
            if (is_dc) {
                dc_down = down_op;
            } else {
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        if ((fencing == NULL) && node->details->unclean) {
            integrity_lost = true;
            pcmk__config_warn("Node %s is unclean but cannot be fenced",
                              pcmk__node_name(node));
        }
    }

    if (integrity_lost) {
        if (!pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
            pcmk__config_warn("Resource functionality and data integrity "
                              "cannot be guaranteed (configure, enable, "
                              "and test fencing to correct this)");

        } else if (!pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
            crm_notice("Unclean nodes will not be fenced until quorum is "
                       "attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
                       PCMK_VALUE_IGNORE);
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                         pcmk__str_none)) {
            pcmk__order_after_each(dc_down, shutdown_ops);
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (pcmk_is_set(scheduler->flags, pcmk__sched_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            pcmk__order_after_each(dc_down, fencing_ops);
        } else if (fencing_ops != NULL) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                          pcmk__ar_ordered);
        }
    }
    g_list_free(fencing_ops);
    g_list_free(shutdown_ops);
}

static void
log_resource_details(pcmk_scheduler_t *scheduler)
{
    pcmk__output_t *out = scheduler->priv->out;
    GList *all = NULL;

    /* Due to the `crm_mon --node=` feature, out->message() for all the
     * resource-related messages expects a list of nodes that we are allowed to
     * output information for. Here, we create a wildcard to match all nodes.
     */
    all = g_list_prepend(all, (gpointer) "*");

    for (GList *item = scheduler->priv->resources;
         item != NULL; item = item->next) {

        pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;

        // Log all resources except inactive orphans
        if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
            || (rsc->priv->orig_role != pcmk_role_stopped)) {
            out->message(out, (const char *) rsc->priv->xml->name, 0UL,
                         rsc, all, all);
        }
    }

    g_list_free(all);
}

static void
log_all_actions(pcmk_scheduler_t *scheduler)
{
    /* This only ever outputs to the log, so ignore whatever output object was
     * previously set and just log instead.
     */
    pcmk__output_t *prev_out = scheduler->priv->out;
    pcmk__output_t *out = NULL;

    if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
        return;
    }

    pe__register_messages(out);
    pcmk__register_lib_messages(out);
    pcmk__output_set_log_level(out, LOG_NOTICE);
    scheduler->priv->out = out;

    out->begin_list(out, NULL, NULL, "Actions");
    pcmk__output_actions(scheduler);
    out->end_list(out);
    out->finish(out, CRM_EX_OK, true, NULL);
    pcmk__output_free(out);

    scheduler->priv->out = prev_out;
}

/*!
 * \internal
 * \brief Log all required but unrunnable actions at trace level
 *
 * \param[in] scheduler  Scheduler data
 */
static void
log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
    const uint64_t flags = pcmk__action_optional
                           |pcmk__action_runnable
                           |pcmk__action_pseudo;

    crm_trace("Required but unrunnable actions:");
    for (const GList *iter = scheduler->priv->actions;
         iter != NULL; iter = iter->next) {

        const pcmk_action_t *action = (const pcmk_action_t *) iter->data;

        if (!pcmk_any_flags_set(action->flags, flags)) {
            pcmk__log_action("\t", action, true);
        }
    }
}

/*!
 * \internal
 * \brief Run the scheduler for a given CIB
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__schedule_actions(pcmk_scheduler_t *scheduler)
{
    pcmk__set_assignment_methods(scheduler);
    pcmk__apply_node_health(scheduler);
    pcmk__unpack_constraints(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_validate_only)) {
        return;
    }

    if (!pcmk_is_set(scheduler->flags, pcmk__sched_location_only)
        && pcmk__is_daemon) {
        log_resource_details(scheduler);
    }

    apply_node_criteria(scheduler);

    if (pcmk_is_set(scheduler->flags, pcmk__sched_location_only)) {
        return;
    }

    pcmk__create_internal_constraints(scheduler);
    pcmk__handle_rsc_config_changes(scheduler);
    assign_resources(scheduler);
    schedule_resource_actions(scheduler);

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we can mark nodes as needing fencing.
     */
    pcmk__order_remote_connection_actions(scheduler);

    schedule_fencing_and_shutdowns(scheduler);
    pcmk__apply_orderings(scheduler);
    log_all_actions(scheduler);
    pcmk__create_graph(scheduler);

    if (get_crm_log_level() == LOG_TRACE) {
        log_unrunnable_actions(scheduler);
    }
}

/*!
 * \internal
 * \brief Initialize scheduler data for checking a CIB
 *
 * \param[in,out] out        Output object
 * \param[in]     input      CIB XML to use (if NULL, fetch the current CIB)
 * \param[in]     date       Date/time to evaluate the CIB at
 *                           (if NULL, use the current date/time)
 * \param[out]    scheduler  Where to store the initialized scheduler data
 *
 * \return Standard Pacemaker return code
 */
int
pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input,
                     const crm_time_t *date, pcmk_scheduler_t **scheduler)
{
    // Allows for cleaner syntax than dereferencing the scheduler argument
    pcmk_scheduler_t *new_scheduler = NULL;

    new_scheduler = pcmk_new_scheduler();
    if (new_scheduler == NULL) {
        return ENOMEM;
    }

    pcmk__set_scheduler_flags(new_scheduler, pcmk__sched_no_counts);

    // Populate the scheduler data

    // Make our own copy of the given input or fetch the CIB and use that
    if (input != NULL) {
        new_scheduler->input = pcmk__xml_copy(NULL, input);
        if (new_scheduler->input == NULL) {
            out->err(out, "Failed to copy input XML");
            pcmk_free_scheduler(new_scheduler);
            return ENOMEM;
        }

    } else {
        int rc = cib__signon_query(out, NULL, &(new_scheduler->input));

        if (rc != pcmk_rc_ok) {
            pcmk_free_scheduler(new_scheduler);
            return rc;
        }
    }

    // Make our own copy of the given crm_time_t object; otherwise
    // cluster_status() populates with the current time
    if (date != NULL) {
        // pcmk_copy_time() guarantees non-NULL
        new_scheduler->priv->now = pcmk_copy_time(date);
    }

    // Unpack everything
    cluster_status(new_scheduler);
    *scheduler = new_scheduler;

    return pcmk_rc_ok;
}
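
/* Illustration (not part of the original file): a minimal sketch of how a
 * caller that already has a pcmk__output_t might drive the public entry
 * points in this file; error handling is abbreviated.
 *
 *   pcmk_scheduler_t *scheduler = NULL;
 *
 *   if (pcmk__init_scheduler(out, NULL, NULL, &scheduler) == pcmk_rc_ok) {
 *       pcmk__schedule_actions(scheduler);  // compute the transition
 *       pcmk_free_scheduler(scheduler);
 *   }
 */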