pacemaker 3.0.1-16e74fc4da
Scalable High-Availability cluster resource manager
Loading...
Searching...
No Matches
pcmk_sched_remote.c
Go to the documentation of this file.
1/*
2 * Copyright 2004-2024 the Pacemaker project contributors
3 *
4 * The version control history for this file may have further details.
5 *
6 * This source code is licensed under the GNU General Public License version 2
7 * or later (GPLv2+) WITHOUT ANY WARRANTY.
8 */
9
10#include <crm_internal.h>
11
12#include <sys/param.h>
13
14#include <crm/crm.h>
15#include <crm/cib.h>
16#include <crm/common/xml.h>
18
19#include <glib.h>
20
21#include <crm/pengine/status.h>
22#include <pacemaker-internal.h>
// Internal states a remote connection resource can be in, from the
// scheduler's point of view (see get_remote_node_state())
enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4,
};

/*!
 * \internal
 * \brief Get a readable string equivalent of a remote connection state
 *
 * \param[in] state  Connection state to represent
 *
 * \return Static string representing \p state
 */
static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    // Not reachable if all enum values are handled above
    return "impossible";
}
51
52/* We always use pcmk__ar_guest_allowed with these convenience functions to
53 * exempt internally generated constraints from the prohibition of user
54 * constraints involving remote connection resources.
55 *
56 * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
57 * the specified action is not runnable if the start is not runnable.
58 */
59
60static inline void
61order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
62 uint32_t extra)
63{
64 if ((first_rsc != NULL) && (then_action != NULL)) {
65
66 pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
67 then_action->rsc, NULL, then_action,
70 |extra,
71 first_rsc->priv->scheduler);
72 }
73}
74
75static inline void
76order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
77 uint32_t extra)
78{
79 if ((first_action != NULL) && (then_rsc != NULL)) {
80 pcmk__new_ordering(first_action->rsc, NULL, first_action,
81 then_rsc, stop_key(then_rsc), NULL,
83 then_rsc->priv->scheduler);
84 }
85}
86
88get_remote_node_state(const pcmk_node_t *node)
89{
90 const pcmk_resource_t *remote_rsc = NULL;
91 const pcmk_node_t *cluster_node = NULL;
92
93 pcmk__assert(node != NULL);
94
95 remote_rsc = node->priv->remote;
96 pcmk__assert(remote_rsc != NULL);
97
98 cluster_node = pcmk__current_node(remote_rsc);
99
100 /* If the cluster node the remote connection resource resides on
101 * is unclean or went offline, we can't process any operations
102 * on that remote node until after it starts elsewhere.
103 */
104 if ((remote_rsc->priv->next_role == pcmk_role_stopped)
105 || (remote_rsc->priv->assigned_node == NULL)) {
106
107 // The connection resource is not going to run anywhere
108
109 if ((cluster_node != NULL) && cluster_node->details->unclean) {
110 /* The remote connection is failed because its resource is on a
111 * failed node and can't be recovered elsewhere, so we must fence.
112 */
113 return remote_state_failed;
114 }
115
116 if (!pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)) {
117 /* Connection resource is cleanly stopped */
119 }
120
121 /* Connection resource is failed */
122
123 if ((remote_rsc->priv->next_role == pcmk_role_stopped)
124 && (remote_rsc->priv->remote_reconnect_ms > 0U)
126 && !pe__shutdown_requested(node)) {
127
128 /* We won't know whether the connection is recoverable until the
129 * reconnect interval expires and we reattempt connection.
130 */
132 }
133
134 /* The remote connection is in a failed state. If there are any
135 * resources known to be active on it (stop) or in an unknown state
136 * (probe), we must assume the worst and fence it.
137 */
138 return remote_state_failed;
139
140 } else if (cluster_node == NULL) {
141 /* Connection is recoverable but not currently running anywhere, so see
142 * if we can recover it first
143 */
145
146 } else if (cluster_node->details->unclean
147 || !(cluster_node->details->online)) {
148 // Connection is running on a dead node, see if we can recover it first
150
151 } else if (pcmk__list_of_multiple(remote_rsc->priv->active_nodes)
152 && (remote_rsc->priv->partial_migration_source != NULL)
153 && (remote_rsc->priv->partial_migration_target != NULL)) {
154 /* We're in the middle of migrating a connection resource, so wait until
155 * after the migration completes before performing any actions.
156 */
158
159 }
160 return remote_state_alive;
161}
162
169static void
170apply_remote_ordering(pcmk_action_t *action)
171{
172 pcmk_resource_t *remote_rsc = NULL;
174 enum remote_connection_state state = get_remote_node_state(action->node);
175
176 uint32_t order_opts = pcmk__ar_none;
177
178 if (action->rsc == NULL) {
179 return;
180 }
181
182 pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));
183
184 remote_rsc = action->node->priv->remote;
185 pcmk__assert(remote_rsc != NULL);
186
187 crm_trace("Order %s action %s relative to %s%s (state: %s)",
188 action->task, action->uuid,
189 pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)? "failed " : "",
190 remote_rsc->id, state2text(state));
191
194 /* Migration ops map to pcmk__action_unspecified, but we need to apply
195 * the same ordering as for stop or demote (see get_router_node()).
196 */
197 task = pcmk__action_stop;
198 }
199
200 switch (task) {
203 order_opts = pcmk__ar_none;
204
205 if (state == remote_state_failed) {
206 /* Force recovery, by making this action required */
207 pcmk__set_relation_flags(order_opts,
209 }
210
211 /* Ensure connection is up before running this action */
212 order_start_then_action(remote_rsc, action, order_opts);
213 break;
214
216 if (state == remote_state_alive) {
217 order_action_then_stop(action, remote_rsc,
219
220 } else if (state == remote_state_failed) {
221 /* The resource is active on the node, but since we don't have a
222 * valid connection, the only way to stop the resource is by
223 * fencing the node. There is no need to order the stop relative
224 * to the remote connection, since the stop will become implied
225 * by the fencing.
226 */
227 pe_fence_node(remote_rsc->priv->scheduler, action->node,
228 "resources are active but "
229 "connection is unrecoverable",
230 FALSE);
231
232 } else if (remote_rsc->priv->next_role == pcmk_role_stopped) {
233 /* State must be remote_state_unknown or remote_state_stopped.
234 * Since the connection is not coming back up in this
235 * transition, stop this resource first.
236 */
237 order_action_then_stop(action, remote_rsc,
239
240 } else {
241 /* The connection is going to be started somewhere else, so
242 * stop this resource after that completes.
243 */
244 order_start_then_action(remote_rsc, action, pcmk__ar_none);
245 }
246 break;
247
249 /* Only order this demote relative to the connection start if the
250 * connection isn't being torn down. Otherwise, the demote would be
251 * blocked because the connection start would not be allowed.
252 */
253 if ((state == remote_state_resting)
254 || (state == remote_state_unknown)) {
255
256 order_start_then_action(remote_rsc, action, pcmk__ar_none);
257 } /* Otherwise we can rely on the stop ordering */
258 break;
259
260 default:
261 /* Wait for the connection resource to be up */
263 /* In case we ever get the recovery logic wrong, force
264 * recurring monitors to be restarted, even if just
265 * the connection was re-established
266 */
267 order_start_then_action(remote_rsc, action,
269
270 } else {
271 pcmk_node_t *cluster_node = pcmk__current_node(remote_rsc);
272
273 if ((task == pcmk__action_monitor)
274 && (state == remote_state_failed)) {
275 /* We would only be here if we do not know the state of the
276 * resource on the remote node. Since we have no way to find
277 * out, it is necessary to fence the node.
278 */
279 pe_fence_node(remote_rsc->priv->scheduler, action->node,
280 "resources are in unknown state "
281 "and connection is unrecoverable", FALSE);
282 }
283
284 if ((cluster_node != NULL) && (state == remote_state_stopped)) {
285 /* The connection is currently up, but is going down
286 * permanently. Make sure we check services are actually
287 * stopped _before_ we let the connection get closed.
288 */
289 order_action_then_stop(action, remote_rsc,
291
292 } else {
293 order_start_then_action(remote_rsc, action, pcmk__ar_none);
294 }
295 }
296 break;
297 }
298}
299
300static void
301apply_launcher_ordering(pcmk_action_t *action)
302{
303 pcmk_resource_t *remote_rsc = NULL;
304 pcmk_resource_t *launcher = NULL;
306
307 pcmk__assert(action->rsc != NULL);
308 pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));
309
310 remote_rsc = action->node->priv->remote;
311 pcmk__assert(remote_rsc != NULL);
312
313 launcher = remote_rsc->priv->launcher;
314 pcmk__assert(launcher != NULL);
315
316 if (pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
317 pe_fence_node(action->rsc->priv->scheduler, action->node,
318 "container failed", FALSE);
319 }
320
321 crm_trace("Order %s action %s relative to %s%s for %s%s",
322 action->task, action->uuid,
323 pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)? "failed " : "",
324 remote_rsc->id,
325 pcmk_is_set(launcher->flags, pcmk__rsc_failed)? "failed " : "",
326 launcher->id);
327
330 /* Migration ops map to pcmk__action_unspecified, but we need to apply
331 * the same ordering as for stop or demote (see get_router_node()).
332 */
333 task = pcmk__action_stop;
334 }
335
336 switch (task) {
339 // Force resource recovery if the launcher is recovered
340 order_start_then_action(launcher, action,
342
343 // Wait for the connection resource to be up, too
344 order_start_then_action(remote_rsc, action, pcmk__ar_none);
345 break;
346
349 if (pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
350 /* When the launcher representing a guest node fails, any stop
351 * or demote actions for resources running on the guest node
352 * are implied by the launcher stopping. This is similar to
353 * how fencing operations work for cluster nodes and remote
354 * nodes.
355 */
356 } else {
357 /* Ensure the operation happens before the connection is brought
358 * down.
359 *
360 * If we really wanted to, we could order these after the
361 * connection start, IFF the launcher's current role was
362 * stopped (otherwise we re-introduce an ordering loop when the
363 * connection is restarting).
364 */
365 order_action_then_stop(action, remote_rsc, pcmk__ar_none);
366 }
367 break;
368
369 default:
370 /* Wait for the connection resource to be up */
372 /* In case we ever get the recovery logic wrong, force
373 * recurring monitors to be restarted, even if just
374 * the connection was re-established
375 */
376 if (task != pcmk__action_unspecified) {
377 order_start_then_action(remote_rsc, action,
379 }
380 } else {
381 order_start_then_action(remote_rsc, action, pcmk__ar_none);
382 }
383 break;
384 }
385}
386
393void
395{
397 return;
398 }
399
400 crm_trace("Creating remote connection orderings");
401
402 for (GList *iter = scheduler->priv->actions;
403 iter != NULL; iter = iter->next) {
404 pcmk_action_t *action = iter->data;
405 pcmk_resource_t *remote = NULL;
406
407 // We are only interested in resource actions
408 if (action->rsc == NULL) {
409 continue;
410 }
411
412 /* Special case: If we are clearing the failcount of an actual
413 * remote connection resource, then make sure this happens before
414 * any start of the resource in this transition.
415 */
417 && pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
419
420 pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
422 0),
424
425 continue;
426 }
427
428 // We are only interested in actions assigned to a node
429 if (action->node == NULL) {
430 continue;
431 }
432
433 if (!pcmk__is_pacemaker_remote_node(action->node)) {
434 continue;
435 }
436
437 /* We are only interested in real actions.
438 *
439 * @TODO This is probably wrong; pseudo-actions might be converted to
440 * real actions and vice versa later in update_actions() at the end of
441 * pcmk__apply_orderings().
442 */
444 continue;
445 }
446
447 remote = action->node->priv->remote;
448 if (remote == NULL) {
449 // Orphaned
450 continue;
451 }
452
453 /* Another special case: if a resource is moving to a Pacemaker Remote
454 * node, order the stop on the original node after any start of the
455 * remote connection. This ensures that if the connection fails to
456 * start, we leave the resource running on the original node.
457 */
458 if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
459 for (GList *item = action->rsc->priv->actions; item != NULL;
460 item = item->next) {
461 pcmk_action_t *rsc_action = item->data;
462
463 if (!pcmk__same_node(rsc_action->node, action->node)
464 && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
466 pcmk__new_ordering(remote, start_key(remote), NULL,
467 action->rsc, NULL, rsc_action,
469 }
470 }
471 }
472
473 /* The action occurs across a remote connection, so create
474 * ordering constraints that guarantee the action occurs while the node
475 * is active (after start, before stop ... things like that).
476 *
477 * This is somewhat brittle in that we need to make sure the results of
478 * this ordering are compatible with the result of get_router_node().
479 * It would probably be better to add PCMK__XA_ROUTER_NODE as part of
480 * this logic rather than create_graph_action().
481 */
482 if (remote->priv->launcher != NULL) {
483 crm_trace("Container ordering for %s", action->uuid);
484 apply_launcher_ordering(action);
485
486 } else {
487 crm_trace("Remote ordering for %s", action->uuid);
488 apply_remote_ordering(action);
489 }
490 }
491}
492
501bool
503{
504 return pcmk__is_remote_node(node) && (node->priv->remote != NULL)
505 && (get_remote_node_state(node) == remote_state_failed);
506}
507
518bool
520 const pcmk_node_t *node)
521{
522 return (rsc != NULL) && (rsc->priv->launched != NULL) && (node != NULL)
523 && (node->priv->remote != NULL)
524 && (node->priv->remote->priv->launcher == rsc);
525}
526
543{
544 pcmk_node_t *began_on = NULL;
545 pcmk_node_t *ended_on = NULL;
546 bool partial_migration = false;
547 const char *task = action->task;
548 pcmk_resource_t *remote = NULL;
549
550 if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
551 || !pcmk__is_pacemaker_remote_node(action->node)) {
552 return NULL;
553 }
554
555 remote = action->node->priv->remote;
556 pcmk__assert(remote != NULL);
557
558 began_on = pcmk__current_node(remote);
559 ended_on = remote->priv->assigned_node;
560 if ((remote->priv->launcher == NULL)
561 && (remote->priv->partial_migration_target != NULL)) {
562 partial_migration = true;
563 }
564
565 if (began_on == NULL) {
566 crm_trace("Routing %s for %s through remote connection's "
567 "next node %s (starting)%s",
568 action->task, (action->rsc? action->rsc->id : "no resource"),
569 (ended_on? ended_on->priv->name : "none"),
570 partial_migration? " (partial migration)" : "");
571 return ended_on;
572 }
573
574 if (ended_on == NULL) {
575 crm_trace("Routing %s for %s through remote connection's "
576 "current node %s (stopping)%s",
577 action->task, (action->rsc? action->rsc->id : "no resource"),
578 (began_on? began_on->priv->name : "none"),
579 partial_migration? " (partial migration)" : "");
580 return began_on;
581 }
582
583 if (pcmk__same_node(began_on, ended_on)) {
584 crm_trace("Routing %s for %s through remote connection's "
585 "current node %s (not moving)%s",
586 action->task, (action->rsc? action->rsc->id : "no resource"),
587 (began_on? began_on->priv->name : "none"),
588 partial_migration? " (partial migration)" : "");
589 return began_on;
590 }
591
592 /* If we get here, the remote connection is moving during this transition.
593 * This means some actions for resources behind the connection will get
594 * routed through the cluster node the connection resource is currently on,
595 * and others are routed through the cluster node the connection will end up
596 * on.
597 */
598
599 if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
600 task = g_hash_table_lookup(action->meta, "notify_operation");
601 }
602
603 /*
604 * Stop, demote, and migration actions must occur before the connection can
605 * move (these actions are required before the remote resource can stop). In
606 * this case, we know these actions have to be routed through the initial
607 * cluster node the connection resource lived on before the move takes
608 * place.
609 *
610 * The exception is a partial migration of a (non-guest) remote connection
611 * resource; in that case, all actions (even these) will be ordered after
612 * the connection's pseudo-start on the migration target, so the target is
613 * the router node.
614 */
618 && !partial_migration) {
619 crm_trace("Routing %s for %s through remote connection's "
620 "current node %s (moving)%s",
621 action->task, (action->rsc? action->rsc->id : "no resource"),
622 (began_on? began_on->priv->name : "none"),
623 partial_migration? " (partial migration)" : "");
624 return began_on;
625 }
626
627 /* Everything else (start, promote, monitor, probe, refresh,
628 * clear failcount, delete, ...) must occur after the connection starts on
629 * the node it is moving to.
630 */
631 crm_trace("Routing %s for %s through remote connection's "
632 "next node %s (moving)%s",
633 action->task, (action->rsc? action->rsc->id : "no resource"),
634 (ended_on? ended_on->priv->name : "none"),
635 partial_migration? " (partial migration)" : "");
636 return ended_on;
637}
638
651void
653{
654 const char *remote_addr = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR);
655
656 if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
657 GHashTable *base = pe_rsc_params(rsc, NULL, rsc->priv->scheduler);
658
659 remote_addr = g_hash_table_lookup(base, PCMK_REMOTE_RA_ADDR);
660 if (remote_addr != NULL) {
661 pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, remote_addr);
662 }
663 }
664}
665
681void
683{
684 const pcmk_node_t *guest = action->node;
685 const pcmk_node_t *host = NULL;
686 const pcmk_resource_t *launcher = NULL;
687 enum pcmk__action_type task;
688
689 if (!pcmk__is_guest_or_bundle_node(guest)) {
690 return;
691 }
692 launcher = guest->priv->remote->priv->launcher;
693
694 task = pcmk__parse_action(action->task);
695 if ((task == pcmk__action_notify) || (task == pcmk__action_notified)) {
696 task = pcmk__parse_action(g_hash_table_lookup(action->meta,
697 "notify_operation"));
698 }
699
700 switch (task) {
705 // "Down" actions take place on guest's current host
706 host = pcmk__current_node(launcher);
707 break;
708
714 // "Up" actions take place on guest's next host
715 host = launcher->priv->assigned_node;
716 break;
717
718 default:
719 break;
720 }
721
722 if (host != NULL) {
723 gpointer target =
724 g_hash_table_lookup(action->rsc->priv->meta,
726
728 target,
729 (gpointer) args_xml);
731 (gpointer) host->priv->name,
732 (gpointer) args_xml);
733 }
734}
@ pcmk__ar_first_implies_then
@ pcmk__ar_then_implies_first
@ pcmk__ar_none
No relation (compare with equality rather than bit set)
@ pcmk__ar_unrunnable_first_blocks
'then' is runnable (and migratable) only if 'first' is runnable
@ pcmk__ar_ordered
Actions are ordered (optionally, if no other flags are set)
@ pcmk__ar_guest_allowed
Ordering applies even if 'first' runs on guest node created by 'then'.
#define pcmk__set_relation_flags(ar_flags, flags_to_set)
#define PCMK_ACTION_STOP
Definition actions.h:66
#define PCMK_ACTION_CANCEL
Definition actions.h:36
#define PCMK_ACTION_START
Definition actions.h:63
#define PCMK_ACTION_CLEAR_FAILCOUNT
Definition actions.h:37
#define PCMK_ACTION_MIGRATE_FROM
Definition actions.h:49
#define PCMK_ACTION_MIGRATE_TO
Definition actions.h:50
#define PCMK_ACTION_STONITH
Definition actions.h:65
#define PCMK_ACTION_DEMOTE
Definition actions.h:40
#define PCMK_ACTION_NOTIFY
Definition actions.h:53
@ pcmk__action_pseudo
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition actions.c:225
enum pcmk__action_type pcmk__parse_action(const char *action_name)
Definition actions.c:90
pcmk__action_type
@ pcmk__action_stop
@ pcmk__action_started
@ pcmk__action_monitor
@ pcmk__action_stopped
@ pcmk__action_notify
@ pcmk__action_start
@ pcmk__action_demote
@ pcmk__action_promote
@ pcmk__action_notified
@ pcmk__action_unspecified
@ pcmk__action_promoted
@ pcmk__action_demoted
Cluster Configuration.
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition util.h:80
GHashTable * pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
Get a table of resource parameters.
Definition complex.c:462
pcmk__cpg_host_t host
Definition cpg.c:4
A dumping ground.
G_GNUC_INTERNAL void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task, pcmk_action_t *first_action, pcmk_resource_t *then_rsc, char *then_task, pcmk_action_t *then_action, uint32_t flags, pcmk_scheduler_t *sched)
G_GNUC_INTERNAL bool pcmk__action_is_recurring(const pcmk_action_t *action)
#define crm_trace(fmt, args...)
Definition logging.h:370
pcmk_scheduler_t * scheduler
@ pcmk__node_remote_fenced
void hash2metafield(gpointer key, gpointer value, gpointer user_data)
Set XML attribute based on hash table entry, as meta-attribute name.
Definition nvpair.c:266
#define PCMK_META_CONTAINER_ATTRIBUTE_TARGET
Definition options.h:86
#define PCMK_REMOTE_RA_ADDR
Definition options.h:123
#define PCMK__META_PHYSICAL_HOST
const char * action
Definition pcmk_fence.c:32
const char * target
Definition pcmk_fence.c:31
bool pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc, const pcmk_node_t *node)
pcmk_node_t * pcmk__connection_host_for_action(const pcmk_action_t *action)
void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
bool pcmk__is_failed_remote_node(const pcmk_node_t *node)
void pcmk__add_guest_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
Add special guest node meta-attributes to XML.
void pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
remote_connection_state
@ remote_state_failed
@ remote_state_unknown
@ remote_state_alive
@ remote_state_stopped
@ remote_state_resting
void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node, const char *reason, bool priority_delay)
Schedule a fence action for a node.
Definition unpack.c:116
#define start_key(rsc)
Definition internal.h:192
#define stop_key(rsc)
Definition internal.h:190
bool pe__shutdown_requested(const pcmk_node_t *node)
Definition utils.c:676
@ pcmk__rsc_is_remote_connection
@ pcmk__rsc_failed
#define pcmk__assert(expr)
@ pcmk_role_stopped
Stopped.
Definition roles.h:36
@ pcmk__sched_have_remote_nodes
Cluster status and scheduling.
void pcmk__insert_dup(GHashTable *table, const char *name, const char *value)
Definition strings.c:703
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition strings.c:1029
@ pcmk__str_none
pcmk_node_t * node
pcmk_resource_t * rsc
gboolean online
Definition nodes.h:50
gboolean unclean
Definition nodes.h:58
pcmk_resource_t * remote
pcmk_node_t * partial_migration_target
pcmk_scheduler_t * scheduler
pcmk_node_t * partial_migration_source
unsigned long long flags
Definition resources.h:69
pcmk__resource_private_t * priv
Definition resources.h:61
pcmk__scheduler_private_t * priv
Definition scheduler.h:99
uint64_t flags
Definition scheduler.h:89
pcmk__node_private_t * priv
Definition nodes.h:85
struct pcmk__node_details * details
Definition nodes.h:82
Wrappers for and extensions to libxml2.