pacemaker  2.1.5-b7adf64e51
Scalable High-Availability cluster resource manager
pcmk_sched_remote.c
/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pe_order_preserve with these convenience functions to exempt
 * internally generated constraints from the prohibition of user constraints
 * involving remote connection resources.
 *
 * The start ordering additionally uses pe_order_runnable_left so that the
 * specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pe_resource_t *first_rsc, pe_action_t *then_action,
                        uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_rsc != NULL) && (then_action != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                           then_action->rsc, NULL, then_action,
                           pe_order_preserve|pe_order_runnable_left|extra,
                           data_set);
    }
}

static inline void
order_action_then_stop(pe_action_t *first_action, pe_resource_t *then_rsc,
                       uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_action != NULL) && (then_rsc != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_action->rsc, NULL, first_action,
                           then_rsc, stop_key(then_rsc), NULL,
                           pe_order_preserve|extra, data_set);
    }
}

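/* Example (an illustrative sketch, not part of the original file): to order a
 * monitor `mon` of a resource on a remote node relative to its connection
 * resource `conn` (both names hypothetical), the wrappers above would be
 * called as:
 *
 *     order_start_then_action(conn, mon, pe_order_none, data_set);
 *     order_action_then_stop(mon, conn, pe_order_none, data_set);
 *
 * Both calls implicitly add pe_order_preserve, and the first also adds
 * pe_order_runnable_left, so `mon` is not runnable if the connection start
 * is not runnable.
 */
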
static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
    pe_resource_t *remote_rsc = NULL;
    pe_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}
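
/* Illustrative sketch (not in the original file): callers branch on the
 * classification above roughly as follows, assuming `node` is a Pacemaker
 * Remote node:
 *
 *     switch (get_remote_node_state(node)) {
 *         case remote_state_failed:
 *             // connection unrecoverable: fencing is the only way to stop
 *             // resources on the node
 *             break;
 *         case remote_state_resting:
 *         case remote_state_unknown:
 *             // connection may be recovered: order actions after its start
 *             break;
 *         case remote_state_stopped:
 *             // connection going down for good: stop resources beforehand
 *             break;
 *         case remote_state_alive:
 *             // normal ordering applies
 *             break;
 *     }
 */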

/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 */
static void
apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    pe_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pe_order_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pe_order_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts, data_set);
            break;

        case stop_rsc:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(data_set, action->node,
                              "resources are active but connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then, data_set);

            } else {
                pe_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == monitor_rsc) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(data_set, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left, data_set);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            data_set);
                }
            }
            break;
    }
}
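
/* Summary (added for readability, not in the original file) of how
 * apply_remote_ordering() treats each task, by connection state:
 *
 *   start/promote: ordered after connection start; made mandatory if the
 *                  connection has failed, to force recovery
 *   stop:          alive -> before connection stop; failed -> fence the node;
 *                  connection stopping for good -> before connection stop;
 *                  otherwise -> after the connection starts elsewhere
 *   demote:        after connection start only while the connection is
 *                  recovering (resting/unknown)
 *   other:         recurring monitors restart with the connection; a probe
 *                  against an unrecoverable connection triggers fencing
 */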

static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pe_resource_t *remote_rsc = NULL;
    pe_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}

/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in] data_set  Cluster working set
 */
void
pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;
        pe_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, RSC_START, 0),
                               NULL, pe_order_optional, data_set);

            continue;
        }

        // We are only interested in actions allocated to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pe_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pe_action_t *rsc_action = item->data;

                if ((rsc_action->node->details != action->node->details)
                    && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pe_order_optional, data_set);
                }
            }
        }

473 
474  /* The action occurs across a remote connection, so create
475  * ordering constraints that guarantee the action occurs while the node
476  * is active (after start, before stop ... things like that).
477  *
478  * This is somewhat brittle in that we need to make sure the results of
479  * this ordering are compatible with the result of get_router_node().
480  * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
481  * of this logic rather than create_graph_action().
482  */
483  if (remote->container) {
484  crm_trace("Container ordering for %s", action->uuid);
485  apply_container_ordering(action, data_set);
486 
487  } else {
488  crm_trace("Remote ordering for %s", action->uuid);
489  apply_remote_ordering(action, data_set);
490  }
491  }
492 }

/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */
bool
pcmk__is_failed_remote_node(pe_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

/*!
 * \internal
 * \brief Check whether a given resource corresponds to a given node as guest
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its containing
 *         resource, otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
           && (node->details->remote_rsc != NULL)
           && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get proper connection host that a remote action must be routed through
 *
 * A remote connection resource might be starting, stopping, or migrating in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper node that the remote action should be routed
 * through.
 *
 * \param[in] action  (Remote) action to check
 *
 * \return Connection host that action should be routed through if remote,
 *         otherwise NULL
 */
pe_node_t *
pcmk__connection_host_for_action(pe_action_t *action)
{
    pe_node_t *began_on = NULL;
    pe_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (began_on->details == ended_on->details) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end up
     * on.
     */

    if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                             "migrate_to", NULL) && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
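
/* Summary (added for readability, not in the original file) of the routing
 * decision above, for a connection moving from cluster node A to node B:
 *
 *   began_on == NULL                  -> route through B (connection starting)
 *   ended_on == NULL                  -> route through A (connection stopping)
 *   A == B                            -> route through A (not moving)
 *   stop/demote/cancel/migrate_*      -> route through A, unless this is a
 *                                        partial migration of the connection
 *   anything else (start, monitor...) -> route through B
 */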

/*!
 * \internal
 * \brief Replace remote connection's addr="#uname" with actual address
 *
 * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 * with its "addr" parameter set to "#uname", pull the actual value from the
 * parameters evaluated without a node (which was put there earlier in this
 * transition when the bundle was expanded).
 *
 * \param[in] rsc     Resource to check
 * \param[in] params  Resource parameters evaluated per node
 */
void
pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}
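
/* Minimal standalone sketch (not part of this file) of the substitution idea
 * above, using plain GLib with hypothetical keys and values:
 *
 *     GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
 *                                                free, free);
 *
 *     g_hash_table_insert(params, strdup("addr"), strdup("#uname"));
 *
 *     // Replace the "#uname" placeholder with a resolved address; inserting
 *     // a duplicate key replaces (and frees) the previous value because of
 *     // the destroy functions passed to g_hash_table_new_full()
 *     if (g_strcmp0(g_hash_table_lookup(params, "addr"), "#uname") == 0) {
 *         g_hash_table_insert(params, strdup("addr"),
 *                             strdup("node1.example.com"));
 *     }
 *
 *     g_hash_table_destroy(params);
 */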

/*!
 * \internal
 * \brief Add special bundle meta-attributes to XML
 *
 * If a given action will be executed on a guest node, add the special bundle
 * meta-attribute "container-attribute-target" and the guest's physical host
 * as XML attributes (using meta-attribute naming).
 *
 * \param[in] args_xml  XML to add attributes to
 * \param[in] action    Action to check
 */
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action)
{
    pe_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(action->node)) {
        return;
    }

    task = text2task(action->task);
    if ((task == action_notify) || (task == action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case stop_rsc:
        case stopped_rsc:
        case action_demote:
        case action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(action->node->details->remote_rsc->container);
            break;

        case start_rsc:
        case started_rsc:
        case monitor_rsc:
        case action_promote:
        case action_promoted:
            // "Up" actions take place on guest's next host
            host = action->node->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}
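
/* Illustrative note (not in the original file): hash2metafield() writes a
 * key/value pair to XML using meta-attribute naming, i.e. the key gets a
 * "CRM_meta_" prefix with invalid characters such as dashes mapped to
 * underscores. So the two calls above are expected to yield attributes along
 * the lines of CRM_meta_container_attribute_target and CRM_meta_physical_host
 * on args_xml.
 */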