pacemaker  2.1.4-dc6eb4362
Scalable High-Availability cluster resource manager
pcmk_sched_remote.c
/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4,
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pe_order_preserve with these convenience functions to exempt
 * internally generated constraints from the prohibition of user constraints
 * involving remote connection resources.
 *
 * The start ordering additionally uses pe_order_runnable_left so that the
 * specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
                        enum pe_ordering extra, pe_working_set_t *data_set)
{
    if ((lh_rsc != NULL) && (rh_action != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(lh_rsc, start_key(lh_rsc), NULL,
                           rh_action->rsc, NULL, rh_action,
                           pe_order_preserve|pe_order_runnable_left|extra,
                           data_set);
    }
}

static inline void
order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
                       enum pe_ordering extra, pe_working_set_t *data_set)
{
    if ((lh_action != NULL) && (rh_rsc != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(lh_action->rsc, NULL, lh_action,
                           rh_rsc, stop_key(rh_rsc), NULL,
                           pe_order_preserve|extra, data_set);
    }
}

static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
    pe_resource_t *remote_rsc = NULL;
    pe_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}

static int
is_recurring_action(pe_action_t *action)
{
    guint interval_ms;

    if (pcmk__guint_from_hash(action->meta,
                              XML_LRM_ATTR_INTERVAL_MS, 0,
                              &interval_ms) != pcmk_rc_ok) {
        return 0;
    }
    return (interval_ms > 0);
}
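/* For example, is_recurring_action() treats a monitor with interval="10s"
 * (interval_ms == 10000) as recurring, while interval-less operations such as
 * start and stop (interval_ms == 0) are not recurring.
 */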

/*!
 * \internal
 * \brief Order actions on a remote node relative to its connection actions
 */
static void
apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    pe_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    enum pe_ordering order_opts = pe_order_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(action->node != NULL);

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pe_order_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts, data_set);
            break;

        case stop_rsc:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(data_set, action->node,
                              "resources are active but connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then, data_set);

            } else {
                pe_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == monitor_rsc) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(data_set, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left, data_set);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            data_set);
                }
            }
            break;
    }
}
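/* Net effect, as a sketch (resource and node names are illustrative only):
 * for a healthy connection "remote1" hosting a resource "rsc1", the above
 * produces orderings such as
 *
 *   remote1_start_0 -> rsc1_start_0   (starts/promotes wait for the
 *                                      connection, via pe_order_runnable_left)
 *   rsc1_stop_0 -> remote1_stop_0     (stops complete before the connection
 *                                      goes down)
 *
 * whereas a failed, unrecoverable connection leads to fencing the remote node
 * instead of explicit stop orderings.
 */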

static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pe_resource_t *remote_rsc = NULL;
    pe_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}
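/* For guest nodes the same idea applies, but relative to the container as
 * well (names illustrative): a start of "rsc1" on a guest node whose
 * container is "guest1-docker" is ordered after both guest1-docker_start_0
 * (with pe_order_implies_then, so recovering the container also recovers
 * rsc1) and the connection start, while stops need no explicit ordering when
 * the container has failed, because stopping the container implies them.
 */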

/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in] data_set  Cluster working set
 */
void
pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;
        pe_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, RSC_START, 0),
                               NULL, pe_order_optional, data_set);

            continue;
        }

        // We are only interested in actions allocated to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pe_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pe_action_t *rsc_action = item->data;

                if ((rsc_action->node->details != action->node->details)
                    && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pe_order_optional, data_set);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
         * of this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action, data_set);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action, data_set);
        }
    }
}

/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */
bool
pcmk__is_failed_remote_node(pe_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

/*!
 * \internal
 * \brief Check whether a resource corresponds to a given guest node
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node whose connection runs inside
 *         \p rsc (that is, \p rsc is the guest's container), otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
            && (node->details->remote_rsc != NULL)
            && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get the connection host that an action must be routed through
 *
 * A remote connection resource might be starting, stopping, or moving in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper cluster node that the action should be routed
 * through.
 *
 * \param[in] action  (Remote) action to check
 *
 * \return Connection host that \p action should be routed through if remote,
 *         otherwise NULL
 */
pe_node_t *
pcmk__connection_host_for_action(pe_action_t *action)
{
    pe_node_t *began_on = NULL;
    pe_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (began_on->details == ended_on->details) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end
     * up on.
     */

    if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                             "migrate_to", NULL) && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
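/* Routing sketch (node names illustrative): if a connection is moving from
 * cluster node "node1" to "node2" in this transition, a stop or demote of a
 * resource behind the connection is routed through "node1" (where the
 * connection still is), while a start, promote, or monitor is routed through
 * "node2" (where the connection will be once started).
 */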

/*!
 * \internal
 * \brief Replace remote connection's addr="#uname" with actual address
 *
 * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 * with its "addr" parameter set to "#uname", pull the actual value from the
 * parameters evaluated without a node.
 *
 * \param[in]     rsc       Remote connection resource
 * \param[in,out] params    Resource parameters evaluated per node
 * \param[in]     data_set  Cluster working set
 */
void
pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
                             pe_working_set_t *data_set)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, data_set);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}
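/* For example (hypothetical values): if the per-node evaluated parameters of
 * a guest node's connection contain addr="#uname" while the node-independent
 * parameters contain addr="192.168.122.10", the latter value is copied into
 * the evaluated table so the transition graph carries a usable address.
 */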

/*!
 * \internal
 * \brief Add special bundle meta-attributes to XML
 *
 * If a given action will be executed on a guest node, add the special bundle
 * meta-attribute "container-attribute-target" and environment variable
 * "physical_host" as XML attributes (using meta-attribute naming).
 *
 * \param[in] args_xml  XML to add attributes to
 * \param[in] action    Action to check
 */
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action)
{
    pe_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(action->node)) {
        return;
    }

    task = text2task(action->task);
    if ((task == action_notify) || (task == action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case stop_rsc:
        case stopped_rsc:
        case action_demote:
        case action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(action->node->details->remote_rsc->container);
            break;

        case start_rsc:
        case started_rsc:
        case monitor_rsc:
        case action_promote:
        case action_promoted:
            // "Up" actions take place on guest's next host
            host = action->node->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}