pcmk_sched_remote.c (pacemaker 2.1.6-802a72226b)
/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

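// Possible states of a remote node's connection resource (see state2text())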
enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pe_order_preserve with these convenience functions to exempt
 * internally generated constraints from the prohibition of user constraints
 * involving remote connection resources.
 *
 * The start ordering additionally uses pe_order_runnable_left so that the
 * specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pe_resource_t *first_rsc, pe_action_t *then_action,
                        uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_rsc != NULL) && (then_action != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                           then_action->rsc, NULL, then_action,
                           pe_order_preserve|pe_order_runnable_left|extra,
                           data_set);
    }
}

static inline void
order_action_then_stop(pe_action_t *first_action, pe_resource_t *then_rsc,
                       uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_action != NULL) && (then_rsc != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_action->rsc, NULL, first_action,
                           then_rsc, stop_key(then_rsc), NULL,
                           pe_order_preserve|extra, data_set);
    }
}

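/*!
 * \internal
 * \brief Get the state of a remote node's connection resource
 *
 * \param[in] node  Remote node to check
 *
 * \return State of the given remote node's connection
 */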
static enum remote_connection_state
get_remote_node_state(const pe_node_t *node)
{
    const pe_resource_t *remote_rsc = NULL;
    const pe_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}

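/*!
 * \internal
 * \brief Order actions on a Pacemaker Remote node relative to the actions for
 *        its connection resource
 *
 * \param[in,out] action  Action scheduled on a Pacemaker Remote node
 */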
static void
apply_remote_ordering(pe_action_t *action)
{
    pe_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pe_order_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pe_order_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts,
                                    remote_rsc->cluster);
            break;

        case stop_rsc:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first,
                                       remote_rsc->cluster);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have
                 * a valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->cluster, action->node,
                              "resources are active but "
                              "connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first,
                                       remote_rsc->cluster);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        remote_rsc->cluster);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pe_order_none,
                                        remote_rsc->cluster);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then,
                                        remote_rsc->cluster);

            } else {
                pe_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == monitor_rsc) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to
                     * find out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->cluster, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left,
                                           remote_rsc->cluster);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            remote_rsc->cluster);
                }
            }
            break;
    }
}

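/*!
 * \internal
 * \brief Order actions on a guest node relative to its container and
 *        connection resources
 *
 * \param[in,out] action    Action scheduled on a guest node
 * \param[in,out] data_set  Cluster working set
 */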
static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pe_resource_t *remote_rsc = NULL;
    pe_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is
                 * brought down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}

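/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in,out] data_set  Cluster working set
 */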
void
pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;
        pe_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, RSC_START, 0),
                               NULL, pe_order_optional, data_set);

            continue;
        }

        // We are only interested in actions allocated to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pe_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pe_action_t *rsc_action = item->data;

                if ((rsc_action->node->details != action->node->details)
                    && pcmk__str_eq(rsc_action->task, RSC_STOP,
                                    pcmk__str_casei)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pe_order_optional, data_set);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
         * of this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action, data_set);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}

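/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */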
bool
pcmk__is_failed_remote_node(const pe_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

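/*!
 * \internal
 * \brief Check whether a given resource corresponds to a given node as guest
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its containing
 *         resource, otherwise false
 */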
bool
pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
           && (node->details->remote_rsc != NULL)
           && (node->details->remote_rsc->container == rsc);
}

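/*!
 * \internal
 * \brief Get the host that a remote action must be routed through
 *
 * The remote connection resource for the action's node might be starting,
 * stopping, or moving in the same transition, so the action may need to be
 * routed through the connection's current host or its next one.
 *
 * \param[in] action  (Remote) action to check
 *
 * \return Connection host that the action should be routed through if remote,
 *         otherwise NULL
 */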
pe_node_t *
pcmk__connection_host_for_action(const pe_action_t *action)
{
    pe_node_t *began_on = NULL;
    pe_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (began_on->details == ended_on->details) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end
     * up on.
     */

    if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop).
     * In this case, we know these actions have to be routed through the
     * initial cluster node the connection resource lived on before the move
     * takes place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                             "migrate_to", NULL) && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}

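/*!
 * \internal
 * \brief Replace a remote connection's "addr" parameter value of "#uname"
 *        with the connection host's actual address
 *
 * \param[in,out] rsc     Remote connection resource
 * \param[in,out] params  Resource parameters evaluated per node
 */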
void
pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}

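/*!
 * \brief Add special bundle meta-attributes to XML
 *
 * If the given action is for a guest node, add the container attribute target
 * and the guest's physical host as meta-attributes to the given XML.
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action associated with XML
 */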
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action)
{
    const pe_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(action->node)) {
        return;
    }

    task = text2task(action->task);
    if ((task == action_notify) || (task == action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case stop_rsc:
        case stopped_rsc:
        case action_demote:
        case action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(action->node->details->remote_rsc->container);
            break;

        case start_rsc:
        case started_rsc:
        case monitor_rsc:
        case action_promote:
        case action_promoted:
            // "Up" actions take place on guest's next host
            host = action->node->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}