pcmk_sched_remote.c

/*
 * Copyright 2004-2024 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4,
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pcmk__ar_guest_allowed with these convenience functions to
 * exempt internally generated constraints from the prohibition of user
 * constraints involving remote connection resources.
 *
 * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so
 * that the specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
                        uint32_t extra)
{
    if ((first_rsc != NULL) && (then_action != NULL)) {
        pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                           then_action->rsc, NULL, then_action,
                           pcmk__ar_guest_allowed
                           |pcmk__ar_unrunnable_first_blocks
                           |extra,
                           first_rsc->cluster);
    }
}

static inline void
order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
                       uint32_t extra)
{
    if ((first_action != NULL) && (then_rsc != NULL)) {
        pcmk__new_ordering(first_action->rsc, NULL, first_action,
                           then_rsc, stop_key(then_rsc), NULL,
                           pcmk__ar_guest_allowed|extra, then_rsc->cluster);
    }
}

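/* Illustration (names are placeholders, not part of the scheduler logic):
 * a call such as
 *
 *   order_start_then_action(remote_rsc, action, pcmk__ar_none);
 *
 * expands to roughly
 *
 *   pcmk__new_ordering(remote_rsc, start_key(remote_rsc), NULL,
 *                      action->rsc, NULL, action,
 *                      pcmk__ar_guest_allowed
 *                      |pcmk__ar_unrunnable_first_blocks,
 *                      remote_rsc->cluster);
 *
 * so the dependent action waits for the connection start and becomes
 * unrunnable whenever that start is unrunnable.
 */
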
static enum remote_connection_state
get_remote_node_state(const pcmk_node_t *node)
{
    const pcmk_resource_t *remote_rsc = NULL;
    const pcmk_node_t *cluster_node = NULL;

    pcmk__assert(node != NULL);

    remote_rsc = node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    cluster_node = pcmk__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == pcmk_role_stopped)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == pcmk_role_stopped)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait
         * until after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}

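/* Summary of the decision logic above:
 *
 *   Connection will not be running anywhere:
 *     - current host unclean            -> remote_state_failed  (fence)
 *     - cleanly stopped                 -> remote_state_stopped
 *     - failed, reconnect interval
 *       still pending after fencing     -> remote_state_unknown
 *     - failed otherwise                -> remote_state_failed  (fence)
 *   Recoverable but not running yet     -> remote_state_unknown
 *   Current host dead or unclean        -> remote_state_resting
 *   Mid partial migration               -> remote_state_resting
 *   Otherwise                           -> remote_state_alive
 */
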
/*!
 * \internal
 * \brief Order an action on a remote node relative to its connection actions
 *
 * \param[in,out] action  Action scheduled on a Pacemaker Remote node
 */
static void
apply_remote_ordering(pcmk_action_t *action)
{
    pcmk_resource_t *remote_rsc = NULL;
    enum action_tasks task = pcmk_parse_action(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pcmk__ar_none;

    if (action->rsc == NULL) {
        return;
    }

    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            order_opts = pcmk__ar_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pcmk__set_relation_flags(order_opts,
                                         pcmk__ar_first_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts);
            break;

        case pcmk_action_stop:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have
                 * a valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->cluster, action->node,
                              "resources are active but "
                              "connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == pcmk_role_stopped) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;

        case pcmk_action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pcmk__ar_first_implies_then);

            } else {
                pcmk_node_t *cluster_node = pcmk__current_node(remote_rsc);

                if ((task == pcmk_action_monitor)
                    && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->cluster, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pcmk__ar_unrunnable_first_blocks);

                } else {
                    order_start_then_action(remote_rsc, action, pcmk__ar_none);
                }
            }
            break;
    }
}

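/* Quick reference for the switch above:
 *
 *   start/promote : after connection start; a failed connection additionally
 *                   sets pcmk__ar_first_implies_then, forcing recovery
 *   stop          : before connection stop when alive or permanently stopping;
 *                   fencing when failed; after connection start when moving
 *   demote        : after connection start only when resting/unknown
 *   recurring ops : after connection start with pcmk__ar_first_implies_then,
 *                   so monitors restart whenever the connection does
 */
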
static void
apply_container_ordering(pcmk_action_t *action)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pcmk_resource_t *remote_rsc = NULL;
    pcmk_resource_t *container = NULL;
    enum action_tasks task = pcmk_parse_action(action->task);

    pcmk__assert(action->rsc != NULL);
    pcmk__assert(action->node != NULL);
    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    container = remote_rsc->container;
    pcmk__assert(container != NULL);

    if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
        pe_fence_node(action->rsc->cluster, action->node, "container failed",
                      FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action,
                                    pcmk__ar_first_implies_then);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pcmk__ar_none);
            break;

        case pcmk_action_stop:
        case pcmk_action_demote:
            if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is
                 * brought down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pcmk__ar_none);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != pcmk_action_unspecified) {
                    order_start_then_action(remote_rsc, action,
                                            pcmk__ar_first_implies_then);
                }
            } else {
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;
    }
}

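/* Example (hypothetical names): for an action rsc1_start scheduled on a guest
 * node whose container resource is "vm1" and whose connection resource is
 * "vm1_remote", the start/promote branch above yields approximately:
 *
 *   order_start_then_action(vm1, rsc1_start, pcmk__ar_first_implies_then);
 *   order_start_then_action(vm1_remote, rsc1_start, pcmk__ar_none);
 *
 * so rsc1 waits for both the container and the connection, and recovering the
 * container forces rsc1 to be recovered as well.
 */
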
/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
        pcmk_action_t *action = iter->data;
        pcmk_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
                         pcmk__str_none)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id,
                                            PCMK_ACTION_START, 0),
                               NULL, pcmk__ar_ordered, scheduler);

            continue;
        }

        // We are only interested in actions assigned to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pcmk__is_pacemaker_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pcmk_action_t *rsc_action = item->data;

                if (!pcmk__same_node(rsc_action->node, action->node)
                    && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
                                    pcmk__str_none)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pcmk__ar_ordered, scheduler);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add PCMK__XA_ROUTER_NODE as part of
         * this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}

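/* Example (hypothetical): if "remote1" is a remote connection resource with
 * both a clear_failcount and a start scheduled in this transition, the first
 * special case above orders remote1's clear_failcount before remote1_start_0
 * (pcmk__ar_ordered), so the start is attempted with a clean failcount.
 */
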
/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */
bool
pcmk__is_failed_remote_node(const pcmk_node_t *node)
{
    return pcmk__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

/*!
 * \internal
 * \brief Check whether a given resource corresponds to a given node as guest
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its containing
 *         resource, otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
                               const pcmk_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
           && (node->details->remote_rsc != NULL)
           && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get the Pacemaker Remote connection host for an action
 *
 * A remote connection resource might be starting, stopping, or moving in the
 * same transition that actions need to be performed on the resource's
 * Pacemaker Remote node. Determine the cluster node that the action must be
 * routed through.
 *
 * \param[in] action  Action to check
 *
 * \return Connection host that the action must be routed through if the
 *         action is on a Pacemaker Remote node, otherwise NULL
 */
pcmk_node_t *
pcmk__connection_host_for_action(const pcmk_action_t *action)
{
    pcmk_node_t *began_on = NULL;
    pcmk_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
        || !pcmk__is_pacemaker_remote_node(action->node)) {
        return NULL;
    }

    pcmk__assert(action->node->details->remote_rsc != NULL);

    began_on = pcmk__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (pcmk__same_node(began_on, ended_on)) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end
     * up on.
     */

    if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop).
     * In this case, we know these actions have to be routed through the
     * initial cluster node the connection resource lived on before the move
     * takes place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
                             PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_MIGRATE_TO, NULL)
        && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}

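/* Routing summary (hypothetical cluster nodes node1/node2):
 *
 *   began_on  ended_on  task                        router node
 *   --------  --------  --------------------------  -----------
 *   NULL      node2     any                         node2 (starting)
 *   node1     NULL      any                         node1 (stopping)
 *   node1     node1     any                         node1 (not moving)
 *   node1     node2     stop/demote/cancel/migrate  node1 (unless partial
 *                                                   migration)
 *   node1     node2     anything else               node2
 */
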
/*!
 * \internal
 * \brief Replace a remote connection's "#uname" address with the actual value
 *
 * If a given resource is a remote connection resource with its
 * PCMK_REMOTE_RA_ADDR parameter set to "#uname", pull the actual value from
 * the parameters evaluated without a node.
 *
 * \param[in,out] rsc     Remote connection resource
 * \param[in,out] params  Resource parameters evaluated per node
 */
void
pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
{
    const char *remote_addr = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);

        remote_addr = g_hash_table_lookup(base, PCMK_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, remote_addr);
        }
    }
}

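/* Example (hypothetical values): if params maps PCMK_REMOTE_RA_ADDR to
 * "#uname" and the node-independent parameter table maps it to
 * "guest1.example.com", the function above replaces the "#uname" entry in
 * params with "guest1.example.com".
 */
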
/*!
 * \internal
 * \brief Add special guest node meta-attributes to XML
 *
 * If a given action will be executed on a guest node, add the following as
 * XML attributes (using meta-attribute naming):
 * * PCMK__META_CONTAINER_ATTRIBUTE_TARGET: the value of the
 *   PCMK_META_CONTAINER_ATTRIBUTE_TARGET meta-attribute
 * * PCMK__META_PHYSICAL_HOST: the name of the node that will actually run
 *   the action
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action to check
 */
void
pcmk__add_guest_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
    const pcmk_node_t *guest = action->node;
    const pcmk_node_t *host = NULL;
    enum action_tasks task;

    if (!pcmk__is_guest_or_bundle_node(guest)) {
        return;
    }

    task = pcmk_parse_action(action->task);
    if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
        task = pcmk_parse_action(g_hash_table_lookup(action->meta,
                                                     "notify_operation"));
    }

    switch (task) {
        case pcmk_action_stop:
        case pcmk_action_stopped:
        case pcmk_action_demote:
        case pcmk_action_demoted:
            // "Down" actions take place on guest's current host
            host = pcmk__current_node(guest->details->remote_rsc->container);
            break;

        case pcmk_action_start:
        case pcmk_action_started:
        case pcmk_action_monitor:
        case pcmk_action_promote:
        case pcmk_action_promoted:
            // "Up" actions take place on guest's next host
            host = guest->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        gpointer target =
            g_hash_table_lookup(action->rsc->meta,
                                PCMK_META_CONTAINER_ATTRIBUTE_TARGET);

        hash2metafield((gpointer) PCMK_META_CONTAINER_ATTRIBUTE_TARGET,
                       target,
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__META_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}
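
/* Example (hypothetical): for a stop of a resource on a guest node whose
 * container currently runs on cluster node "node1", the attributes added to
 * args_xml would look roughly like:
 *
 *   CRM_meta_container_attribute_target="..."  (if the meta-attribute is set)
 *   CRM_meta_physical_host="node1"
 *
 * hash2metafield() applies the CRM_meta_ prefix and attribute-name mangling.
 */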