pcmk_sched_remote.c (Pacemaker 2.1.7-0f7f88312f, Scalable High-Availability cluster resource manager)

/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h> /* assumed: this include line was dropped by the listing */

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4,
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pcmk__ar_guest_allowed with these convenience functions to
 * exempt internally generated constraints from the prohibition of user
 * constraints involving remote connection resources.
 *
 * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
 * the specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
                        uint32_t extra)
{
    if ((first_rsc != NULL) && (then_action != NULL)) {
        pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                           then_action->rsc, NULL, then_action,
                           pcmk__ar_guest_allowed
                           |pcmk__ar_unrunnable_first_blocks
                           |extra,
                           first_rsc->cluster);
    }
}

static inline void
order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
                       uint32_t extra)
{
    if ((first_action != NULL) && (then_rsc != NULL)) {
        pcmk__new_ordering(first_action->rsc, NULL, first_action,
                           then_rsc, stop_key(then_rsc), NULL,
                           pcmk__ar_guest_allowed|extra, then_rsc->cluster);
    }
}

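/* Illustrative note (not part of the original source): apply_remote_ordering()
 * later in this file uses these helpers roughly as
 *
 *     order_start_then_action(remote_rsc, action, order_opts);
 *     order_action_then_stop(action, remote_rsc, flags);
 *
 * so that an action on a Pacemaker Remote node runs only after its connection
 * resource has started, and completes before the connection is stopped.
 */
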
static enum remote_connection_state
get_remote_node_state(const pcmk_node_t *node)
{
    const pcmk_resource_t *remote_rsc = NULL;
    const pcmk_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == pcmk_role_stopped)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == pcmk_role_stopped)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}

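/* Summary (derived from the logic above, not in the original source):
 * get_remote_node_state() maps a remote node's connection resource to one of
 * five states:
 *
 *   remote_state_stopped - connection cleanly stopped and not scheduled to run
 *   remote_state_failed  - connection failed and unrecoverable without fencing
 *   remote_state_unknown - connection failed but possibly recoverable (for
 *                          example, waiting out the reconnect interval), or
 *                          recoverable but not currently running anywhere
 *   remote_state_resting - connection's host is unclean/offline, or a
 *                          connection migration is still in flight
 *   remote_state_alive   - connection is usable for routing actions
 */
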
/*!
 * \internal
 * \brief Order an action relative to a Pacemaker Remote node's connection
 *
 * \param[in,out] action  Action scheduled on a Pacemaker Remote node
 */
static void
apply_remote_ordering(pcmk_action_t *action)
{
    pcmk_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pcmk__ar_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            order_opts = pcmk__ar_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pcmk__ar_first_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts);
            break;

        case pcmk_action_stop:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->cluster, action->node,
                              "resources are active but "
                              "connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == pcmk_role_stopped) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;

        case pcmk_action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pcmk__ar_first_implies_then);

            } else {
                pcmk_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->cluster, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pcmk__ar_unrunnable_first_blocks);

                } else {
                    order_start_then_action(remote_rsc, action, pcmk__ar_none);
                }
            }
            break;
    }
}

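/* Summary (derived from apply_remote_ordering() above, not in the original
 * source): the net effect per task is roughly
 *
 *   start/promote - ordered after the connection start (with recovery forced
 *                   when the connection has failed)
 *   stop          - ordered before the connection stop when the connection is
 *                   alive or being stopped for good; the node is fenced when
 *                   the connection is unrecoverable; otherwise the stop waits
 *                   for the connection to start elsewhere
 *   demote        - ordered after the connection start only when the
 *                   connection is not being torn down
 *   anything else - ordered after the connection start; recurring monitors
 *                   are forced to restart if the connection is re-established
 */
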
static void
apply_container_ordering(pcmk_action_t *action)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pcmk_resource_t *remote_rsc = NULL;
    pcmk_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
        pe_fence_node(action->rsc->cluster, action->node, "container failed",
                      FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action,
                                    pcmk__ar_first_implies_then);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pcmk__ar_none);
            break;

        case pcmk_action_stop:
        case pcmk_action_demote:
            if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pcmk__ar_none);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != pcmk_action_unspecified) {
                    order_start_then_action(remote_rsc, action,
                                            pcmk__ar_first_implies_then);
                }
            } else {
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;
    }
}

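/* Note (derived from apply_container_ordering() above, not in the original
 * source): for guest nodes, ordering is applied relative to both the
 * connection resource and its container. A failed container triggers fencing
 * of the guest node, which implies any stop and demote actions for resources
 * running on it, so those need no explicit ordering in that case.
 */
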
/*!
 * \internal
 * \brief Order relevant resource actions relative to remote connection actions
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
        pcmk_action_t *action = iter->data;
        pcmk_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
                         pcmk__str_none)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
                                            0),
                               NULL, pcmk__ar_ordered, scheduler);

            continue;
        }

        // We are only interested in actions assigned to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pcmk_action_t *rsc_action = item->data;

                if (!pe__same_node(rsc_action->node, action->node)
                    && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
                                    pcmk__str_none)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pcmk__ar_ordered, scheduler);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
         * of this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}
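
/* Illustrative example (not part of the original source): given a remote
 * connection resource "remote1" whose failcount is being cleared in the same
 * transition that starts it, the special case in the loop above adds an
 * ordering equivalent to
 *
 *     remote1_clear_failcount_0 -> remote1_start_0
 *
 * so that the failcount is cleared before the connection start is attempted.
 * The resource name "remote1" is hypothetical.
 */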

/*!
 * \internal
 * \brief Check whether a node is a failed Pacemaker Remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, otherwise false
 */
bool
pcmk__is_failed_remote_node(const pcmk_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

/*!
 * \internal
 * \brief Check whether a resource creates a given guest node
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its container,
 *         otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
                               const pcmk_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
            && (node->details->remote_rsc != NULL)
            && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get the connection host that a remote action must be routed through
 *
 * A remote connection resource may be starting, stopping, or moving in the
 * same transition in which an action needs to be executed on its Pacemaker
 * Remote node. Determine the cluster node that the action should be routed
 * through.
 *
 * \param[in] action  (Potentially remote) action to route
 *
 * \return Connection host that \p action should be routed through if remote,
 *         otherwise NULL
 */
pcmk_node_t *
pcmk__connection_host_for_action(const pcmk_action_t *action)
{
    pcmk_node_t *began_on = NULL;
    pcmk_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (pe__same_node(began_on, ended_on)) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end up
     * on.
     */

    if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
                             PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_MIGRATE_TO, NULL)
        && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
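
/* Illustrative example (not part of the original source): if the connection
 * resource for remote node "remote1" is moving from cluster node "node1" to
 * "node2" in this transition, pcmk__connection_host_for_action() routes stop,
 * demote, cancel, and migration actions for resources on remote1 through
 * node1 (where the connection is still up), and everything else (start,
 * promote, monitor, ...) through node2, after the connection starts there.
 * A partial migration of the connection itself routes everything through the
 * migration target instead. The names "remote1", "node1", and "node2" are
 * hypothetical.
 */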

/*!
 * \internal
 * \brief Replace a remote connection's addr="#uname" with the actual address
 *
 * If a remote connection resource's "addr" parameter is set to the special
 * value "#uname", substitute the actual address, taken from the resource's
 * parameters evaluated without a node.
 *
 * \param[in,out] rsc     Remote connection resource
 * \param[in,out] params  Resource parameters evaluated per node
 */
void
pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}

/*!
 * \brief Add special bundle meta-attributes to XML
 *
 * If the given action is for a resource running on a guest node, add the
 * resource's container-attribute-target meta-attribute and the guest's
 * physical host to the given XML as meta-attribute name/value pairs.
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action associated with \p args_xml
 */
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
    const pcmk_node_t *guest = action->node;
    const pcmk_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(guest)) {
        return;
    }

    task = text2task(action->task);
    if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case pcmk_action_stop:
        case pcmk_action_stopped:
        case pcmk_action_demote:
        case pcmk_action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(guest->details->remote_rsc->container);
            break;

        case pcmk_action_start:
        case pcmk_action_started:
        case pcmk_action_monitor:
        case pcmk_action_promote:
        case pcmk_action_promoted:
            // "Up" actions take place on guest's next host
            host = guest->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}