/*
 * Copyright 2004-2023 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pe_order_preserve with these convenience functions to exempt
 * internally generated constraints from the prohibition of user constraints
 * involving remote connection resources.
 *
 * The start ordering additionally uses pe_order_runnable_left so that the
 * specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pe_resource_t *first_rsc, pe_action_t *then_action,
                        uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_rsc != NULL) && (then_action != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                           then_action->rsc, NULL, then_action,
                           pe_order_preserve|pe_order_runnable_left|extra,
                           data_set);
    }
}

static inline void
order_action_then_stop(pe_action_t *first_action, pe_resource_t *then_rsc,
                       uint32_t extra, pe_working_set_t *data_set)
{
    if ((first_action != NULL) && (then_rsc != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(first_action->rsc, NULL, first_action,
                           then_rsc, stop_key(then_rsc), NULL,
                           pe_order_preserve|extra, data_set);
    }
}
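
/* A minimal usage sketch for the two helpers above (the variable names here
 * are hypothetical, not part of this file): order a recurring monitor after
 * its node's connection start, forcing the monitor to be rescheduled whenever
 * the connection is restarted, and order a resource stop before the
 * connection itself stops:
 *
 *     order_start_then_action(remote_rsc, monitor_action,
 *                             pe_order_implies_then, data_set);
 *     order_action_then_stop(stop_action, remote_rsc,
 *                            pe_order_implies_first, data_set);
 *
 * Both calls implicitly add pe_order_preserve (and the first additionally
 * adds pe_order_runnable_left), as described in the comment above.
 */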

static enum remote_connection_state
get_remote_node_state(const pe_node_t *node)
{
    const pe_resource_t *remote_rsc = NULL;
    const pe_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}
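
/* A compact summary of the states returned above (derived from the decision
 * tree in get_remote_node_state()):
 *
 *   failed  : connection can't run anywhere and its host is unclean, or it
 *             failed with no recovery pending -> fencing is required
 *   stopped : connection is cleanly stopped and staying down
 *   unknown : recoverability can't be determined yet (reconnect interval
 *             still pending, or the connection has no current node)
 *   resting : connection is mid-recovery or mid-migration; wait for it
 *   alive   : connection is usable and staying up
 */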

/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 *
 * \param[in,out] action  An action scheduled on a Pacemaker Remote node
 */
static void
apply_remote_ordering(pe_action_t *action)
{
    pe_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pe_order_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pe_order_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts,
                                    remote_rsc->cluster);
            break;

        case stop_rsc:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first,
                                       remote_rsc->cluster);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->cluster, action->node,
                              "resources are active but connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first,
                                       remote_rsc->cluster);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        remote_rsc->cluster);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pe_order_none,
                                        remote_rsc->cluster);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then,
                                        remote_rsc->cluster);

            } else {
                pe_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == monitor_rsc) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->cluster, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left,
                                           remote_rsc->cluster);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            remote_rsc->cluster);
                }
            }
            break;
    }
}
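
/* A compact summary of the orderings created above, per task type (derived
 * from the switch in apply_remote_ordering()):
 *
 *   start/promote : after connection start (made required if connection failed)
 *   stop          : before connection stop while alive; fence if failed;
 *                   before connection stop if the connection stays down;
 *                   otherwise after the connection restarts elsewhere
 *   demote        : after connection start only while resting/unknown
 *   recurring ops : after connection start, restarted on reconnection
 *   other ops     : fence if the state is unknown and unrecoverable;
 *                   otherwise ordered around connection start/stop
 */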

static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pe_resource_t *remote_rsc = NULL;
    pe_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}

/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in,out] data_set  Cluster working set
 */
void
pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;
        pe_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, RSC_START, 0),
                               NULL, pe_order_optional, data_set);

            continue;
        }

        // We are only interested in actions allocated to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pe_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pe_action_t *rsc_action = item->data;

                if ((rsc_action->node->details != action->node->details)
                    && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pe_order_optional, data_set);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
         * of this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action, data_set);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}

/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */
bool
pcmk__is_failed_remote_node(const pe_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}

/*!
 * \internal
 * \brief Check whether a given resource corresponds to a given node as guest
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its containing
 *         resource, otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
           && (node->details->remote_rsc != NULL)
           && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get proper connection host that a remote action must be routed through
 *
 * A remote connection resource might be starting, stopping, or migrating in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper node that the remote action should be routed
 * through.
 *
 * \param[in] action  (Potentially remote) action to route
 *
 * \return Connection host that action should be routed through if remote,
 *         otherwise NULL
 */
pe_node_t *
pcmk__connection_host_for_action(const pe_action_t *action)
{
    pe_node_t *began_on = NULL;
    pe_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (began_on->details == ended_on->details) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end
     * up on.
     */

    if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                             "migrate_to", NULL) && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
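
/* A compact summary of the routing decisions above:
 *
 *   connection not yet started (began_on == NULL) -> next node (ended_on)
 *   connection stopping (ended_on == NULL)        -> current node (began_on)
 *   connection not moving                         -> current node
 *   moving, "down" actions (cancel, stop, demote,
 *     migrate_to, migrate_from)                   -> current node, unless a
 *                                                    partial migration makes
 *                                                    the target the router
 *   moving, everything else                       -> next node
 */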

/*!
 * \internal
 * \brief Replace remote connection's addr="#uname" with actual address
 *
 * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 * with its "addr" parameter set to "#uname", pull the actual value from the
 * parameters evaluated without a node (which was put there earlier in
 * pcmk__create_graph() when the bundle's expand() method was called).
 *
 * \param[in,out] rsc     Resource to check
 * \param[in,out] params  Resource parameters evaluated per node
 */
void
pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}
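
/* For example (hypothetical values): if the node-evaluated parameters contain
 * addr="#uname" while the node-less parameter set resolved addr to
 * "192.0.2.10", the entry in params is overwritten so that the transition
 * graph sees addr="192.0.2.10" rather than the placeholder.
 */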

/*!
 * \brief Add special bundle meta-attributes to XML
 *
 * If a given action will be executed on a guest node (including a bundle),
 * add the special bundle meta-attribute "container-attribute-target" and
 * environment variable "physical_host" as XML attributes (using meta-attribute
 * naming).
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action to check
 */
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action)
{
    const pe_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(action->node)) {
        return;
    }

    task = text2task(action->task);
    if ((task == action_notify) || (task == action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case stop_rsc:
        case stopped_rsc:
        case action_demote:
        case action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(action->node->details->remote_rsc->container);
            break;

        case start_rsc:
        case started_rsc:
        case monitor_rsc:
        case action_promote:
        case action_promoted:
            // "Up" actions take place on guest's next host
            host = action->node->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}
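
/* For example (hypothetical names): stopping a resource on guest node
 * "guest1" whose container currently runs on cluster node "node2" would add
 * meta-attribute-style XML attributes for "container-attribute-target" (from
 * the resource's meta-attributes) and "physical_host" with the value "node2"
 * to args_xml.
 */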