root/lib/pacemaker/pcmk_sched_remote.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. state2text
  2. order_start_then_action
  3. order_action_then_stop
  4. get_remote_node_state
  5. apply_remote_ordering
  6. apply_container_ordering
  7. pcmk__order_remote_connection_actions
  8. pcmk__is_failed_remote_node
  9. pcmk__rsc_corresponds_to_guest
  10. pcmk__connection_host_for_action
  11. pcmk__substitute_remote_addr
  12. pcmk__add_bundle_meta_to_xml

   1 /*
   2  * Copyright 2004-2023 the Pacemaker project contributors
   3  *
   4  * The version control history for this file may have further details.
   5  *
   6  * This source code is licensed under the GNU General Public License version 2
   7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
   8  */
   9 
  10 #include <crm_internal.h>
  11 
  12 #include <sys/param.h>
  13 
  14 #include <crm/crm.h>
  15 #include <crm/cib.h>
  16 #include <crm/msg_xml.h>
  17 #include <crm/common/xml.h>
  18 #include <crm/common/xml_internal.h>
  19 
  20 #include <glib.h>
  21 
  22 #include <crm/pengine/status.h>
  23 #include <pacemaker-internal.h>
  24 #include "libpacemaker_private.h"
  25 
// Summary of a remote node's connection state, as derived by
// get_remote_node_state() and used when ordering actions on that node
enum remote_connection_state {
    remote_state_unknown = 0,   // State cannot be determined yet (e.g. waiting on reconnect interval)
    remote_state_alive = 1,     // Connection is (or will remain) active
    remote_state_resting = 2,   // Connection is down but expected to be recovered first
    remote_state_failed = 3,    // Connection is failed and unrecoverable (fencing may be needed)
    remote_state_stopped = 4    // Connection is cleanly stopped
};
  33 
  34 static const char *
  35 state2text(enum remote_connection_state state)
     /* [previous][next][first][last][top][bottom][index][help] */
  36 {
  37     switch (state) {
  38         case remote_state_unknown:
  39             return "unknown";
  40         case remote_state_alive:
  41             return "alive";
  42         case remote_state_resting:
  43             return "resting";
  44         case remote_state_failed:
  45             return "failed";
  46         case remote_state_stopped:
  47             return "stopped";
  48     }
  49 
  50     return "impossible";
  51 }
  52 
  53 /* We always use pcmk__ar_guest_allowed with these convenience functions to
  54  * exempt internally generated constraints from the prohibition of user
  55  * constraints involving remote connection resources.
  56  *
  57  * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
  58  * the specified action is not runnable if the start is not runnable.
  59  */
  60 
  61 static inline void
  62 order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
     /* [previous][next][first][last][top][bottom][index][help] */
  63                         uint32_t extra)
  64 {
  65     if ((first_rsc != NULL) && (then_action != NULL)) {
  66         pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
  67                            then_action->rsc, NULL, then_action,
  68                            pcmk__ar_guest_allowed
  69                            |pcmk__ar_unrunnable_first_blocks
  70                            |extra,
  71                            first_rsc->cluster);
  72     }
  73 }
  74 
  75 static inline void
  76 order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
  77                        uint32_t extra)
  78 {
  79     if ((first_action != NULL) && (then_rsc != NULL)) {
  80         pcmk__new_ordering(first_action->rsc, NULL, first_action,
  81                            then_rsc, stop_key(then_rsc), NULL,
  82                            pcmk__ar_guest_allowed|extra, then_rsc->cluster);
  83     }
  84 }
  85 
  86 static enum remote_connection_state
  87 get_remote_node_state(const pcmk_node_t *node)
     /* [previous][next][first][last][top][bottom][index][help] */
  88 {
  89     const pcmk_resource_t *remote_rsc = NULL;
  90     const pcmk_node_t *cluster_node = NULL;
  91 
  92     CRM_ASSERT(node != NULL);
  93 
  94     remote_rsc = node->details->remote_rsc;
  95     CRM_ASSERT(remote_rsc != NULL);
  96 
  97     cluster_node = pe__current_node(remote_rsc);
  98 
  99     /* If the cluster node the remote connection resource resides on
 100      * is unclean or went offline, we can't process any operations
 101      * on that remote node until after it starts elsewhere.
 102      */
 103     if ((remote_rsc->next_role == pcmk_role_stopped)
 104         || (remote_rsc->allocated_to == NULL)) {
 105 
 106         // The connection resource is not going to run anywhere
 107 
 108         if ((cluster_node != NULL) && cluster_node->details->unclean) {
 109             /* The remote connection is failed because its resource is on a
 110              * failed node and can't be recovered elsewhere, so we must fence.
 111              */
 112             return remote_state_failed;
 113         }
 114 
 115         if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
 116             /* Connection resource is cleanly stopped */
 117             return remote_state_stopped;
 118         }
 119 
 120         /* Connection resource is failed */
 121 
 122         if ((remote_rsc->next_role == pcmk_role_stopped)
 123             && remote_rsc->remote_reconnect_ms
 124             && node->details->remote_was_fenced
 125             && !pe__shutdown_requested(node)) {
 126 
 127             /* We won't know whether the connection is recoverable until the
 128              * reconnect interval expires and we reattempt connection.
 129              */
 130             return remote_state_unknown;
 131         }
 132 
 133         /* The remote connection is in a failed state. If there are any
 134          * resources known to be active on it (stop) or in an unknown state
 135          * (probe), we must assume the worst and fence it.
 136          */
 137         return remote_state_failed;
 138 
 139     } else if (cluster_node == NULL) {
 140         /* Connection is recoverable but not currently running anywhere, so see
 141          * if we can recover it first
 142          */
 143         return remote_state_unknown;
 144 
 145     } else if (cluster_node->details->unclean
 146                || !(cluster_node->details->online)) {
 147         // Connection is running on a dead node, see if we can recover it first
 148         return remote_state_resting;
 149 
 150     } else if (pcmk__list_of_multiple(remote_rsc->running_on)
 151                && (remote_rsc->partial_migration_source != NULL)
 152                && (remote_rsc->partial_migration_target != NULL)) {
 153         /* We're in the middle of migrating a connection resource, so wait until
 154          * after the migration completes before performing any actions.
 155          */
 156         return remote_state_resting;
 157 
 158     }
 159     return remote_state_alive;
 160 }
 161 
 162 /*!
 163  * \internal
 164  * \brief Order actions on remote node relative to actions for the connection
 165  *
 166  * \param[in,out] action    An action scheduled on a Pacemaker Remote node
 167  */
 168 static void
 169 apply_remote_ordering(pcmk_action_t *action)
     /* [previous][next][first][last][top][bottom][index][help] */
 170 {
 171     pcmk_resource_t *remote_rsc = NULL;
 172     enum action_tasks task = text2task(action->task);
 173     enum remote_connection_state state = get_remote_node_state(action->node);
 174 
 175     uint32_t order_opts = pcmk__ar_none;
 176 
 177     if (action->rsc == NULL) {
 178         return;
 179     }
 180 
 181     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 182 
 183     remote_rsc = action->node->details->remote_rsc;
 184     CRM_ASSERT(remote_rsc != NULL);
 185 
 186     crm_trace("Order %s action %s relative to %s%s (state: %s)",
 187               action->task, action->uuid,
 188               pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
 189               remote_rsc->id, state2text(state));
 190 
 191     if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
 192                              PCMK_ACTION_MIGRATE_FROM, NULL)) {
 193         /* Migration ops map to pcmk_action_unspecified, but we need to apply
 194          * the same ordering as for stop or demote (see get_router_node()).
 195          */
 196         task = pcmk_action_stop;
 197     }
 198 
 199     switch (task) {
 200         case pcmk_action_start:
 201         case pcmk_action_promote:
 202             order_opts = pcmk__ar_none;
 203 
 204             if (state == remote_state_failed) {
 205                 /* Force recovery, by making this action required */
 206                 pe__set_order_flags(order_opts, pcmk__ar_first_implies_then);
 207             }
 208 
 209             /* Ensure connection is up before running this action */
 210             order_start_then_action(remote_rsc, action, order_opts);
 211             break;
 212 
 213         case pcmk_action_stop:
 214             if (state == remote_state_alive) {
 215                 order_action_then_stop(action, remote_rsc,
 216                                        pcmk__ar_then_implies_first);
 217 
 218             } else if (state == remote_state_failed) {
 219                 /* The resource is active on the node, but since we don't have a
 220                  * valid connection, the only way to stop the resource is by
 221                  * fencing the node. There is no need to order the stop relative
 222                  * to the remote connection, since the stop will become implied
 223                  * by the fencing.
 224                  */
 225                 pe_fence_node(remote_rsc->cluster, action->node,
 226                               "resources are active but "
 227                               "connection is unrecoverable",
 228                               FALSE);
 229 
 230             } else if (remote_rsc->next_role == pcmk_role_stopped) {
 231                 /* State must be remote_state_unknown or remote_state_stopped.
 232                  * Since the connection is not coming back up in this
 233                  * transition, stop this resource first.
 234                  */
 235                 order_action_then_stop(action, remote_rsc,
 236                                        pcmk__ar_then_implies_first);
 237 
 238             } else {
 239                 /* The connection is going to be started somewhere else, so
 240                  * stop this resource after that completes.
 241                  */
 242                 order_start_then_action(remote_rsc, action, pcmk__ar_none);
 243             }
 244             break;
 245 
 246         case pcmk_action_demote:
 247             /* Only order this demote relative to the connection start if the
 248              * connection isn't being torn down. Otherwise, the demote would be
 249              * blocked because the connection start would not be allowed.
 250              */
 251             if ((state == remote_state_resting)
 252                 || (state == remote_state_unknown)) {
 253 
 254                 order_start_then_action(remote_rsc, action, pcmk__ar_none);
 255             } /* Otherwise we can rely on the stop ordering */
 256             break;
 257 
 258         default:
 259             /* Wait for the connection resource to be up */
 260             if (pcmk__action_is_recurring(action)) {
 261                 /* In case we ever get the recovery logic wrong, force
 262                  * recurring monitors to be restarted, even if just
 263                  * the connection was re-established
 264                  */
 265                 order_start_then_action(remote_rsc, action,
 266                                         pcmk__ar_first_implies_then);
 267 
 268             } else {
 269                 pcmk_node_t *cluster_node = pe__current_node(remote_rsc);
 270 
 271                 if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
 272                     /* We would only be here if we do not know the state of the
 273                      * resource on the remote node. Since we have no way to find
 274                      * out, it is necessary to fence the node.
 275                      */
 276                     pe_fence_node(remote_rsc->cluster, action->node,
 277                                   "resources are in unknown state "
 278                                   "and connection is unrecoverable", FALSE);
 279                 }
 280 
 281                 if ((cluster_node != NULL) && (state == remote_state_stopped)) {
 282                     /* The connection is currently up, but is going down
 283                      * permanently. Make sure we check services are actually
 284                      * stopped _before_ we let the connection get closed.
 285                      */
 286                     order_action_then_stop(action, remote_rsc,
 287                                            pcmk__ar_unrunnable_first_blocks);
 288 
 289                 } else {
 290                     order_start_then_action(remote_rsc, action, pcmk__ar_none);
 291                 }
 292             }
 293             break;
 294     }
 295 }
 296 
/*!
 * \internal
 * \brief Order an action on a guest node relative to its container and
 *        connection resources
 *
 * \param[in,out] action  An action scheduled on a guest (container-hosted)
 *                        Pacemaker Remote node
 */
static void
apply_container_ordering(pcmk_action_t *action)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pcmk_resource_t *remote_rsc = NULL;
    pcmk_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    // A failed container means the guest node is effectively lost
    if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
        pe_fence_node(action->rsc->cluster, action->node, "container failed",
                      FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action,
                                    pcmk__ar_first_implies_then);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pcmk__ar_none);
            break;

        case pcmk_action_stop:
        case pcmk_action_demote:
            if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pcmk__ar_none);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != pcmk_action_unspecified) {
                    order_start_then_action(remote_rsc, action,
                                            pcmk__ar_first_implies_then);
                }
            } else {
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;
    }
}
 391 
 392 /*!
 393  * \internal
 394  * \brief Order all relevant actions relative to remote connection actions
 395  *
 396  * \param[in,out] scheduler  Scheduler data
 397  */
 398 void
 399 pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
     /* [previous][next][first][last][top][bottom][index][help] */
 400 {
 401     if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
 402         return;
 403     }
 404 
 405     crm_trace("Creating remote connection orderings");
 406 
 407     for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
 408         pcmk_action_t *action = iter->data;
 409         pcmk_resource_t *remote = NULL;
 410 
 411         // We are only interested in resource actions
 412         if (action->rsc == NULL) {
 413             continue;
 414         }
 415 
 416         /* Special case: If we are clearing the failcount of an actual
 417          * remote connection resource, then make sure this happens before
 418          * any start of the resource in this transition.
 419          */
 420         if (action->rsc->is_remote_node &&
 421             pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
 422                          pcmk__str_none)) {
 423 
 424             pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
 425                                pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
 426                                             0),
 427                                NULL, pcmk__ar_ordered, scheduler);
 428 
 429             continue;
 430         }
 431 
 432         // We are only interested in actions assigned to a node
 433         if (action->node == NULL) {
 434             continue;
 435         }
 436 
 437         if (!pe__is_guest_or_remote_node(action->node)) {
 438             continue;
 439         }
 440 
 441         /* We are only interested in real actions.
 442          *
 443          * @TODO This is probably wrong; pseudo-actions might be converted to
 444          * real actions and vice versa later in update_actions() at the end of
 445          * pcmk__apply_orderings().
 446          */
 447         if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
 448             continue;
 449         }
 450 
 451         remote = action->node->details->remote_rsc;
 452         if (remote == NULL) {
 453             // Orphaned
 454             continue;
 455         }
 456 
 457         /* Another special case: if a resource is moving to a Pacemaker Remote
 458          * node, order the stop on the original node after any start of the
 459          * remote connection. This ensures that if the connection fails to
 460          * start, we leave the resource running on the original node.
 461          */
 462         if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
 463             for (GList *item = action->rsc->actions; item != NULL;
 464                  item = item->next) {
 465                 pcmk_action_t *rsc_action = item->data;
 466 
 467                 if (!pe__same_node(rsc_action->node, action->node)
 468                     && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
 469                                     pcmk__str_none)) {
 470                     pcmk__new_ordering(remote, start_key(remote), NULL,
 471                                        action->rsc, NULL, rsc_action,
 472                                        pcmk__ar_ordered, scheduler);
 473                 }
 474             }
 475         }
 476 
 477         /* The action occurs across a remote connection, so create
 478          * ordering constraints that guarantee the action occurs while the node
 479          * is active (after start, before stop ... things like that).
 480          *
 481          * This is somewhat brittle in that we need to make sure the results of
 482          * this ordering are compatible with the result of get_router_node().
 483          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
 484          * of this logic rather than create_graph_action().
 485          */
 486         if (remote->container) {
 487             crm_trace("Container ordering for %s", action->uuid);
 488             apply_container_ordering(action);
 489 
 490         } else {
 491             crm_trace("Remote ordering for %s", action->uuid);
 492             apply_remote_ordering(action);
 493         }
 494     }
 495 }
 496 
 497 /*!
 498  * \internal
 499  * \brief Check whether a node is a failed remote node
 500  *
 501  * \param[in] node  Node to check
 502  *
 503  * \return true if \p node is a failed remote node, false otherwise
 504  */
 505 bool
 506 pcmk__is_failed_remote_node(const pcmk_node_t *node)
     /* [previous][next][first][last][top][bottom][index][help] */
 507 {
 508     return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
 509            && (get_remote_node_state(node) == remote_state_failed);
 510 }
 511 
 512 /*!
 513  * \internal
 514  * \brief Check whether a given resource corresponds to a given node as guest
 515  *
 516  * \param[in] rsc   Resource to check
 517  * \param[in] node  Node to check
 518  *
 519  * \return true if \p node is a guest node and \p rsc is its containing
 520  *         resource, otherwise false
 521  */
 522 bool
 523 pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
 524                                const pcmk_node_t *node)
 525 {
 526     return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
 527             && (node->details->remote_rsc != NULL)
 528             && (node->details->remote_rsc->container == rsc);
 529 }
 530 
 531 /*!
 532  * \internal
 533  * \brief Get proper connection host that a remote action must be routed through
 534  *
 535  * A remote connection resource might be starting, stopping, or migrating in the
 536  * same transition that an action needs to be executed on its Pacemaker Remote
 537  * node. Determine the proper node that the remote action should be routed
 538  * through.
 539  *
 540  * \param[in] action  (Potentially remote) action to route
 541  *
 542  * \return Connection host that action should be routed through if remote,
 543  *         otherwise NULL
 544  */
 545 pcmk_node_t *
 546 pcmk__connection_host_for_action(const pcmk_action_t *action)
     /* [previous][next][first][last][top][bottom][index][help] */
 547 {
 548     pcmk_node_t *began_on = NULL;
 549     pcmk_node_t *ended_on = NULL;
 550     bool partial_migration = false;
 551     const char *task = action->task;
 552 
 553     if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
 554         || !pe__is_guest_or_remote_node(action->node)) {
 555         return NULL;
 556     }
 557 
 558     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 559 
 560     began_on = pe__current_node(action->node->details->remote_rsc);
 561     ended_on = action->node->details->remote_rsc->allocated_to;
 562     if (action->node->details->remote_rsc
 563         && (action->node->details->remote_rsc->container == NULL)
 564         && action->node->details->remote_rsc->partial_migration_target) {
 565         partial_migration = true;
 566     }
 567 
 568     if (began_on == NULL) {
 569         crm_trace("Routing %s for %s through remote connection's "
 570                   "next node %s (starting)%s",
 571                   action->task, (action->rsc? action->rsc->id : "no resource"),
 572                   (ended_on? ended_on->details->uname : "none"),
 573                   partial_migration? " (partial migration)" : "");
 574         return ended_on;
 575     }
 576 
 577     if (ended_on == NULL) {
 578         crm_trace("Routing %s for %s through remote connection's "
 579                   "current node %s (stopping)%s",
 580                   action->task, (action->rsc? action->rsc->id : "no resource"),
 581                   (began_on? began_on->details->uname : "none"),
 582                   partial_migration? " (partial migration)" : "");
 583         return began_on;
 584     }
 585 
 586     if (pe__same_node(began_on, ended_on)) {
 587         crm_trace("Routing %s for %s through remote connection's "
 588                   "current node %s (not moving)%s",
 589                   action->task, (action->rsc? action->rsc->id : "no resource"),
 590                   (began_on? began_on->details->uname : "none"),
 591                   partial_migration? " (partial migration)" : "");
 592         return began_on;
 593     }
 594 
 595     /* If we get here, the remote connection is moving during this transition.
 596      * This means some actions for resources behind the connection will get
 597      * routed through the cluster node the connection resource is currently on,
 598      * and others are routed through the cluster node the connection will end up
 599      * on.
 600      */
 601 
 602     if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
 603         task = g_hash_table_lookup(action->meta, "notify_operation");
 604     }
 605 
 606     /*
 607      * Stop, demote, and migration actions must occur before the connection can
 608      * move (these actions are required before the remote resource can stop). In
 609      * this case, we know these actions have to be routed through the initial
 610      * cluster node the connection resource lived on before the move takes
 611      * place.
 612      *
 613      * The exception is a partial migration of a (non-guest) remote connection
 614      * resource; in that case, all actions (even these) will be ordered after
 615      * the connection's pseudo-start on the migration target, so the target is
 616      * the router node.
 617      */
 618     if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
 619                              PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
 620                              PCMK_ACTION_MIGRATE_TO, NULL)
 621         && !partial_migration) {
 622         crm_trace("Routing %s for %s through remote connection's "
 623                   "current node %s (moving)%s",
 624                   action->task, (action->rsc? action->rsc->id : "no resource"),
 625                   (began_on? began_on->details->uname : "none"),
 626                   partial_migration? " (partial migration)" : "");
 627         return began_on;
 628     }
 629 
 630     /* Everything else (start, promote, monitor, probe, refresh,
 631      * clear failcount, delete, ...) must occur after the connection starts on
 632      * the node it is moving to.
 633      */
 634     crm_trace("Routing %s for %s through remote connection's "
 635               "next node %s (moving)%s",
 636               action->task, (action->rsc? action->rsc->id : "no resource"),
 637               (ended_on? ended_on->details->uname : "none"),
 638               partial_migration? " (partial migration)" : "");
 639     return ended_on;
 640 }
 641 
 642 /*!
 643  * \internal
 644  * \brief Replace remote connection's addr="#uname" with actual address
 645  *
 646  * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 647  * with its "addr" parameter set to "#uname", pull the actual value from the
 648  * parameters evaluated without a node (which was put there earlier in
 649  * pcmk__create_graph() when the bundle's expand() method was called).
 650  *
 651  * \param[in,out] rsc     Resource to check
 652  * \param[in,out] params  Resource parameters evaluated per node
 653  */
 654 void
 655 pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
     /* [previous][next][first][last][top][bottom][index][help] */
 656 {
 657     const char *remote_addr = g_hash_table_lookup(params,
 658                                                   XML_RSC_ATTR_REMOTE_RA_ADDR);
 659 
 660     if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
 661         GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);
 662 
 663         remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
 664         if (remote_addr != NULL) {
 665             g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
 666                                 strdup(remote_addr));
 667         }
 668     }
 669 }
 670 
 671 /*!
 672  * \brief Add special bundle meta-attributes to XML
 673  *
 674  * If a given action will be executed on a guest node (including a bundle),
 675  * add the special bundle meta-attribute "container-attribute-target" and
 676  * environment variable "physical_host" as XML attributes (using meta-attribute
 677  * naming).
 678  *
 679  * \param[in,out] args_xml  XML to add attributes to
 680  * \param[in]     action    Action to check
 681  */
 682 void
 683 pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
     /* [previous][next][first][last][top][bottom][index][help] */
 684 {
 685     const pcmk_node_t *guest = action->node;
 686     const pcmk_node_t *host = NULL;
 687     enum action_tasks task;
 688 
 689     if (!pe__is_guest_node(guest)) {
 690         return;
 691     }
 692 
 693     task = text2task(action->task);
 694     if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
 695         task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
 696     }
 697 
 698     switch (task) {
 699         case pcmk_action_stop:
 700         case pcmk_action_stopped:
 701         case pcmk_action_demote:
 702         case pcmk_action_demoted:
 703             // "Down" actions take place on guest's current host
 704             host = pe__current_node(guest->details->remote_rsc->container);
 705             break;
 706 
 707         case pcmk_action_start:
 708         case pcmk_action_started:
 709         case pcmk_action_monitor:
 710         case pcmk_action_promote:
 711         case pcmk_action_promoted:
 712             // "Up" actions take place on guest's next host
 713             host = guest->details->remote_rsc->container->allocated_to;
 714             break;
 715 
 716         default:
 717             break;
 718     }
 719 
 720     if (host != NULL) {
 721         hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
 722                        (gpointer) g_hash_table_lookup(action->rsc->meta,
 723                                                       XML_RSC_ATTR_TARGET),
 724                        (gpointer) args_xml);
 725         hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
 726                        (gpointer) host->details->uname,
 727                        (gpointer) args_xml);
 728     }
 729 }

/* [previous][next][first][last][top][bottom][index][help] */