root/lib/pacemaker/pcmk_sched_remote.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. state2text
  2. order_start_then_action
  3. order_action_then_stop
  4. get_remote_node_state
  5. apply_remote_ordering
  6. apply_container_ordering
  7. pcmk__order_remote_connection_actions
  8. pcmk__is_failed_remote_node
  9. pcmk__rsc_corresponds_to_guest
  10. pcmk__connection_host_for_action
  11. pcmk__substitute_remote_addr
  12. pcmk__add_guest_meta_to_xml

   1 /*
   2  * Copyright 2004-2024 the Pacemaker project contributors
   3  *
   4  * The version control history for this file may have further details.
   5  *
   6  * This source code is licensed under the GNU General Public License version 2
   7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
   8  */
   9 
  10 #include <crm_internal.h>
  11 
  12 #include <sys/param.h>
  13 
  14 #include <crm/crm.h>
  15 #include <crm/cib.h>
  16 #include <crm/common/xml.h>
  17 #include <crm/common/xml_internal.h>
  18 
  19 #include <glib.h>
  20 
  21 #include <crm/pengine/status.h>
  22 #include <pacemaker-internal.h>
  23 #include "libpacemaker_private.h"
  24 
  25 enum remote_connection_state {
  26     remote_state_unknown = 0,
  27     remote_state_alive = 1,
  28     remote_state_resting = 2,
  29     remote_state_failed = 3,
  30     remote_state_stopped = 4
  31 };
  32 
  33 static const char *
  34 state2text(enum remote_connection_state state)
     /* [previous][next][first][last][top][bottom][index][help] */
  35 {
  36     switch (state) {
  37         case remote_state_unknown:
  38             return "unknown";
  39         case remote_state_alive:
  40             return "alive";
  41         case remote_state_resting:
  42             return "resting";
  43         case remote_state_failed:
  44             return "failed";
  45         case remote_state_stopped:
  46             return "stopped";
  47     }
  48 
  49     return "impossible";
  50 }
  51 
  52 /* We always use pcmk__ar_guest_allowed with these convenience functions to
  53  * exempt internally generated constraints from the prohibition of user
  54  * constraints involving remote connection resources.
  55  *
  56  * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
  57  * the specified action is not runnable if the start is not runnable.
  58  */
  59 
  60 static inline void
  61 order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
     /* [previous][next][first][last][top][bottom][index][help] */
  62                         uint32_t extra)
  63 {
  64     if ((first_rsc != NULL) && (then_action != NULL)) {
  65         pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
  66                            then_action->rsc, NULL, then_action,
  67                            pcmk__ar_guest_allowed
  68                            |pcmk__ar_unrunnable_first_blocks
  69                            |extra,
  70                            first_rsc->cluster);
  71     }
  72 }
  73 
  74 static inline void
  75 order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
  76                        uint32_t extra)
  77 {
  78     if ((first_action != NULL) && (then_rsc != NULL)) {
  79         pcmk__new_ordering(first_action->rsc, NULL, first_action,
  80                            then_rsc, stop_key(then_rsc), NULL,
  81                            pcmk__ar_guest_allowed|extra, then_rsc->cluster);
  82     }
  83 }
  84 
/*!
 * \internal
 * \brief Determine the scheduling-relevant state of a remote node's connection
 *
 * \param[in] node  Pacemaker Remote node to check (must not be NULL, and must
 *                  have a remote connection resource)
 *
 * \return Connection state of \p node (see enum remote_connection_state)
 */
static enum remote_connection_state
get_remote_node_state(const pcmk_node_t *node)
{
    const pcmk_resource_t *remote_rsc = NULL;
    const pcmk_node_t *cluster_node = NULL;

    pcmk__assert(node != NULL);

    remote_rsc = node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    // Cluster node currently hosting the connection resource (if any)
    cluster_node = pcmk__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == pcmk_role_stopped)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == pcmk_role_stopped)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    // Connection is running normally on an online, clean cluster node
    return remote_state_alive;
}
 160 
/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 *
 * Creates orderings between \p action and its node's remote connection
 * resource (start or stop, depending on the action type and the connection's
 * current state), and fences the node when resources cannot be stopped or
 * probed over an unrecoverable connection.
 *
 * \param[in,out] action    An action scheduled on a Pacemaker Remote node
 *                          (ignored if it has no resource)
 */
static void
apply_remote_ordering(pcmk_action_t *action)
{
    pcmk_resource_t *remote_rsc = NULL;
    enum action_tasks task = pcmk_parse_action(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pcmk__ar_none;

    // Only resource actions are ordered relative to the connection
    if (action->rsc == NULL) {
        return;
    }

    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            order_opts = pcmk__ar_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pcmk__set_relation_flags(order_opts,
                                         pcmk__ar_first_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts);
            break;

        case pcmk_action_stop:
            if (state == remote_state_alive) {
                // Connection stays up: stop the resource before any stop of it
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->cluster, action->node,
                              "resources are active but "
                              "connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == pcmk_role_stopped) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;

        case pcmk_action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pcmk__ar_first_implies_then);

            } else {
                pcmk_node_t *cluster_node = pcmk__current_node(remote_rsc);

                if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->cluster, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pcmk__ar_unrunnable_first_blocks);

                } else {
                    order_start_then_action(remote_rsc, action, pcmk__ar_none);
                }
            }
            break;
    }
}
 296 
/*!
 * \internal
 * \brief Order actions on a guest node relative to its container and connection
 *
 * Orders \p action relative to the start/stop of the guest node's container
 * resource and its remote connection resource, and fences the guest node if
 * its container has failed.
 *
 * \param[in,out] action  An action scheduled on a guest (container-based)
 *                        Pacemaker Remote node; must have a resource and node
 */
static void
apply_container_ordering(pcmk_action_t *action)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pcmk_resource_t *remote_rsc = NULL;
    pcmk_resource_t *container = NULL;
    enum action_tasks task = pcmk_parse_action(action->task);

    pcmk__assert(action->rsc != NULL);
    pcmk__assert(action->node != NULL);
    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    pcmk__assert(remote_rsc != NULL);

    container = remote_rsc->container;
    pcmk__assert(container != NULL);

    // A failed container means the guest node must be fenced
    if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
        pe_fence_node(action->rsc->cluster, action->node, "container failed",
                      FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk_action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk_action_stop;
    }

    switch (task) {
        case pcmk_action_start:
        case pcmk_action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action,
                                    pcmk__ar_first_implies_then);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pcmk__ar_none);
            break;

        case pcmk_action_stop:
        case pcmk_action_demote:
            if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pcmk__ar_none);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != pcmk_action_unspecified) {
                    order_start_then_action(remote_rsc, action,
                                            pcmk__ar_first_implies_then);
                }
            } else {
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;
    }
}
 391 
/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * Iterates over all scheduled actions and, for each real resource action
 * assigned to a Pacemaker Remote node, creates orderings relative to that
 * node's connection resource (and container, for guest nodes). Does nothing
 * unless the scheduler has remote nodes.
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
    if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
        pcmk_action_t *action = iter->data;
        pcmk_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
                         pcmk__str_none)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
                                            0),
                               NULL, pcmk__ar_ordered, scheduler);

            continue;
        }

        // We are only interested in actions assigned to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pcmk__is_pacemaker_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pcmk_action_t *rsc_action = item->data;

                // Find the same resource's stop on a different node
                if (!pcmk__same_node(rsc_action->node, action->node)
                    && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
                                    pcmk__str_none)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pcmk__ar_ordered, scheduler);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add PCMK__XA_ROUTER_NODE as part of
         * this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}
 496 
 497 /*!
 498  * \internal
 499  * \brief Check whether a node is a failed remote node
 500  *
 501  * \param[in] node  Node to check
 502  *
 503  * \return true if \p node is a failed remote node, false otherwise
 504  */
 505 bool
 506 pcmk__is_failed_remote_node(const pcmk_node_t *node)
     /* [previous][next][first][last][top][bottom][index][help] */
 507 {
 508     return pcmk__is_remote_node(node) && (node->details->remote_rsc != NULL)
 509            && (get_remote_node_state(node) == remote_state_failed);
 510 }
 511 
 512 /*!
 513  * \internal
 514  * \brief Check whether a given resource corresponds to a given node as guest
 515  *
 516  * \param[in] rsc   Resource to check
 517  * \param[in] node  Node to check
 518  *
 519  * \return true if \p node is a guest node and \p rsc is its containing
 520  *         resource, otherwise false
 521  */
 522 bool
 523 pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
 524                                const pcmk_node_t *node)
 525 {
 526     return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
 527             && (node->details->remote_rsc != NULL)
 528             && (node->details->remote_rsc->container == rsc);
 529 }
 530 
/*!
 * \internal
 * \brief Get proper connection host that a remote action must be routed through
 *
 * A remote connection resource might be starting, stopping, or migrating in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper node that the remote action should be routed
 * through.
 *
 * \param[in] action  (Potentially remote) action to route
 *
 * \return Connection host that action should be routed through if remote,
 *         otherwise NULL
 */
pcmk_node_t *
pcmk__connection_host_for_action(const pcmk_action_t *action)
{
    pcmk_node_t *began_on = NULL;   // Connection's current host
    pcmk_node_t *ended_on = NULL;   // Connection's assigned (next) host
    bool partial_migration = false;
    const char *task = action->task;

    // Fencing is not routed through the connection; neither are non-remote actions
    if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
        || !pcmk__is_pacemaker_remote_node(action->node)) {
        return NULL;
    }

    pcmk__assert(action->node->details->remote_rsc != NULL);

    began_on = pcmk__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    /* NOTE(review): remote_rsc was already asserted non-NULL above, so the
     * first condition below appears redundant — confirm before removing
     */
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (pcmk__same_node(began_on, ended_on)) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end up
     * on.
     */

    // For notifications, route based on the operation being notified about
    if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
                             PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_MIGRATE_TO, NULL)
        && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
 641 
 642 /*!
 643  * \internal
 644  * \brief Replace remote connection's addr="#uname" with actual address
 645  *
 646  * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 647  * with its "addr" parameter set to "#uname", pull the actual value from the
 648  * parameters evaluated without a node (which was put there earlier in
 649  * pcmk__create_graph() when the bundle's expand() method was called).
 650  *
 651  * \param[in,out] rsc     Resource to check
 652  * \param[in,out] params  Resource parameters evaluated per node
 653  */
 654 void
 655 pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
     /* [previous][next][first][last][top][bottom][index][help] */
 656 {
 657     const char *remote_addr = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR);
 658 
 659     if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
 660         GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);
 661 
 662         remote_addr = g_hash_table_lookup(base, PCMK_REMOTE_RA_ADDR);
 663         if (remote_addr != NULL) {
 664             pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, remote_addr);
 665         }
 666     }
 667 }
 668 
/*!
 * \brief Add special guest node meta-attributes to XML
 *
 * If a given action will be executed on a guest node, add the following as XML
 * attributes (using meta-attribute naming):
 * * The resource's \c PCMK_META_CONTAINER_ATTRIBUTE_TARGET meta-attribute
 *   (usually set only for bundles), as \c PCMK_META_CONTAINER_ATTRIBUTE_TARGET
 * * The guest's physical host (current host for "down" actions, next host for
 *   "up" actions), as \c PCMK__META_PHYSICAL_HOST
 *
 * If the guest node has no physical host, then don't add either attribute.
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action to check
 */
void
pcmk__add_guest_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
    const pcmk_node_t *guest = action->node;
    const pcmk_node_t *host = NULL;
    enum action_tasks task;

    // Only guest (container-based) nodes are relevant here
    if (!pcmk__is_guest_or_bundle_node(guest)) {
        return;
    }

    task = pcmk_parse_action(action->task);
    if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
        // For notifications, classify by the operation being notified about
        task = pcmk_parse_action(g_hash_table_lookup(action->meta,
                                                     "notify_operation"));
    }

    switch (task) {
        case pcmk_action_stop:
        case pcmk_action_stopped:
        case pcmk_action_demote:
        case pcmk_action_demoted:
            // "Down" actions take place on guest's current host
            host = pcmk__current_node(guest->details->remote_rsc->container);
            break;

        case pcmk_action_start:
        case pcmk_action_started:
        case pcmk_action_monitor:
        case pcmk_action_promote:
        case pcmk_action_promoted:
            // "Up" actions take place on guest's next host
            host = guest->details->remote_rsc->container->allocated_to;
            break;

        default:
            // Other actions get neither attribute
            break;
    }

    if (host != NULL) {
        gpointer target =
            g_hash_table_lookup(action->rsc->meta,
                                PCMK_META_CONTAINER_ATTRIBUTE_TARGET);

        hash2metafield((gpointer) PCMK_META_CONTAINER_ATTRIBUTE_TARGET,
                       target,
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__META_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}

/* [previous][next][first][last][top][bottom][index][help] */