root/lib/pacemaker/pcmk_sched_remote.c

/* [previous][next][first][last][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. state2text
  2. order_start_then_action
  3. order_action_then_stop
  4. get_remote_node_state
  5. apply_remote_ordering
  6. apply_launcher_ordering
  7. pcmk__order_remote_connection_actions
  8. pcmk__is_failed_remote_node
  9. pcmk__rsc_corresponds_to_guest
  10. pcmk__connection_host_for_action
  11. pcmk__substitute_remote_addr
  12. pcmk__add_guest_meta_to_xml

   1 /*
   2  * Copyright 2004-2024 the Pacemaker project contributors
   3  *
   4  * The version control history for this file may have further details.
   5  *
   6  * This source code is licensed under the GNU General Public License version 2
   7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
   8  */
   9 
  10 #include <crm_internal.h>
  11 
  12 #include <sys/param.h>
  13 
  14 #include <crm/crm.h>
  15 #include <crm/cib.h>
  16 #include <crm/common/xml.h>
  17 #include <crm/common/xml_internal.h>
  18 
  19 #include <glib.h>
  20 
  21 #include <crm/pengine/status.h>
  22 #include <pacemaker-internal.h>
  23 #include "libpacemaker_private.h"
  24 
  25 enum remote_connection_state {
  26     remote_state_unknown = 0,
  27     remote_state_alive = 1,
  28     remote_state_resting = 2,
  29     remote_state_failed = 3,
  30     remote_state_stopped = 4
  31 };
  32 
  33 static const char *
  34 state2text(enum remote_connection_state state)
     /* [previous][next][first][last][top][bottom][index][help] */
  35 {
  36     switch (state) {
  37         case remote_state_unknown:
  38             return "unknown";
  39         case remote_state_alive:
  40             return "alive";
  41         case remote_state_resting:
  42             return "resting";
  43         case remote_state_failed:
  44             return "failed";
  45         case remote_state_stopped:
  46             return "stopped";
  47     }
  48 
  49     return "impossible";
  50 }
  51 
  52 /* We always use pcmk__ar_guest_allowed with these convenience functions to
  53  * exempt internally generated constraints from the prohibition of user
  54  * constraints involving remote connection resources.
  55  *
  56  * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
  57  * the specified action is not runnable if the start is not runnable.
  58  */
  59 
  60 static inline void
  61 order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
     /* [previous][next][first][last][top][bottom][index][help] */
  62                         uint32_t extra)
  63 {
  64     if ((first_rsc != NULL) && (then_action != NULL)) {
  65 
  66         pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
  67                            then_action->rsc, NULL, then_action,
  68                            pcmk__ar_guest_allowed
  69                            |pcmk__ar_unrunnable_first_blocks
  70                            |extra,
  71                            first_rsc->priv->scheduler);
  72     }
  73 }
  74 
  75 static inline void
  76 order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
  77                        uint32_t extra)
  78 {
  79     if ((first_action != NULL) && (then_rsc != NULL)) {
  80         pcmk__new_ordering(first_action->rsc, NULL, first_action,
  81                            then_rsc, stop_key(then_rsc), NULL,
  82                            pcmk__ar_guest_allowed|extra,
  83                            then_rsc->priv->scheduler);
  84     }
  85 }
  86 
/*!
 * \internal
 * \brief Classify the state of a Pacemaker Remote node's connection resource
 *
 * \param[in] node  Pacemaker Remote node to check (must not be NULL, and must
 *                  have a remote connection resource)
 *
 * \return One of the remote_connection_state values describing whether the
 *         connection is alive, cleanly stopped, failed (fencing required),
 *         resting (recoverable, wait for recovery), or unknown
 */
static enum remote_connection_state
get_remote_node_state(const pcmk_node_t *node)
{
    const pcmk_resource_t *remote_rsc = NULL;
    const pcmk_node_t *cluster_node = NULL;

    pcmk__assert(node != NULL);

    remote_rsc = node->priv->remote;
    pcmk__assert(remote_rsc != NULL);

    // Cluster node currently hosting the connection resource (may be NULL)
    cluster_node = pcmk__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->priv->next_role == pcmk_role_stopped)
        || (remote_rsc->priv->assigned_node == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->priv->next_role == pcmk_role_stopped)
            && (remote_rsc->priv->remote_reconnect_ms > 0U)
            && pcmk_is_set(node->priv->flags, pcmk__node_remote_fenced)
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->priv->active_nodes)
               && (remote_rsc->priv->partial_migration_source != NULL)
               && (remote_rsc->priv->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    // Connection is running normally on an online, clean cluster node
    return remote_state_alive;
}
 162 
/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 *
 * May also schedule fencing of the remote node when its connection resource is
 * unrecoverable and resources are (or may be) active on it.
 *
 * \param[in,out] action    An action scheduled on a Pacemaker Remote node
 */
static void
apply_remote_ordering(pcmk_action_t *action)
{
    pcmk_resource_t *remote_rsc = NULL;
    enum pcmk__action_type task = pcmk__parse_action(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    uint32_t order_opts = pcmk__ar_none;

    // Only resource actions are ordered relative to the connection
    if (action->rsc == NULL) {
        return;
    }

    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->priv->remote;
    pcmk__assert(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk__action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk__action_stop;
    }

    switch (task) {
        case pcmk__action_start:
        case pcmk__action_promote:
            order_opts = pcmk__ar_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pcmk__set_relation_flags(order_opts,
                                         pcmk__ar_first_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts);
            break;

        case pcmk__action_stop:
            if (state == remote_state_alive) {
                // Connection stays up: stop the resource before any stop of it
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(remote_rsc->priv->scheduler, action->node,
                              "resources are active but "
                              "connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->priv->next_role == pcmk_role_stopped) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pcmk__ar_then_implies_first);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;

        case pcmk__action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pcmk__ar_first_implies_then);

            } else {
                pcmk_node_t *cluster_node = pcmk__current_node(remote_rsc);

                if ((task == pcmk__action_monitor)
                    && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(remote_rsc->priv->scheduler, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pcmk__ar_unrunnable_first_blocks);

                } else {
                    order_start_then_action(remote_rsc, action, pcmk__ar_none);
                }
            }
            break;
    }
}
 299 
/*!
 * \internal
 * \brief Order an action on a guest node relative to its launcher and
 *        connection resources
 *
 * May also schedule fencing of the guest node when its launcher has failed.
 *
 * \param[in,out] action  Action scheduled on a guest node (one whose remote
 *                        connection resource has a launcher)
 */
static void
apply_launcher_ordering(pcmk_action_t *action)
{
    pcmk_resource_t *remote_rsc = NULL;
    pcmk_resource_t *launcher = NULL;
    enum pcmk__action_type task = pcmk__parse_action(action->task);

    pcmk__assert(action->rsc != NULL);
    pcmk__assert(pcmk__is_pacemaker_remote_node(action->node));

    remote_rsc = action->node->priv->remote;
    pcmk__assert(remote_rsc != NULL);

    launcher = remote_rsc->priv->launcher;
    pcmk__assert(launcher != NULL);

    // A failed launcher means the guest node itself is lost
    if (pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
        pe_fence_node(action->rsc->priv->scheduler, action->node,
                      "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pcmk__rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(launcher->flags, pcmk__rsc_failed)? "failed " : "",
              launcher->id);

    if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
        /* Migration ops map to pcmk__action_unspecified, but we need to apply
         * the same ordering as for stop or demote (see get_router_node()).
         */
        task = pcmk__action_stop;
    }

    switch (task) {
        case pcmk__action_start:
        case pcmk__action_promote:
            // Force resource recovery if the launcher is recovered
            order_start_then_action(launcher, action,
                                    pcmk__ar_first_implies_then);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pcmk__ar_none);
            break;

        case pcmk__action_stop:
        case pcmk__action_demote:
            if (pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
                /* When the launcher representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the launcher stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the launcher's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pcmk__ar_none);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (pcmk__action_is_recurring(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != pcmk__action_unspecified) {
                    order_start_then_action(remote_rsc, action,
                                            pcmk__ar_first_implies_then);
                }
            } else {
                order_start_then_action(remote_rsc, action, pcmk__ar_none);
            }
            break;
    }
}
 386 
/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * Iterates over all scheduled actions, and for each real resource action
 * assigned to a Pacemaker Remote node, creates orderings relative to the
 * node's connection (and launcher, for guest nodes) so the action occurs
 * while the node is reachable.
 *
 * \param[in,out] scheduler  Scheduler data
 */
void
pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
    // Nothing to do if the cluster has no Pacemaker Remote nodes
    if (!pcmk_is_set(scheduler->flags, pcmk__sched_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *iter = scheduler->priv->actions;
         iter != NULL; iter = iter->next) {
        pcmk_action_t *action = iter->data;
        pcmk_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (pcmk_is_set(action->rsc->flags, pcmk__rsc_is_remote_connection)
            && pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
                            pcmk__str_none)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
                                            0),
                               NULL, pcmk__ar_ordered, scheduler);

            continue;
        }

        // We are only interested in actions assigned to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pcmk__is_pacemaker_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
            continue;
        }

        remote = action->node->priv->remote;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
            for (GList *item = action->rsc->priv->actions; item != NULL;
                 item = item->next) {
                pcmk_action_t *rsc_action = item->data;

                // Match stops of the same resource on a different node
                if (!pcmk__same_node(rsc_action->node, action->node)
                    && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
                                    pcmk__str_none)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pcmk__ar_ordered, scheduler);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add PCMK__XA_ROUTER_NODE as part of
         * this logic rather than create_graph_action().
         */
        if (remote->priv->launcher != NULL) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_launcher_ordering(action);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action);
        }
    }
}
 492 
 493 /*!
 494  * \internal
 495  * \brief Check whether a node is a failed remote node
 496  *
 497  * \param[in] node  Node to check
 498  *
 499  * \return true if \p node is a failed remote node, false otherwise
 500  */
 501 bool
 502 pcmk__is_failed_remote_node(const pcmk_node_t *node)
     /* [previous][next][first][last][top][bottom][index][help] */
 503 {
 504     return pcmk__is_remote_node(node) && (node->priv->remote != NULL)
 505            && (get_remote_node_state(node) == remote_state_failed);
 506 }
 507 
 508 /*!
 509  * \internal
 510  * \brief Check whether a given resource corresponds to a given node as guest
 511  *
 512  * \param[in] rsc   Resource to check
 513  * \param[in] node  Node to check
 514  *
 515  * \return true if \p node is a guest node and \p rsc is its containing
 516  *         resource, otherwise false
 517  */
 518 bool
 519 pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
     /* [previous][next][first][last][top][bottom][index][help] */
 520                                const pcmk_node_t *node)
 521 {
 522     return (rsc != NULL) && (rsc->priv->launched != NULL) && (node != NULL)
 523             && (node->priv->remote != NULL)
 524             && (node->priv->remote->priv->launcher == rsc);
 525 }
 526 
/*!
 * \internal
 * \brief Get proper connection host that a remote action must be routed through
 *
 * A remote connection resource might be starting, stopping, or migrating in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper node that the remote action should be routed
 * through.
 *
 * \param[in] action  (Potentially remote) action to route
 *
 * \return Connection host that action should be routed through if remote,
 *         otherwise NULL
 */
pcmk_node_t *
pcmk__connection_host_for_action(const pcmk_action_t *action)
{
    pcmk_node_t *began_on = NULL;   // Connection's current host
    pcmk_node_t *ended_on = NULL;   // Connection's assigned (next) host
    bool partial_migration = false;
    const char *task = action->task;
    pcmk_resource_t *remote = NULL;

    // Fencing is not routed through the connection; neither are cluster nodes
    if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
        || !pcmk__is_pacemaker_remote_node(action->node)) {
        return NULL;
    }

    remote = action->node->priv->remote;
    pcmk__assert(remote != NULL);

    began_on = pcmk__current_node(remote);
    ended_on = remote->priv->assigned_node;

    // Partial migration applies only to non-guest remote connections
    if ((remote->priv->launcher == NULL)
        && (remote->priv->partial_migration_target != NULL)) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->priv->name : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->priv->name : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (pcmk__same_node(began_on, ended_on)) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->priv->name : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end up
     * on.
     */

    // For notifications, route based on the operation being notified about
    if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
                             PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_MIGRATE_TO, NULL)
        && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->priv->name : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->priv->name : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
 638 
 639 /*!
 640  * \internal
 641  * \brief Replace remote connection's addr="#uname" with actual address
 642  *
 643  * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 644  * with its "addr" parameter set to "#uname", pull the actual value from the
 645  * parameters evaluated without a node (which was put there earlier in
 646  * pcmk__create_graph() when the bundle's expand() method was called).
 647  *
 648  * \param[in,out] rsc     Resource to check
 649  * \param[in,out] params  Resource parameters evaluated per node
 650  */
 651 void
 652 pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
     /* [previous][next][first][last][top][bottom][index][help] */
 653 {
 654     const char *remote_addr = g_hash_table_lookup(params, PCMK_REMOTE_RA_ADDR);
 655 
 656     if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
 657         GHashTable *base = pe_rsc_params(rsc, NULL, rsc->priv->scheduler);
 658 
 659         remote_addr = g_hash_table_lookup(base, PCMK_REMOTE_RA_ADDR);
 660         if (remote_addr != NULL) {
 661             pcmk__insert_dup(params, PCMK_REMOTE_RA_ADDR, remote_addr);
 662         }
 663     }
 664 }
 665 
/*!
 * \brief Add special guest node meta-attributes to XML
 *
 * If a given action will be executed on a guest node, add the following as XML
 * attributes (using meta-attribute naming):
 * * The resource's \c PCMK_META_CONTAINER_ATTRIBUTE_TARGET meta-attribute
 *   (usually set only for bundles), as \c PCMK_META_CONTAINER_ATTRIBUTE_TARGET
 * * The guest's physical host (current host for "down" actions, next host for
 *   "up" actions), as \c PCMK__META_PHYSICAL_HOST
 *
 * If the guest node has no physical host, then don't add either attribute.
 *
 * \param[in,out] args_xml  XML to add attributes to
 * \param[in]     action    Action to check
 */
void
pcmk__add_guest_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
    const pcmk_node_t *guest = action->node;
    const pcmk_node_t *host = NULL;
    const pcmk_resource_t *launcher = NULL;
    enum pcmk__action_type task;

    if (!pcmk__is_guest_or_bundle_node(guest)) {
        return;
    }
    /* NOTE(review): assumes a guest node always has a non-NULL connection with
     * a non-NULL launcher — confirm pcmk__is_guest_or_bundle_node() guarantees
     * this
     */
    launcher = guest->priv->remote->priv->launcher;

    // For notifications, use the operation being notified about
    task = pcmk__parse_action(action->task);
    if ((task == pcmk__action_notify) || (task == pcmk__action_notified)) {
        task = pcmk__parse_action(g_hash_table_lookup(action->meta,
                                                      "notify_operation"));
    }

    switch (task) {
        case pcmk__action_stop:
        case pcmk__action_stopped:
        case pcmk__action_demote:
        case pcmk__action_demoted:
            // "Down" actions take place on guest's current host
            host = pcmk__current_node(launcher);
            break;

        case pcmk__action_start:
        case pcmk__action_started:
        case pcmk__action_monitor:
        case pcmk__action_promote:
        case pcmk__action_promoted:
            // "Up" actions take place on guest's next host
            host = launcher->priv->assigned_node;
            break;

        default:
            // Other actions get neither attribute
            break;
    }

    if (host != NULL) {
        gpointer target =
            g_hash_table_lookup(action->rsc->priv->meta,
                                PCMK_META_CONTAINER_ATTRIBUTE_TARGET);

        hash2metafield((gpointer) PCMK_META_CONTAINER_ATTRIBUTE_TARGET,
                       target,
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__META_PHYSICAL_HOST,
                       (gpointer) host->priv->name,
                       (gpointer) args_xml);
    }
}

/* [previous][next][first][last][top][bottom][index][help] */