lib/pacemaker/pcmk_sched_remote.c


DEFINITIONS

This source file includes the following definitions:
  1. state2text
  2. order_start_then_action
  3. order_action_then_stop
  4. get_remote_node_state
  5. is_recurring_action
  6. apply_remote_ordering
  7. apply_container_ordering
  8. pcmk__order_remote_connection_actions
  9. pcmk__is_failed_remote_node
  10. pcmk__rsc_corresponds_to_guest
  11. pcmk__connection_host_for_action
  12. pcmk__substitute_remote_addr
  13. pcmk__add_bundle_meta_to_xml

/*
 * Copyright 2004-2022 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <sys/param.h>

#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <glib.h>

#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"

enum remote_connection_state {
    remote_state_unknown = 0,
    remote_state_alive = 1,
    remote_state_resting = 2,
    remote_state_failed = 3,
    remote_state_stopped = 4
};

static const char *
state2text(enum remote_connection_state state)
{
    switch (state) {
        case remote_state_unknown:
            return "unknown";
        case remote_state_alive:
            return "alive";
        case remote_state_resting:
            return "resting";
        case remote_state_failed:
            return "failed";
        case remote_state_stopped:
            return "stopped";
    }

    return "impossible";
}

/* We always use pe_order_preserve with these convenience functions to exempt
 * internally generated constraints from the prohibition of user constraints
 * involving remote connection resources.
 *
 * The start ordering additionally uses pe_order_runnable_left so that the
 * specified action is not runnable if the start is not runnable.
 */

static inline void
order_start_then_action(pe_resource_t *lh_rsc, pe_action_t *rh_action,
                        enum pe_ordering extra, pe_working_set_t *data_set)
{
    if ((lh_rsc != NULL) && (rh_action != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(lh_rsc, start_key(lh_rsc), NULL,
                           rh_action->rsc, NULL, rh_action,
                           pe_order_preserve|pe_order_runnable_left|extra,
                           data_set);
    }
}

static inline void
order_action_then_stop(pe_action_t *lh_action, pe_resource_t *rh_rsc,
                       enum pe_ordering extra, pe_working_set_t *data_set)
{
    if ((lh_action != NULL) && (rh_rsc != NULL) && (data_set != NULL)) {
        pcmk__new_ordering(lh_action->rsc, NULL, lh_action,
                           rh_rsc, stop_key(rh_rsc), NULL,
                           pe_order_preserve|extra, data_set);
    }
}
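
/* Minimal usage sketch for the wrappers above, assuming remote_rsc, action,
 * and data_set are valid (illustrative only):
 *
 *     order_start_then_action(remote_rsc, action, pe_order_none, data_set);
 *
 * pe_order_preserve and pe_order_runnable_left are supplied by the wrapper
 * itself, so callers pass only any extra flags.
 */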

static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
    pe_resource_t *remote_rsc = NULL;
    pe_node_t *cluster_node = NULL;

    CRM_ASSERT(node != NULL);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
        || (remote_rsc->allocated_to == NULL)) {

        // The connection resource is not going to run anywhere

        if ((cluster_node != NULL) && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, so see
         * if we can recover it first
         */
        return remote_state_unknown;

    } else if (cluster_node->details->unclean
               || !(cluster_node->details->online)) {
        // Connection is running on a dead node, see if we can recover it first
        return remote_state_resting;

    } else if (pcmk__list_of_multiple(remote_rsc->running_on)
               && (remote_rsc->partial_migration_source != NULL)
               && (remote_rsc->partial_migration_target != NULL)) {
        /* We're in the middle of migrating a connection resource, so wait until
         * after the migration completes before performing any actions.
         */
        return remote_state_resting;

    }
    return remote_state_alive;
}
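
/* Informal summary of the mapping above (the code itself is authoritative):
 *
 *   will not run anywhere, current host unclean             -> failed
 *   will not run anywhere, connection not failed            -> stopped
 *   will not run anywhere, failed, awaiting reconnect retry -> unknown
 *   will not run anywhere, failed otherwise                 -> failed
 *   recoverable but no current host                         -> unknown
 *   current host unclean or offline                         -> resting
 *   mid partial migration                                   -> resting
 *   otherwise                                               -> alive
 */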

static int
is_recurring_action(pe_action_t *action)
{
    guint interval_ms;

    if (pcmk__guint_from_hash(action->meta,
                              XML_LRM_ATTR_INTERVAL_MS, 0,
                              &interval_ms) != pcmk_rc_ok) {
        return 0;
    }
    return (interval_ms > 0);
}

/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 */
static void
apply_remote_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    pe_resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    enum pe_ordering order_opts = pe_order_none;

    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                pe__set_order_flags(order_opts, pe_order_implies_then);
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts, data_set);
            break;

        case stop_rsc:
            if (state == remote_state_alive) {
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else if (state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(data_set, action->node,
                              "resources are active but connection is unrecoverable",
                              FALSE);

            } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if ((state == remote_state_resting)
                || (state == remote_state_unknown)) {

                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then, data_set);

            } else {
                pe_node_t *cluster_node = pe__current_node(remote_rsc);

                if ((task == monitor_rsc) && (state == remote_state_failed)) {
                    /* We would only be here if we do not know the state of the
                     * resource on the remote node. Since we have no way to find
                     * out, it is necessary to fence the node.
                     */
                    pe_fence_node(data_set, action->node,
                                  "resources are in unknown state "
                                  "and connection is unrecoverable", FALSE);
                }

                if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                    /* The connection is currently up, but is going down
                     * permanently. Make sure we check services are actually
                     * stopped _before_ we let the connection get closed.
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left, data_set);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            data_set);
                }
            }
            break;
    }
}

static void
apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    pe_resource_t *remote_rsc = NULL;
    pe_resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc != NULL);
    CRM_ASSERT(action->node != NULL);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc != NULL);

    container = remote_rsc->container;
    CRM_ASSERT(container != NULL);

    if (pcmk_is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed", FALSE);
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED, NULL)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            // Force resource recovery if the container is recovered
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            // Wait for the connection resource to be up, too
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (pcmk_is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if (task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}

/*!
 * \internal
 * \brief Order all relevant actions relative to remote connection actions
 *
 * \param[in] data_set  Cluster working set
 */
void
pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
{
    if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
        return;
    }

    crm_trace("Creating remote connection orderings");

    for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;
        pe_resource_t *remote = NULL;

        // We are only interested in resource actions
        if (action->rsc == NULL) {
            continue;
        }

        /* Special case: If we are clearing the failcount of an actual
         * remote connection resource, then make sure this happens before
         * any start of the resource in this transition.
         */
        if (action->rsc->is_remote_node &&
            pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {

            pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                               pcmk__op_key(action->rsc->id, RSC_START, 0),
                               NULL, pe_order_optional, data_set);

            continue;
        }

        // We are only interested in actions allocated to a node
        if (action->node == NULL) {
            continue;
        }

        if (!pe__is_guest_or_remote_node(action->node)) {
            continue;
        }

        /* We are only interested in real actions.
         *
         * @TODO This is probably wrong; pseudo-actions might be converted to
         * real actions and vice versa later in update_actions() at the end of
         * pcmk__apply_orderings().
         */
        if (pcmk_is_set(action->flags, pe_action_pseudo)) {
            continue;
        }

        remote = action->node->details->remote_rsc;
        if (remote == NULL) {
            // Orphaned
            continue;
        }

        /* Another special case: if a resource is moving to a Pacemaker Remote
         * node, order the stop on the original node after any start of the
         * remote connection. This ensures that if the connection fails to
         * start, we leave the resource running on the original node.
         */
        if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
            for (GList *item = action->rsc->actions; item != NULL;
                 item = item->next) {
                pe_action_t *rsc_action = item->data;

                if ((rsc_action->node->details != action->node->details)
                    && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
                    pcmk__new_ordering(remote, start_key(remote), NULL,
                                       action->rsc, NULL, rsc_action,
                                       pe_order_optional, data_set);
                }
            }
        }

        /* The action occurs across a remote connection, so create
         * ordering constraints that guarantee the action occurs while the node
         * is active (after start, before stop ... things like that).
         *
         * This is somewhat brittle in that we need to make sure the results of
         * this ordering are compatible with the result of get_router_node().
         * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
         * of this logic rather than create_graph_action().
         */
        if (remote->container) {
            crm_trace("Container ordering for %s", action->uuid);
            apply_container_ordering(action, data_set);

        } else {
            crm_trace("Remote ordering for %s", action->uuid);
            apply_remote_ordering(action, data_set);
        }
    }
}

/*!
 * \internal
 * \brief Check whether a node is a failed remote node
 *
 * \param[in] node  Node to check
 *
 * \return true if \p node is a failed remote node, false otherwise
 */
bool
pcmk__is_failed_remote_node(pe_node_t *node)
{
    return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
           && (get_remote_node_state(node) == remote_state_failed);
}
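
/* Minimal usage sketch (hypothetical caller): because remote_state_failed
 * implies fencing-based recovery, a check like this can gate fencing logic:
 *
 *     if (pcmk__is_failed_remote_node(node)) {
 *         // recovery of resources on this node requires fencing it
 *     }
 */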

/*!
 * \internal
 * \brief Check whether a given resource corresponds to a given node as guest
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
 *
 * \return true if \p node is a guest node and \p rsc is its containing
 *         resource, otherwise false
 */
bool
pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node)
{
    return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
            && (node->details->remote_rsc != NULL)
            && (node->details->remote_rsc->container == rsc);
}

/*!
 * \internal
 * \brief Get proper connection host that a remote action must be routed through
 *
 * A remote connection resource might be starting, stopping, or migrating in the
 * same transition that an action needs to be executed on its Pacemaker Remote
 * node. Determine the proper node that the remote action should be routed
 * through.
 *
 * \param[in] action  (Potentially remote) action to route
 *
 * \return Connection host that action should be routed through if remote,
 *         otherwise NULL
 */
pe_node_t *
pcmk__connection_host_for_action(pe_action_t *action)
{
    pe_node_t *began_on = NULL;
    pe_node_t *ended_on = NULL;
    bool partial_migration = false;
    const char *task = action->task;

    if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
        || !pe__is_guest_or_remote_node(action->node)) {
        return NULL;
    }

    CRM_ASSERT(action->node->details->remote_rsc != NULL);

    began_on = pe__current_node(action->node->details->remote_rsc);
    ended_on = action->node->details->remote_rsc->allocated_to;
    if (action->node->details->remote_rsc
        && (action->node->details->remote_rsc->container == NULL)
        && action->node->details->remote_rsc->partial_migration_target) {
        partial_migration = true;
    }

    if (began_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "next node %s (starting)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (ended_on? ended_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return ended_on;
    }

    if (ended_on == NULL) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (stopping)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    if (began_on->details == ended_on->details) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (not moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* If we get here, the remote connection is moving during this transition.
     * This means some actions for resources behind the connection will get
     * routed through the cluster node the connection resource is currently on,
     * and others are routed through the cluster node the connection will end up
     * on.
     */

    if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
        task = g_hash_table_lookup(action->meta, "notify_operation");
    }

    /*
     * Stop, demote, and migration actions must occur before the connection can
     * move (these actions are required before the remote resource can stop). In
     * this case, we know these actions have to be routed through the initial
     * cluster node the connection resource lived on before the move takes
     * place.
     *
     * The exception is a partial migration of a (non-guest) remote connection
     * resource; in that case, all actions (even these) will be ordered after
     * the connection's pseudo-start on the migration target, so the target is
     * the router node.
     */
    if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
                             "migrate_to", NULL) && !partial_migration) {
        crm_trace("Routing %s for %s through remote connection's "
                  "current node %s (moving)%s",
                  action->task, (action->rsc? action->rsc->id : "no resource"),
                  (began_on? began_on->details->uname : "none"),
                  partial_migration? " (partial migration)" : "");
        return began_on;
    }

    /* Everything else (start, promote, monitor, probe, refresh,
     * clear failcount, delete, ...) must occur after the connection starts on
     * the node it is moving to.
     */
    crm_trace("Routing %s for %s through remote connection's "
              "next node %s (moving)%s",
              action->task, (action->rsc? action->rsc->id : "no resource"),
              (ended_on? ended_on->details->uname : "none"),
              partial_migration? " (partial migration)" : "");
    return ended_on;
}
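
/* Minimal usage sketch (hypothetical; per the comment in
 * pcmk__order_remote_connection_actions(), create_graph_action() is where the
 * router node is actually added): a graph builder could annotate an action's
 * XML with the routing result roughly like this:
 *
 *     pe_node_t *router = pcmk__connection_host_for_action(action);
 *
 *     if (router != NULL) {
 *         crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE,
 *                     router->details->uname);
 *     }
 */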

/*!
 * \internal
 * \brief Replace remote connection's addr="#uname" with actual address
 *
 * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
 * with its "addr" parameter set to "#uname", pull the actual value from the
 * parameters evaluated without a node (which was put there earlier in
 * pcmk__create_graph() when the bundle's expand() method was called).
 *
 * \param[in] rsc       Resource to check
 * \param[in] params    Resource parameters evaluated per node
 * \param[in] data_set  Cluster working set
 */
void
pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params,
                             pe_working_set_t *data_set)
{
    const char *remote_addr = g_hash_table_lookup(params,
                                                  XML_RSC_ATTR_REMOTE_RA_ADDR);

    if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
        GHashTable *base = pe_rsc_params(rsc, NULL, data_set);

        remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
        if (remote_addr != NULL) {
            g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                strdup(remote_addr));
        }
    }
}
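
/* Minimal usage sketch (hypothetical caller), assuming node is where the
 * action will execute:
 *
 *     GHashTable *params = pe_rsc_params(rsc, node, data_set);
 *
 *     pcmk__substitute_remote_addr(rsc, params, data_set);
 *     // any addr="#uname" entry now holds the real address, if one was found
 */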

/*!
 * \brief Add special bundle meta-attributes to XML
 *
 * If a given action will be executed on a guest node (including a bundle),
 * add the special bundle meta-attribute "container-attribute-target" and
 * environment variable "physical_host" as XML attributes (using meta-attribute
 * naming).
 *
 * \param[in] args_xml   XML to add attributes to
 * \param[in] action     Action to check
 */
void
pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action)
{
    pe_node_t *host = NULL;
    enum action_tasks task;

    if (!pe__is_guest_node(action->node)) {
        return;
    }

    task = text2task(action->task);
    if ((task == action_notify) || (task == action_notified)) {
        task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
    }

    switch (task) {
        case stop_rsc:
        case stopped_rsc:
        case action_demote:
        case action_demoted:
            // "Down" actions take place on guest's current host
            host = pe__current_node(action->node->details->remote_rsc->container);
            break;

        case start_rsc:
        case started_rsc:
        case monitor_rsc:
        case action_promote:
        case action_promoted:
            // "Up" actions take place on guest's next host
            host = action->node->details->remote_rsc->container->allocated_to;
            break;

        default:
            break;
    }

    if (host != NULL) {
        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
                       (gpointer) g_hash_table_lookup(action->rsc->meta,
                                                      XML_RSC_ATTR_TARGET),
                       (gpointer) args_xml);
        hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                       (gpointer) host->details->uname,
                       (gpointer) args_xml);
    }
}
