root/lib/pengine/pe_actions.c


DEFINITIONS

This source file includes the following definitions.
  1. add_singleton
  2. lookup_singleton
  3. find_existing_action
  4. find_exact_action_config
  5. pcmk__find_action_config
  6. new_action
  7. pcmk__unpack_action_rsc_params
  8. update_action_optional
  9. effective_quorum_policy
  10. update_resource_action_runnable
  11. valid_stop_on_fail
  12. validate_on_fail
  13. unpack_timeout
  14. unpack_interval_origin
  15. unpack_start_delay
  16. most_frequent_monitor
  17. pcmk__unpack_action_meta
  18. pcmk__action_requires
  19. pcmk__parse_on_fail
  20. pcmk__role_after_failure
  21. unpack_operation
  22. custom_action
  23. get_pseudo_op
  24. find_unfencing_devices
  25. node_priority_fencing_delay
  26. pe_fence_op
  27. get_complex_task
  28. find_first_action
  29. find_actions
  30. find_actions_exact
  31. pe__resource_actions
  32. pe__action2reason
  33. pe_action_set_reason
  34. pe__clear_resource_history
  35. pe__is_newer_op
  36. sort_op_by_callid
  37. pe__new_rsc_pseudo_action
  38. pe__add_action_expected_result

   1 /*
   2  * Copyright 2004-2025 the Pacemaker project contributors
   3  *
   4  * The version control history for this file may have further details.
   5  *
   6  * This source code is licensed under the GNU Lesser General Public License
   7  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
   8  */
   9 
  10 #include <crm_internal.h>
  11 
  12 #include <glib.h>
  13 #include <stdbool.h>
  14 
  15 #include <crm/crm.h>
  16 #include <crm/common/xml.h>
  17 #include <crm/common/scheduler_internal.h>
  18 #include <crm/pengine/internal.h>
  19 #include <crm/common/xml_internal.h>
  20 #include "pe_status_private.h"
  21 
  22 static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
  23                              guint interval_ms);
  24 
  25 static void
  26 add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
  27 {
  28     if (scheduler->priv->singletons == NULL) {
  29         scheduler->priv->singletons = pcmk__strkey_table(NULL, NULL);
  30     }
  31     g_hash_table_insert(scheduler->priv->singletons, action->uuid, action);
  32 }
  33 
  34 static pcmk_action_t *
  35 lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
  36 {
  37     /* @TODO This is the only use of the pcmk_scheduler_t:singletons hash table.
  38      * Compare the performance of this approach to keeping the
  39      * pcmk_scheduler_t:actions list sorted by action key and just searching
  40      * that instead.
  41      */
  42     if (scheduler->priv->singletons == NULL) {
  43         return NULL;
  44     }
  45     return g_hash_table_lookup(scheduler->priv->singletons, action_uuid);
  46 }
  47 
  48 /*!
  49  * \internal
  50  * \brief Find an existing action that matches arguments
  51  *
  52  * \param[in] key        Action key to match
  53  * \param[in] rsc        Resource to match (if any)
  54  * \param[in] node       Node to match (if any)
  55  * \param[in] scheduler  Scheduler data
  56  *
  57  * \return Existing action that matches arguments (or NULL if none)
  58  */
  59 static pcmk_action_t *
  60 find_existing_action(const char *key, const pcmk_resource_t *rsc,
  61                      const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
  62 {
  63     /* When rsc is NULL, it would be quicker to check
  64      * scheduler->priv->singletons, but checking all scheduler->priv->actions
  65      * takes the node into account.
  66      */
  67     GList *actions = (rsc == NULL)? scheduler->priv->actions : rsc->priv->actions;
  68     GList *matches = find_actions(actions, key, node);
  69     pcmk_action_t *action = NULL;
  70 
  71     if (matches == NULL) {
  72         return NULL;
  73     }
  74     CRM_LOG_ASSERT(!pcmk__list_of_multiple(matches));
  75 
  76     action = matches->data;
  77     g_list_free(matches);
  78     return action;
  79 }
  80 
  81 /*!
  82  * \internal
  83  * \brief Find the XML configuration corresponding to a specific action key
  84  *
  85  * \param[in] rsc               Resource to find action configuration for
   86  * \param[in] action_name       Action name to search for
       * \param[in] interval_ms       Action interval (in milliseconds) to search for
   87  * \param[in] include_disabled  If false, do not return disabled actions
  88  *
  89  * \return XML configuration of desired action if any, otherwise NULL
  90  */
  91 static xmlNode *
  92 find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
  93                          guint interval_ms, bool include_disabled)
  94 {
  95     for (xmlNode *operation = pcmk__xe_first_child(rsc->priv->ops_xml,
  96                                                    PCMK_XE_OP, NULL, NULL);
  97          operation != NULL; operation = pcmk__xe_next(operation, PCMK_XE_OP)) {
  98 
  99         bool enabled = false;
 100         const char *config_name = NULL;
 101         const char *interval_spec = NULL;
 102         guint tmp_ms = 0U;
 103 
 104         // @TODO This does not consider meta-attributes, rules, defaults, etc.
 105         if (!include_disabled
 106             && (pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
 107                                        &enabled) == pcmk_rc_ok) && !enabled) {
 108             continue;
 109         }
 110 
 111         interval_spec = crm_element_value(operation, PCMK_META_INTERVAL);
 112         pcmk_parse_interval_spec(interval_spec, &tmp_ms);
 113         if (tmp_ms != interval_ms) {
 114             continue;
 115         }
 116 
 117         config_name = crm_element_value(operation, PCMK_XA_NAME);
 118         if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
 119             return operation;
 120         }
 121     }
 122     return NULL;
 123 }
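      /* Illustrative example (an addition to this listing, not upstream code):
       * for a resource whose operations include
       *
       *   <op id="db-monitor-10s" name="monitor" interval="10s" timeout="30s"/>
       *
       * a call such as
       *
       *   xmlNode *op = find_exact_action_config(rsc, PCMK_ACTION_MONITOR,
       *                                          10000, false);
       *
       * returns that <op> element, because "10s" parses to 10000ms and the
       * names match. The "db-monitor-10s" ID is hypothetical.
       */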
 124 
 125 /*!
 126  * \internal
 127  * \brief Find the XML configuration of a resource action
 128  *
 129  * \param[in] rsc               Resource to find action configuration for
 130  * \param[in] action_name       Action name to search for
 131  * \param[in] interval_ms       Action interval (in milliseconds) to search for
 132  * \param[in] include_disabled  If false, do not return disabled actions
 133  *
 134  * \return XML configuration of desired action if any, otherwise NULL
 135  */
 136 xmlNode *
 137 pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
 138                          guint interval_ms, bool include_disabled)
 139 {
 140     xmlNode *action_config = NULL;
 141 
 142     // Try requested action first
 143     action_config = find_exact_action_config(rsc, action_name, interval_ms,
 144                                              include_disabled);
 145 
 146     // For migrate_to and migrate_from actions, retry with "migrate"
 147     // @TODO This should be either documented or deprecated
 148     if ((action_config == NULL)
 149         && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
 150                             PCMK_ACTION_MIGRATE_FROM, NULL)) {
 151         action_config = find_exact_action_config(rsc, "migrate", 0,
 152                                                  include_disabled);
 153     }
 154 
 155     return action_config;
 156 }
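      /* Illustrative usage sketch (not upstream code): look up the configured
       * XML for a resource's 10-second monitor, skipping disabled <op> entries:
       *
       *   xmlNode *cfg = pcmk__find_action_config(rsc, PCMK_ACTION_MONITOR,
       *                                           10000, false);
       *
       * For migrate_to/migrate_from, a second lookup with the legacy name
       * "migrate" and a 0 interval is attempted when the first finds nothing.
       */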
 157 
 158 /*!
 159  * \internal
 160  * \brief Create a new action object
 161  *
 162  * \param[in]     key        Action key
 163  * \param[in]     task       Action name
 164  * \param[in,out] rsc        Resource that action is for (if any)
 165  * \param[in]     node       Node that action is on (if any)
 166  * \param[in]     optional   Whether action should be considered optional
 167  * \param[in,out] scheduler  Scheduler data
 168  *
 169  * \return Newly allocated action
 170  * \note This function takes ownership of \p key. It is the caller's
 171  *       responsibility to free the return value using pcmk__free_action().
 172  */
 173 static pcmk_action_t *
 174 new_action(char *key, const char *task, pcmk_resource_t *rsc,
 175            const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
 176 {
 177     pcmk_action_t *action = pcmk__assert_alloc(1, sizeof(pcmk_action_t));
 178 
 179     action->rsc = rsc;
 180     action->task = pcmk__str_copy(task);
 181     action->uuid = key;
 182     action->scheduler = scheduler;
 183 
 184     if (node) {
 185         action->node = pe__copy_node(node);
 186     }
 187 
 188     if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
 189         // Resource history deletion for a node can be done on the DC
 190         pcmk__set_action_flags(action, pcmk__action_on_dc);
 191     }
 192 
 193     pcmk__set_action_flags(action, pcmk__action_runnable);
 194     if (optional) {
 195         pcmk__set_action_flags(action, pcmk__action_optional);
 196     } else {
 197         pcmk__clear_action_flags(action, pcmk__action_optional);
 198     }
 199 
 200     if (rsc == NULL) {
 201         action->meta = pcmk__strkey_table(free, free);
 202     } else {
 203         guint interval_ms = 0;
 204 
 205         parse_op_key(key, NULL, NULL, &interval_ms);
 206         action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
 207                                                     true);
 208 
 209         /* If the given key is for one of the many notification pseudo-actions
 210          * (pre_notify_promote, etc.), the actual action name is "notify"
 211          */
 212         if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
 213             action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
 214                                                         0, true);
 215         }
 216 
 217         unpack_operation(action, action->op_entry, interval_ms);
 218     }
 219 
 220     pcmk__rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
 221                     (optional? "optional" : "required"),
 222                     scheduler->priv->next_action_id, key, task,
 223                     ((rsc == NULL)? "no resource" : rsc->id),
 224                     pcmk__node_name(node));
 225     action->id = scheduler->priv->next_action_id++;
 226 
 227     scheduler->priv->actions = g_list_prepend(scheduler->priv->actions, action);
 228     if (rsc == NULL) {
 229         add_singleton(scheduler, action);
 230     } else {
 231         rsc->priv->actions = g_list_prepend(rsc->priv->actions, action);
 232     }
 233     return action;
 234 }
 235 
 236 /*!
 237  * \internal
 238  * \brief Unpack a resource's action-specific instance parameters
 239  *
 240  * \param[in]     action_xml  XML of action's configuration in CIB (if any)
 241  * \param[in,out] node_attrs  Table of node attributes (for rule evaluation)
 242  * \param[in,out] scheduler   Scheduler data (for rule evaluation)
 243  *
 244  * \return Newly allocated hash table of action-specific instance parameters
 245  */
 246 GHashTable *
 247 pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
 248                                GHashTable *node_attrs,
 249                                pcmk_scheduler_t *scheduler)
 250 {
 251     GHashTable *params = pcmk__strkey_table(free, free);
 252 
 253     const pcmk_rule_input_t rule_input = {
 254         .now = scheduler->priv->now,
 255         .node_attrs = node_attrs,
 256     };
 257 
 258     pe__unpack_dataset_nvpairs(action_xml, PCMK_XE_INSTANCE_ATTRIBUTES,
 259                                &rule_input, params, NULL, scheduler);
 260     return params;
 261 }
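      /* Illustrative example (not upstream code): action-specific instance
       * attributes are configured inside the <op> element, for example:
       *
       *   <op id="db-monitor-10s" name="monitor" interval="10s">
       *     <instance_attributes id="db-monitor-10s-params">
       *       <nvpair id="db-monitor-10s-depth" name="OCF_CHECK_LEVEL"
       *               value="10"/>
       *     </instance_attributes>
       *   </op>
       *
       * Passing that <op> element here yields a table mapping "OCF_CHECK_LEVEL"
       * to "10" once any rules have been evaluated. The IDs are hypothetical.
       */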
 262 
 263 /*!
 264  * \internal
 265  * \brief Update an action's optional flag
 266  *
 267  * \param[in,out] action    Action to update
 268  * \param[in]     optional  Requested optional status
 269  */
 270 static void
 271 update_action_optional(pcmk_action_t *action, gboolean optional)
 272 {
 273     // Force a non-recurring action to be optional if its resource is unmanaged
 274     if ((action->rsc != NULL) && (action->node != NULL)
 275         && !pcmk_is_set(action->flags, pcmk__action_pseudo)
 276         && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed)
 277         && (g_hash_table_lookup(action->meta, PCMK_META_INTERVAL) == NULL)) {
 278             pcmk__rsc_debug(action->rsc,
 279                             "%s on %s is optional (%s is unmanaged)",
 280                             action->uuid, pcmk__node_name(action->node),
 281                             action->rsc->id);
 282             pcmk__set_action_flags(action, pcmk__action_optional);
 283             // We shouldn't clear runnable here because ... something
 284 
 285     // Otherwise require the action if requested
 286     } else if (!optional) {
 287         pcmk__clear_action_flags(action, pcmk__action_optional);
 288     }
 289 }
 290 
 291 static enum pe_quorum_policy
 292 effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
 293 {
 294     enum pe_quorum_policy policy = scheduler->no_quorum_policy;
 295 
 296     if (pcmk_is_set(scheduler->flags, pcmk__sched_quorate)) {
 297         policy = pcmk_no_quorum_ignore;
 298 
 299     } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
 300         switch (rsc->priv->orig_role) {
 301             case pcmk_role_promoted:
 302             case pcmk_role_unpromoted:
 303                 if (rsc->priv->next_role > pcmk_role_unpromoted) {
 304                     pe__set_next_role(rsc, pcmk_role_unpromoted,
 305                                       PCMK_OPT_NO_QUORUM_POLICY "=demote");
 306                 }
 307                 policy = pcmk_no_quorum_ignore;
 308                 break;
 309             default:
 310                 policy = pcmk_no_quorum_stop;
 311                 break;
 312         }
 313     }
 314     return policy;
 315 }
 316 
 317 /*!
 318  * \internal
 319  * \brief Update a resource action's runnable flag
 320  *
 321  * \param[in,out] action     Action to update
 322  * \param[in,out] scheduler  Scheduler data
 323  *
 324  * \note This may also schedule fencing if a stop is unrunnable.
 325  */
 326 static void
 327 update_resource_action_runnable(pcmk_action_t *action,
 328                                 pcmk_scheduler_t *scheduler)
 329 {
 330     pcmk_resource_t *rsc = action->rsc;
 331 
 332     if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
 333         return;
 334     }
 335 
 336     if (action->node == NULL) {
 337         pcmk__rsc_trace(rsc, "%s is unrunnable (unallocated)", action->uuid);
 338         pcmk__clear_action_flags(action, pcmk__action_runnable);
 339 
 340     } else if (!pcmk_is_set(action->flags, pcmk__action_on_dc)
 341                && !(action->node->details->online)
 342                && (!pcmk__is_guest_or_bundle_node(action->node)
 343                    || pcmk_is_set(action->node->priv->flags,
 344                                   pcmk__node_remote_reset))) {
 345         pcmk__clear_action_flags(action, pcmk__action_runnable);
 346         do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
 347                    action->uuid, pcmk__node_name(action->node));
 348         if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)
 349             && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
 350             && !(action->node->details->unclean)) {
 351             pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
 352         }
 353 
 354     } else if (!pcmk_is_set(action->flags, pcmk__action_on_dc)
 355                && action->node->details->pending) {
 356         pcmk__clear_action_flags(action, pcmk__action_runnable);
 357         do_crm_log(LOG_WARNING,
 358                    "Action %s on %s is unrunnable (node is pending)",
 359                    action->uuid, pcmk__node_name(action->node));
 360 
 361     } else if (action->needs == pcmk__requires_nothing) {
 362         pe_action_set_reason(action, NULL, TRUE);
 363         if (pcmk__is_guest_or_bundle_node(action->node)
 364             && !pe_can_fence(scheduler, action->node)) {
 365             /* An action that requires nothing usually does not require any
 366              * fencing in order to be runnable. However, there is an exception:
 367              * such an action cannot be completed if it is on a guest node whose
 368              * host is unclean and cannot be fenced.
 369              */
 370             pcmk__rsc_debug(rsc,
 371                             "%s on %s is unrunnable "
 372                             "(node's host cannot be fenced)",
 373                             action->uuid, pcmk__node_name(action->node));
 374             pcmk__clear_action_flags(action, pcmk__action_runnable);
 375         } else {
 376             pcmk__rsc_trace(rsc,
 377                             "%s on %s does not require fencing or quorum",
 378                             action->uuid, pcmk__node_name(action->node));
 379             pcmk__set_action_flags(action, pcmk__action_runnable);
 380         }
 381 
 382     } else {
 383         switch (effective_quorum_policy(rsc, scheduler)) {
 384             case pcmk_no_quorum_stop:
 385                 pcmk__rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
 386                                 action->uuid, pcmk__node_name(action->node));
 387                 pcmk__clear_action_flags(action, pcmk__action_runnable);
 388                 pe_action_set_reason(action, "no quorum", true);
 389                 break;
 390 
 391             case pcmk_no_quorum_freeze:
 392                 if (!rsc->priv->fns->active(rsc, true)
 393                     || (rsc->priv->next_role > rsc->priv->orig_role)) {
 394                     pcmk__rsc_debug(rsc, "%s on %s is unrunnable (no quorum)",
 395                                     action->uuid,
 396                                     pcmk__node_name(action->node));
 397                     pcmk__clear_action_flags(action, pcmk__action_runnable);
 398                     pe_action_set_reason(action, "quorum freeze", true);
 399                 }
 400                 break;
 401 
 402             default:
 403                 //pe_action_set_reason(action, NULL, TRUE);
 404                 pcmk__set_action_flags(action, pcmk__action_runnable);
 405                 break;
 406         }
 407     }
 408 }
 409 
 410 static bool
 411 valid_stop_on_fail(const char *value)
 412 {
 413     return !pcmk__strcase_any_of(value,
 414                                  PCMK_VALUE_STANDBY, PCMK_VALUE_DEMOTE,
 415                                  PCMK_VALUE_STOP, NULL);
 416 }
 417 
 418 /*!
 419  * \internal
 420  * \brief Validate (and possibly reset) resource action's on_fail meta-attribute
 421  *
 422  * \param[in]     rsc            Resource that action is for
 423  * \param[in]     action_name    Action name
 424  * \param[in]     action_config  Action configuration XML from CIB (if any)
 425  * \param[in,out] meta           Table of action meta-attributes
 426  */
 427 static void
 428 validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
 429                  const xmlNode *action_config, GHashTable *meta)
 430 {
 431     const char *name = NULL;
 432     const char *role = NULL;
 433     const char *interval_spec = NULL;
 434     const char *value = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
 435     guint interval_ms = 0U;
 436 
 437     // Stop actions can only use certain on-fail values
 438     if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
 439         && !valid_stop_on_fail(value)) {
 440 
 441         pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s stop "
 442                          "action to default value because '%s' is not "
 443                          "allowed for stop", rsc->id, value);
 444         g_hash_table_remove(meta, PCMK_META_ON_FAIL);
 445         return;
 446     }
 447 
 448     /* Demote actions default on-fail to the on-fail value for the first
 449      * recurring monitor for the promoted role (if any).
 450      */
 451     if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
 452         && (value == NULL)) {
 453 
 454         /* @TODO This does not consider promote options set in a meta-attribute
 455          * block (which may have rules that need to be evaluated) rather than
 456          * XML properties.
 457          */
 458         for (xmlNode *operation = pcmk__xe_first_child(rsc->priv->ops_xml,
 459                                                        PCMK_XE_OP, NULL, NULL);
 460              operation != NULL;
 461              operation = pcmk__xe_next(operation, PCMK_XE_OP)) {
 462 
 463             bool enabled = false;
 464             const char *promote_on_fail = NULL;
 465 
  466             /* We only care about an explicit on-fail value (if the promote
  467              * action uses the default, the demote action can too)
  468              */
 469             promote_on_fail = crm_element_value(operation, PCMK_META_ON_FAIL);
 470             if (promote_on_fail == NULL) {
 471                 continue;
 472             }
 473 
 474             // We only care about recurring monitors for the promoted role
 475             name = crm_element_value(operation, PCMK_XA_NAME);
 476             role = crm_element_value(operation, PCMK_XA_ROLE);
 477             if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
 478                 || !pcmk__strcase_any_of(role, PCMK_ROLE_PROMOTED,
 479                                          PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
 480                 continue;
 481             }
 482             interval_spec = crm_element_value(operation, PCMK_META_INTERVAL);
 483             pcmk_parse_interval_spec(interval_spec, &interval_ms);
 484             if (interval_ms == 0U) {
 485                 continue;
 486             }
 487 
 488             // We only care about enabled monitors
 489             if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
 490                                         &enabled) == pcmk_rc_ok) && !enabled) {
 491                 continue;
 492             }
 493 
 494             /* Demote actions can't default to
 495              * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
 496              */
 497             if (pcmk__str_eq(promote_on_fail, PCMK_VALUE_DEMOTE,
 498                              pcmk__str_casei)) {
 499                 continue;
 500             }
 501 
 502             // Use value from first applicable promote action found
 503             pcmk__insert_dup(meta, PCMK_META_ON_FAIL, promote_on_fail);
 504         }
 505         return;
 506     }
 507 
 508     if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
 509         && !pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
 510 
 511         pcmk__insert_dup(meta, PCMK_META_ON_FAIL, PCMK_VALUE_IGNORE);
 512         return;
 513     }
 514 
 515     // PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE is allowed only for certain actions
 516     if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
 517         name = crm_element_value(action_config, PCMK_XA_NAME);
 518         role = crm_element_value(action_config, PCMK_XA_ROLE);
 519         interval_spec = crm_element_value(action_config, PCMK_META_INTERVAL);
 520         pcmk_parse_interval_spec(interval_spec, &interval_ms);
 521 
 522         if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
 523             && ((interval_ms == 0U)
 524                 || !pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
 525                 || !pcmk__strcase_any_of(role, PCMK_ROLE_PROMOTED,
 526                                          PCMK__ROLE_PROMOTED_LEGACY, NULL))) {
 527 
 528             pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for %s %s "
 529                              "action to default value because 'demote' is not "
 530                              "allowed for it", rsc->id, name);
 531             g_hash_table_remove(meta, PCMK_META_ON_FAIL);
 532             return;
 533         }
 534     }
 535 }
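      /* Illustrative example (not upstream code): given an operations section
       * such as
       *
       *   <op id="db-monitor-p" name="monitor" interval="10s"
       *       role="Promoted" on-fail="restart"/>
       *   <op id="db-demote" name="demote" interval="0s"/>
       *
       * the demote action has no explicit on-fail, so validate_on_fail() copies
       * "restart" from the promoted-role recurring monitor into the demote
       * action's meta table. The IDs are hypothetical.
       */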
 536 
 537 static int
 538 unpack_timeout(const char *value)
 539 {
 540     long long timeout_ms = crm_get_msec(value);
 541 
 542     if (timeout_ms <= 0) {
 543         timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
 544     }
 545     return (int) QB_MIN(timeout_ms, INT_MAX);
 546 }
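      /* Illustrative example (not upstream code): unpack_timeout("45s") returns
       * 45000, since crm_get_msec() accepts duration specifications such as
       * "45s" or "45000ms"; a NULL, unparsable, or non-positive value falls
       * back to PCMK_DEFAULT_ACTION_TIMEOUT_MS.
       */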
 547 
 548 // true if value contains valid, non-NULL interval origin for recurring op
 549 static bool
 550 unpack_interval_origin(const char *value, const xmlNode *xml_obj,
 551                        guint interval_ms, const crm_time_t *now,
 552                        long long *start_delay)
 553 {
 554     long long result = 0;
 555     guint interval_sec = pcmk__timeout_ms2s(interval_ms);
 556     crm_time_t *origin = NULL;
 557 
 558     // Ignore unspecified values and non-recurring operations
 559     if ((value == NULL) || (interval_ms == 0) || (now == NULL)) {
 560         return false;
 561     }
 562 
 563     // Parse interval origin from text
 564     origin = crm_time_new(value);
 565     if (origin == NULL) {
 566         pcmk__config_err("Ignoring '" PCMK_META_INTERVAL_ORIGIN "' for "
 567                          "operation '%s' because '%s' is not valid",
 568                          pcmk__s(pcmk__xe_id(xml_obj), "(missing ID)"), value);
 569         return false;
 570     }
 571 
 572     // Get seconds since origin (negative if origin is in the future)
 573     result = crm_time_get_seconds(now) - crm_time_get_seconds(origin);
 574     crm_time_free(origin);
 575 
 576     // Calculate seconds from closest interval to now
 577     result = result % interval_sec;
 578 
 579     // Calculate seconds remaining until next interval
 580     result = ((result <= 0)? 0 : interval_sec) - result;
 581     crm_info("Calculated a start delay of %llds for operation '%s'",
 582              result, pcmk__s(pcmk__xe_id(xml_obj), "(unspecified)"));
 583 
 584     if (start_delay != NULL) {
 585         *start_delay = result * 1000; // milliseconds
 586     }
 587     return true;
 588 }
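      /* Illustrative worked example (not upstream code): for a 24-hour monitor
       * whose PCMK_META_INTERVAL_ORIGIN parses to 02:00 on January 1, evaluated
       * with a current time of 14:30 on January 5:
       *
       *   interval_sec   = 86400
       *   now - origin   = 4 * 86400 + 45000 = 390600 seconds
       *   390600 % 86400 = 45000
       *   delay          = 86400 - 45000 = 41400 seconds (11.5 hours)
       *
       * so *start_delay becomes 41400000 milliseconds, aligning the first run
       * to the next 02:00 boundary.
       */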
 589 
 590 static int
 591 unpack_start_delay(const char *value, GHashTable *meta)
 592 {
 593     long long start_delay_ms = 0;
 594 
 595     if (value == NULL) {
 596         return 0;
 597     }
 598 
 599     start_delay_ms = crm_get_msec(value);
 600     start_delay_ms = QB_MIN(start_delay_ms, INT_MAX);
 601     if (start_delay_ms < 0) {
 602         start_delay_ms = 0;
 603     }
 604 
 605     if (meta != NULL) {
 606         g_hash_table_replace(meta, strdup(PCMK_META_START_DELAY),
 607                              pcmk__itoa(start_delay_ms));
 608     }
 609 
 610     return (int) start_delay_ms;
 611 }
 612 
 613 /*!
 614  * \internal
 615  * \brief Find a resource's most frequent recurring monitor
 616  *
 617  * \param[in] rsc  Resource to check
 618  *
 619  * \return Operation XML configured for most frequent recurring monitor for
 620  *         \p rsc (if any)
 621  */
 622 static xmlNode *
 623 most_frequent_monitor(const pcmk_resource_t *rsc)
 624 {
 625     guint min_interval_ms = G_MAXUINT;
 626     xmlNode *op = NULL;
 627 
 628     for (xmlNode *operation = pcmk__xe_first_child(rsc->priv->ops_xml,
 629                                                    PCMK_XE_OP, NULL, NULL);
 630          operation != NULL; operation = pcmk__xe_next(operation, PCMK_XE_OP)) {
 631 
 632         bool enabled = false;
 633         guint interval_ms = 0U;
 634         const char *interval_spec = crm_element_value(operation,
 635                                                       PCMK_META_INTERVAL);
 636 
 637         // We only care about enabled recurring monitors
 638         if (!pcmk__str_eq(crm_element_value(operation, PCMK_XA_NAME),
 639                           PCMK_ACTION_MONITOR, pcmk__str_none)) {
 640             continue;
 641         }
 642 
 643         pcmk_parse_interval_spec(interval_spec, &interval_ms);
 644         if (interval_ms == 0U) {
 645             continue;
 646         }
 647 
 648         // @TODO This does not consider meta-attributes, rules, defaults, etc.
 649         if ((pcmk__xe_get_bool_attr(operation, PCMK_META_ENABLED,
 650                                     &enabled) == pcmk_rc_ok) && !enabled) {
 651             continue;
 652         }
 653 
 654         if (interval_ms < min_interval_ms) {
 655             min_interval_ms = interval_ms;
 656             op = operation;
 657         }
 658     }
 659     return op;
 660 }
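      /* Illustrative example (not upstream code): given
       *
       *   <op id="db-monitor-30s" name="monitor" interval="30s"/>
       *   <op id="db-monitor-10s" name="monitor" interval="10s"/>
       *   <op id="db-start" name="start" interval="0s" timeout="60s"/>
       *
       * most_frequent_monitor() returns the 10-second monitor: the start
       * operation is not a monitor, and 10000ms is the smallest nonzero
       * interval. The IDs are hypothetical.
       */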
 661 
 662 /*!
 663  * \internal
 664  * \brief Unpack action meta-attributes
 665  *
 666  * \param[in,out] rsc            Resource that action is for
 667  * \param[in]     node           Node that action is on
 668  * \param[in]     action_name    Action name
 669  * \param[in]     interval_ms    Action interval (in milliseconds)
 670  * \param[in]     action_config  Action XML configuration from CIB (if any)
 671  *
 672  * Unpack a resource action's meta-attributes (normalizing the interval,
 673  * timeout, and start delay values as integer milliseconds) from its CIB XML
 674  * configuration (including defaults).
 675  *
 676  * \return Newly allocated hash table with normalized action meta-attributes
 677  */
 678 GHashTable *
 679 pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
 680                          const char *action_name, guint interval_ms,
 681                          const xmlNode *action_config)
 682 {
 683     GHashTable *meta = NULL;
 684     const char *timeout_spec = NULL;
 685     const char *str = NULL;
 686 
 687     const pcmk_rule_input_t rule_input = {
 688         /* Node attributes are not set because node expressions are not allowed
 689          * for meta-attributes
 690          */
 691         .now = rsc->priv->scheduler->priv->now,
 692         .rsc_standard = crm_element_value(rsc->priv->xml, PCMK_XA_CLASS),
 693         .rsc_provider = crm_element_value(rsc->priv->xml, PCMK_XA_PROVIDER),
 694         .rsc_agent = crm_element_value(rsc->priv->xml, PCMK_XA_TYPE),
 695         .op_name = action_name,
 696         .op_interval_ms = interval_ms,
 697     };
 698 
 699     meta = pcmk__strkey_table(free, free);
 700 
 701     if (action_config != NULL) {
 702         // <op> <meta_attributes> take precedence over defaults
 703         pe__unpack_dataset_nvpairs(action_config, PCMK_XE_META_ATTRIBUTES,
 704                                    &rule_input, meta, NULL,
 705                                    rsc->priv->scheduler);
 706 
 707         /* Anything set as an <op> XML property has highest precedence.
 708          * This ensures we use the name and interval from the <op> tag.
 709          * (See below for the only exception, fence device start/probe timeout.)
 710          */
 711         for (xmlAttrPtr attr = action_config->properties;
 712              attr != NULL; attr = attr->next) {
 713             pcmk__insert_dup(meta, (const char *) attr->name,
 714                              pcmk__xml_attr_value(attr));
 715         }
 716     }
 717 
 718     // Derive default timeout for probes from recurring monitor timeouts
 719     if (pcmk_is_probe(action_name, interval_ms)
 720         && (g_hash_table_lookup(meta, PCMK_META_TIMEOUT) == NULL)) {
 721 
 722         xmlNode *min_interval_mon = most_frequent_monitor(rsc);
 723 
 724         if (min_interval_mon != NULL) {
 725             /* @TODO This does not consider timeouts set in
 726              * PCMK_XE_META_ATTRIBUTES blocks (which may also have rules that
 727              * need to be evaluated).
 728              */
 729             timeout_spec = crm_element_value(min_interval_mon,
 730                                              PCMK_META_TIMEOUT);
 731             if (timeout_spec != NULL) {
 732                 pcmk__rsc_trace(rsc,
 733                                 "Setting default timeout for %s probe to "
 734                                 "most frequent monitor's timeout '%s'",
 735                                 rsc->id, timeout_spec);
 736                 pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec);
 737             }
 738         }
 739     }
 740 
 741     // Cluster-wide <op_defaults> <meta_attributes>
 742     pe__unpack_dataset_nvpairs(rsc->priv->scheduler->priv->op_defaults,
 743                                PCMK_XE_META_ATTRIBUTES, &rule_input, meta, NULL,
 744                                rsc->priv->scheduler);
 745 
 746     g_hash_table_remove(meta, PCMK_XA_ID);
 747 
 748     // Normalize interval to milliseconds
 749     if (interval_ms > 0) {
 750         g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_INTERVAL),
 751                             crm_strdup_printf("%u", interval_ms));
 752     } else {
 753         g_hash_table_remove(meta, PCMK_META_INTERVAL);
 754     }
 755 
 756     /* Timeout order of precedence (highest to lowest):
 757      *   1. pcmk_monitor_timeout resource parameter (only for starts and probes
 758      *      when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
 759      *      monitors via the executor instead)
 760      *   2. timeout configured in <op> (with <op timeout> taking precedence over
 761      *      <op> <meta_attributes>)
 762      *   3. timeout configured in <op_defaults> <meta_attributes>
 763      *   4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
 764      */
 765 
 766     // Check for pcmk_monitor_timeout
 767     if (pcmk_is_set(pcmk_get_ra_caps(rule_input.rsc_standard),
 768                     pcmk_ra_cap_fence_params)
 769         && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
 770             || pcmk_is_probe(action_name, interval_ms))) {
 771 
 772         GHashTable *params = pe_rsc_params(rsc, node, rsc->priv->scheduler);
 773 
 774         timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
 775         if (timeout_spec != NULL) {
 776             pcmk__rsc_trace(rsc,
 777                             "Setting timeout for %s %s to "
 778                             "pcmk_monitor_timeout (%s)",
 779                             rsc->id, action_name, timeout_spec);
 780             pcmk__insert_dup(meta, PCMK_META_TIMEOUT, timeout_spec);
 781         }
 782     }
 783 
 784     // Normalize timeout to positive milliseconds
 785     timeout_spec = g_hash_table_lookup(meta, PCMK_META_TIMEOUT);
 786     g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_TIMEOUT),
 787                         pcmk__itoa(unpack_timeout(timeout_spec)));
 788 
 789     // Ensure on-fail has a valid value
 790     validate_on_fail(rsc, action_name, action_config, meta);
 791 
 792     // Normalize PCMK_META_START_DELAY
 793     str = g_hash_table_lookup(meta, PCMK_META_START_DELAY);
 794     if (str != NULL) {
 795         unpack_start_delay(str, meta);
 796     } else {
 797         long long start_delay = 0;
 798 
 799         str = g_hash_table_lookup(meta, PCMK_META_INTERVAL_ORIGIN);
 800         if (unpack_interval_origin(str, action_config, interval_ms,
 801                                    rsc->priv->scheduler->priv->now,
 802                                    &start_delay)) {
 803             g_hash_table_insert(meta, pcmk__str_copy(PCMK_META_START_DELAY),
 804                                 crm_strdup_printf("%lld", start_delay));
 805         }
 806     }
 807     return meta;
 808 }
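      /* Illustrative example (not upstream code): with cluster-wide defaults
       *
       *   <op_defaults>
       *     <meta_attributes id="op-defaults">
       *       <nvpair id="op-defaults-timeout" name="timeout" value="20s"/>
       *     </meta_attributes>
       *   </op_defaults>
       *
       * and a resource operation
       *
       *   <op id="db-start" name="start" interval="0s" timeout="90s"/>
       *
       * unpacking the start action yields PCMK_META_TIMEOUT="90000" (the <op>
       * attribute outranks the default, per the precedence above) and no
       * PCMK_META_INTERVAL entry because the interval is 0. The IDs are
       * hypothetical.
       */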
 809 
 810 /*!
 811  * \internal
 812  * \brief Determine an action's quorum and fencing dependency
 813  *
 814  * \param[in] rsc          Resource that action is for
 815  * \param[in] action_name  Name of action being unpacked
 816  *
 817  * \return Quorum and fencing dependency appropriate to action
 818  */
 819 enum pcmk__requires
 820 pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
 821 {
 822     const char *value = NULL;
 823     enum pcmk__requires requires = pcmk__requires_nothing;
 824 
 825     CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
 826 
 827     if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
 828                               PCMK_ACTION_PROMOTE, NULL)) {
 829         value = "nothing (not start or promote)";
 830 
 831     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) {
 832         requires = pcmk__requires_fencing;
 833         value = "fencing";
 834 
 835     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_quorum)) {
 836         requires = pcmk__requires_quorum;
 837         value = "quorum";
 838 
 839     } else {
 840         value = "nothing";
 841     }
 842     pcmk__rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
 843     return requires;
 844 }
 845 
 846 /*!
 847  * \internal
 848  * \brief Parse action failure response from a user-provided string
 849  *
 850  * \param[in] rsc          Resource that action is for
 851  * \param[in] action_name  Name of action
 852  * \param[in] interval_ms  Action interval (in milliseconds)
 853  * \param[in] value        User-provided configuration value for on-fail
 854  *
  855  * \return Action failure response parsed from \p value
 856  */
 857 enum pcmk__on_fail
 858 pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
 859                     guint interval_ms, const char *value)
 860 {
 861     const char *desc = NULL;
 862     bool needs_remote_reset = false;
 863     enum pcmk__on_fail on_fail = pcmk__on_fail_ignore;
 864     const pcmk_scheduler_t *scheduler = NULL;
 865 
 866     // There's no enum value for unknown or invalid, so assert
 867     pcmk__assert((rsc != NULL) && (action_name != NULL));
 868     scheduler = rsc->priv->scheduler;
 869 
 870     if (value == NULL) {
 871         // Use default
 872 
 873     } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) {
 874         on_fail = pcmk__on_fail_block;
 875         desc = "block";
 876 
 877     } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE, pcmk__str_casei)) {
 878         if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
 879             on_fail = pcmk__on_fail_fence_node;
 880             desc = "node fencing";
 881         } else {
 882             pcmk__config_err("Resetting '" PCMK_META_ON_FAIL "' for "
 883                              "%s of %s to 'stop' because 'fence' is not "
 884                              "valid when fencing is disabled",
 885                              action_name, rsc->id);
 886             /* @TODO This should probably do
 887             g_hash_table_remove(meta, PCMK_META_ON_FAIL);
 888             like the other "Resetting" spots, to avoid repeating the message
 889             */
 890             on_fail = pcmk__on_fail_stop;
 891             desc = "stop resource";
 892         }
 893 
 894     } else if (pcmk__str_eq(value, PCMK_VALUE_STANDBY, pcmk__str_casei)) {
 895         on_fail = pcmk__on_fail_standby_node;
 896         desc = "node standby";
 897 
 898     } else if (pcmk__strcase_any_of(value,
 899                                     PCMK_VALUE_IGNORE, PCMK_VALUE_NOTHING,
 900                                     NULL)) {
 901         desc = "ignore";
 902 
 903     } else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
 904         on_fail = pcmk__on_fail_ban;
 905         desc = "force migration";
 906 
 907     } else if (pcmk__str_eq(value, PCMK_VALUE_STOP, pcmk__str_casei)) {
 908         on_fail = pcmk__on_fail_stop;
 909         desc = "stop resource";
 910 
 911     } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) {
 912         on_fail = pcmk__on_fail_restart;
 913         desc = "restart (and possibly migrate)";
 914 
 915     } else if (pcmk__str_eq(value, PCMK_VALUE_RESTART_CONTAINER,
 916                             pcmk__str_casei)) {
 917         if (rsc->priv->launcher == NULL) {
 918             pcmk__rsc_debug(rsc,
 919                             "Using default " PCMK_META_ON_FAIL " for %s "
 920                             "of %s because it does not have a launcher",
 921                             action_name, rsc->id);
 922         } else {
 923             on_fail = pcmk__on_fail_restart_container;
 924             desc = "restart container (and possibly migrate)";
 925         }
 926 
 927     } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
 928         on_fail = pcmk__on_fail_demote;
 929         desc = "demote instance";
 930 
 931     } else {
 932         pcmk__config_err("Using default '" PCMK_META_ON_FAIL "' for "
 933                          "%s of %s because '%s' is not valid",
 934                          action_name, rsc->id, value);
 935     }
 936 
 937     /* Remote node connections are handled specially. Failures that result
 938      * in dropping an active connection must result in fencing. The only
 939      * failures that don't are probes and starts. The user can explicitly set
 940      * PCMK_META_ON_FAIL=PCMK_VALUE_FENCE to fence after start failures.
 941      */
 942     if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)
 943         && pcmk__is_remote_node(pcmk_find_node(scheduler, rsc->id))
 944         && !pcmk_is_probe(action_name, interval_ms)
 945         && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
 946         needs_remote_reset = true;
 947         if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
 948             desc = NULL; // Force default for unmanaged connections
 949         }
 950     }
 951 
 952     if (desc != NULL) {
 953         // Explicit value used, default not needed
 954 
 955     } else if (rsc->priv->launcher != NULL) {
 956         on_fail = pcmk__on_fail_restart_container;
 957         desc = "restart container (and possibly migrate) (default)";
 958 
 959     } else if (needs_remote_reset) {
 960         if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
 961             if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
 962                 desc = "fence remote node (default)";
 963             } else {
 964                 desc = "recover remote node connection (default)";
 965             }
 966             on_fail = pcmk__on_fail_reset_remote;
 967         } else {
 968             on_fail = pcmk__on_fail_stop;
 969             desc = "stop unmanaged remote node (enforcing default)";
 970         }
 971 
 972     } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
 973         if (pcmk_is_set(scheduler->flags, pcmk__sched_fencing_enabled)) {
 974             on_fail = pcmk__on_fail_fence_node;
 975             desc = "resource fence (default)";
 976         } else {
 977             on_fail = pcmk__on_fail_block;
 978             desc = "resource block (default)";
 979         }
 980 
 981     } else {
 982         on_fail = pcmk__on_fail_restart;
 983         desc = "restart (and possibly migrate) (default)";
 984     }
 985 
 986     pcmk__rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
 987                     pcmk__readable_interval(interval_ms), action_name,
 988                     rsc->id, desc);
 989     return on_fail;
 990 }
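      /* Illustrative examples (not upstream code): for an ordinary primitive,
       * pcmk__parse_on_fail(rsc, PCMK_ACTION_MONITOR, 10000, NULL) falls
       * through to "restart (and possibly migrate) (default)", while the same
       * call for a stop action with fencing enabled yields
       * pcmk__on_fail_fence_node. An explicit "fence" with fencing disabled is
       * reset to pcmk__on_fail_stop with a configuration error.
       */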
 991 
 992 /*!
 993  * \internal
 994  * \brief Determine a resource's role after failure of an action
 995  *
 996  * \param[in] rsc          Resource that action is for
 997  * \param[in] action_name  Action name
 998  * \param[in] on_fail      Failure handling for action
 999  * \param[in] meta         Unpacked action meta-attributes
1000  *
1001  * \return Resource role that results from failure of action
1002  */
1003 enum rsc_role_e
1004 pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
1005                          enum pcmk__on_fail on_fail, GHashTable *meta)
1006 {
1007     enum rsc_role_e role = pcmk_role_unknown;
1008 
 1009     // In certain circumstances, set a special default for the role after failure
1010     switch (on_fail) {
1011         case pcmk__on_fail_stop:
1012             role = pcmk_role_stopped;
1013             break;
1014 
1015         case pcmk__on_fail_reset_remote:
1016             if (rsc->priv->remote_reconnect_ms != 0U) {
1017                 role = pcmk_role_stopped;
1018             }
1019             break;
1020 
1021         default:
1022             break;
1023     }
1024 
1025     if (role == pcmk_role_unknown) {
1026         // Use default
1027         if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
1028             role = pcmk_role_unpromoted;
1029         } else {
1030             role = pcmk_role_started;
1031         }
1032     }
1033     pcmk__rsc_trace(rsc, "Role after %s %s failure is: %s",
1034                     rsc->id, action_name, pcmk_role_text(role));
1035     return role;
1036 }
1037 
1038 /*!
1039  * \internal
1040  * \brief Unpack action configuration
1041  *
1042  * Unpack a resource action's meta-attributes (normalizing the interval,
1043  * timeout, and start delay values as integer milliseconds), requirements, and
1044  * failure policy from its CIB XML configuration (including defaults).
1045  *
1046  * \param[in,out] action       Resource action to unpack into
1047  * \param[in]     xml_obj      Action configuration XML (NULL for defaults only)
1048  * \param[in]     interval_ms  How frequently to perform the operation
1049  */
1050 static void
1051 unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
1052                  guint interval_ms)
1053 {
1054     const char *value = NULL;
1055 
1056     action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
1057                                             action->task, interval_ms, xml_obj);
1058     action->needs = pcmk__action_requires(action->rsc, action->task);
1059 
1060     value = g_hash_table_lookup(action->meta, PCMK_META_ON_FAIL);
1061     action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
1062                                           interval_ms, value);
1063 
1064     action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
1065                                                  action->on_fail, action->meta);
1066 }
1067 
1068 /*!
1069  * \brief Create or update an action object
1070  *
1071  * \param[in,out] rsc          Resource that action is for (if any)
1072  * \param[in,out] key          Action key (must be non-NULL)
1073  * \param[in]     task         Action name (must be non-NULL)
1074  * \param[in]     on_node      Node that action is on (if any)
1075  * \param[in]     optional     Whether action should be considered optional
1076  * \param[in,out] scheduler    Scheduler data
1077  *
1078  * \return Action object corresponding to arguments (guaranteed not to be
1079  *         \c NULL)
1080  * \note This function takes ownership of (and might free) \p key, and
1081  *       \p scheduler takes ownership of the returned action (the caller should
1082  *       not free it).
1083  */
1084 pcmk_action_t *
1085 custom_action(pcmk_resource_t *rsc, char *key, const char *task,
1086               const pcmk_node_t *on_node, gboolean optional,
1087               pcmk_scheduler_t *scheduler)
1088 {
1089     pcmk_action_t *action = NULL;
1090 
1091     pcmk__assert((key != NULL) && (task != NULL) && (scheduler != NULL));
1092 
1093     action = find_existing_action(key, rsc, on_node, scheduler);
1094     if (action == NULL) {
1095         action = new_action(key, task, rsc, on_node, optional, scheduler);
1096     } else {
1097         free(key);
1098     }
1099 
1100     update_action_optional(action, optional);
1101 
1102     if (rsc != NULL) {
1103         /* An action can be initially created with a NULL node, and later have
1104          * the node added via find_existing_action() (above) -> find_actions().
1105          * That is why the extra parameters are unpacked here rather than in
1106          * new_action().
1107          */
1108         if ((action->node != NULL) && (action->op_entry != NULL)
1109             && !pcmk_is_set(action->flags, pcmk__action_attrs_evaluated)) {
1110 
1111             GHashTable *attrs = action->node->priv->attrs;
1112 
1113             if (action->extra != NULL) {
1114                 g_hash_table_destroy(action->extra);
1115             }
1116             action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
1117                                                            attrs, scheduler);
1118             pcmk__set_action_flags(action, pcmk__action_attrs_evaluated);
1119         }
1120 
1121         update_resource_action_runnable(action, scheduler);
1122     }
1123 
1124     if (action->extra == NULL) {
1125         action->extra = pcmk__strkey_table(free, free);
1126     }
1127 
1128     return action;
1129 }
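      /* Illustrative usage sketch (not upstream code): create (or find) a
       * required stop action for a resource on a chosen node, assuming
       * pcmk__op_key() is used to build the conventional
       * "<rsc>_<task>_<interval>" key:
       *
       *   pcmk_action_t *stop =
       *       custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
       *                     PCMK_ACTION_STOP, node, FALSE, scheduler);
       *
       * custom_action() frees the key when an existing action matches, so the
       * caller must not reuse it afterward.
       */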
1130 
1131 pcmk_action_t *
1132 get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
1133 {
1134     pcmk_action_t *op = lookup_singleton(scheduler, name);
1135 
1136     if (op == NULL) {
1137         op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
1138         pcmk__set_action_flags(op, pcmk__action_pseudo|pcmk__action_runnable);
1139     }
1140     return op;
1141 }
1142 
1143 static GList *
1144 find_unfencing_devices(GList *candidates, GList *matches) 
1145 {
1146     for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
1147         pcmk_resource_t *candidate = gIter->data;
1148 
1149         if (candidate->priv->children != NULL) {
1150             matches = find_unfencing_devices(candidate->priv->children,
1151                                              matches);
1152 
1153         } else if (!pcmk_is_set(candidate->flags, pcmk__rsc_fence_device)) {
1154             continue;
1155 
1156         } else if (pcmk_is_set(candidate->flags, pcmk__rsc_needs_unfencing)) {
1157             matches = g_list_prepend(matches, candidate);
1158 
1159         } else if (pcmk__str_eq(g_hash_table_lookup(candidate->priv->meta,
1160                                                     PCMK_STONITH_PROVIDES),
1161                                 PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
1162             matches = g_list_prepend(matches, candidate);
1163         }
1164     }
1165     return matches;
1166 }
1167 
1168 static int
1169 node_priority_fencing_delay(const pcmk_node_t *node,
1170                             const pcmk_scheduler_t *scheduler)
1171 {
1172     int member_count = 0;
1173     int online_count = 0;
1174     int top_priority = 0;
1175     int lowest_priority = 0;
1176     GList *gIter = NULL;
1177 
1178     // PCMK_OPT_PRIORITY_FENCING_DELAY is disabled
1179     if (scheduler->priv->priority_fencing_ms == 0U) {
1180         return 0;
1181     }
1182 
1183     /* No need to request a delay if the fencing target is not a normal cluster
1184      * member, for example if it's a remote node or a guest node. */
1185     if (node->priv->variant != pcmk__node_variant_cluster) {
1186         return 0;
1187     }
1188 
1189     // No need to request a delay if the fencing target is in our partition
1190     if (node->details->online) {
1191         return 0;
1192     }
1193 
1194     for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
1195         pcmk_node_t *n = gIter->data;
1196 
1197         if (n->priv->variant != pcmk__node_variant_cluster) {
1198             continue;
1199         }
1200 
1201         member_count ++;
1202 
1203         if (n->details->online) {
1204             online_count++;
1205         }
1206 
1207         if (member_count == 1
1208             || n->priv->priority > top_priority) {
1209             top_priority = n->priv->priority;
1210         }
1211 
1212         if (member_count == 1
1213             || n->priv->priority < lowest_priority) {
1214             lowest_priority = n->priv->priority;
1215         }
1216     }
1217 
1218     // No need to delay if we have more than half of the cluster members
1219     if (online_count > member_count / 2) {
1220         return 0;
1221     }
1222 
1223     /* All the nodes have equal priority.
1224      * Any configured corresponding `pcmk_delay_base/max` will be applied. */
1225     if (lowest_priority == top_priority) {
1226         return 0;
1227     }
1228 
1229     if (node->priv->priority < top_priority) {
1230         return 0;
1231     }
1232 
1233     return pcmk__timeout_ms2s(scheduler->priv->priority_fencing_ms);
1234 }
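      /* Illustrative example (not upstream code): suppose the computed node
       * priorities (the total priority of resources active on each node, as
       * calculated by native_add_running()) are node1=10, node2=5, node3=0,
       * and node1 and node2 are offline while only node3 remains online.
       * Fencing node1 is delayed by PCMK_OPT_PRIORITY_FENCING_DELAY because it
       * holds the top priority and no more than half of the members are
       * online; fencing node2 gets no extra delay. If two of the three nodes
       * were still online, no priority delay would be requested at all.
       */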
1235 
1236 pcmk_action_t *
1237 pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
1238             const char *reason, bool priority_delay,
1239             pcmk_scheduler_t *scheduler)
1240 {
1241     char *op_key = NULL;
1242     pcmk_action_t *stonith_op = NULL;
1243 
1244     if(op == NULL) {
1245         op = scheduler->priv->fence_action;
1246     }
1247 
1248     op_key = crm_strdup_printf("%s-%s-%s",
1249                                PCMK_ACTION_STONITH, node->priv->name, op);
1250 
1251     stonith_op = lookup_singleton(scheduler, op_key);
1252     if(stonith_op == NULL) {
1253         stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
1254                                    TRUE, scheduler);
1255 
1256         pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE, node->priv->name);
1257         pcmk__insert_meta(stonith_op, PCMK__META_ON_NODE_UUID,
1258                           node->priv->id);
1259         pcmk__insert_meta(stonith_op, PCMK__META_STONITH_ACTION, op);
1260 
1261         if (pcmk_is_set(scheduler->flags, pcmk__sched_enable_unfencing)) {
1262             /* Extra work to detect device changes
1263              */
1264             GString *digests_all = g_string_sized_new(1024);
1265             GString *digests_secure = g_string_sized_new(1024);
1266 
1267             GList *matches = find_unfencing_devices(scheduler->priv->resources,
1268                                                     NULL);
1269 
1270             for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
1271                 pcmk_resource_t *match = gIter->data;
1272                 const char *agent = g_hash_table_lookup(match->priv->meta,
1273                                                         PCMK_XA_TYPE);
1274                 pcmk__op_digest_t *data = NULL;
1275 
1276                 data = pe__compare_fencing_digest(match, agent, node,
1277                                                   scheduler);
1278                 if (data->rc == pcmk__digest_mismatch) {
1279                     optional = FALSE;
1280                     crm_notice("Unfencing node %s because the definition of "
1281                                "%s changed", pcmk__node_name(node), match->id);
1282                     if (!pcmk__is_daemon && (scheduler->priv->out != NULL)) {
1283                         pcmk__output_t *out = scheduler->priv->out;
1284 
1285                         out->info(out,
1286                                   "notice: Unfencing node %s because the "
1287                                   "definition of %s changed",
1288                                   pcmk__node_name(node), match->id);
1289                     }
1290                 }
1291 
1292                 pcmk__g_strcat(digests_all,
1293                                match->id, ":", agent, ":",
1294                                data->digest_all_calc, ",", NULL);
1295                 pcmk__g_strcat(digests_secure,
1296                                match->id, ":", agent, ":",
1297                                data->digest_secure_calc, ",", NULL);
1298             }
1299             pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_ALL,
1300                              digests_all->str);
1301             g_string_free(digests_all, TRUE);
1302 
1303             pcmk__insert_dup(stonith_op->meta, PCMK__META_DIGESTS_SECURE,
1304                              digests_secure->str);
1305             g_string_free(digests_secure, TRUE);
1306 
1307             g_list_free(matches);
1308         }
1309 
1310     } else {
1311         free(op_key);
1312     }
1313 
1314     if ((scheduler->priv->priority_fencing_ms > 0U)
1315 
1316             /* This is a case where PCMK_OPT_PRIORITY_FENCING_DELAY
1317              * applies, so at minimum add a PCMK_OPT_PRIORITY_FENCING_DELAY
1318              * field as an indicator.
1319              */
1320         && (priority_delay
1321 
1322             /* The priority delay needs to be recalculated if this function has
1323              * been called by schedule_fencing_and_shutdowns() after node
1324              * priority has already been calculated by native_add_running().
1325              */
1326             || g_hash_table_lookup(stonith_op->meta,
1327                                    PCMK_OPT_PRIORITY_FENCING_DELAY) != NULL)) {
1328 
1329             /* Add PCMK_OPT_PRIORITY_FENCING_DELAY to the fencing op even if
1330              * it's 0 for the targeted node, so that it takes precedence over
1331              * any possible `pcmk_delay_base/max`.
1332              */
1333             char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
1334                                                                    scheduler));
1335 
1336             g_hash_table_insert(stonith_op->meta,
1337                                 strdup(PCMK_OPT_PRIORITY_FENCING_DELAY),
1338                                 delay_s);
1339     }
1340 
1341     if (optional == FALSE && pe_can_fence(scheduler, node)) {
1342         pcmk__clear_action_flags(stonith_op, pcmk__action_optional);
1343         pe_action_set_reason(stonith_op, reason, false);
1344 
1345     } else if (reason && stonith_op->reason == NULL) {
1346         stonith_op->reason = strdup(reason);
1347     }
1348 
1349     return stonith_op;
1350 }
1351 
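/* A minimal usage sketch, assuming `node` and `scheduler` come from the
 * caller's context: request the cluster-default fence action against an
 * unclean node.
 *
 *     pcmk_action_t *fencing = pe_fence_op(node, NULL, false,
 *                                          "node is unclean", false,
 *                                          scheduler);
 *
 * Passing NULL for `op` falls back to scheduler->priv->fence_action, and a
 * repeated call for the same node and action returns the cached singleton
 * instead of creating a duplicate.
 */
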
1352 enum pcmk__action_type
1353 get_complex_task(const pcmk_resource_t *rsc, const char *name)
1354 {
1355     enum pcmk__action_type task = pcmk__parse_action(name);
1356 
1357     if (pcmk__is_primitive(rsc)) {
1358         switch (task) {
1359             case pcmk__action_stopped:
1360             case pcmk__action_started:
1361             case pcmk__action_demoted:
1362             case pcmk__action_promoted:
1363                 crm_trace("Folding %s back into its atomic counterpart for %s",
1364                           name, rsc->id);
1365                 --task;
1366                 break;
1367             default:
1368                 break;
1369         }
1370     }
1371     return task;
1372 }
1373 
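/* Illustrative sketch: for a primitive, each "*d" pseudo-action name folds
 * back into its atomic counterpart (the `--task` above relies on the enum
 * ordering, where each pseudo value directly follows its atomic one), while
 * collective resources keep the pseudo-action as is. Here `primitive_rsc`
 * and `clone_rsc` are hypothetical resources of those kinds.
 *
 *     get_complex_task(primitive_rsc, "stopped");  // pcmk__action_stop
 *     get_complex_task(clone_rsc, "stopped");      // pcmk__action_stopped
 */
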
1374 /*!
1375  * \internal
1376  * \brief Find first matching action in a list
1377  *
1378  * \param[in] input    List of actions to search
1379  * \param[in] uuid     If not NULL, action must have this UUID
1380  * \param[in] task     If not NULL, action must have this action name
1381  * \param[in] on_node  If not NULL, action must be on this node
1382  *
1383  * \return First action in list that matches criteria, or NULL if none
1384  */
1385 pcmk_action_t *
1386 find_first_action(const GList *input, const char *uuid, const char *task,
1387                   const pcmk_node_t *on_node)
1388 {
1389     CRM_CHECK(uuid || task, return NULL);
1390 
1391     for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
1392         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
1393 
1394         if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
1395             continue;
1396 
1397         } else if (task != NULL && !pcmk__str_eq(task, action->task, pcmk__str_casei)) {
1398             continue;
1399 
1400         } else if (on_node == NULL) {
1401             return action;
1402 
1403         } else if (action->node == NULL) {
1404             continue;
1405 
1406         } else if (pcmk__same_node(on_node, action->node)) {
1407             return action;
1408         }
1409     }
1410 
1411     return NULL;
1412 }
1413 
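/* Usage sketch, assuming `rsc` is a resource whose actions have already been
 * created: find the resource's first monitor action on a given node, matching
 * by task name rather than UUID.
 *
 *     pcmk_action_t *mon = find_first_action(rsc->priv->actions, NULL,
 *                                            PCMK_ACTION_MONITOR, node);
 *
 * At least one of `uuid` and `task` must be non-NULL; a NULL `on_node`
 * matches regardless of where (or whether) the action is assigned.
 */
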
1414 GList *
1415 find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
1416 {
1417     GList *gIter = input;
1418     GList *result = NULL;
1419 
1420     CRM_CHECK(key != NULL, return NULL);
1421 
1422     for (; gIter != NULL; gIter = gIter->next) {
1423         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
1424 
1425         if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
1426             continue;
1427 
1428         } else if (on_node == NULL) {
1429             crm_trace("Action %s matches (ignoring node)", key);
1430             result = g_list_prepend(result, action);
1431 
1432         } else if (action->node == NULL) {
1433             crm_trace("Action %s matches (unallocated, assigning to %s)",
1434                       key, pcmk__node_name(on_node));
1435 
1436             action->node = pe__copy_node(on_node);
1437             result = g_list_prepend(result, action);
1438 
1439         } else if (pcmk__same_node(on_node, action->node)) {
1440             crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node));
1441             result = g_list_prepend(result, action);
1442         }
1443     }
1444 
1445     return result;
1446 }
1447 
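/* Usage sketch: callers normally build `key` with pcmk__op_key(), as
 * pe__resource_actions() below does (here `rsc` and `node` are assumed to
 * come from the caller's context).
 *
 *     char *key = pcmk__op_key(rsc->id, PCMK_ACTION_START, 0);
 *     GList *starts = find_actions(rsc->priv->actions, key, node);
 *
 *     g_list_free(starts);
 *     free(key);
 *
 * Note the side effect: a matching but unassigned action gets assigned to
 * `on_node` here, which find_actions_exact() below avoids.
 */
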
1448 GList *
1449 find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
1450 {
1451     GList *result = NULL;
1452 
1453     CRM_CHECK(key != NULL, return NULL);
1454 
1455     if (on_node == NULL) {
1456         return NULL;
1457     }
1458 
1459     for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
1460         pcmk_action_t *action = (pcmk_action_t *) gIter->data;
1461 
1462         if ((action->node != NULL)
1463             && pcmk__str_eq(key, action->uuid, pcmk__str_casei)
1464             && pcmk__same_node(on_node, action->node)) {
1465 
1466             crm_trace("Action %s on %s matches", key, pcmk__node_name(on_node));
1467             result = g_list_prepend(result, action);
1468         }
1469     }
1470 
1471     return result;
1472 }
1473 
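/* Usage sketch: unlike find_actions(), this never assigns a node, so only
 * actions already assigned to `on_node` are returned (same assumptions about
 * `rsc` and `node` as above).
 *
 *     char *key = pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0);
 *     GList *stops_here = find_actions_exact(rsc->priv->actions, key, node);
 *
 *     g_list_free(stops_here);
 *     free(key);
 */
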
1474 /*!
1475  * \brief Find all actions of given type for a resource
1476  *
1477  * \param[in] rsc           Resource to search
1478  * \param[in] node          Find only actions scheduled on this node
1479  * \param[in] task          Action name to search for
1480  * \param[in] require_node  If TRUE, a NULL \p node or an action with no node will not match
1481  *
1482  * \return List of actions found (or NULL if none)
1483  * \note If node is not NULL and require_node is FALSE, matching actions
1484  *       without a node will be assigned to node.
1485  */
1486 GList *
1487 pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
1488                      const char *task, bool require_node)
1489 {
1490     GList *result = NULL;
1491     char *key = pcmk__op_key(rsc->id, task, 0);
1492 
1493     if (require_node) {
1494         result = find_actions_exact(rsc->priv->actions, key, node);
1495     } else {
1496         result = find_actions(rsc->priv->actions, key, node);
1497     }
1498     free(key);
1499     return result;
1500 }
1501 
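/* Usage sketch: `require_node` selects between the two helpers above
 * (assuming `rsc` and `node` come from the caller's context).
 *
 *     // Monitors of rsc that are already assigned to node (exact match)
 *     GList *monitors = pe__resource_actions(rsc, node, PCMK_ACTION_MONITOR,
 *                                            true);
 *
 *     g_list_free(monitors);
 */
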
1502 /*!
1503  * \internal
1504  * \brief Create an action reason string based on the action itself
1505  *
1506  * \param[in] action  Action to create reason string for
1507  * \param[in] flag    Action flag that was cleared
1508  *
1509  * \return Newly allocated string suitable for use as action reason
1510  * \note It is the caller's responsibility to free() the result.
1511  */
1512 char *
1513 pe__action2reason(const pcmk_action_t *action, enum pcmk__action_flags flag)
1514 {
1515     const char *change = NULL;
1516 
1517     switch (flag) {
1518         case pcmk__action_runnable:
1519             change = "unrunnable";
1520             break;
1521         case pcmk__action_migratable:
1522             change = "unmigrateable";
1523             break;
1524         case pcmk__action_optional:
1525             change = "required";
1526             break;
1527         default:
1528             // Bug: caller passed unsupported flag
1529             CRM_CHECK(change != NULL, change = "");
1530             break;
1531     }
1532     return crm_strdup_printf("%s%s%s %s", change,
1533                              (action->rsc == NULL)? "" : " ",
1534                              (action->rsc == NULL)? "" : action->rsc->id,
1535                              action->task);
1536 }
1537 
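/* Illustrative sketch: for a start action of a hypothetical resource with ID
 * "vip" whose runnable flag was cleared, the result reads naturally in log
 * messages.
 *
 *     char *reason = pe__action2reason(action, pcmk__action_runnable);
 *     // reason is "unrunnable vip start"; the caller must free() it
 *     free(reason);
 */
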
1538 void pe_action_set_reason(pcmk_action_t *action, const char *reason,
1539                           bool overwrite)
1540 {
1541     if (action->reason != NULL && overwrite) {
1542         pcmk__rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
1543                         action->uuid, action->reason,
1544                         pcmk__s(reason, "(none)"));
1545     } else if (action->reason == NULL) {
1546         pcmk__rsc_trace(action->rsc, "Set %s reason to '%s'",
1547                         action->uuid, pcmk__s(reason, "(none)"));
1548     } else {
1549         // crm_assert(action->reason != NULL && !overwrite);
1550         return;
1551     }
1552 
1553     pcmk__str_update(&action->reason, reason);
1554 }
1555 
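/* Usage sketch: an existing reason is only replaced when `overwrite` is true
 * (the reason strings here are just examples).
 *
 *     pe_action_set_reason(action, "resource definition changed", false);
 *     pe_action_set_reason(action, "node availability changed", false);
 *     // action->reason is still "resource definition changed"
 *     pe_action_set_reason(action, "node availability changed", true);
 *     // action->reason is now "node availability changed"
 */
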
1556 /*!
1557  * \internal
1558  * \brief Create an action to clear a resource's history from CIB
1559  *
1560  * \param[in,out] rsc       Resource to clear
1561  * \param[in]     node      Node to clear history on
1562  */
1563 void
1564 pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
1565 {
1566     pcmk__assert((rsc != NULL) && (node != NULL));
1567 
1568     custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
1569                   PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->priv->scheduler);
1570 }
1571 
1572 #define sort_return(an_int, why) do {                                   \
1573         free(a_uuid);                                           \
1574         free(b_uuid);                                           \
1575         crm_trace("%s (%d) %c %s (%d) : %s",                            \
1576                   a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=',   \
1577                   b_xml_id, b_call_id, why);                            \
1578         return an_int;                                                  \
1579     } while(0)
1580 
1581 int
1582 pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b)
1583 {
1584     int a_call_id = -1;
1585     int b_call_id = -1;
1586 
1587     char *a_uuid = NULL;
1588     char *b_uuid = NULL;
1589 
1590     const char *a_xml_id = crm_element_value(xml_a, PCMK_XA_ID);
1591     const char *b_xml_id = crm_element_value(xml_b, PCMK_XA_ID);
1592 
1593     const char *a_node = crm_element_value(xml_a, PCMK__META_ON_NODE);
1594     const char *b_node = crm_element_value(xml_b, PCMK__META_ON_NODE);
1595     bool same_node = pcmk__str_eq(a_node, b_node, pcmk__str_casei);
1596 
1597     if (same_node && pcmk__str_eq(a_xml_id, b_xml_id, pcmk__str_none)) {
1598         /* We have duplicate PCMK__XE_LRM_RSC_OP entries in the status
1599          * section, which is unlikely to be a good thing.
1600          * We can handle it easily enough, but we need to get to the
1601          * bottom of why it's happening.
1602          */
1603         pcmk__config_err("Duplicate " PCMK__XE_LRM_RSC_OP " entries named %s",
1604                          a_xml_id);
1605         sort_return(0, "duplicate");
1606     }
1607 
1608     crm_element_value_int(xml_a, PCMK__XA_CALL_ID, &a_call_id);
1609     crm_element_value_int(xml_b, PCMK__XA_CALL_ID, &b_call_id);
1610 
1611     if (a_call_id == -1 && b_call_id == -1) {
1612         /* Both are pending ops, so it doesn't matter, since stops
1613          * are never pending
1614          */
1615         sort_return(0, "pending");
1616 
1617     } else if (same_node && a_call_id >= 0 && a_call_id < b_call_id) {
1618         sort_return(-1, "call id");
1619 
1620     } else if (same_node && b_call_id >= 0 && a_call_id > b_call_id) {
1621         sort_return(1, "call id");
1622 
1623     } else if (a_call_id >= 0 && b_call_id >= 0
1624                && (!same_node || a_call_id == b_call_id)) {
1625         /* The op and last_failed_op are the same. Order on
1626          * PCMK_XA_LAST_RC_CHANGE.
1627          */
1628         time_t last_a = -1;
1629         time_t last_b = -1;
1630 
1631         crm_element_value_epoch(xml_a, PCMK_XA_LAST_RC_CHANGE, &last_a);
1632         crm_element_value_epoch(xml_b, PCMK_XA_LAST_RC_CHANGE, &last_b);
1633 
1634         crm_trace("rc-change: %lld vs %lld",
1635                   (long long) last_a, (long long) last_b);
1636         if (last_a >= 0 && last_a < last_b) {
1637             sort_return(-1, "rc-change");
1638 
1639         } else if (last_b >= 0 && last_a > last_b) {
1640             sort_return(1, "rc-change");
1641         }
1642         sort_return(0, "rc-change");
1643 
1644     } else {
1645         /* One of the inputs is a pending operation.
1646          * Attempt to use PCMK__XA_TRANSITION_MAGIC to determine its age relative
1647          * to the other.
1648          */
1649 
1650         int a_id = -1;
1651         int b_id = -1;
1652 
1653         const char *a_magic = crm_element_value(xml_a,
1654                                                 PCMK__XA_TRANSITION_MAGIC);
1655         const char *b_magic = crm_element_value(xml_b,
1656                                                 PCMK__XA_TRANSITION_MAGIC);
1657 
1658         CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
1659         if (!decode_transition_magic(a_magic, &a_uuid, &a_id, NULL, NULL, NULL,
1660                                      NULL)) {
1661             sort_return(0, "bad magic a");
1662         }
1663         if (!decode_transition_magic(b_magic, &b_uuid, &b_id, NULL, NULL, NULL,
1664                                      NULL)) {
1665             sort_return(0, "bad magic b");
1666         }
1667         /* Try to determine the relative age of the operations.
1668          * Some pending operations (e.g. a start) may have been superseded
1669          * by a subsequent stop.
1670          *
1671          * [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
1672          */
1673         if (!pcmk__str_eq(a_uuid, b_uuid, pcmk__str_casei) || a_id == b_id) {
1674             /*
1675              * Some of the logic in here may be redundant.
1676              *
1677              * If the UUID from the TE doesn't match, then one had better
1678              * be a pending operation.
1679              * Pending operations don't survive between elections and joins,
1680              * because we query the LRM directly.
1681              */
1682 
1683             if (b_call_id == -1) {
1684                 sort_return(-1, "transition + call");
1685 
1686             } else if (a_call_id == -1) {
1687                 sort_return(1, "transition + call");
1688             }
1689 
1690         } else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
1691             sort_return(-1, "transition");
1692 
1693         } else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
1694             sort_return(1, "transition");
1695         }
1696     }
1697 
1698     /* we should never end up here */
1699     CRM_CHECK(FALSE, sort_return(0, "default"));
1700 }
1701 
1702 gint
1703 sort_op_by_callid(gconstpointer a, gconstpointer b)
1704 {
1705     return pe__is_newer_op((const xmlNode *) a, (const xmlNode *) b);
1706 }
1707 
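/* Usage sketch: pe__is_newer_op() already matches GCompareFunc's contract,
 * so this wrapper lets a list of PCMK__XE_LRM_RSC_OP XML entries be sorted
 * oldest-first with GLib (here `op_history` is assumed to be a GList of
 * xmlNode pointers taken from the CIB status section).
 *
 *     GList *sorted_ops = g_list_sort(op_history, sort_op_by_callid);
 */
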
1708 /*!
1709  * \internal
1710  * \brief Create a new pseudo-action for a resource
1711  *
1712  * \param[in,out] rsc       Resource to create action for
1713  * \param[in]     task      Action name
1714  * \param[in]     optional  Whether action should be considered optional
1715  * \param[in]     runnable  Whether action should be considered runnable
1716  *
1717  * \return New action object corresponding to arguments
1718  */
1719 pcmk_action_t *
1720 pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
1721                           bool runnable)
1722 {
1723     pcmk_action_t *action = NULL;
1724 
1725     pcmk__assert((rsc != NULL) && (task != NULL));
1726 
1727     action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
1728                            optional, rsc->priv->scheduler);
1729     pcmk__set_action_flags(action, pcmk__action_pseudo);
1730     if (runnable) {
1731         pcmk__set_action_flags(action, pcmk__action_runnable);
1732     }
1733     return action;
1734 }
1735 
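/* Usage sketch, assuming `rsc` is a collective resource such as a clone
 * (where pseudo-actions are typically used): create an optional, runnable
 * "running" pseudo-action for it.
 *
 *     pcmk_action_t *started = pe__new_rsc_pseudo_action(rsc,
 *                                                        PCMK_ACTION_RUNNING,
 *                                                        true, true);
 */
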
1736 /*!
1737  * \internal
1738  * \brief Add the expected result to an action
1739  *
1740  * \param[in,out] action           Action to add expected result to
1741  * \param[in]     expected_result  Expected result to add
1742  *
1743  * \note This is more efficient than calling pcmk__insert_meta().
1744  */
1745 void
1746 pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
1747 {
1748     pcmk__assert((action != NULL) && (action->meta != NULL));
1749 
1750     g_hash_table_insert(action->meta, pcmk__str_copy(PCMK__META_OP_TARGET_RC),
1751                         pcmk__itoa(expected_result));
1752 }
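
/* Usage sketch, assuming `action` is an existing monitor action: record that
 * the action is expected to return OCF success.
 *
 *     pe__add_action_expected_result(action, PCMK_OCF_OK);
 *
 * The value is stored under PCMK__META_OP_TARGET_RC in action->meta,
 * replacing any previously set expected result.
 */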
