root/lib/pengine/unpack.c

DEFINITIONS

This source file includes the following definitions:
  1. is_dangling_guest_node
  2. pe_fence_node
  3. set_if_xpath
  4. unpack_config
  5. pe_create_node
  6. expand_remote_rsc_meta
  7. handle_startup_fencing
  8. unpack_nodes
  9. setup_container
  10. unpack_remote_nodes
  11. link_rsc2remotenode
  12. destroy_tag
  13. unpack_resources
  14. unpack_tags
  15. unpack_ticket_state
  16. unpack_tickets_state
  17. unpack_handle_remote_attrs
  18. unpack_transient_attributes
  19. unpack_node_state
  20. unpack_node_history
  21. unpack_status
  22. determine_online_status_no_fencing
  23. determine_online_status_fencing
  24. determine_remote_online_status
  25. determine_online_status
  26. pe_base_name_end
  27. clone_strip
  28. clone_zero
  29. create_fake_resource
  30. create_anonymous_orphan
  31. find_anonymous_clone
  32. unpack_find_resource
  33. process_orphan_resource
  34. process_rsc_state
  35. process_recurring
  36. calculate_active_ops
  37. unpack_shutdown_lock
  38. unpack_lrm_resource
  39. handle_orphaned_container_fillers
  40. unpack_node_lrm
  41. set_active
  42. set_node_score
  43. find_lrm_op
  44. pe__call_id
  45. stop_happened_after
  46. unpack_migrate_to_success
  47. newer_op
  48. unpack_migrate_to_failure
  49. unpack_migrate_from_failure
  50. record_failed_op
  51. get_op_key
  52. last_change_str
  53. cmp_on_fail
  54. unpack_rsc_op_failure
  55. determine_op_status
  56. should_clear_for_param_change
  57. order_after_remote_fencing
  58. should_ignore_failure_timeout
  59. check_operation_expiry
  60. pe__target_rc_from_xml
  61. get_action_on_fail
  62. update_resource_state
  63. remap_monitor_rc
  64. unpack_rsc_op
  65. add_node_attrs
  66. extract_operations
  67. find_operations

/*
 * Copyright 2004-2021 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <stdio.h>
#include <string.h>
#include <glib.h>
#include <time.h>

#include <crm/crm.h>
#include <crm/services.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>

#include <crm/common/util.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/common/iso8601_internal.h>
#include <pe_status_private.h>

CRM_TRACE_INIT_DATA(pe_status);

/* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
 * pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
 * flag is stringified more readably in log messages.
 */
#define set_config_flag(data_set, option, flag) do {                        \
        const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
        if (scf_value != NULL) {                                            \
            if (crm_is_true(scf_value)) {                                   \
                (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__,  \
                                    LOG_TRACE, "Working set",               \
                                    crm_system_name, (data_set)->flags,     \
                                    (flag), #flag);                         \
            } else {                                                        \
                (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
                                    LOG_TRACE, "Working set",               \
                                    crm_system_name, (data_set)->flags,     \
                                    (flag), #flag);                         \
            }                                                               \
        }                                                                   \
    } while(0)
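
/* Example (as used throughout unpack_config() below):
 *
 *     set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
 *
 * reads the named option from data_set->config_hash and sets or clears the
 * given flag in data_set->flags accordingly; the flags are left untouched
 * when the option is unset.
 */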

static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                          xmlNode **last_failure,
                          enum action_fail_response *failed,
                          pe_working_set_t *data_set);
static void determine_remote_online_status(pe_working_set_t *data_set,
                                           pe_node_t *this_node);
static void add_node_attrs(xmlNode *attrs, pe_node_t *node, bool overwrite,
                           pe_working_set_t *data_set);
static void determine_online_status(xmlNode *node_state, pe_node_t *this_node,
                                    pe_working_set_t *data_set);

static void unpack_node_lrm(pe_node_t *node, xmlNode *xml,
                            pe_working_set_t *data_set);


// Bitmask for warnings we only want to print once
uint32_t pe_wo = 0;

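/*!
 * \internal
 * \brief Check whether a node is a dangling guest node
 *
 * \param[in] node  Node to check
 *
 * \return TRUE if \p node is a remote node whose container resource has
 *         disappeared from both the configuration and the status section,
 *         otherwise FALSE
 */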
static gboolean
is_dangling_guest_node(pe_node_t *node)
{
    /* We are looking for a remote node that was supposed to be mapped to a
     * container resource, but all traces of that container have disappeared
     * from both the config and the status section. */
    if (pe__is_guest_or_remote_node(node) &&
        node->details->remote_rsc &&
        node->details->remote_rsc->container == NULL &&
        pcmk_is_set(node->details->remote_rsc->flags,
                    pe_rsc_orphan_container_filler)) {
        return TRUE;
    }

    return FALSE;
}


/*!
 * \brief Schedule a fence action for a node
 *
 * \param[in,out] data_set        Current working set of cluster
 * \param[in,out] node            Node to fence
 * \param[in]     reason          Text description of why fencing is needed
 * \param[in]     priority_delay  Whether to consider `priority-fencing-delay`
 */
void
pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
              const char *reason, bool priority_delay)
{
    CRM_CHECK(node, return);

    /* A guest node is fenced by marking its container as failed */
    if (pe__is_guest_node(node)) {
        pe_resource_t *rsc = node->details->remote_rsc->container;

        if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
            if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                crm_notice("Not fencing guest node %s "
                           "(otherwise would because %s): "
                           "its guest resource %s is unmanaged",
                           node->details->uname, reason, rsc->id);
            } else {
                crm_warn("Guest node %s will be fenced "
                         "(by recovering its guest resource %s): %s",
                         node->details->uname, rsc->id, reason);

                /* We don't mark the node as unclean because that would prevent
                 * the node from running resources. We want to allow it to run
                 * resources in this transition if the recovery succeeds.
                 */
                node->details->remote_requires_reset = TRUE;
                pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
            }
        }

    } else if (is_dangling_guest_node(node)) {
        crm_info("Cleaning up dangling connection for guest node %s: "
                 "fencing was already done because %s, "
                 "and guest resource no longer exists",
                 node->details->uname, reason);
        pe__set_resource_flags(node->details->remote_rsc,
                               pe_rsc_failed|pe_rsc_stop);

    } else if (pe__is_remote_node(node)) {
        pe_resource_t *rsc = node->details->remote_rsc;

        if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            crm_notice("Not fencing remote node %s "
                       "(otherwise would because %s): connection is unmanaged",
                       node->details->uname, reason);
        } else if (node->details->remote_requires_reset == FALSE) {
            node->details->remote_requires_reset = TRUE;
            crm_warn("Remote node %s %s: %s",
                     node->details->uname,
                     pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
                     reason);
        }
        node->details->unclean = TRUE;
        // No need to apply `priority-fencing-delay` for remote nodes
        pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);

    } else if (node->details->unclean) {
        crm_trace("Cluster node %s %s because %s",
                  node->details->uname,
                  pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
                  reason);

    } else {
        crm_warn("Cluster node %s %s: %s",
                 node->details->uname,
                 pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
                 reason);
        node->details->unclean = TRUE;
        pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
    }
}

// @TODO xpaths can't handle templates, rules, or id-refs

// nvpair with provides or requires set to unfencing
#define XPATH_UNFENCING_NVPAIR XML_CIB_TAG_NVPAIR                \
    "[(@" XML_NVPAIR_ATTR_NAME "='" PCMK_STONITH_PROVIDES "'"    \
    "or @" XML_NVPAIR_ATTR_NAME "='" XML_RSC_ATTR_REQUIRES "') " \
    "and @" XML_NVPAIR_ATTR_VALUE "='unfencing']"

// unfencing in rsc_defaults or any resource
#define XPATH_ENABLE_UNFENCING \
    "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RESOURCES   \
    "//" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR                         \
    "|/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_RSCCONFIG  \
    "/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR

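/*!
 * \internal
 * \brief Set a working set flag if an XPath search of the input CIB matches
 *
 * For example, a meta-attribute such as
 *     <nvpair id="..." name="requires" value="unfencing"/>
 * anywhere under resources or rsc_defaults matches XPATH_ENABLE_UNFENCING.
 *
 * \param[in]     flag      Working set flag to set if the search matches
 * \param[in]     xpath     XPath expression to search the input CIB for
 * \param[in,out] data_set  Cluster working set
 */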
static void
set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
{
    xmlXPathObjectPtr result = NULL;

    if (!pcmk_is_set(data_set->flags, flag)) {
        result = xpath_search(data_set->input, xpath);
        if (result && (numXpathResults(result) > 0)) {
            pe__set_working_set_flags(data_set, flag);
        }
        freeXpathObject(result);
    }
}

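/*!
 * \brief Unpack cluster configuration properties into a working set
 *
 * \param[in]     config    XML of cluster property section
 * \param[in,out] data_set  Cluster working set to update
 *
 * \return TRUE (this function currently cannot fail)
 */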
gboolean
unpack_config(xmlNode * config, pe_working_set_t * data_set)
{
    const char *value = NULL;
    GHashTable *config_hash = pcmk__strkey_table(free, free);

    pe_rule_eval_data_t rule_data = {
        .node_hash = NULL,
        .role = RSC_ROLE_UNKNOWN,
        .now = data_set->now,
        .match_data = NULL,
        .rsc_data = NULL,
        .op_data = NULL
    };

    data_set->config_hash = config_hash;

    pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
                               CIB_OPTIONS_FIRST, FALSE, data_set);

    verify_pe_options(data_set->config_hash);

    set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
    if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_info("Startup probes: disabled (dangerous)");
    }

    value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
    if (value && crm_is_true(value)) {
        crm_info("Watchdog-based self-fencing will be performed via SBD if "
                 "fencing is required and stonith-watchdog-timeout is nonzero");
        pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
    }

    /* Set certain flags via xpath here, so they can be used before the relevant
     * configuration sections are unpacked.
     */
    set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);

    value = pe_pref(data_set->config_hash, "stonith-timeout");
    data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
    crm_debug("STONITH timeout: %d", data_set->stonith_timeout);

    set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
    crm_debug("STONITH of failed nodes is %s",
              pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");

    data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
    if (!strcmp(data_set->stonith_action, "poweroff")) {
        pe_warn_once(pe_wo_poweroff,
                     "Support for stonith-action of 'poweroff' is deprecated "
                     "and will be removed in a future release (use 'off' instead)");
        data_set->stonith_action = "off";
    }
    crm_trace("STONITH will %s nodes", data_set->stonith_action);

    set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
    crm_debug("Concurrent fencing is %s",
              pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");

    value = pe_pref(data_set->config_hash,
                    XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
    if (value) {
        data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
        crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
    }

    set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
    crm_debug("Stop all active resources: %s",
              pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));

    set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
    if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
        crm_debug("Cluster is symmetric - resources can run anywhere by default");
    }

    value = pe_pref(data_set->config_hash, "no-quorum-policy");

    if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
        data_set->no_quorum_policy = no_quorum_ignore;

    } else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
        data_set->no_quorum_policy = no_quorum_freeze;

    } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
        data_set->no_quorum_policy = no_quorum_demote;

    } else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
        if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            int do_panic = 0;

            crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
                                  &do_panic);
            if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
                data_set->no_quorum_policy = no_quorum_suicide;
            } else {
                crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
                data_set->no_quorum_policy = no_quorum_stop;
            }
        } else {
            pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
                             "fencing is disabled");
            data_set->no_quorum_policy = no_quorum_stop;
        }

    } else {
        data_set->no_quorum_policy = no_quorum_stop;
    }

    switch (data_set->no_quorum_policy) {
        case no_quorum_freeze:
            crm_debug("On loss of quorum: Freeze resources");
            break;
        case no_quorum_stop:
            crm_debug("On loss of quorum: Stop ALL resources");
            break;
        case no_quorum_demote:
            crm_debug("On loss of quorum: "
                      "Demote promotable resources and stop other resources");
            break;
        case no_quorum_suicide:
            crm_notice("On loss of quorum: Fence all remaining nodes");
            break;
        case no_quorum_ignore:
            crm_notice("On loss of quorum: Ignore");
            break;
    }

    set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
    crm_trace("Orphan resources are %s",
              pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");

    set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
    crm_trace("Orphan resource actions are %s",
              pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");

    value = pe_pref(data_set->config_hash, "remove-after-stop");
    if (value != NULL) {
        if (crm_is_true(value)) {
            pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
#ifndef PCMK__COMPAT_2_0
            pe_warn_once(pe_wo_remove_after,
                         "Support for the remove-after-stop cluster property is"
                         " deprecated and will be removed in a future release");
#endif
        } else {
            pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
        }
    }

    set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
    crm_trace("Maintenance mode: %s",
              pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));

    set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
    crm_trace("Start failures are %s",
              pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");

    if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
        set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
    }
    if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
        crm_trace("Unseen nodes will be fenced");
    } else {
        pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
    }

    pcmk__score_red = char2score(pe_pref(data_set->config_hash, "node-health-red"));
    pcmk__score_green = char2score(pe_pref(data_set->config_hash, "node-health-green"));
    pcmk__score_yellow = char2score(pe_pref(data_set->config_hash, "node-health-yellow"));

    crm_debug("Node scores: 'red' = %s, 'yellow' = %s, 'green' = %s",
              pe_pref(data_set->config_hash, "node-health-red"),
              pe_pref(data_set->config_hash, "node-health-yellow"),
              pe_pref(data_set->config_hash, "node-health-green"));

    data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
    crm_trace("Placement strategy: %s", data_set->placement_strategy);

    set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
    crm_trace("Resources will%s be locked to cleanly shut down nodes",
              (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
    if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
        value = pe_pref(data_set->config_hash,
                        XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
        data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
        crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
    }

    return TRUE;
}

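/*!
 * \brief Create a node object and add it to the working set's node list
 *
 * \param[in]     id        Node ID
 * \param[in]     uname     Node name
 * \param[in]     type      Node type ("remote", "member", or NULL for member)
 * \param[in]     score     Node weight as a score string
 * \param[in,out] data_set  Cluster working set
 *
 * \return Newly allocated node object, or NULL if memory allocation failed
 */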
pe_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
               const char *score, pe_working_set_t * data_set)
{
    pe_node_t *new_node = NULL;

    if (pe_find_node(data_set->nodes, uname) != NULL) {
        pcmk__config_warn("More than one node entry has name '%s'", uname);
    }

    new_node = calloc(1, sizeof(pe_node_t));
    if (new_node == NULL) {
        return NULL;
    }

    new_node->weight = char2score(score);
    new_node->fixed = FALSE;
    new_node->details = calloc(1, sizeof(struct pe_node_shared_s));

    if (new_node->details == NULL) {
        free(new_node);
        return NULL;
    }

    crm_trace("Creating node for entry %s/%s", uname, id);
    new_node->details->id = id;
    new_node->details->uname = uname;
    new_node->details->online = FALSE;
    new_node->details->shutdown = FALSE;
    new_node->details->rsc_discovery_enabled = TRUE;
    new_node->details->running_rsc = NULL;
    new_node->details->type = node_ping;

    if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
        new_node->details->type = node_remote;
        pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
    } else if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
        new_node->details->type = node_member;
    }

    new_node->details->attrs = pcmk__strkey_table(free, free);

    if (pe__is_guest_or_remote_node(new_node)) {
        g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
                            strdup("remote"));
    } else {
        g_hash_table_insert(new_node->details->attrs, strdup(CRM_ATTR_KIND),
                            strdup("cluster"));
    }

    new_node->details->utilization = pcmk__strkey_table(free, free);
    new_node->details->digest_cache = pcmk__strkey_table(free,
                                                         pe__free_digests);

    data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node, sort_node_uname);
    return new_node;
}

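/*!
 * \internal
 * \brief Expand a resource's guest node meta-attributes, if any
 *
 * If a resource's meta-attributes include remote-node, add an implicit
 * ocf:pacemaker:remote resource for the guest node's connection to the
 * given parent XML, unless a resource with that name already exists.
 *
 * \param[in]     xml_obj  Resource XML to check
 * \param[in,out] parent   XML to add any new connection resource to
 * \param[in,out] data     Cluster working set
 *
 * \return Name of the guest node if a connection resource was added,
 *         otherwise NULL
 */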
static const char *
expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
{
    xmlNode *attr_set = NULL;
    xmlNode *attr = NULL;

    const char *container_id = ID(xml_obj);
    const char *remote_name = NULL;
    const char *remote_server = NULL;
    const char *remote_port = NULL;
    const char *connect_timeout = "60s";
    const char *remote_allow_migrate = NULL;
    const char *is_managed = NULL;

    for (attr_set = pcmk__xe_first_child(xml_obj); attr_set != NULL;
         attr_set = pcmk__xe_next(attr_set)) {

        if (!pcmk__str_eq((const char *)attr_set->name, XML_TAG_META_SETS,
                          pcmk__str_casei)) {
            continue;
        }

        for (attr = pcmk__xe_first_child(attr_set); attr != NULL;
             attr = pcmk__xe_next(attr)) {
            const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
            const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);

            if (pcmk__str_eq(name, XML_RSC_ATTR_REMOTE_NODE, pcmk__str_casei)) {
                remote_name = value;
            } else if (pcmk__str_eq(name, "remote-addr", pcmk__str_casei)) {
                remote_server = value;
            } else if (pcmk__str_eq(name, "remote-port", pcmk__str_casei)) {
                remote_port = value;
            } else if (pcmk__str_eq(name, "remote-connect-timeout", pcmk__str_casei)) {
                connect_timeout = value;
            } else if (pcmk__str_eq(name, "remote-allow-migrate", pcmk__str_casei)) {
                remote_allow_migrate = value;
            } else if (pcmk__str_eq(name, XML_RSC_ATTR_MANAGED, pcmk__str_casei)) {
                is_managed = value;
            }
        }
    }

    if (remote_name == NULL) {
        return NULL;
    }

    if (pe_find_resource(data->resources, remote_name) != NULL) {
        return NULL;
    }

    pe_create_remote_xml(parent, remote_name, container_id,
                         remote_allow_migrate, is_managed,
                         connect_timeout, remote_server, remote_port);
    return remote_name;
}

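/*!
 * \internal
 * \brief Apply the startup-fencing setting to a newly created node
 *
 * Unless startup fencing is disabled, mark the node as unclean until we have
 * seen its status entry. Remote nodes without a connection resource are
 * ignored entirely.
 *
 * \param[in]     data_set  Cluster working set
 * \param[in,out] new_node  Node to update
 */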
static void
handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
{
    if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
        /* Ignore fencing for remote nodes that don't have a connection resource
         * associated with them. This happens when remote node entries get left
         * in the nodes section after the connection resource is removed.
         */
        return;
    }

    if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
        // All nodes are unclean until we've seen their status entry
        new_node->details->unclean = TRUE;

    } else {
        // Blind faith ...
        new_node->details->unclean = FALSE;
    }

    /* We need to be able to determine whether a node's status section exists,
     * separately from whether the node is unclean. */
    new_node->details->unseen = TRUE;
}

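/*!
 * \brief Unpack the configuration's nodes section into a working set
 *
 * \param[in]     xml_nodes  XML of the nodes section
 * \param[in,out] data_set   Cluster working set
 *
 * \return TRUE on success, or FALSE if a node object could not be created
 */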
gboolean
unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;
    pe_node_t *new_node = NULL;
    const char *id = NULL;
    const char *uname = NULL;
    const char *type = NULL;
    const char *score = NULL;

    pe_rule_eval_data_t rule_data = {
        .node_hash = NULL,
        .role = RSC_ROLE_UNKNOWN,
        .now = data_set->now,
        .match_data = NULL,
        .rsc_data = NULL,
        .op_data = NULL
    };

    for (xml_obj = pcmk__xe_first_child(xml_nodes); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_NODE, pcmk__str_none)) {
            new_node = NULL;

            id = crm_element_value(xml_obj, XML_ATTR_ID);
            uname = crm_element_value(xml_obj, XML_ATTR_UNAME);
            type = crm_element_value(xml_obj, XML_ATTR_TYPE);
            score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
            crm_trace("Processing node %s/%s", uname, id);

            if (id == NULL) {
                pcmk__config_err("Ignoring <" XML_CIB_TAG_NODE
                                 "> entry in configuration without id");
                continue;
            }
            new_node = pe_create_node(id, uname, type, score, data_set);

            if (new_node == NULL) {
                return FALSE;
            }

/*              if(data_set->have_quorum == FALSE */
/*                 && data_set->no_quorum_policy == no_quorum_stop) { */
/*                      /\* start shutting resources down *\/ */
/*                      new_node->weight = -INFINITY; */
/*              } */

            handle_startup_fencing(data_set, new_node);

            add_node_attrs(xml_obj, new_node, FALSE, data_set);
            pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
                                       new_node->details->utilization, NULL,
                                       FALSE, data_set);

            crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
        }
    }

    if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
        crm_info("Creating a fake local node");
        pe_create_node(data_set->localhost, data_set->localhost, NULL, NULL,
                       data_set);
    }

    return TRUE;
}

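/*!
 * \internal
 * \brief Link a resource to its container resource, per its meta-attributes
 *
 * If the resource (or, recursively, any of its children) has the container
 * meta-attribute set, look up the named container resource and cross-link
 * the two.
 *
 * \param[in,out] rsc       Resource to check
 * \param[in]     data_set  Cluster working set
 */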
static void
setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
{
    const char *container_id = NULL;

    if (rsc->children) {
        GList *gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            setup_container(child_rsc, data_set);
        }
        return;
    }

    container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
    if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
        pe_resource_t *container = pe_find_resource(data_set->resources, container_id);

        if (container) {
            rsc->container = container;
            pe__set_resource_flags(container, pe_rsc_is_container);
            container->fillers = g_list_append(container->fillers, rsc);
            pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
        } else {
            pe_err("Resource %s: Unknown resource container (%s)", rsc->id, container_id);
        }
    }
}

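/*!
 * \brief Create node objects for remote and guest nodes in the configuration
 *
 * \param[in,out] xml_resources  XML of the resources section
 * \param[in,out] data_set       Cluster working set
 *
 * \return TRUE (this function currently cannot fail)
 */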
gboolean
unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;

    /* Create remote nodes and guest nodes from the resource configuration
     * before unpacking resources.
     */
    for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        const char *new_node_id = NULL;

        /* Check for remote nodes, which are defined by ocf:pacemaker:remote
         * primitives.
         */
        if (xml_contains_remote_node(xml_obj)) {
            new_node_id = ID(xml_obj);
            /* The "pe_find_node" check is here to make sure we don't iterate
             * over an expanded node that has already been added to the node
             * list.
             */
            if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                crm_trace("Found remote node %s defined by resource %s",
                          new_node_id, ID(xml_obj));
                pe_create_node(new_node_id, new_node_id, "remote", NULL,
                               data_set);
            }
            continue;
        }

        /* Check for guest nodes, which are defined by special meta-attributes
         * of a primitive of any type (for example, VirtualDomain or Xen).
         */
        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
            /* This will add an ocf:pacemaker:remote primitive to the
             * configuration for the guest node's connection, to be unpacked
             * later.
             */
            new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
            if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                crm_trace("Found guest node %s in resource %s",
                          new_node_id, ID(xml_obj));
                pe_create_node(new_node_id, new_node_id, "remote", NULL,
                               data_set);
            }
            continue;
        }

        /* Check for guest nodes inside a group. Clones are currently not
         * supported as guest nodes.
         */
        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_GROUP, pcmk__str_none)) {
            xmlNode *xml_obj2 = NULL;

            for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
                 xml_obj2 = pcmk__xe_next(xml_obj2)) {

                new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);

                if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
                    crm_trace("Found guest node %s in resource %s inside group %s",
                              new_node_id, ID(xml_obj2), ID(xml_obj));
                    pe_create_node(new_node_id, new_node_id, "remote", NULL,
                                   data_set);
                }
            }
        }
    }
    return TRUE;
}

/* Call this after all the nodes and resources have been unpacked, but before
 * the status section is read.
 *
 * A remote node's online status is reflected by the state of the remote
 * node's connection resource. We need to link the remote node to this
 * connection resource so we can have easy access to the connection resource
 * during the scheduler calculations.
 */
static void
link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
{
    pe_node_t *remote_node = NULL;

    if (new_rsc->is_remote_node == FALSE) {
        return;
    }

    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        /* Remote nodes and remote resources are not linked in quick location
         * calculations */
        return;
    }

    remote_node = pe_find_node(data_set->nodes, new_rsc->id);
    CRM_CHECK(remote_node != NULL, return;);

    pe_rsc_trace(new_rsc, "Linking remote connection resource %s to node %s",
                 new_rsc->id, remote_node->details->uname);
    remote_node->details->remote_rsc = new_rsc;

    if (new_rsc->container == NULL) {
        /* Handle start-up fencing for remote nodes (as opposed to guest nodes)
         * the same as is done for cluster nodes.
         */
        handle_startup_fencing(data_set, remote_node);

    } else {
        /* pe_create_node() marks the new node as "remote" or "cluster"; now
         * that we know the node is a guest node, update it correctly.
         */
        g_hash_table_replace(remote_node->details->attrs, strdup(CRM_ATTR_KIND),
                             strdup("container"));
    }
}

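/*!
 * \internal
 * \brief Free a tag object (suitable as a hash table destroy function)
 *
 * \param[in,out] data  Tag object to free
 */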
static void
destroy_tag(gpointer data)
{
    pe_tag_t *tag = data;

    if (tag) {
        free(tag->id);
        g_list_free_full(tag->refs, free);
        free(tag);
    }
}

/*!
 * \internal
 * \brief Parse configuration XML for resource information
 *
 * \param[in]     xml_resources  Top of resource configuration XML
 * \param[in,out] data_set       Where to put resource information
 *
 * \return TRUE
 *
 * \note unpack_remote_nodes() MUST be called before this, so that the nodes
 *       can be used when common_unpack() calls resource_location()
 */
gboolean
unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;
    GList *gIter = NULL;

    data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);

    for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        pe_resource_t *new_rsc = NULL;

        if (pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE, pcmk__str_none)) {
            const char *template_id = ID(xml_obj);

            if (template_id && g_hash_table_lookup_extended(data_set->template_rsc_sets,
                                                            template_id, NULL, NULL) == FALSE) {
                /* Record the template's ID, so we know it exists even if
                 * nothing references it yet. */
                g_hash_table_insert(data_set->template_rsc_sets, strdup(template_id), NULL);
            }
            continue;
        }

        crm_trace("Beginning unpack... <%s id=%s... >", crm_element_name(xml_obj), ID(xml_obj));
        if (common_unpack(xml_obj, &new_rsc, NULL, data_set) && (new_rsc != NULL)) {
            data_set->resources = g_list_append(data_set->resources, new_rsc);
            pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);

        } else {
            pcmk__config_err("Ignoring <%s> resource '%s' "
                             "because configuration is invalid",
                             crm_element_name(xml_obj), crm_str(ID(xml_obj)));
            if (new_rsc != NULL && new_rsc->fns != NULL) {
                new_rsc->fns->free(new_rsc);
            }
        }
    }

    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        pe_resource_t *rsc = (pe_resource_t *) gIter->data;

        setup_container(rsc, data_set);
        link_rsc2remotenode(data_set, rsc);
    }

    data_set->resources = g_list_sort(data_set->resources, sort_rsc_priority);
    if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
        /* Ignore */

    } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
               && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {

        pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
        pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
        pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
    }

    return TRUE;
}

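/*!
 * \brief Unpack the configuration's tags section into a working set
 *
 * \param[in]     xml_tags  XML of the tags section
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, or FALSE if a tag reference could not be added
 */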
gboolean
unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
{
    xmlNode *xml_tag = NULL;

    data_set->tags = pcmk__strkey_table(free, destroy_tag);

    for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
         xml_tag = pcmk__xe_next(xml_tag)) {

        xmlNode *xml_obj_ref = NULL;
        const char *tag_id = ID(xml_tag);

        if (!pcmk__str_eq((const char *)xml_tag->name, XML_CIB_TAG_TAG, pcmk__str_none)) {
            continue;
        }

        if (tag_id == NULL) {
            pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
                             crm_element_name(xml_tag));
            continue;
        }

        for (xml_obj_ref = pcmk__xe_first_child(xml_tag); xml_obj_ref != NULL;
             xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {

            const char *obj_ref = ID(xml_obj_ref);

            if (!pcmk__str_eq((const char *)xml_obj_ref->name, XML_CIB_TAG_OBJ_REF, pcmk__str_none)) {
                continue;
            }

            if (obj_ref == NULL) {
                pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
                                 crm_element_name(xml_obj_ref), tag_id);
                continue;
            }

            if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
                return FALSE;
            }
        }
    }

    return TRUE;
}

/* The ticket state section:
 * "/cib/status/tickets/ticket_state"
 */
static gboolean
unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
{
    const char *ticket_id = NULL;
    const char *granted = NULL;
    const char *last_granted = NULL;
    const char *standby = NULL;
    xmlAttrPtr xIter = NULL;

    pe_ticket_t *ticket = NULL;

    ticket_id = ID(xml_ticket);
    if (pcmk__str_empty(ticket_id)) {
        return FALSE;
    }

    crm_trace("Processing ticket state for %s", ticket_id);

    ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
    if (ticket == NULL) {
        ticket = ticket_new(ticket_id, data_set);
        if (ticket == NULL) {
            return FALSE;
        }
    }

    for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
        const char *prop_name = (const char *)xIter->name;
        const char *prop_value = crm_element_value(xml_ticket, prop_name);

        if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
            continue;
        }
        g_hash_table_replace(ticket->state, strdup(prop_name), strdup(prop_value));
    }

    granted = g_hash_table_lookup(ticket->state, "granted");
    if (granted && crm_is_true(granted)) {
        ticket->granted = TRUE;
        crm_info("We have ticket '%s'", ticket->id);
    } else {
        ticket->granted = FALSE;
        crm_info("We do not have ticket '%s'", ticket->id);
    }

    last_granted = g_hash_table_lookup(ticket->state, "last-granted");
    if (last_granted) {
        long long last_granted_ll;

        pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
        ticket->last_granted = (time_t) last_granted_ll;
    }

    standby = g_hash_table_lookup(ticket->state, "standby");
    if (standby && crm_is_true(standby)) {
        ticket->standby = TRUE;
        if (ticket->granted) {
            crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
        }
    } else {
        ticket->standby = FALSE;
    }

    crm_trace("Done with ticket state for %s", ticket_id);

    return TRUE;
}

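/*!
 * \internal
 * \brief Unpack each ticket_state entry in the status section's tickets list
 *
 * \param[in]     xml_tickets  XML of the tickets list
 * \param[in,out] data_set     Cluster working set
 *
 * \return TRUE (this function currently cannot fail)
 */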
static gboolean
unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
{
    xmlNode *xml_obj = NULL;

    for (xml_obj = pcmk__xe_first_child(xml_tickets); xml_obj != NULL;
         xml_obj = pcmk__xe_next(xml_obj)) {

        if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
            continue;
        }
        unpack_ticket_state(xml_obj, data_set);
    }

    return TRUE;
}

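/*!
 * \internal
 * \brief Unpack a Pacemaker Remote node's state and transient attributes
 *
 * \param[in,out] this_node  Remote or guest node whose state is being unpacked
 * \param[in]     state      CIB node_state XML for the node
 * \param[in,out] data_set   Cluster working set
 */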
static void
unpack_handle_remote_attrs(pe_node_t *this_node, xmlNode *state, pe_working_set_t * data_set)
{
    const char *resource_discovery_enabled = NULL;
    xmlNode *attrs = NULL;
    pe_resource_t *rsc = NULL;

    if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
        return;
    }

    if ((this_node == NULL) || !pe__is_guest_or_remote_node(this_node)) {
        return;
    }
    crm_trace("Processing remote node id=%s, uname=%s", this_node->details->id, this_node->details->uname);

    pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_MAINTENANCE),
                       &(this_node->details->remote_maintenance), 0);

    rsc = this_node->details->remote_rsc;
    if (this_node->details->remote_requires_reset == FALSE) {
        this_node->details->unclean = FALSE;
        this_node->details->unseen = FALSE;
    }
    attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
    add_node_attrs(attrs, this_node, TRUE, data_set);

    if (pe__shutdown_requested(this_node)) {
        crm_info("Node %s is shutting down", this_node->details->uname);
        this_node->details->shutdown = TRUE;
    }

    if (crm_is_true(pe_node_attribute_raw(this_node, "standby"))) {
        crm_info("Node %s is in standby-mode", this_node->details->uname);
        this_node->details->standby = TRUE;
    }

    if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
        ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
        crm_info("Node %s is in maintenance-mode", this_node->details->uname);
        this_node->details->maintenance = TRUE;
    }

    resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
    if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
        if (pe__is_remote_node(this_node)
            && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
            crm_warn("Ignoring %s attribute on remote node %s because stonith is disabled",
                     XML_NODE_ATTR_RSC_DISCOVERY, this_node->details->uname);
        } else {
            /* This is either a remote node with fencing enabled, or a guest
             * node. We don't care whether fencing is enabled when fencing guest
             * nodes, because they are "fenced" by recovering their containing
             * resource.
             */
            crm_info("Node %s has resource discovery disabled", this_node->details->uname);
            this_node->details->rsc_discovery_enabled = FALSE;
        }
    }
}

/*!
 * \internal
 * \brief Unpack a cluster node's transient attributes
 *
 * \param[in]     state     CIB node state XML
 * \param[in,out] node      Cluster node whose attributes are being unpacked
 * \param[in,out] data_set  Cluster working set
 */
static void
unpack_transient_attributes(xmlNode *state, pe_node_t *node,
                            pe_working_set_t *data_set)
{
    const char *discovery = NULL;
    xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);

    add_node_attrs(attrs, node, TRUE, data_set);

    if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
        crm_info("Node %s is in standby-mode", node->details->uname);
        node->details->standby = TRUE;
    }

    if (crm_is_true(pe_node_attribute_raw(node, "maintenance"))) {
        crm_info("Node %s is in maintenance-mode", node->details->uname);
        node->details->maintenance = TRUE;
    }

    discovery = pe_node_attribute_raw(node, XML_NODE_ATTR_RSC_DISCOVERY);
    if ((discovery != NULL) && !crm_is_true(discovery)) {
        crm_warn("Ignoring %s attribute for node %s because disabling "
                 "resource discovery is not allowed for cluster nodes",
                 XML_NODE_ATTR_RSC_DISCOVERY, node->details->uname);
    }
}

/*!
 * \internal
 * \brief Unpack a node state entry (first pass)
 *
 * Unpack one node state entry from status. This unpacks information from the
 * node_state element itself and node attributes inside it, but not the
 * resource history inside it. Multiple passes through the status are needed to
 * fully unpack everything.
 *
 * \param[in]     state     CIB node state XML
 * \param[in,out] data_set  Cluster working set
 */
static void
unpack_node_state(xmlNode *state, pe_working_set_t *data_set)
{
    const char *id = NULL;
    const char *uname = NULL;
    pe_node_t *this_node = NULL;

    id = crm_element_value(state, XML_ATTR_ID);
    if (id == NULL) {
        crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
                 XML_ATTR_ID);
        return;
    }

    uname = crm_element_value(state, XML_ATTR_UNAME);
    if (uname == NULL) {
        crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
                 XML_ATTR_UNAME);
        return;
    }

    this_node = pe_find_node_any(data_set->nodes, id, uname);
    if (this_node == NULL) {
        pcmk__config_warn("Ignoring recorded node state for '%s' because "
                          "it is no longer in the configuration", uname);
        return;
    }

    if (pe__is_guest_or_remote_node(this_node)) {
        /* We can't determine the online status of Pacemaker Remote nodes until
         * after all resource history has been unpacked. In this first pass, we
         * do need to mark whether the node has been fenced, as this plays a
         * role during unpacking cluster node resource state.
         */
        pcmk__scan_min_int(crm_element_value(state, XML_NODE_IS_FENCED),
                           &(this_node->details->remote_was_fenced), 0);
        return;
    }

    unpack_transient_attributes(state, this_node, data_set);

    /* Provisionally mark this cluster node as clean. We have at least seen it
     * in the current cluster's lifetime.
     */
    this_node->details->unclean = FALSE;
    this_node->details->unseen = FALSE;

    crm_trace("Determining online status of cluster node %s (id %s)",
              this_node->details->uname, id);
    determine_online_status(state, this_node, data_set);

    if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
        && this_node->details->online
        && (data_set->no_quorum_policy == no_quorum_suicide)) {
        /* Everything else should flow from this automatically
         * (at least until the scheduler becomes able to migrate off
         * healthy resources)
         */
        pe_fence_node(data_set, this_node, "cluster does not have quorum",
                      FALSE);
    }
}

/*!
 * \internal
 * \brief Unpack nodes' resource history as much as possible
 *
 * Unpack as many nodes' resource history as possible in one pass through the
 * status. We need to process Pacemaker Remote nodes' connections/containers
 * before unpacking their history; the connection/container history will be
 * in another node's history, so it might take multiple passes to unpack
 * everything.
 *
 * \param[in]     status    CIB XML status section
 * \param[in]     fence     If true, treat any not-yet-unpacked nodes as unseen
 * \param[in,out] data_set  Cluster working set
 *
 * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
 *         or EAGAIN if more unpacking remains to be done)
 */
static int
unpack_node_history(xmlNode *status, bool fence, pe_working_set_t *data_set)
1150 {
1151     int rc = pcmk_rc_ok;
1152 
1153     // Loop through all node_state entries in CIB status
1154     for (xmlNode *state = first_named_child(status, XML_CIB_TAG_STATE);
1155          state != NULL; state = crm_next_same_xml(state)) {
1156 
1157         const char *id = ID(state);
1158         const char *uname = crm_element_value(state, XML_ATTR_UNAME);
1159         pe_node_t *this_node = NULL;
1160 
1161         if ((id == NULL) || (uname == NULL)) {
1162             // Warning already logged in first pass through status section
1163             crm_trace("Not unpacking resource history from malformed "
1164                       XML_CIB_TAG_STATE " without id and/or uname");
1165             continue;
1166         }
1167 
1168         this_node = pe_find_node_any(data_set->nodes, id, uname);
1169         if (this_node == NULL) {
1170             // Warning already logged in first pass through status section
1171             crm_trace("Not unpacking resource history for node %s because "
1172                       "no longer in configuration", id);
1173             continue;
1174         }
1175 
1176         if (this_node->details->unpacked) {
1177             crm_trace("Not unpacking resource history for node %s because "
1178                       "already unpacked", id);
1179             continue;
1180         }
1181 
1182         if (fence) {
1183             // We're processing all remaining nodes
1184 
1185         } else if (pe__is_guest_node(this_node)) {
1186             /* We can unpack a guest node's history only after we've unpacked
1187              * other resource history to the point that we know that the node's
1188              * connection and containing resource are both up.
1189              */
1190             pe_resource_t *rsc = this_node->details->remote_rsc;
1191 
1192             if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
1193                 || (rsc->container->role != RSC_ROLE_STARTED)) {
1194                 crm_trace("Not unpacking resource history for guest node %s "
1195                           "because container and connection are not known to "
1196                           "be up", id);
1197                 continue;
1198             }
1199 
1200         } else if (pe__is_remote_node(this_node)) {
1201             /* We can unpack a remote node's history only after we've unpacked
1202              * other resource history to the point that we know that the node's
1203              * connection is up, with the exception of when shutdown locks are
1204              * in use.
1205              */
1206             pe_resource_t *rsc = this_node->details->remote_rsc;
1207 
1208             if ((rsc == NULL)
1209                 || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
1210                     && (rsc->role != RSC_ROLE_STARTED))) {
1211                 crm_trace("Not unpacking resource history for remote node %s "
1212                           "because connection is not known to be up", id);
1213                 continue;
1214             }
1215 
1216         /* If fencing and shutdown locks are disabled and we're not processing
1217          * unseen nodes, then we don't want to unpack offline nodes until online
1218          * nodes have been unpacked. This allows us to number active clone
1219          * instances first.
1220          */
1221         } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
1222                                                         |pe_flag_shutdown_lock)
1223                    && !this_node->details->online) {
1224             crm_trace("Not unpacking resource history for offline "
1225                       "cluster node %s", id);
1226             continue;
1227         }
1228 
1229         if (pe__is_guest_or_remote_node(this_node)) {
1230             determine_remote_online_status(data_set, this_node);
1231             unpack_handle_remote_attrs(this_node, state, data_set);
1232         }
1233 
1234         crm_trace("Unpacking resource history for %snode %s",
1235                   (fence? "unseen " : ""), id);
1236 
1237         this_node->details->unpacked = TRUE;
1238         unpack_node_lrm(this_node, state, data_set);
1239 
1240         rc = EAGAIN; // Other node histories might depend on this one
1241     }
1242     return rc;
1243 }
1244 
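/* Example of the ordering unpack_node_history() enforces (hypothetical
 * cluster): if guest node "guest1" is created by container resource "vm1",
 * then vm1's history on its cluster node must be unpacked first, so that
 * vm1's connection and container are known to be up. Only a later pass,
 * triggered by the EAGAIN return above, unpacks guest1's own history.
 */
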
1245 /* Remove nodes that are down or stopping from consideration, and
1246  * create positive rsc_to_node preferences between resources and the
1247  * nodes they are currently running on */
1248 gboolean
1249 unpack_status(xmlNode * status, pe_working_set_t * data_set)
1250 {
1251     xmlNode *state = NULL;
1252 
1253     crm_trace("Beginning unpack");
1254 
1255     if (data_set->tickets == NULL) {
1256         data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
1257     }
1258 
1259     for (state = pcmk__xe_first_child(status); state != NULL;
1260          state = pcmk__xe_next(state)) {
1261 
1262         if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
1263             unpack_tickets_state((xmlNode *) state, data_set);
1264 
1265         } else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
1266             unpack_node_state(state, data_set);
1267         }
1268     }
1269 
1270     while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
1271         crm_trace("Another pass through node resource histories is needed");
1272     }
1273 
1274     // Now catch any nodes we didn't see
1275     unpack_node_history(status,
1276                         pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
1277                         data_set);
1278 
1279     /* Now that we know where resources are, we can schedule stops of containers
1280      * with failed bundle connections
1281      */
1282     if (data_set->stop_needed != NULL) {
1283         for (GList *item = data_set->stop_needed; item; item = item->next) {
1284             pe_resource_t *container = item->data;
1285             pe_node_t *node = pe__current_node(container);
1286 
1287             if (node) {
1288                 stop_action(container, node, FALSE);
1289             }
1290         }
1291         g_list_free(data_set->stop_needed);
1292         data_set->stop_needed = NULL;
1293     }
1294 
1295     /* Now that we know status of all Pacemaker Remote connections and nodes,
1296      * we can stop connections for node shutdowns, and check the online status
1297      * of remote/guest nodes that didn't have any node history to unpack.
1298      */
1299     for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
1300         pe_node_t *this_node = gIter->data;
1301 
1302         if (!pe__is_guest_or_remote_node(this_node)) {
1303             continue;
1304         }
1305         if (this_node->details->shutdown
1306             && (this_node->details->remote_rsc != NULL)) {
1307             pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
1308                               "remote shutdown");
1309         }
1310         if (!this_node->details->unpacked) {
1311             determine_remote_online_status(data_set, this_node);
1312         }
1313     }
1314 
1315     return TRUE;
1316 }
1317 
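/* Illustrative shape of the status XML unpacked above (all values are
 * hypothetical; attribute names correspond to the XML_NODE_* constants
 * used in the online-status helpers below):
 *
 *   <status>
 *     <node_state id="1" uname="node1" in_ccm="true" crmd="online"
 *                 join="member" expected="member">
 *       <transient_attributes id="1"> ... </transient_attributes>
 *       <lrm id="1"> ... </lrm>
 *     </node_state>
 *   </status>
 */
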
1318 static gboolean
1319 determine_online_status_no_fencing(pe_working_set_t * data_set, xmlNode * node_state,
1320                                    pe_node_t * this_node)
1321 {
1322     gboolean online = FALSE;
1323     const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
1324     const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
1325     const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
1326     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
1327 
1328     if (!crm_is_true(in_cluster)) {
1329         crm_trace("Node is down: in_cluster=%s", crm_str(in_cluster));
1330 
1331     } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
1332         if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
1333             online = TRUE;
1334         } else {
1335             crm_debug("Node is not ready to run resources: %s", join);
1336         }
1337 
1338     } else if (this_node->details->expected_up == FALSE) {
1339         crm_trace("Controller is down: in_cluster=%s", crm_str(in_cluster));
1340         crm_trace("\tis_peer=%s, join=%s, expected=%s",
1341                   crm_str(is_peer), crm_str(join), crm_str(exp_state));
1342 
1343     } else {
1344         /* mark it unclean */
1345         pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
1346         crm_info("\tin_cluster=%s, is_peer=%s, join=%s, expected=%s",
1347                  crm_str(in_cluster), crm_str(is_peer), crm_str(join), crm_str(exp_state));
1348     }
1349     return online;
1350 }
1351 
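/* Roughly, with fencing disabled a node counts as online above only when
 * in_cluster is true, the controller reports ONLINESTATUS, and the join
 * state is "member". An unexpectedly down peer is still marked unclean
 * via pe_fence_node(), because without fencing its resources cannot be
 * safely recovered elsewhere.
 */
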
1352 static gboolean
1353 determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_state,
1354                                 pe_node_t * this_node)
1355 {
1356     gboolean online = FALSE;
1357     gboolean do_terminate = FALSE;
1358     bool crmd_online = FALSE;
1359     const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
1360     const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
1361     const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
1362     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
1363     const char *terminate = pe_node_attribute_raw(this_node, "terminate");
1364 
1365 /*
1366   - XML_NODE_IN_CLUSTER    ::= true|false
1367   - XML_NODE_IS_PEER       ::= online|offline
1368   - XML_NODE_JOIN_STATE    ::= member|down|pending|banned
1369   - XML_NODE_EXPECTED      ::= member|down
1370 */
1371 
1372     if (crm_is_true(terminate)) {
1373         do_terminate = TRUE;
1374 
1375     } else if (terminate != NULL && strlen(terminate) > 0) {
1376         /* could be a time() value */
1377         char t = terminate[0];
1378 
1379         if (t != '0' && isdigit(t)) {
1380             do_terminate = TRUE;
1381         }
1382     }
1383 
1384     crm_trace("%s: in_cluster=%s, is_peer=%s, join=%s, expected=%s, term=%d",
1385               this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
1386               crm_str(join), crm_str(exp_state), do_terminate);
1387 
1388     online = crm_is_true(in_cluster);
1389     crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
1390     if (exp_state == NULL) {
1391         exp_state = CRMD_JOINSTATE_DOWN;
1392     }
1393 
1394     if (this_node->details->shutdown) {
1395         crm_debug("%s is shutting down", this_node->details->uname);
1396 
1397         /* Slightly different criteria since we can't shut down a dead peer */
1398         online = crmd_online;
1399 
1400     } else if (in_cluster == NULL) {
1401         pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
1402 
1403     } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
1404         pe_fence_node(data_set, this_node, "peer failed the pacemaker membership criteria", FALSE);
1405 
1406     } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
1407 
1408         if (crm_is_true(in_cluster) || crmd_online) {
1409             crm_info("- Node %s is not ready to run resources", this_node->details->uname);
1410             this_node->details->standby = TRUE;
1411             this_node->details->pending = TRUE;
1412 
1413         } else {
1414             crm_trace("%s is down or still coming up", this_node->details->uname);
1415         }
1416 
1417     } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
1418                && crm_is_true(in_cluster) == FALSE && !crmd_online) {
1419         crm_info("Node %s was just shot", this_node->details->uname);
1420         online = FALSE;
1421 
1422     } else if (crm_is_true(in_cluster) == FALSE) {
1423         // Consider `priority-fencing-delay` for lost nodes
1424         pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
1425 
1426     } else if (!crmd_online) {
1427         pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
1428 
1429         /* Everything is running at this point, now check join state */
1430     } else if (do_terminate) {
1431         pe_fence_node(data_set, this_node, "termination was requested", FALSE);
1432 
1433     } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
1434         crm_info("Node %s is active", this_node->details->uname);
1435 
1436     } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
1437         crm_info("Node %s is not ready to run resources", this_node->details->uname);
1438         this_node->details->standby = TRUE;
1439         this_node->details->pending = TRUE;
1440 
1441     } else {
1442         pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
1443         crm_warn("%s: in-cluster=%s, is-peer=%s, join=%s, expected=%s, term=%d, shutdown=%d",
1444                  this_node->details->uname, crm_str(in_cluster), crm_str(is_peer),
1445                  crm_str(join), crm_str(exp_state), do_terminate, this_node->details->shutdown);
1446     }
1447 
1448     return online;
1449 }
1450 
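/* Example (hypothetical node attribute): setting terminate="true", or a
 * nonzero time such as terminate="1617264000", on a node makes
 * do_terminate TRUE above. If the node is otherwise healthy, that leads
 * to pe_fence_node(..., "termination was requested", ...).
 */
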
1451 static void
1452 determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
1453 {
1454     pe_resource_t *rsc = this_node->details->remote_rsc;
1455     pe_resource_t *container = NULL;
1456     pe_node_t *host = NULL;
1457 
1458     /* If there is a node state entry for a (former) Pacemaker Remote node
1459      * but no resource creating that node, the node's connection resource will
1460      * be NULL. Consider it an offline remote node in that case.
1461      */
1462     if (rsc == NULL) {
1463         this_node->details->online = FALSE;
1464         goto remote_online_done;
1465     }
1466 
1467     container = rsc->container;
1468 
1469     if (container && pcmk__list_of_1(rsc->running_on)) {
1470         host = rsc->running_on->data;
1471     }
1472 
1473     /* If the resource is currently started, mark it online. */
1474     if (rsc->role == RSC_ROLE_STARTED) {
1475         crm_trace("%s node %s presumed ONLINE because connection resource is started",
1476                   (container? "Guest" : "Remote"), this_node->details->id);
1477         this_node->details->online = TRUE;
1478     }
1479 
1480     /* consider this node shutting down if transitioning start->stop */
1481     if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
1482         crm_trace("%s node %s shutting down because connection resource is stopping",
1483                   (container? "Guest" : "Remote"), this_node->details->id);
1484         this_node->details->shutdown = TRUE;
1485     }
1486 
1487     /* Now check all the failure conditions. */
1488     if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
1489         crm_trace("Guest node %s UNCLEAN because guest resource failed",
1490                   this_node->details->id);
1491         this_node->details->online = FALSE;
1492         this_node->details->remote_requires_reset = TRUE;
1493 
1494     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
1495         crm_trace("%s node %s OFFLINE because connection resource failed",
1496                   (container? "Guest" : "Remote"), this_node->details->id);
1497         this_node->details->online = FALSE;
1498 
1499     } else if (rsc->role == RSC_ROLE_STOPPED
1500         || (container && container->role == RSC_ROLE_STOPPED)) {
1501 
1502         crm_trace("%s node %s OFFLINE because its resource is stopped",
1503                   (container? "Guest" : "Remote"), this_node->details->id);
1504         this_node->details->online = FALSE;
1505         this_node->details->remote_requires_reset = FALSE;
1506 
1507     } else if (host && (host->details->online == FALSE)
1508                && host->details->unclean) {
1509         crm_trace("Guest node %s UNCLEAN because host is unclean",
1510                   this_node->details->id);
1511         this_node->details->online = FALSE;
1512         this_node->details->remote_requires_reset = TRUE;
1513     }
1514 
1515 remote_online_done:
1516     crm_trace("Remote node %s online=%s",
1517         this_node->details->id, this_node->details->online ? "TRUE" : "FALSE");
1518 }
1519 
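/* Decision order above, first match wins: a failed container makes a
 * guest node UNCLEAN and schedules a reset; a failed connection makes the
 * node OFFLINE; a stopped connection or container makes it OFFLINE with
 * no reset needed; an unclean offline host makes a guest node UNCLEAN
 * with a reset needed.
 */
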
1520 static void
1521 determine_online_status(xmlNode * node_state, pe_node_t * this_node, pe_working_set_t * data_set)
1522 {
1523     gboolean online = FALSE;
1524     const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
1525 
1526     CRM_CHECK(this_node != NULL, return);
1527 
1528     this_node->details->shutdown = FALSE;
1529     this_node->details->expected_up = FALSE;
1530 
1531     if (pe__shutdown_requested(this_node)) {
1532         this_node->details->shutdown = TRUE;
1533 
1534     } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
1535         this_node->details->expected_up = TRUE;
1536     }
1537 
1538     if (this_node->details->type == node_ping) {
1539         this_node->details->unclean = FALSE;
1540         online = FALSE;         /* As far as resource management is concerned,
1541                                  * the node is safely offline.
1542                                  * Anyone caught abusing this logic will be shot
1543                                  */
1544 
1545     } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
1546         online = determine_online_status_no_fencing(data_set, node_state, this_node);
1547 
1548     } else {
1549         online = determine_online_status_fencing(data_set, node_state, this_node);
1550     }
1551 
1552     if (online) {
1553         this_node->details->online = TRUE;
1554 
1555     } else {
1556         /* remove node from contention */
1557         this_node->fixed = TRUE;
1558         this_node->weight = -INFINITY;
1559     }
1560 
1561     if (online && this_node->details->shutdown) {
1562         /* don't run resources here */
1563         this_node->fixed = TRUE;
1564         this_node->weight = -INFINITY;
1565     }
1566 
1567     if (this_node->details->type == node_ping) {
1568         crm_info("Node %s is not a Pacemaker node", this_node->details->uname);
1569 
1570     } else if (this_node->details->unclean) {
1571         pe_proc_warn("Node %s is unclean", this_node->details->uname);
1572 
1573     } else if (this_node->details->online) {
1574         crm_info("Node %s is %s", this_node->details->uname,
1575                  this_node->details->shutdown ? "shutting down" :
1576                  this_node->details->pending ? "pending" :
1577                  this_node->details->standby ? "standby" :
1578                  this_node->details->maintenance ? "maintenance" : "online");
1579 
1580     } else {
1581         crm_trace("Node %s is offline", this_node->details->uname);
1582     }
1583 }
1584 
1585 /*!
1586  * \internal
1587  * \brief Find the end of a resource's name, excluding any clone suffix
1588  *
1589  * \param[in] id  Resource ID to check
1590  *
1591  * \return Pointer to last character of resource's base name
1592  */
1593 const char *
1594 pe_base_name_end(const char *id)
1595 {
1596     if (!pcmk__str_empty(id)) {
1597         const char *end = id + strlen(id) - 1;
1598 
1599         for (const char *s = end; s > id; --s) {
1600             switch (*s) {
1601                 case '0':
1602                 case '1':
1603                 case '2':
1604                 case '3':
1605                 case '4':
1606                 case '5':
1607                 case '6':
1608                 case '7':
1609                 case '8':
1610                 case '9':
1611                     break;
1612                 case ':':
1613                     return (s == end)? s : (s - 1);
1614                 default:
1615                     return end;
1616             }
1617         }
1618         return end;
1619     }
1620     return NULL;
1621 }
1622 
1623 /*!
1624  * \internal
1625  * \brief Get a resource name excluding any clone suffix
1626  *
1627  * \param[in] last_rsc_id  Resource ID to check
1628  *
1629  * \return Pointer to newly allocated string with resource's base name
1630  * \note It is the caller's responsibility to free() the result.
1631  *       This asserts on error, so callers can assume result is not NULL.
1632  */
1633 char *
1634 clone_strip(const char *last_rsc_id)
1635 {
1636     const char *end = pe_base_name_end(last_rsc_id);
1637     char *basename = NULL;
1638 
1639     CRM_ASSERT(end);
1640     basename = strndup(last_rsc_id, end - last_rsc_id + 1);
1641     CRM_ASSERT(basename);
1642     return basename;
1643 }
1644 
1645 /*!
1646  * \internal
1647  * \brief Get the name of the first instance of a cloned resource
1648  *
1649  * \param[in] last_rsc_id  Resource ID to check
1650  *
1651  * \return Pointer to newly allocated string with resource's base name plus :0
1652  * \note It is the caller's responsibility to free() the result.
1653  *       This asserts on error, so callers can assume result is not NULL.
1654  */
1655 char *
1656 clone_zero(const char *last_rsc_id)
1657 {
1658     const char *end = pe_base_name_end(last_rsc_id);
1659     size_t base_name_len = end - last_rsc_id + 1;
1660     char *zero = NULL;
1661 
1662     CRM_ASSERT(end);
1663     zero = calloc(base_name_len + 3, sizeof(char));
1664     CRM_ASSERT(zero);
1665     memcpy(zero, last_rsc_id, base_name_len);
1666     zero[base_name_len] = ':';
1667     zero[base_name_len + 1] = '0';
1668     return zero;
1669 }
1670 
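/* A minimal usage sketch for the three name helpers above (illustrative
 * only; the resource ID is hypothetical and the block is compiled out):
 */
#if 0
static void
clone_name_example(void)
{
    const char *rsc_id = "myclone:10";
    const char *end = pe_base_name_end(rsc_id); // points at the final 'e'
    char *base = clone_strip(rsc_id);           // "myclone"
    char *zero = clone_zero(rsc_id);            // "myclone:0"

    crm_trace("%s: base ends at '%c', base name %s, first instance %s",
              rsc_id, *end, base, zero);
    free(base); // callers own the allocated strings
    free(zero);
}
#endif
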
1671 static pe_resource_t *
1672 create_fake_resource(const char *rsc_id, xmlNode * rsc_entry, pe_working_set_t * data_set)
1673 {
1674     pe_resource_t *rsc = NULL;
1675     xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
1676 
1677     copy_in_properties(xml_rsc, rsc_entry);
1678     crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
1679     crm_log_xml_debug(xml_rsc, "Orphan resource");
1680 
1681     if (!common_unpack(xml_rsc, &rsc, NULL, data_set)) {
1682         return NULL;
1683     }
1684 
1685     if (xml_contains_remote_node(xml_rsc)) {
1686         pe_node_t *node;
1687 
1688         crm_debug("Detected orphaned remote node %s", rsc_id);
1689         node = pe_find_node(data_set->nodes, rsc_id);
1690         if (node == NULL) {
1691             node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
1692         }
1693         link_rsc2remotenode(data_set, rsc);
1694 
1695         if (node) {
1696             crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
1697             node->details->shutdown = TRUE;
1698         }
1699     }
1700 
1701     if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
1702         /* This orphaned rsc needs to be mapped to a container. */
1703         crm_trace("Detected orphaned container filler %s", rsc_id);
1704         pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
1705     }
1706     pe__set_resource_flags(rsc, pe_rsc_orphan);
1707     data_set->resources = g_list_append(data_set->resources, rsc);
1708     return rsc;
1709 }
1710 
1711 /*!
1712  * \internal
1713  * \brief Create orphan instance for anonymous clone resource history
1714  */
1715 static pe_resource_t *
1716 create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
1717                         pe_node_t *node, pe_working_set_t *data_set)
1718 {
1719     pe_resource_t *top = pe__create_clone_child(parent, data_set);
1720 
1721     // find_rsc() because we might be a cloned group
1722     pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
1723 
1724     pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
1725                  top->id, parent->id, rsc_id, node->details->uname);
1726     return orphan;
1727 }
1728 
1729 /*!
1730  * \internal
1731  * \brief Check a node for an instance of an anonymous clone
1732  *
1733  * Return a child instance of the specified anonymous clone, in order of
1734  * preference: (1) the instance running on the specified node, if any;
1735  * (2) an inactive instance (i.e. within the total of clone-max instances);
1736  * (3) a newly created orphan (i.e. clone-max instances are already active).
1737  *
1738  * \param[in] data_set  Cluster information
1739  * \param[in] node      Node on which to check for instance
1740  * \param[in] parent    Clone to check
1741  * \param[in] rsc_id    Name of cloned resource in history (without instance)
1742  */
1743 static pe_resource_t *
1744 find_anonymous_clone(pe_working_set_t * data_set, pe_node_t * node, pe_resource_t * parent,
1745                      const char *rsc_id)
1746 {
1747     GList *rIter = NULL;
1748     pe_resource_t *rsc = NULL;
1749     pe_resource_t *inactive_instance = NULL;
1750     gboolean skip_inactive = FALSE;
1751 
1752     CRM_ASSERT(parent != NULL);
1753     CRM_ASSERT(pe_rsc_is_clone(parent));
1754     CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
1755 
1756     // Check for active (or partially active, for cloned groups) instance
1757     pe_rsc_trace(parent, "Looking for %s on %s in %s", rsc_id, node->details->uname, parent->id);
1758     for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
1759         GList *locations = NULL;
1760         pe_resource_t *child = rIter->data;
1761 
1762         /* Check whether this instance is already known to be active or pending
1763          * anywhere, at this stage of unpacking. Because this function is called
1764          * for a resource before the resource's individual operation history
1765          * entries are unpacked, locations will generally not contain the
1766          * desired node.
1767          *
1768          * However, there are three exceptions:
1769          * (1) when child is a cloned group and we have already unpacked the
1770          *     history of another member of the group on the same node;
1771          * (2) when we've already unpacked the history of another numbered
1772          *     instance on the same node (which can happen if globally-unique
1773          *     was flipped from true to false); and
1774          * (3) when we re-run calculations on the same data set as part of a
1775          *     simulation.
1776          */
1777         child->fns->location(child, &locations, 2);
1778         if (locations) {
1779             /* We should never associate the same numbered anonymous clone
1780              * instance with multiple nodes, and clone instances can't migrate,
1781              * so there must be only one location, regardless of history.
1782              */
1783             CRM_LOG_ASSERT(locations->next == NULL);
1784 
1785             if (((pe_node_t *)locations->data)->details == node->details) {
1786                 /* This child instance is active on the requested node, so check
1787                  * for a corresponding configured resource. We use find_rsc()
1788                  * instead of child because child may be a cloned group, and we
1789                  * need the particular member corresponding to rsc_id.
1790                  *
1791                  * If the history entry is orphaned, rsc will be NULL.
1792                  */
1793                 rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
1794                 if (rsc) {
1795                     /* If there are multiple instance history entries for an
1796                      * anonymous clone in a single node's history (which can
1797                      * happen if globally-unique is switched from true to
1798                      * false), we want to consider the instances beyond the
1799                      * first as orphans, even if there are inactive instance
1800                      * numbers available.
1801                      */
1802                     if (rsc->running_on) {
1803                         crm_notice("Active (now-)anonymous clone %s has "
1804                                    "multiple (orphan) instance histories on %s",
1805                                    parent->id, node->details->uname);
1806                         skip_inactive = TRUE;
1807                         rsc = NULL;
1808                     } else {
1809                         pe_rsc_trace(parent, "Resource %s, active", rsc->id);
1810                     }
1811                 }
1812             }
1813             g_list_free(locations);
1814 
1815         } else {
1816             pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
1817             if (!skip_inactive && !inactive_instance
1818                 && !pcmk_is_set(child->flags, pe_rsc_block)) {
1819                 // Remember one inactive instance in case we don't find active
1820                 inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
1821                                                           pe_find_clone);
1822 
1823                 /* ... but don't use it if it was already associated with a
1824                  * pending action on another node
1825                  */
1826                 if (inactive_instance && inactive_instance->pending_node
1827                     && (inactive_instance->pending_node->details != node->details)) {
1828                     inactive_instance = NULL;
1829                 }
1830             }
1831         }
1832     }
1833 
1834     if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
1835         pe_rsc_trace(parent, "Resource %s, empty slot", inactive_instance->id);
1836         rsc = inactive_instance;
1837     }
1838 
1839     /* If the resource has "requires" set to "quorum" or "nothing", and we don't
1840      * have a clone instance for every node, we don't want to consume a valid
1841      * instance number for unclean nodes. Such instances may appear to be active
1842      * according to the history, but should be considered inactive, so we can
1843      * start an instance elsewhere. Treat such instances as orphans.
1844      *
1845      * An exception is instances running on guest nodes -- since guest node
1846      * "fencing" is actually just a resource stop, requires shouldn't apply.
1847      *
1848      * @TODO Ideally, we'd use an inactive instance number if it is not needed
1849      * for any clean instances. However, we don't know that at this point.
1850      */
1851     if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
1852         && (!node->details->online || node->details->unclean)
1853         && !pe__is_guest_node(node)
1854         && !pe__is_universal_clone(parent, data_set)) {
1855 
1856         rsc = NULL;
1857     }
1858 
1859     if (rsc == NULL) {
1860         rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
1861         pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
1862     }
1863     return rsc;
1864 }
1865 
1866 static pe_resource_t *
1867 unpack_find_resource(pe_working_set_t * data_set, pe_node_t * node, const char *rsc_id,
1868                      xmlNode * rsc_entry)
1869 {
1870     pe_resource_t *rsc = NULL;
1871     pe_resource_t *parent = NULL;
1872 
1873     crm_trace("Looking for %s", rsc_id);
1874     rsc = pe_find_resource(data_set->resources, rsc_id);
1875 
1876     if (rsc == NULL) {
1877         /* If we didn't find the resource by its name in the operation history,
1878          * check it again as a clone instance. Even when clone-max=0, we create
1879          * a single :0 orphan to match against here.
1880          */
1881         char *clone0_id = clone_zero(rsc_id);
1882         pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
1883 
1884         if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
1885             rsc = clone0;
1886             parent = uber_parent(clone0);
1887             crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
1888         } else {
1889             crm_trace("%s is not known as %s either (orphan)",
1890                       rsc_id, clone0_id);
1891         }
1892         free(clone0_id);
1893 
1894     } else if (rsc->variant > pe_native) {
1895         crm_trace("Resource history for %s is orphaned because it is no longer primitive",
1896                   rsc_id);
1897         return NULL;
1898 
1899     } else {
1900         parent = uber_parent(rsc);
1901     }
1902 
1903     if (pe_rsc_is_anon_clone(parent)) {
1904 
1905         if (pe_rsc_is_bundled(parent)) {
1906             rsc = pe__find_bundle_replica(parent->parent, node);
1907         } else {
1908             char *base = clone_strip(rsc_id);
1909 
1910             rsc = find_anonymous_clone(data_set, node, parent, base);
1911             free(base);
1912             CRM_ASSERT(rsc != NULL);
1913         }
1914     }
1915 
1916     if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_casei)
1917         && !pcmk__str_eq(rsc_id, rsc->clone_name, pcmk__str_casei)) {
1918 
1919         free(rsc->clone_name);
1920         rsc->clone_name = strdup(rsc_id);
1921         pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
1922                      rsc_id, node->details->uname, rsc->id,
1923                      (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
1924     }
1925     return rsc;
1926 }
1927 
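/* Worked example (hypothetical IDs): if node1's history contains
 * "myclone:2" but the anonymous clone's instances are currently known as
 * myclone:0 and myclone:1, find_anonymous_clone() may return an inactive
 * instance such as myclone:1; the block above then records "myclone:2" as
 * that instance's clone_name so later history lookups still match it.
 */
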
1928 static pe_resource_t *
1929 process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t * data_set)
1930 {
1931     pe_resource_t *rsc = NULL;
1932     const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
1933 
1934     crm_debug("Detected orphan resource %s on %s", rsc_id, node->details->uname);
1935     rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
1936 
1937     if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
1938         pe__clear_resource_flags(rsc, pe_rsc_managed);
1939 
1940     } else {
1941         CRM_CHECK(rsc != NULL, return NULL);
1942         pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
1943         resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
1944     }
1945     return rsc;
1946 }
1947 
1948 static void
1949 process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
1950                   enum action_fail_response on_fail,
1951                   xmlNode * migrate_op, pe_working_set_t * data_set)
1952 {
1953     pe_node_t *tmpnode = NULL;
1954     char *reason = NULL;
1955     enum action_fail_response save_on_fail = action_fail_ignore;
1956 
1957     CRM_ASSERT(rsc);
1958     pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
1959                  rsc->id, role2text(rsc->role), node->details->uname, fail2text(on_fail));
1960 
1961     /* process current state */
1962     if (rsc->role != RSC_ROLE_UNKNOWN) {
1963         pe_resource_t *iter = rsc;
1964 
1965         while (iter) {
1966             if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
1967                 pe_node_t *n = pe__copy_node(node);
1968 
1969                 pe_rsc_trace(rsc, "%s (aka. %s) known on %s", rsc->id, rsc->clone_name,
1970                              n->details->uname);
1971                 g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
1972             }
1973             if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
1974                 break;
1975             }
1976             iter = iter->parent;
1977         }
1978     }
1979 
1980     /* If a managed resource is believed to be running, but node is down ... */
1981     if (rsc->role > RSC_ROLE_STOPPED
1982         && node->details->online == FALSE
1983         && node->details->maintenance == FALSE
1984         && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1985 
1986         gboolean should_fence = FALSE;
1987 
1988         /* If this is a guest node, fence it (regardless of whether fencing is
1989          * enabled, because guest node fencing is done by recovery of the
1990          * container resource rather than by the fencer). Mark the resource
1991          * we're processing as failed. When the guest comes back up, its
1992          * operation history in the CIB will be cleared, freeing the affected
1993          * resource to run again once we are sure we know its state.
1994          */
1995         if (pe__is_guest_node(node)) {
1996             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
1997             should_fence = TRUE;
1998 
1999         } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
2000             if (pe__is_remote_node(node) && node->details->remote_rsc
2001                 && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
2002 
2003                 /* Setting unseen means that fencing of the remote node will
2004                  * occur only if the connection resource is not going to start
2005                  * somewhere. This allows connection resources on a failed
2006                  * cluster node to move to another node without requiring the
2007                  * remote nodes to be fenced as well.
2008                  */
2009                 node->details->unseen = TRUE;
2010                 reason = crm_strdup_printf("%s is active there (fencing will be"
2011                                            " revoked if remote connection can "
2012                                            "be re-established elsewhere)",
2013                                            rsc->id);
2014             }
2015             should_fence = TRUE;
2016         }
2017 
2018         if (should_fence) {
2019             if (reason == NULL) {
2020                reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
2021             }
2022             pe_fence_node(data_set, node, reason, FALSE);
2023         }
2024         free(reason);
2025     }
2026 
2027     /* Save the failure information and pass it to native_add_running(),
          * which needs it to calculate priority-fencing-delay correctly. */
2028     save_on_fail = on_fail;
2029 
2030     if (node->details->unclean) {
2031         /* No extra processing needed
2032          * Also allows resources to be started again after a node is shot
2033          */
2034         on_fail = action_fail_ignore;
2035     }
2036 
2037     switch (on_fail) {
2038         case action_fail_ignore:
2039             /* nothing to do */
2040             break;
2041 
2042         case action_fail_demote:
2043             pe__set_resource_flags(rsc, pe_rsc_failed);
2044             demote_action(rsc, node, FALSE);
2045             break;
2046 
2047         case action_fail_fence:
2048             /* treat it as if it is still running
2049              * but also mark the node as unclean
2050              */
2051             reason = crm_strdup_printf("%s failed there", rsc->id);
2052             pe_fence_node(data_set, node, reason, FALSE);
2053             free(reason);
2054             break;
2055 
2056         case action_fail_standby:
2057             node->details->standby = TRUE;
2058             node->details->standby_onfail = TRUE;
2059             break;
2060 
2061         case action_fail_block:
2062             /* is_managed == FALSE will prevent any
2063              * actions being sent for the resource
2064              */
2065             pe__clear_resource_flags(rsc, pe_rsc_managed);
2066             pe__set_resource_flags(rsc, pe_rsc_block);
2067             break;
2068 
2069         case action_fail_migrate:
2070             /* make sure it comes up somewhere else
2071              * or not at all
2072              */
2073             resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
2074             break;
2075 
2076         case action_fail_stop:
2077             pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
2078             break;
2079 
2080         case action_fail_recover:
2081             if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
2082                 pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
2083                 stop_action(rsc, node, FALSE);
2084             }
2085             break;
2086 
2087         case action_fail_restart_container:
2088             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
2089             if (rsc->container && pe_rsc_is_bundled(rsc)) {
2090                 /* A bundle's remote connection can run on a different node than
2091                  * the bundle's container. We don't necessarily know where the
2092                  * container is running yet, so remember it and add a stop
2093                  * action for it later.
2094                  */
2095                 data_set->stop_needed = g_list_prepend(data_set->stop_needed,
2096                                                        rsc->container);
2097             } else if (rsc->container) {
2098                 stop_action(rsc->container, node, FALSE);
2099             } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
2100                 stop_action(rsc, node, FALSE);
2101             }
2102             break;
2103 
2104         case action_fail_reset_remote:
2105             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
2106             if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
2107                 tmpnode = NULL;
2108                 if (rsc->is_remote_node) {
2109                     tmpnode = pe_find_node(data_set->nodes, rsc->id);
2110                 }
2111                 if (tmpnode &&
2112                     pe__is_remote_node(tmpnode) &&
2113                     tmpnode->details->remote_was_fenced == 0) {
2114 
2115                     /* The remote connection resource failed in a way that
2116                      * should result in fencing the remote node.
2117                      */
2118                     pe_fence_node(data_set, tmpnode,
2119                                   "remote connection is unrecoverable", FALSE);
2120                 }
2121             }
2122 
2123             /* Require the stop action regardless of whether fencing is occurring */
2124             if (rsc->role > RSC_ROLE_STOPPED) {
2125                 stop_action(rsc, node, FALSE);
2126             }
2127 
2128             /* if reconnect delay is in use, prevent the connection from exiting the
2129              * "STOPPED" role until the failure is cleared by the delay timeout. */
2130             if (rsc->remote_reconnect_ms) {
2131                 pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
2132             }
2133             break;
2134     }
2135 
2136     /* Ensure a remote node connection failure forces an unclean remote
2137      * node to be fenced. By setting unseen = FALSE, the remote node failure
2138      * will result in a fencing operation regardless of whether we attempt
2139      * to reconnect to the remote node in this transition. */
2140     if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
2141         tmpnode = pe_find_node(data_set->nodes, rsc->id);
2142         if (tmpnode && tmpnode->details->unclean) {
2143             tmpnode->details->unseen = FALSE;
2144         }
2145     }
2146 
2147     if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
2148         if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2149             if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
2150                 pcmk__config_warn("Detected active orphan %s running on %s",
2151                                   rsc->id, node->details->uname);
2152             } else {
2153                 pcmk__config_warn("Resource '%s' must be stopped manually on "
2154                                   "%s because cluster is configured not to "
2155                                   "stop active orphans",
2156                                   rsc->id, node->details->uname);
2157             }
2158         }
2159 
2160         native_add_running(rsc, node, data_set, (save_on_fail != action_fail_ignore));
2161         switch (on_fail) {
2162             case action_fail_ignore:
2163                 break;
2164             case action_fail_demote:
2165             case action_fail_block:
2166                 pe__set_resource_flags(rsc, pe_rsc_failed);
2167                 break;
2168             default:
2169                 pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
2170                 break;
2171         }
2172 
2173     } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) {
2174         /* Only do this for older status sections that included instance numbers
2175          * Otherwise stopped instances will appear as orphans
2176          */
2177         pe_rsc_trace(rsc, "Resetting clone_name %s for %s (stopped)", rsc->clone_name, rsc->id);
2178         free(rsc->clone_name);
2179         rsc->clone_name = NULL;
2180 
2181     } else {
2182         GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
2183                                                        FALSE);
2184         GList *gIter = possible_matches;
2185 
2186         for (; gIter != NULL; gIter = gIter->next) {
2187             pe_action_t *stop = (pe_action_t *) gIter->data;
2188 
2189             pe__set_action_flags(stop, pe_action_optional);
2190         }
2191 
2192         g_list_free(possible_matches);
2193     }
2194 }
2195 
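/* For reference, the on_fail values handled above come from the
 * operation's on-fail meta-attribute, for example (hypothetical op):
 *
 *   <op id="myrsc-monitor-10" name="monitor" interval="10s"
 *       on-fail="standby"/>
 *
 * which maps to action_fail_standby and, as above, puts the failed
 * resource's node into standby.
 */
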
2196 /* create active recurring operations as optional */
2197 static void
2198 process_recurring(pe_node_t * node, pe_resource_t * rsc,
2199                   int start_index, int stop_index,
2200                   GList *sorted_op_list, pe_working_set_t * data_set)
2201 {
2202     int counter = -1;
2203     const char *task = NULL;
2204     const char *status = NULL;
2205     GList *gIter = sorted_op_list;
2206 
2207     CRM_ASSERT(rsc);
2208     pe_rsc_trace(rsc, "%s: Start index %d, stop index = %d", rsc->id, start_index, stop_index);
2209 
2210     for (; gIter != NULL; gIter = gIter->next) {
2211         xmlNode *rsc_op = (xmlNode *) gIter->data;
2212 
2213         guint interval_ms = 0;
2214         char *key = NULL;
2215         const char *id = ID(rsc_op);
2216 
2217         counter++;
2218 
2219         if (node->details->online == FALSE) {
2220             pe_rsc_trace(rsc, "Skipping %s/%s: node is offline", rsc->id, node->details->uname);
2221             break;
2222 
2223             /* Need to check if there's a monitor for role="Stopped" */
2224         } else if (start_index < stop_index && counter <= stop_index) {
2225             pe_rsc_trace(rsc, "Skipping %s/%s: resource is not active", id, node->details->uname);
2226             continue;
2227 
2228         } else if (counter < start_index) {
2229             pe_rsc_trace(rsc, "Skipping %s/%s: old %d", id, node->details->uname, counter);
2230             continue;
2231         }
2232 
2233         crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
2234         if (interval_ms == 0) {
2235             pe_rsc_trace(rsc, "Skipping %s/%s: non-recurring", id, node->details->uname);
2236             continue;
2237         }
2238 
2239         status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
2240         if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
2241             pe_rsc_trace(rsc, "Skipping %s/%s: status", id, node->details->uname);
2242             continue;
2243         }
2244         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
2245         /* create the action */
2246         key = pcmk__op_key(rsc->id, task, interval_ms);
2247         pe_rsc_trace(rsc, "Creating %s/%s", key, node->details->uname);
2248         custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
2249     }
2250 }
2251 
2252 void
2253 calculate_active_ops(GList *sorted_op_list, int *start_index, int *stop_index)
2254 {
2255     int counter = -1;
2256     int implied_monitor_start = -1;
2257     int implied_clone_start = -1;
2258     const char *task = NULL;
2259     const char *status = NULL;
2260     GList *gIter = sorted_op_list;
2261 
2262     *stop_index = -1;
2263     *start_index = -1;
2264 
2265     for (; gIter != NULL; gIter = gIter->next) {
2266         xmlNode *rsc_op = (xmlNode *) gIter->data;
2267 
2268         counter++;
2269 
2270         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
2271         status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
2272 
2273         if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
2274             && pcmk__str_eq(status, "0", pcmk__str_casei)) {
2275             *stop_index = counter;
2276 
2277         } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
2278             *start_index = counter;
2279 
2280         } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
2281             const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
2282 
2283             if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
2284                 implied_monitor_start = counter;
2285             }
2286         } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
2287             implied_clone_start = counter;
2288         }
2289     }
2290 
2291     if (*start_index == -1) {
2292         if (implied_clone_start != -1) {
2293             *start_index = implied_clone_start;
2294         } else if (implied_monitor_start != -1) {
2295             *start_index = implied_monitor_start;
2296         }
2297     }
2298 }
2299 
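/* Worked example (illustrative history): given a sorted operation list of
 *
 *   counter 0: start   (rc=0)
 *   counter 1: monitor (rc=0)
 *   counter 2: stop    (rc=0)
 *   counter 3: start   (rc=0)
 *
 * the loop leaves *stop_index == 2 and *start_index == 3, so only the
 * final start marks the resource as active. When no start or
 * promote/demote is recorded, a successful monitor (rc 0 or 8) can stand
 * in as the implied start instead.
 */
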
2300 // If resource history entry has shutdown lock, remember lock node and time
2301 static void
2302 unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node,
2303                      pe_working_set_t *data_set)
2304 {
2305     time_t lock_time = 0;   // When lock started (i.e. node shutdown time)
2306 
2307     if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
2308                                  &lock_time) == pcmk_ok) && (lock_time != 0)) {
2309 
2310         if ((data_set->shutdown_lock > 0)
2311             && (get_effective_time(data_set)
2312                 > (lock_time + data_set->shutdown_lock))) {
2313             pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
2314                         rsc->id, node->details->uname);
2315             pe__clear_resource_history(rsc, node, data_set);
2316         } else {
2317             rsc->lock_node = node;
2318             rsc->lock_time = lock_time;
2319         }
2320     }
2321 }
2322 
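/* Example (hypothetical values): with shutdown-lock-limit=20min, so that
 * data_set->shutdown_lock is 1200 seconds, a lock taken when the node
 * shut down at time T is honored until T + 1200; after that the check
 * above clears the resource's history on the node instead of locking it.
 */
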
2323 /*!
2324  * \internal
2325  * \brief Unpack one lrm_resource entry from a node's CIB status
2326  *
2327  * \param[in] node       Node whose status is being unpacked
2328  * \param[in] lrm_resource  lrm_resource XML being unpacked
2329  * \param[in] data_set   Cluster working set
2330  *
2331  * \return Resource corresponding to the entry, or NULL if no operation history
2332  */
2333 static pe_resource_t *
2334 unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
2335                     pe_working_set_t *data_set)
2336 {
2337     GList *gIter = NULL;
2338     int stop_index = -1;
2339     int start_index = -1;
2340     enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
2341 
2342     const char *task = NULL;
2343     const char *rsc_id = ID(lrm_resource);
2344 
2345     pe_resource_t *rsc = NULL;
2346     GList *op_list = NULL;
2347     GList *sorted_op_list = NULL;
2348 
2349     xmlNode *migrate_op = NULL;
2350     xmlNode *rsc_op = NULL;
2351     xmlNode *last_failure = NULL;
2352 
2353     enum action_fail_response on_fail = action_fail_ignore;
2354     enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
2355 
2356     if (rsc_id == NULL) {
2357         crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
2358                  " entry without id");
2359         return NULL;
2360     }
2361     crm_trace("Unpacking " XML_LRM_TAG_RESOURCE " for %s on %s",
2362               rsc_id, node->details->uname);
2363 
2364     // Build a list of individual lrm_rsc_op entries, so we can sort them
2365     for (rsc_op = first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP);
2366          rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
2367 
2368         op_list = g_list_prepend(op_list, rsc_op);
2369     }
2370 
2371     if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
2372         if (op_list == NULL) {
2373             // If there are no operations, there is nothing to do
2374             return NULL;
2375         }
2376     }
2377 
2378     /* find the resource */
2379     rsc = unpack_find_resource(data_set, node, rsc_id, lrm_resource);
2380     if (rsc == NULL) {
2381         if (op_list == NULL) {
2382             // If there are no operations, there is nothing to do
2383             return NULL;
2384         } else {
2385             rsc = process_orphan_resource(lrm_resource, node, data_set);
2386         }
2387     }
2388     CRM_ASSERT(rsc != NULL);
2389 
2390     // Check whether the resource is "shutdown-locked" to this node
2391     if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
2392         unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
2393     }
2394 
2395     /* process operations */
2396     saved_role = rsc->role;
2397     rsc->role = RSC_ROLE_UNKNOWN;
2398     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
2399 
2400     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
2401         xmlNode *rsc_op = (xmlNode *) gIter->data;
2402 
2403         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
2404         if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
2405             migrate_op = rsc_op;
2406         }
2407 
2408         unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
2409     }
2410 
2411     /* create active recurring operations as optional */
2412     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
2413     process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
2414 
2415     /* No need to free the contents, which are owned by the input XML */
2416     g_list_free(sorted_op_list);
2417 
2418     process_rsc_state(rsc, node, on_fail, migrate_op, data_set);
2419 
2420     if (get_target_role(rsc, &req_role)) {
2421         if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
2422             pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
2423 
2424         } else if (req_role > rsc->next_role) {
2425             pe_rsc_info(rsc, "%s: Not overwriting calculated next role %s"
2426                         " with requested next role %s",
2427                         rsc->id, role2text(rsc->next_role), role2text(req_role));
2428         }
2429     }
2430 
2431     if (saved_role > rsc->role) {
2432         rsc->role = saved_role;
2433     }
2434 
2435     return rsc;
2436 }
2437 
2438 static void
2439 handle_orphaned_container_fillers(xmlNode * lrm_rsc_list, pe_working_set_t * data_set)
2440 {
2441     xmlNode *rsc_entry = NULL;
2442     for (rsc_entry = pcmk__xe_first_child(lrm_rsc_list); rsc_entry != NULL;
2443          rsc_entry = pcmk__xe_next(rsc_entry)) {
2444 
2445         pe_resource_t *rsc;
2446         pe_resource_t *container;
2447         const char *rsc_id;
2448         const char *container_id;
2449 
2450         if (!pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_casei)) {
2451             continue;
2452         }
2453 
2454         container_id = crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER);
2455         rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
2456         if (container_id == NULL || rsc_id == NULL) {
2457             continue;
2458         }
2459 
2460         container = pe_find_resource(data_set->resources, container_id);
2461         if (container == NULL) {
2462             continue;
2463         }
2464 
2465         rsc = pe_find_resource(data_set->resources, rsc_id);
2466         if (rsc == NULL ||
2467             !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
2468             rsc->container != NULL) {
2469             continue;
2470         }
2471 
2472         pe_rsc_trace(rsc, "Mapped container of orphaned resource %s to %s",
2473                      rsc->id, container_id);
2474         rsc->container = container;
2475         container->fillers = g_list_append(container->fillers, rsc);
2476     }
2477 }
2478 
2479 /*!
2480  * \internal
2481  * \brief Unpack one node's lrm status section
2482  *
2483  * \param[in] node      Node whose status is being unpacked
2484  * \param[in] xml       CIB node state XML
2485  * \param[in] data_set  Cluster working set
2486  */
2487 static void
2488 unpack_node_lrm(pe_node_t *node, xmlNode *xml, pe_working_set_t *data_set)
2489 {
2490     bool found_orphaned_container_filler = false;
2491 
2492     // Drill down to lrm_resources section
2493     xml = find_xml_node(xml, XML_CIB_TAG_LRM, FALSE);
2494     if (xml == NULL) {
2495         return;
2496     }
2497     xml = find_xml_node(xml, XML_LRM_TAG_RESOURCES, FALSE);
2498     if (xml == NULL) {
2499         return;
2500     }
2501 
2502     // Unpack each lrm_resource entry
2503     for (xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
2504          rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
2505 
2506         pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
2507 
2508         if ((rsc != NULL)
2509             && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
2510             found_orphaned_container_filler = true;
2511         }
2512     }
2513 
2514     /* Now that all resource state has been unpacked for this node, map any
2515      * orphaned container fillers to their container resource.
2516      */
2517     if (found_orphaned_container_filler) {
2518         handle_orphaned_container_fillers(xml, data_set);
2519     }
2520 }
2521 
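/* Illustrative shape of the per-node lrm section walked above (values
 * hypothetical; element names per the XML_LRM_* constants):
 *
 *   <lrm id="1">
 *     <lrm_resources>
 *       <lrm_resource id="myrsc" type="Dummy" class="ocf" provider="pacemaker">
 *         <lrm_rsc_op id="myrsc_last_0" operation="start" ... />
 *       </lrm_resource>
 *     </lrm_resources>
 *   </lrm>
 */
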
2522 static void
2523 set_active(pe_resource_t * rsc)
2524 {
2525     pe_resource_t *top = uber_parent(rsc);
2526 
2527     if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
2528         rsc->role = RSC_ROLE_UNPROMOTED;
2529     } else {
2530         rsc->role = RSC_ROLE_STARTED;
2531     }
2532 }
2533 
2534 static void
2535 set_node_score(gpointer key, gpointer value, gpointer user_data)
2536 {
2537     pe_node_t *node = value;
2538     int *score = user_data;
2539 
2540     node->weight = *score;
2541 }
2542 
2543 #define STATUS_PATH_MAX 1024
2544 static xmlNode *
2545 find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
2546             bool success_only, pe_working_set_t *data_set)
2547 {
2548     int offset = 0;
2549     char xpath[STATUS_PATH_MAX];
2550     xmlNode *xml = NULL;
2551 
2552     offset += snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//node_state[@uname='%s']", node);
2553     offset +=
2554         snprintf(xpath + offset, STATUS_PATH_MAX - offset, "//" XML_LRM_TAG_RESOURCE "[@id='%s']",
2555                  resource);
2556 
2557     /* Need to check against transition_magic too? */
2558     if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
2559         offset +=
2560             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
2561                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_target='%s']", op,
2562                      source);
2563     } else if (source && pcmk__str_eq(op, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
2564         offset +=
2565             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
2566                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s' and @migrate_source='%s']", op,
2567                      source);
2568     } else {
2569         offset +=
2570             snprintf(xpath + offset, STATUS_PATH_MAX - offset,
2571                      "/" XML_LRM_TAG_RSC_OP "[@operation='%s']", op);
2572     }
2573 
2574     CRM_LOG_ASSERT(offset > 0);
2575     xml = get_xpath_object(xpath, data_set->input, LOG_DEBUG);
2576 
2577     if (xml && success_only) {
2578         int rc = PCMK_OCF_UNKNOWN_ERROR;
2579         int status = PCMK_LRM_OP_ERROR;
2580 
2581         crm_element_value_int(xml, XML_LRM_ATTR_RC, &rc);
2582         crm_element_value_int(xml, XML_LRM_ATTR_OPSTATUS, &status);
2583         if ((rc != PCMK_OCF_OK) || (status != PCMK_LRM_OP_DONE)) {
2584             return NULL;
2585         }
2586     }
2587     return xml;
2588 }
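
/* An example of the XPath built above (hypothetical names): for resource
 * "myrsc" and operation "stop" on node "node1" with no migration source,
 * the query is
 *
 *   //node_state[@uname='node1']//lrm_resource[@id='myrsc']
 *       /lrm_rsc_op[@operation='stop']
 *
 * and with success_only=TRUE a match is returned only if its rc-code is
 * 0 (PCMK_OCF_OK) and its op-status is 0 (PCMK_LRM_OP_DONE).
 */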
2589 
2590 static int
2591 pe__call_id(xmlNode *op_xml)
2592 {
2593     int id = 0;
2594 
2595     if (op_xml) {
2596         crm_element_value_int(op_xml, XML_LRM_ATTR_CALLID, &id);
2597     }
2598     return id;
2599 }
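
/* A usage sketch of the two helpers above, with hypothetical resource and
 * node names; wrapped in #if 0 because it is illustrative only, not part of
 * unpack.c: look up the last successful stop of "myrsc" on "node1" and get
 * its call id.
 */
#if 0
static int
example_last_stop_call_id(pe_working_set_t *data_set)
{
    xmlNode *stop_op = find_lrm_op("myrsc", CRMD_ACTION_STOP, "node1",
                                   NULL,  /* no migration-source filter */
                                   TRUE,  /* successful results only */
                                   data_set);

    return pe__call_id(stop_op); /* 0 when no matching op is recorded */
}
#endif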
2600 
2601 /*!
2602  * \brief Check whether a stop happened on the same node after some event
2603  *
2604  * \param[in] rsc       Resource being checked
2605  * \param[in] node      Node being checked
2606  * \param[in] xml_op    Event that stop is being compared to
2607  * \param[in] data_set  Cluster working set
2608  *
2609  * \return TRUE if stop happened after event, FALSE otherwise
2610  *
2611  * \note This is really unnecessary, but kept as a safety mechanism. We
2612  *       currently don't save more than one successful event in history, so this
2613  *       only matters when processing really old CIB files that we don't
2614  *       technically support anymore, or as preparation for logging an extended
2615  *       history in the future.
2616  */
2617 static bool
2618 stop_happened_after(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
2619                     pe_working_set_t *data_set)
2620 {
2621     xmlNode *stop_op = find_lrm_op(rsc->id, CRMD_ACTION_STOP,
2622                                    node->details->uname, NULL, TRUE, data_set);
2623 
2624     return (stop_op && (pe__call_id(stop_op) > pe__call_id(xml_op)));
2625 }
2626 
2627 static void
2628 unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
2629                           pe_working_set_t *data_set)
2630 {
2631     /* A successful migration sequence is:
2632      *    migrate_to on source node
2633      *    migrate_from on target node
2634      *    stop on source node
2635      *
2636      * If a migrate_to is followed by a stop, the entire migration (successful
2637      * or failed) is complete, and we don't care what happened on the target.
2638      *
2639      * If no migrate_from has happened, the migration is considered to be
2640      * "partial". If the migrate_from failed, make sure the resource gets
2641      * stopped on both source and target (if up).
2642      *
2643      * If the migrate_to and migrate_from both succeeded (which also implies the
2644      * resource is no longer running on the source), but there is no stop, the
2645      * migration is considered to be "dangling". Schedule a stop on the source
2646      * in this case.
2647      */
2648     int from_rc = 0;
2649     int from_status = 0;
2650     pe_node_t *target_node = NULL;
2651     pe_node_t *source_node = NULL;
2652     xmlNode *migrate_from = NULL;
2653     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
2654     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
2655 
2656     // Sanity check
2657     CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
2658 
2659     if (stop_happened_after(rsc, node, xml_op, data_set)) {
2660         return;
2661     }
2662 
2663     // Clones are not allowed to migrate, so role can't be promoted
2664     rsc->role = RSC_ROLE_STARTED;
2665 
2666     target_node = pe_find_node(data_set->nodes, target);
2667     source_node = pe_find_node(data_set->nodes, source);
2668 
2669     // Check whether there was a migrate_from action on the target
2670     migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
2671                                source, FALSE, data_set);
2672     if (migrate_from) {
2673         crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
2674         crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
2675         pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d",
2676                      ID(migrate_from), target, from_status, from_rc);
2677     }
2678 
2679     if (migrate_from && from_rc == PCMK_OCF_OK
2680         && from_status == PCMK_LRM_OP_DONE) {
2681         /* The migrate_to and migrate_from both succeeded, so mark the migration
2682          * as "dangling". This will be used to schedule a stop action on the
2683          * source without affecting the target.
2684          */
2685         pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op),
2686                      source);
2687         rsc->role = RSC_ROLE_STOPPED;
2688         rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
2689 
2690     } else if (migrate_from && (from_status != PCMK_LRM_OP_PENDING)) { // Failed
2691         if (target_node && target_node->details->online) {
2692             pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
2693                          target_node->details->online);
2694             native_add_running(rsc, target_node, data_set, TRUE);
2695         }
2696 
2697     } else { // Pending, or complete but erased
2698         if (target_node && target_node->details->online) {
2699             pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
2700                          target_node->details->online);
2701 
2702             native_add_running(rsc, target_node, data_set, FALSE);
2703             if (source_node && source_node->details->online) {
2704                 /* This is a partial migration: the migrate_to completed
2705                  * successfully on the source, but the migrate_from has not
2706                  * completed. Remember the source and target; if the newly
2707                  * chosen target remains the same when we schedule actions
2708                  * later, we may continue with the migration.
2709                  */
2710                 rsc->partial_migration_target = target_node;
2711                 rsc->partial_migration_source = source_node;
2712             }
2713         } else {
2714             /* Consider it failed here - forces a restart, prevents migration */
2715             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
2716             pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
2717         }
2718     }
2719 }
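
/* A condensed outcome map for the above (a reading aid, not normative):
 *
 *   stop recorded after the migrate_to     -> migration over; nothing to do
 *   migrate_from succeeded, no stop yet    -> "dangling": stop the source
 *   migrate_from failed (not pending)      -> active on target, if online
 *   no migrate_from, source and target up  -> "partial": may be continued
 *   no migrate_from, target down           -> treated as failed; forces a
 *                                             restart and bars migration
 */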
2720 
2721 // Is there an action_name in node_name's rsc history newer than call_id?
2722 static bool
2723 newer_op(pe_resource_t *rsc, const char *action_name, const char *node_name,
2724          int call_id, pe_working_set_t *data_set)
2725 {
2726     xmlNode *action = find_lrm_op(rsc->id, action_name, node_name, NULL, TRUE,
2727                                   data_set);
2728 
2729     return pe__call_id(action) > call_id;
2730 }
2731 
2732 static void
2733 unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
2734                           pe_working_set_t *data_set)
2735 {
2736     int target_stop_id = 0;
2737     int target_migrate_from_id = 0;
2738     xmlNode *target_stop = NULL;
2739     xmlNode *target_migrate_from = NULL;
2740     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
2741     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
2742 
2743     // Sanity check
2744     CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
2745 
2746     /* If a migration failed, we have to assume the resource is active. Clones
2747      * are not allowed to migrate, so role can't be promoted.
2748      */
2749     rsc->role = RSC_ROLE_STARTED;
2750 
2751     // Check for stop on the target
2752     target_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, target, NULL,
2753                               TRUE, data_set);
2754     target_stop_id = pe__call_id(target_stop);
2755 
2756     // Check for migrate_from on the target
2757     target_migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
2758                                       source, TRUE, data_set);
2759     target_migrate_from_id = pe__call_id(target_migrate_from);
2760 
2761     if ((target_stop == NULL) || (target_stop_id < target_migrate_from_id)) {
2762         /* There was no stop on the target, or a stop that happened before a
2763          * migrate_from, so assume the resource is still active on the target
2764          * (if it is up).
2765          */
2766         pe_node_t *target_node = pe_find_node(data_set->nodes, target);
2767 
2768         pe_rsc_trace(rsc, "stop (%d) + migrate_from (%d)",
2769                      target_stop_id, target_migrate_from_id);
2770         if (target_node && target_node->details->online) {
2771             native_add_running(rsc, target_node, data_set, FALSE);
2772         }
2773 
2774     } else if (target_migrate_from == NULL) {
2775         /* We know there was a stop on the target, but there may not have been a
2776          * migrate_from (the stop could have happened before migrate_from was
2777          * scheduled or attempted).
2778          *
2779          * That means this could be a "dangling" migration. But first, check
2780          * whether there is a newer successful stop, start, or migrate_from on
2781          * the source node -- it's possible the failed migration was followed by
2782          * a successful stop, full restart, or migration in the reverse
2783          * direction, in which case we don't want to force a stop.
2784          */
2785         int source_migrate_to_id = pe__call_id(xml_op);
2786 
2787         if (newer_op(rsc, CRMD_ACTION_MIGRATED, source, source_migrate_to_id,
2788                      data_set)
2789             || newer_op(rsc, CRMD_ACTION_START, source, source_migrate_to_id,
2790                      data_set)
2791             || newer_op(rsc, CRMD_ACTION_STOP, source, source_migrate_to_id,
2792                      data_set)) {
2793             return;
2794         }
2795 
2796         // Mark node as having dangling migration so we can force a stop later
2797         rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
2798     }
2799 }
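
/* Reading aid for the failed-migrate_to cases above (not normative):
 *
 *   no stop on the target, or the stop predates the migrate_from
 *       -> assume the resource is still active on the target (if online)
 *   stop on the target, no migrate_from, and nothing newer succeeded on
 *   the source (no later stop, start, or migrate_from)
 *       -> record a dangling migration so a stop is forced on the source
 */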
2800 
2801 static void
2802 unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node,
2803                             xmlNode *xml_op, pe_working_set_t *data_set)
2804 {
2805     xmlNode *source_stop = NULL;
2806     xmlNode *source_migrate_to = NULL;
2807     const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
2808     const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
2809 
2810     // Sanity check
2811     CRM_CHECK(source && target && !strcmp(target, node->details->uname), return);
2812 
2813     /* If a migration failed, we have to assume the resource is active. Clones
2814      * are not allowed to migrate, so role can't be promoted.
2815      */
2816     rsc->role = RSC_ROLE_STARTED;
2817 
2818     // Check for a stop on the source
2819     source_stop = find_lrm_op(rsc->id, CRMD_ACTION_STOP, source, NULL,
2820                               TRUE, data_set);
2821 
2822     // Check for a migrate_to on the source
2823     source_migrate_to = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATE,
2824                                     source, target, TRUE, data_set);
2825 
2826     if ((source_stop == NULL)
2827         || (pe__call_id(source_stop) < pe__call_id(source_migrate_to))) {
2828         /* There was no stop on the source, or a stop that happened before
2829          * migrate_to, so assume the resource is still active on the source (if
2830          * it is up).
2831          */
2832         pe_node_t *source_node = pe_find_node(data_set->nodes, source);
2833 
2834         if (source_node && source_node->details->online) {
2835             native_add_running(rsc, source_node, data_set, TRUE);
2836         }
2837     }
2838 }
2839 
2840 static void
2841 record_failed_op(xmlNode *op, const pe_node_t *node,
2842                  const pe_resource_t *rsc, pe_working_set_t *data_set)
2843 {
2844     xmlNode *xIter = NULL;
2845     const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY);
2846 
2847     if (node->details->online == FALSE) {
2848         return;
2849     }
2850 
2851     for (xIter = data_set->failed->children; xIter; xIter = xIter->next) {
2852         const char *key = crm_element_value(xIter, XML_LRM_ATTR_TASK_KEY);
2853         const char *uname = crm_element_value(xIter, XML_ATTR_UNAME);
2854 
2855         if (pcmk__str_eq(op_key, key, pcmk__str_casei) && pcmk__str_eq(uname, node->details->uname, pcmk__str_casei)) {
2856             crm_trace("Skipping duplicate entry %s on %s", op_key, node->details->uname);
2857             return;
2858         }
2859     }
2860 
2861     crm_trace("Adding entry %s on %s", op_key, node->details->uname);
2862     crm_xml_add(op, XML_ATTR_UNAME, node->details->uname);
2863     crm_xml_add(op, XML_LRM_ATTR_RSCID, rsc->id);
2864     add_node_copy(data_set->failed, op);
2865 }
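
/* Deduplication above keys on the (operation key, node name) pair: for
 * example (hypothetical key), a second "myrsc_monitor_10000" failure on
 * node1 is skipped, while the same key failing on node2 is still recorded.
 */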
2866 
2867 static const char *get_op_key(xmlNode *xml_op)
2868 {
2869     const char *key = crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY);
2870     if(key == NULL) {
2871         key = ID(xml_op);
2872     }
2873     return key;
2874 }
2875 
2876 static const char *
2877 last_change_str(xmlNode *xml_op)
2878 {
2879     time_t when;
2880     const char *when_s = NULL;
2881 
2882     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
2883                                 &when) == pcmk_ok) {
2884         when_s = pcmk__epoch2str(&when);
2885         if (when_s) {
2886             // Skip day of week to make message shorter
2887             when_s = strchr(when_s, ' ');
2888             if (when_s) {
2889                 ++when_s;
2890             }
2891         }
2892     }
2893     return ((when_s && *when_s)? when_s : "unknown time");
2894 }
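
/* Example (hypothetical timestamp): pcmk__epoch2str() yields a string along
 * the lines of "Wed Dec  8 09:00:00 2021"; advancing past the first space
 * leaves "Dec  8 09:00:00 2021", which is what the log messages embed.
 */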
2895 
2896 /*!
2897  * \internal
2898  * \brief Compare two on-fail values
2899  *
2900  * \param[in] first   One on-fail value to compare
2901  * \param[in] second  The other on-fail value to compare
2902  *
2903  * \return A negative number if second is more severe than first, zero if they
2904  *         are equal, or a positive number if first is more severe than second.
2905  * \note This is only needed until the action_fail_response values can be
2906  *       renumbered at the next API compatibility break.
2907  */
2908 static int
2909 cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
2910 {
2911     switch (first) {
2912         case action_fail_demote:
2913             switch (second) {
2914                 case action_fail_ignore:
2915                     return 1;
2916                 case action_fail_demote:
2917                     return 0;
2918                 default:
2919                     return -1;
2920             }
2921             break;
2922 
2923         case action_fail_reset_remote:
2924             switch (second) {
2925                 case action_fail_ignore:
2926                 case action_fail_demote:
2927                 case action_fail_recover:
2928                     return 1;
2929                 case action_fail_reset_remote:
2930                     return 0;
2931                 default:
2932                     return -1;
2933             }
2934             break;
2935 
2936         case action_fail_restart_container:
2937             switch (second) {
2938                 case action_fail_ignore:
2939                 case action_fail_demote:
2940                 case action_fail_recover:
2941                 case action_fail_reset_remote:
2942                     return 1;
2943                 case action_fail_restart_container:
2944                     return 0;
2945                 default:
2946                     return -1;
2947             }
2948             break;
2949 
2950         default:
2951             break;
2952     }
2953     switch (second) {
2954         case action_fail_demote:
2955             return (first == action_fail_ignore)? -1 : 1;
2956 
2957         case action_fail_reset_remote:
2958             switch (first) {
2959                 case action_fail_ignore:
2960                 case action_fail_demote:
2961                 case action_fail_recover:
2962                     return -1;
2963                 default:
2964                     return 1;
2965             }
2966             break;
2967 
2968         case action_fail_restart_container:
2969             switch (first) {
2970                 case action_fail_ignore:
2971                 case action_fail_demote:
2972                 case action_fail_recover:
2973                 case action_fail_reset_remote:
2974                     return -1;
2975                 default:
2976                     return 1;
2977             }
2978             break;
2979 
2980         default:
2981             break;
2982     }
2983     return first - second;
2984 }
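
/* The severity ordering the switches above implement, flattened out (an
 * inference from the code, given how action_fail_response is currently
 * numbered):
 *
 *   ignore < demote < recover < reset_remote < restart_container
 *          < all remaining values, in enum order
 *
 * Once the enum is renumbered (see the note above), this collapses to a
 * plain integer comparison.
 */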
2985 
2986 static void
2987 unpack_rsc_op_failure(pe_resource_t *rsc, pe_node_t *node, int rc, xmlNode *xml_op, xmlNode **last_failure,
2988                       enum action_fail_response *on_fail, pe_working_set_t *data_set)
2989 {
2990     guint interval_ms = 0;
2991     bool is_probe = false;
2992     pe_action_t *action = NULL;
2993 
2994     const char *key = get_op_key(xml_op);
2995     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
2996     const char *exit_reason = crm_element_value(xml_op,
2997                                                 XML_LRM_ATTR_EXIT_REASON);
2998 
2999     CRM_ASSERT(rsc);
3000     CRM_CHECK(task != NULL, return);
3001 
3002     *last_failure = xml_op;
3003 
3004     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
3005     if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) {
3006         is_probe = true;
3007     }
3008 
3009     if (exit_reason == NULL) {
3010         exit_reason = "";
3011     }
3012 
3013     if (!pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)
3014         && (rc == PCMK_OCF_NOT_INSTALLED)) {
3015         crm_trace("Unexpected result (%s%s%s) was recorded for "
3016                   "%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
3017                   services_ocf_exitcode_str(rc),
3018                   (*exit_reason? ": " : ""), exit_reason,
3019                   (is_probe? "probe" : task), rsc->id, node->details->uname,
3020                   last_change_str(xml_op), rc, ID(xml_op));
3021     } else {
3022         crm_warn("Unexpected result (%s%s%s) was recorded for "
3023                   "%s of %s on %s at %s " CRM_XS " rc=%d id=%s",
3024                  services_ocf_exitcode_str(rc),
3025                  (*exit_reason? ": " : ""), exit_reason,
3026                  (is_probe? "probe" : task), rsc->id, node->details->uname,
3027                  last_change_str(xml_op), rc, ID(xml_op));
3028 
3029         if (is_probe && (rc != PCMK_OCF_OK)
3030             && (rc != PCMK_OCF_NOT_RUNNING)
3031             && (rc != PCMK_OCF_RUNNING_PROMOTED)) {
3032 
3033             /* A failed (not just unexpected) probe result could mean the user
3034              * didn't know resources would be probed even where they can't run.
3035              */
3036             crm_notice("If it is not possible for %s to run on %s, see "
3037                        "the resource-discovery option for location constraints",
3038                        rsc->id, node->details->uname);
3039         }
3040 
3041         record_failed_op(xml_op, node, rsc, data_set);
3042     }
3043 
3044     action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
3045     if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
3046         pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail),
3047                      fail2text(action->on_fail), action->uuid, key);
3048         *on_fail = action->on_fail;
3049     }
3050 
3051     if (!strcmp(task, CRMD_ACTION_STOP)) {
3052         resource_location(rsc, node, -INFINITY, "__stop_fail__", data_set);
3053 
3054     } else if (!strcmp(task, CRMD_ACTION_MIGRATE)) {
3055         unpack_migrate_to_failure(rsc, node, xml_op, data_set);
3056 
3057     } else if (!strcmp(task, CRMD_ACTION_MIGRATED)) {
3058         unpack_migrate_from_failure(rsc, node, xml_op, data_set);
3059 
3060     } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
3061         rsc->role = RSC_ROLE_PROMOTED;
3062 
3063     } else if (!strcmp(task, CRMD_ACTION_DEMOTE)) {
3064         if (action->on_fail == action_fail_block) {
3065             rsc->role = RSC_ROLE_PROMOTED;
3066             pe__set_next_role(rsc, RSC_ROLE_STOPPED,
3067                               "demote with on-fail=block");
3068 
3069         } else if(rc == PCMK_OCF_NOT_RUNNING) {
3070             rsc->role = RSC_ROLE_STOPPED;
3071 
3072         } else {
3073             /* Staying in the promoted role would put the scheduler and
3074              * controller into a loop. Setting the role to unpromoted is not
3075              * dangerous because the resource will be stopped as part of
3076              * recovery, and any promotion will be ordered after that stop.
3077              */
3078             rsc->role = RSC_ROLE_UNPROMOTED;
3079         }
3080     }
3081 
3082     if(is_probe && rc == PCMK_OCF_NOT_INSTALLED) {
3083         /* leave stopped */
3084         pe_rsc_trace(rsc, "Leaving %s stopped", rsc->id);
3085         rsc->role = RSC_ROLE_STOPPED;
3086 
3087     } else if (rsc->role < RSC_ROLE_STARTED) {
3088         pe_rsc_trace(rsc, "Setting %s active", rsc->id);
3089         set_active(rsc);
3090     }
3091 
3092     pe_rsc_trace(rsc, "Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
3093                  rsc->id, role2text(rsc->role),
3094                  pcmk__btoa(node->details->unclean),
3095                  fail2text(action->on_fail), role2text(action->fail_role));
3096 
3097     if (action->fail_role != RSC_ROLE_STARTED && rsc->next_role < action->fail_role) {
3098         pe__set_next_role(rsc, action->fail_role, "failure");
3099     }
3100 
3101     if (action->fail_role == RSC_ROLE_STOPPED) {
3102         int score = -INFINITY;
3103 
3104         pe_resource_t *fail_rsc = rsc;
3105 
3106         if (fail_rsc->parent) {
3107             pe_resource_t *parent = uber_parent(fail_rsc);
3108 
3109             if (pe_rsc_is_clone(parent)
3110                 && !pcmk_is_set(parent->flags, pe_rsc_unique)) {
3111                 /* For anonymous clones, if one instance fails an operation
3112                  * with on-fail=stop, the whole clone must be stopped. Do this
3113                  * by preventing the parent from starting again. */
3114                 fail_rsc = parent;
3115             }
3116         }
3117         crm_notice("%s will not be started under current conditions",
3118                    fail_rsc->id);
3119         /* make sure it doesn't come up again */
3120         if (fail_rsc->allowed_nodes != NULL) {
3121             g_hash_table_destroy(fail_rsc->allowed_nodes);
3122         }
3123         fail_rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
3124         g_hash_table_foreach(fail_rsc->allowed_nodes, set_node_score, &score);
3125     }
3126 
3127     pe_free_action(action);
3128 }
3129 
3130 /*!
3131  * \internal
3132  * \brief Remap operation status based on action result
3133  *
3134  * Given an action result, determine an appropriate operation status for the
3135  * purposes of responding to the action (the status provided by the executor is
3136  * not directly usable since the executor does not know what was expected).
3137  *
3138  * \param[in,out] rsc        Resource that operation history entry is for
3139  * \param[in]     rc         Actual return code of operation
3140  * \param[in]     target_rc  Expected return code of operation
3141  * \param[in]     node       Node where operation was executed
3142  * \param[in]     xml_op     Operation history entry XML from CIB status
3143  * \param[in,out] on_fail    What should be done about the result
3144  * \param[in]     data_set   Current cluster working set
3145  *
3146  * \return Operation status based on return code and action info
3147  * \note This may update the resource's current and next role.
3148  */
3149 static int
3150 determine_op_status(
3151     pe_resource_t *rsc, int rc, int target_rc, pe_node_t *node, xmlNode *xml_op, enum action_fail_response *on_fail, pe_working_set_t *data_set)
3152 {
3153     guint interval_ms = 0;
3154     bool is_probe = false;
3155     int result = PCMK_LRM_OP_DONE;
3156     const char *key = get_op_key(xml_op);
3157     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
3158     const char *exit_reason = crm_element_value(xml_op,
3159                                                 XML_LRM_ATTR_EXIT_REASON);
3160 
3161     CRM_ASSERT(rsc);
3162     CRM_CHECK(task != NULL, return PCMK_LRM_OP_ERROR);
3163 
3164     if (exit_reason == NULL) {
3165         exit_reason = "";
3166     }
3167 
3168     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
3169     if ((interval_ms == 0) && !strcmp(task, CRMD_ACTION_STATUS)) {
3170         is_probe = true;
3171         task = "probe";
3172     }
3173 
3174     if (target_rc < 0) {
3175         /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
3176          * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
3177          * target_rc in the transition key, which (along with the similar case
3178          * of a corrupted transition key in the CIB) will be reported to this
3179          * function as -1. Pacemaker 2.0+ does not support rolling upgrades from
3180          * those versions or processing of saved CIB files from those versions,
3181          * so we do not need to care much about this case.
3182          */
3183         result = PCMK_LRM_OP_ERROR;
3184         crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)",
3185                  key, node->details->uname);
3186 
3187     } else if (target_rc != rc) {
3188         result = PCMK_LRM_OP_ERROR;
3189         pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)",
3190                      key, node->details->uname,
3191                      target_rc, services_ocf_exitcode_str(target_rc),
3192                      rc, services_ocf_exitcode_str(rc),
3193                      (*exit_reason? ": " : ""), exit_reason);
3194     }
3195 
3196     switch (rc) {
3197         case PCMK_OCF_OK:
3198             if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
3199                 result = PCMK_LRM_OP_DONE;
3200                 pe_rsc_info(rsc, "Probe found %s active on %s at %s",
3201                             rsc->id, node->details->uname,
3202                             last_change_str(xml_op));
3203             }
3204             break;
3205 
3206         case PCMK_OCF_NOT_RUNNING:
3207             if (is_probe || (target_rc == rc)
3208                 || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3209 
3210                 result = PCMK_LRM_OP_DONE;
3211                 rsc->role = RSC_ROLE_STOPPED;
3212 
3213                 /* clear any previous failure actions */
3214                 *on_fail = action_fail_ignore;
3215                 pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "not running");
3216             }
3217             break;
3218 
3219         case PCMK_OCF_RUNNING_PROMOTED:
3220             if (is_probe && (rc != target_rc)) {
3221                 result = PCMK_LRM_OP_DONE;
3222                 pe_rsc_info(rsc,
3223                             "Probe found %s active and promoted on %s at %s",
3224                             rsc->id, node->details->uname,
3225                             last_change_str(xml_op));
3226             }
3227             rsc->role = RSC_ROLE_PROMOTED;
3228             break;
3229 
3230         case PCMK_OCF_DEGRADED_PROMOTED:
3231         case PCMK_OCF_FAILED_PROMOTED:
3232             rsc->role = RSC_ROLE_PROMOTED;
3233             result = PCMK_LRM_OP_ERROR;
3234             break;
3235 
3236         case PCMK_OCF_NOT_CONFIGURED:
3237             result = PCMK_LRM_OP_ERROR_FATAL;
3238             break;
3239 
3240         case PCMK_OCF_UNIMPLEMENT_FEATURE:
3241             if (interval_ms > 0) {
3242                 result = PCMK_LRM_OP_NOTSUPPORTED;
3243                 break;
3244             }
3245             // fall through
3246         case PCMK_OCF_NOT_INSTALLED:
3247         case PCMK_OCF_INVALID_PARAM:
3248         case PCMK_OCF_INSUFFICIENT_PRIV:
3249             if (!pe_can_fence(data_set, node)
3250                 && !strcmp(task, CRMD_ACTION_STOP)) {
3251                 /* If a stop fails and we can't fence, there's nothing else we can do */
3252                 pe_proc_err("No further recovery can be attempted for %s "
3253                             "because %s on %s failed (%s%s%s) at %s "
3254                             CRM_XS " rc=%d id=%s", rsc->id, task,
3255                             node->details->uname, services_ocf_exitcode_str(rc),
3256                             (*exit_reason? ": " : ""), exit_reason,
3257                             last_change_str(xml_op), rc, ID(xml_op));
3258                 pe__clear_resource_flags(rsc, pe_rsc_managed);
3259                 pe__set_resource_flags(rsc, pe_rsc_block);
3260             }
3261             result = PCMK_LRM_OP_ERROR_HARD;
3262             break;
3263 
3264         default:
3265             if (result == PCMK_LRM_OP_DONE) {
3266                 crm_info("Treating unknown exit status %d from %s of %s "
3267                          "on %s at %s as failure",
3268                          rc, task, rsc->id, node->details->uname,
3269                          last_change_str(xml_op));
3270                 result = PCMK_LRM_OP_ERROR;
3271             }
3272             break;
3273     }
3274     return result;
3275 }
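
/* Condensed remap table (derived from the logic above; the code is
 * authoritative):
 *
 *   target_rc missing (< 0)                    -> PCMK_LRM_OP_ERROR
 *   rc != target_rc                            -> ERROR, unless a case below
 *                                                 downgrades it to DONE
 *   OCF_NOT_RUNNING (probe/expected/unmanaged) -> DONE, role = stopped
 *   OCF_RUNNING_PROMOTED on a probe            -> DONE, role = promoted
 *   OCF_DEGRADED_PROMOTED / FAILED_PROMOTED    -> ERROR, role = promoted
 *   OCF_NOT_CONFIGURED                         -> ERROR_FATAL
 *   OCF_UNIMPLEMENT_FEATURE (recurring op)     -> NOTSUPPORTED
 *   NOT_INSTALLED / INVALID_PARAM /
 *       INSUFFICIENT_PRIV                      -> ERROR_HARD
 */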
3276 
3277 // Return TRUE if this is a last failure of a start or monitor and the resource's parameters have changed since
3278 static bool
3279 should_clear_for_param_change(xmlNode *xml_op, const char *task,
3280                               pe_resource_t *rsc, pe_node_t *node,
3281                               pe_working_set_t *data_set)
3282 {
3283     if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
3284 
3285         if (pe__bundle_needs_remote_name(rsc, data_set)) {
3286             /* We haven't allocated resources yet, so we can't reliably
3287              * substitute addr parameters for the REMOTE_CONTAINER_HACK.
3288              * When that's needed, defer the check until later.
3289              */
3290             pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
3291                                 data_set);
3292 
3293         } else {
3294             op_digest_cache_t *digest_data = NULL;
3295 
3296             digest_data = rsc_action_digest_cmp(rsc, xml_op, node, data_set);
3297             switch (digest_data->rc) {
3298                 case RSC_DIGEST_UNKNOWN:
3299                     crm_trace("Resource %s history entry %s on %s"
3300                               " has no digest to compare",
3301                               rsc->id, get_op_key(xml_op), node->details->id);
3302                     break;
3303                 case RSC_DIGEST_MATCH:
3304                     break;
3305                 default:
3306                     return TRUE;
3307             }
3308         }
3309     }
3310     return FALSE;
3311 }
3312 
3313 // Order action after fencing of remote node, given connection rsc
3314 static void
3315 order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
3316                            pe_working_set_t *data_set)
3317 {
3318     pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
3319 
3320     if (remote_node) {
3321         pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
3322                                          FALSE, data_set);
3323 
3324         order_actions(fence, action, pe_order_implies_then);
3325     }
3326 }
3327 
3328 static bool
3329 should_ignore_failure_timeout(pe_resource_t *rsc, xmlNode *xml_op,
3330                               const char *task, guint interval_ms,
3331                               bool is_last_failure, pe_working_set_t *data_set)
3332 {
3333     /* Clearing failures of recurring monitors has special concerns. The
3334      * executor reports only changes in the monitor result, so if the
3335      * monitor is still active and still getting the same failure result,
3336      * that will go undetected after the failure is cleared.
3337      *
3338      * Also, the operation history will have the time when the recurring
3339      * monitor result changed to the given code, not the time when the
3340      * result last happened.
3341      *
3342      * @TODO We probably should clear such failures only when the failure
3343      * timeout has passed since the last occurrence of the failed result.
3344      * However we don't record that information. We could maybe approximate
3345      * that by clearing only if there is a more recent successful monitor or
3346      * stop result, but we don't even have that information at this point
3347      * since we are still unpacking the resource's operation history.
3348      *
3349      * This is especially important for remote connection resources with a
3350      * reconnect interval, so in that case, we skip clearing failures
3351      * if the remote node hasn't been fenced.
3352      */
3353     if (rsc->remote_reconnect_ms
3354         && pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
3355         && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
3356 
3357         pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
3358 
3359         if (remote_node && !remote_node->details->remote_was_fenced) {
3360             if (is_last_failure) {
3361                 crm_info("Waiting to clear monitor failure for remote node %s"
3362                          " until fencing has occurred", rsc->id);
3363             }
3364             return TRUE;
3365         }
3366     }
3367     return FALSE;
3368 }
3369 
3370 /*!
3371  * \internal
3372  * \brief Check operation age and schedule failure clearing when appropriate
3373  *
3374  * This function has two distinct purposes. The first is to check whether an
3375  * operation history entry is expired (i.e. the resource has a failure timeout,
3376  * the entry is older than the timeout, and the resource either has no fail
3377  * count or its fail count is entirely older than the timeout). The second is to
3378  * schedule fail count clearing when appropriate (i.e. the operation is expired
3379  * schedule fail count clearing when appropriate: when the entry is expired
3380  * and either the resource's fail count has expired too or the entry is a
3381  * last_failure for a remote connection resource with a reconnect interval,
3382  * or when the entry is an unexpired last_failure of a start or monitor
3383  * whose resource parameters have changed since the operation.
3384  * \param[in] rsc       Resource that operation happened to
3385  * \param[in] node      Node that operation happened on
3386  * \param[in] rc        Actual result of operation
3387  * \param[in] xml_op    Operation history entry XML
3388  * \param[in] data_set  Current working set
3389  *
3390  * \return TRUE if operation history entry is expired, FALSE otherwise
3391  */
3392 static bool
3393 check_operation_expiry(pe_resource_t *rsc, pe_node_t *node, int rc,
3394                        xmlNode *xml_op, pe_working_set_t *data_set)
3395 {
3396     bool expired = FALSE;
3397     bool is_last_failure = pcmk__ends_with(ID(xml_op), "_last_failure_0");
3398     time_t last_run = 0;
3399     guint interval_ms = 0;
3400     int unexpired_fail_count = 0;
3401     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
3402     const char *clear_reason = NULL;
3403 
3404     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
3405 
3406     if ((rsc->failure_timeout > 0)
3407         && (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
3408                                     &last_run) == 0)) {
3409 
3410         // Resource has a failure-timeout, and history entry has a timestamp
3411 
3412         time_t now = get_effective_time(data_set);
3413         time_t last_failure = 0;
3414 
3415         // Is this particular operation history older than the failure timeout?
3416         if ((now >= (last_run + rsc->failure_timeout))
3417             && !should_ignore_failure_timeout(rsc, xml_op, task, interval_ms,
3418                                               is_last_failure, data_set)) {
3419             expired = TRUE;
3420         }
3421 
3422         // Does the resource as a whole have an unexpired fail count?
3423         unexpired_fail_count = pe_get_failcount(node, rsc, &last_failure,
3424                                                 pe_fc_effective, xml_op,
3425                                                 data_set);
3426 
3427         // Update scheduler recheck time according to *last* failure
3428         crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d timeout=%ds"
3429                   " last-failure@%lld",
3430                   ID(xml_op), (long long) last_run, (expired? "" : "not "),
3431                   (long long) now, unexpired_fail_count, rsc->failure_timeout,
3432                   (long long) last_failure);
3433         last_failure += rsc->failure_timeout + 1;
3434         if (unexpired_fail_count && (now < last_failure)) {
3435             pe__update_recheck_time(last_failure, data_set);
3436         }
3437     }
3438 
3439     if (expired) {
3440         if (pe_get_failcount(node, rsc, NULL, pe_fc_default, xml_op, data_set)) {
3441 
3442             // There is a fail count ignoring timeout
3443 
3444             if (unexpired_fail_count == 0) {
3445                 // There is no fail count considering timeout
3446                 clear_reason = "it expired";
3447 
3448             } else {
3449                 /* This operation is old, but there is an unexpired fail count.
3450                  * In a properly functioning cluster, this should only be
3451                  * possible if this operation is not a failure (otherwise the
3452                  * fail count should be expired too), so this is really just a
3453                  * failsafe.
3454                  */
3455                 expired = FALSE;
3456             }
3457 
3458         } else if (is_last_failure && rsc->remote_reconnect_ms) {
3459             /* Clear any expired last failure when reconnect interval is set,
3460              * even if there is no fail count.
3461              */
3462             clear_reason = "reconnect interval is set";
3463         }
3464     }
3465 
3466     if (!expired && is_last_failure
3467         && should_clear_for_param_change(xml_op, task, rsc, node, data_set)) {
3468         clear_reason = "resource parameters have changed";
3469     }
3470 
3471     if (clear_reason != NULL) {
3472         // Schedule clearing of the fail count
3473         pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason,
3474                                                     data_set);
3475 
3476         if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
3477             && rsc->remote_reconnect_ms) {
3478             /* If we're clearing a remote connection due to a reconnect
3479              * interval, we want to wait until any scheduled fencing
3480              * completes.
3481              *
3482              * We could limit this to remote_node->details->unclean, but at
3483              * this point, that's always true (it won't be reliable until
3484              * after unpack_node_history() is done).
3485              */
3486             crm_info("Clearing %s failure will wait until any scheduled "
3487                      "fencing of %s completes", task, rsc->id);
3488             order_after_remote_fencing(clear_op, rsc, data_set);
3489         }
3490     }
3491 
3492     if (expired && (interval_ms == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
3493         switch(rc) {
3494             case PCMK_OCF_OK:
3495             case PCMK_OCF_NOT_RUNNING:
3496             case PCMK_OCF_RUNNING_PROMOTED:
3497             case PCMK_OCF_DEGRADED:
3498             case PCMK_OCF_DEGRADED_PROMOTED:
3499                 // Don't expire probes that return these values
3500                 expired = FALSE;
3501                 break;
3502         }
3503     }
3504 
3505     return expired;
3506 }
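
/* Worked example with hypothetical numbers: with failure-timeout=60s and a
 * history entry whose last-rc-change was at t=1000, the entry is expired
 * once the effective time reaches t=1060. If pe_get_failcount() still sees
 * an unexpired failure from t=1030, the cluster recheck time is set to
 * 1030 + 60 + 1 = 1091 so the scheduler re-runs when that count expires.
 */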
3507 
3508 int pe__target_rc_from_xml(xmlNode *xml_op)
3509 {
3510     int target_rc = 0;
3511     const char *key = crm_element_value(xml_op, XML_ATTR_TRANSITION_KEY);
3512 
3513     if (key == NULL) {
3514         return -1;
3515     }
3516     decode_transition_key(key, NULL, NULL, NULL, &target_rc);
3517     return target_rc;
3518 }
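
/* Example (hypothetical key): a transition key of the form
 * "3:15:0:0c4bd859-..." encodes action number 3, transition 15, target rc 0,
 * and the transition UUID. decode_transition_key() extracts the third field,
 * so this would return 0; a missing or corrupt key is reported as -1, as
 * handled in determine_op_status() above.
 */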
3519 
3520 static enum action_fail_response
3521 get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t *data_set)
3522 {
3523     enum action_fail_response result = action_fail_recover;
3524     pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set);
3525 
3526     result = action->on_fail;
3527     pe_free_action(action);
3528 
3529     return result;
3530 }
3531 
3532 static void
3533 update_resource_state(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, const char *task, int rc,
3534                       xmlNode *last_failure, enum action_fail_response *on_fail, pe_working_set_t *data_set)
3535 {
3536     gboolean clear_past_failure = FALSE;
3537 
3538     CRM_ASSERT(rsc);
3539     CRM_ASSERT(xml_op);
3540 
3541     if (rc == PCMK_OCF_NOT_RUNNING) {
3542         clear_past_failure = TRUE;
3543 
3544     } else if (rc == PCMK_OCF_NOT_INSTALLED) {
3545         rsc->role = RSC_ROLE_STOPPED;
3546 
3547     } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
3548         if (last_failure) {
3549             const char *op_key = get_op_key(xml_op);
3550             const char *last_failure_key = get_op_key(last_failure);
3551 
3552             if (pcmk__str_eq(op_key, last_failure_key, pcmk__str_casei)) {
3553                 clear_past_failure = TRUE;
3554             }
3555         }
3556 
3557         if (rsc->role < RSC_ROLE_STARTED) {
3558             set_active(rsc);
3559         }
3560 
3561     } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
3562         rsc->role = RSC_ROLE_STARTED;
3563         clear_past_failure = TRUE;
3564 
3565     } else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
3566         rsc->role = RSC_ROLE_STOPPED;
3567         clear_past_failure = TRUE;
3568 
3569     } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
3570         rsc->role = RSC_ROLE_PROMOTED;
3571         clear_past_failure = TRUE;
3572 
3573     } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
3574 
3575         if (*on_fail == action_fail_demote) {
3576             // Demote clears an error only if on-fail=demote
3577             clear_past_failure = TRUE;
3578         }
3579         rsc->role = RSC_ROLE_UNPROMOTED;
3580 
3581     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
3582         rsc->role = RSC_ROLE_STARTED;
3583         clear_past_failure = TRUE;
3584 
3585     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
3586         unpack_migrate_to_success(rsc, node, xml_op, data_set);
3587 
3588     } else if (rsc->role < RSC_ROLE_STARTED) {
3589         pe_rsc_trace(rsc, "%s active on %s", rsc->id, node->details->uname);
3590         set_active(rsc);
3591     }
3592 
3593     /* clear any previous failure actions */
3594     if (clear_past_failure) {
3595         switch (*on_fail) {
3596             case action_fail_stop:
3597             case action_fail_fence:
3598             case action_fail_migrate:
3599             case action_fail_standby:
3600                 pe_rsc_trace(rsc, "%s.%s is not cleared by a completed stop",
3601                              rsc->id, fail2text(*on_fail));
3602                 break;
3603 
3604             case action_fail_block:
3605             case action_fail_ignore:
3606             case action_fail_demote:
3607             case action_fail_recover:
3608             case action_fail_restart_container:
3609                 *on_fail = action_fail_ignore;
3610                 pe__set_next_role(rsc, RSC_ROLE_UNKNOWN, "clear past failures");
3611                 break;
3612             case action_fail_reset_remote:
3613                 if (rsc->remote_reconnect_ms == 0) {
3614                     /* With no reconnect interval, the connection is allowed to
3615                      * start again after the remote node is fenced and
3616                      * completely stopped. (With a reconnect interval, we wait
3617                      * for the failure to be cleared entirely before attempting
3618                      * to reconnect.)
3619                      */
3620                     *on_fail = action_fail_ignore;
3621                     pe__set_next_role(rsc, RSC_ROLE_UNKNOWN,
3622                                       "clear past failures and reset remote");
3623                 }
3624                 break;
3625         }
3626     }
3627 }
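
/* Task-to-role transitions applied above, condensed (the failure-clearing
 * rules are in the final switch of the function):
 *
 *   start        -> started       stop    -> stopped
 *   promote      -> promoted      demote  -> unpromoted
 *   migrate_from -> started       monitor -> unchanged (set active if new)
 */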
3628 
3629 /*!
3630  * \internal
3631  * \brief Remap informational monitor results to usual values
3632  *
3633  * Certain OCF result codes are for providing extended information to the
3634  * user about services that aren't yet failed but not entirely healthy either.
3635  * These must be treated as the "normal" result by pacemaker.
3636  *
3637  * \param[in] rc        Actual result of a monitor action
3638  * \param[in] xml_op    Operation history XML
3639  * \param[in] node      Node that operation happened on
3640  * \param[in] rsc       Resource that operation happened to
3641  * \param[in] data_set  Cluster working set
3642  *
3643  * \return Result code that pacemaker should use
3644  *
3645  * \note If the result is remapped, and the node is not shutting down or failed,
3646  *       the operation will be recorded in the data set's list of failed
3647  *       operations, to highlight it for the user.
3648  */
3649 static int
3650 remap_monitor_rc(int rc, xmlNode *xml_op, const pe_node_t *node,
3651                  const pe_resource_t *rsc, pe_working_set_t *data_set)
3652 {
3653     int remapped_rc = pcmk__effective_rc(rc);
3654 
3655     if (rc != remapped_rc) {
3656         crm_trace("Remapping monitor result %d to %d", rc, remapped_rc);
3657         if (!node->details->shutdown || node->details->online) {
3658             record_failed_op(xml_op, node, rsc, data_set);
3659         }
3660     }
3661     return remapped_rc;
3662 }
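
/* As understood here, pcmk__effective_rc() maps PCMK_OCF_DEGRADED to
 * PCMK_OCF_OK and PCMK_OCF_DEGRADED_PROMOTED to PCMK_OCF_RUNNING_PROMOTED;
 * a degraded service is therefore treated as running, while the original
 * result is still surfaced to the user via record_failed_op().
 */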
3663 
3664 static void
3665 unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
3666               xmlNode **last_failure, enum action_fail_response *on_fail,
3667               pe_working_set_t *data_set)
3668 {
3669     int rc = 0;
3670     int task_id = 0;
3671     int target_rc = 0;
3672     int status = PCMK_LRM_OP_UNKNOWN;
3673     guint interval_ms = 0;
3674     const char *task = NULL;
3675     const char *task_key = NULL;
3676     const char *exit_reason = NULL;
3677     bool expired = FALSE;
3678     pe_resource_t *parent = rsc;
3679     enum action_fail_response failure_strategy = action_fail_recover;
3680 
3681     CRM_CHECK(rsc && node && xml_op, return);
3682 
3683     target_rc = pe__target_rc_from_xml(xml_op);
3684     task_key = get_op_key(xml_op);
3685     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
3686     exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
3687     if (exit_reason == NULL) {
3688         exit_reason = "";
3689     }
3690 
3691     crm_element_value_int(xml_op, XML_LRM_ATTR_RC, &rc);
3692     crm_element_value_int(xml_op, XML_LRM_ATTR_CALLID, &task_id);
3693     crm_element_value_int(xml_op, XML_LRM_ATTR_OPSTATUS, &status);
3694     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
3695 
3696     CRM_CHECK(task != NULL, return);
3697     CRM_CHECK(status <= PCMK_LRM_OP_INVALID, return);
3698     CRM_CHECK(status >= PCMK_LRM_OP_PENDING, return);
3699 
3700     if (!strcmp(task, CRMD_ACTION_NOTIFY) ||
3701         !strcmp(task, CRMD_ACTION_METADATA)) {
3702         /* safe to ignore these */
3703         return;
3704     }
3705 
3706     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
3707         parent = uber_parent(rsc);
3708     }
3709 
3710     pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)",
3711                  task_key, task, task_id, status, rc, node->details->uname, role2text(rsc->role));
3712 
3713     if (node->details->unclean) {
3714         pe_rsc_trace(rsc, "Node %s (where %s is running) is unclean."
3715                      " Further action depends on the value of the stop's on-fail attribute",
3716                      node->details->uname, rsc->id);
3717     }
3718 
3719     /* It should be possible to call remap_monitor_rc() first then call
3720      * check_operation_expiry() only if rc != target_rc, because there should
3721      * never be a fail count without at least one unexpected result in the
3722      * resource history. That would be more efficient by avoiding having to call
3723      * check_operation_expiry() for expected results.
3724      *
3725      * However, we do have such configurations in the scheduler regression
3726      * tests, even if it shouldn't be possible with the current code. It's
3727      * probably a good idea anyway, but that would require updating the test
3728      * inputs to something currently possible.
3729      */
3730 
3731     if ((status != PCMK_LRM_OP_NOT_INSTALLED)
3732         && check_operation_expiry(rsc, node, rc, xml_op, data_set)) {
3733         expired = TRUE;
3734     }
3735 
3736     if (!strcmp(task, CRMD_ACTION_STATUS)) {
3737         rc = remap_monitor_rc(rc, xml_op, node, rsc, data_set);
3738     }
3739 
3740     if (expired && (rc != target_rc)) {
3741         const char *magic = crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC);
3742 
3743         if (interval_ms == 0) {
3744             crm_notice("Ignoring expired %s failure on %s "
3745                        CRM_XS " actual=%d expected=%d magic=%s",
3746                        task_key, node->details->uname, rc, target_rc, magic);
3747             goto done;
3748 
3749         } else if(node->details->online && node->details->unclean == FALSE) {
3750             /* Reschedule the recurring monitor. CancelXmlOp() won't work at
3751              * this stage, so as a hacky workaround, forcibly change the restart
3752              * digest so check_action_definition() does what we want later.
3753              *
3754              * @TODO We should skip this if there is a newer successful monitor.
3755              *       Also, this causes rescheduling only if the history entry
3756              *       has an op-digest (which the expire-non-blocked-failure
3757              *       scheduler regression test doesn't, but that may not be a
3758              *       realistic scenario in production).
3759              */
3760             crm_notice("Rescheduling %s after failure expired on %s "
3761                        CRM_XS " actual=%d expected=%d magic=%s",
3762                        task_key, node->details->uname, rc, target_rc, magic);
3763             crm_xml_add(xml_op, XML_LRM_ATTR_RESTART_DIGEST, "calculated-failure-timeout");
3764             goto done;
3765         }
3766     }
3767 
3768     /* If the executor reported an operation status of anything but done or
3769      * error, consider that final. But for done or error, we know better whether
3770      * it should be treated as a failure or not, because we know the expected
3771      * result.
3772      */
3773     if(status == PCMK_LRM_OP_DONE || status == PCMK_LRM_OP_ERROR) {
3774         status = determine_op_status(rsc, rc, target_rc, node, xml_op, on_fail, data_set);
3775         pe_rsc_trace(rsc, "Remapped %s status to %d", task_key, status);
3776     }
3777 
3778     switch (status) {
3779         case PCMK_LRM_OP_CANCELLED:
3780             // Should never happen
3781             pe_err("Resource history contains cancellation '%s' "
3782                    "(%s of %s on %s at %s)",
3783                    ID(xml_op), task, rsc->id, node->details->uname,
3784                    last_change_str(xml_op));
3785             break;
3786 
3787         case PCMK_LRM_OP_PENDING:
3788             if (!strcmp(task, CRMD_ACTION_START)) {
3789                 pe__set_resource_flags(rsc, pe_rsc_start_pending);
3790                 set_active(rsc);
3791 
3792             } else if (!strcmp(task, CRMD_ACTION_PROMOTE)) {
3793                 rsc->role = RSC_ROLE_PROMOTED;
3794 
3795             } else if (!strcmp(task, CRMD_ACTION_MIGRATE) && node->details->unclean) {
3796                 /* If a pending migrate_to action is out on an unclean node,
3797                  * we have to force the stop action on the target. */
3798                 const char *migrate_target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
3799                 pe_node_t *target = pe_find_node(data_set->nodes, migrate_target);
3800                 if (target) {
3801                     stop_action(rsc, target, FALSE);
3802                 }
3803             }
3804 
3805             if (rsc->pending_task == NULL) {
3806                 if ((interval_ms != 0) || strcmp(task, CRMD_ACTION_STATUS)) {
3807                     rsc->pending_task = strdup(task);
3808                     rsc->pending_node = node;
3809                 } else {
3810                     /* Pending probes are not printed, even if pending
3811                      * operations are requested. If someone ever requests that
3812                      * behavior, enable the below and the corresponding part of
3813                      * native.c:native_pending_task().
3814                      */
3815 #if 0
3816                     rsc->pending_task = strdup("probe");
3817                     rsc->pending_node = node;
3818 #endif
3819                 }
3820             }
3821             break;
3822 
3823         case PCMK_LRM_OP_DONE:
3824             pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s",
3825                          task, rsc->id, node->details->uname,
3826                          last_change_str(xml_op), ID(xml_op));
3827             update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set);
3828             break;
3829 
3830         case PCMK_LRM_OP_NOT_INSTALLED:
3831             failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
3832             if (failure_strategy == action_fail_ignore) {
3833                 crm_warn("Cannot ignore failed %s of %s on %s: "
3834                          "Resource agent doesn't exist "
3835                          CRM_XS " status=%d rc=%d id=%s",
3836                          task, rsc->id, node->details->uname, status, rc,
3837                          ID(xml_op));
3838                 /* This also gets the resource marked pe_rsc_failed later, so it is displayed as "FAILED" */
3839                 *on_fail = action_fail_migrate;
3840             }
3841             resource_location(parent, node, -INFINITY, "hard-error", data_set);
3842             unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
3843             break;
3844 
3845         case PCMK_LRM_OP_NOT_CONNECTED:
3846             if (pe__is_guest_or_remote_node(node)
3847                 && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
3848                 /* We should never get into a situation where a managed remote
3849                  * connection resource is considered OK but a resource action
3850                  * behind the connection gets a "not connected" status. But as a
3851                  * fail-safe in case a bug or unusual circumstances do lead to
3852                  * that, ensure the remote connection is considered failed.
3853                  */
3854                 pe__set_resource_flags(node->details->remote_rsc,
3855                                        pe_rsc_failed|pe_rsc_stop);
3856             }
3857 
3858             // fall through
3859 
3860         case PCMK_LRM_OP_ERROR:
3861         case PCMK_LRM_OP_ERROR_HARD:
3862         case PCMK_LRM_OP_ERROR_FATAL:
3863         case PCMK_LRM_OP_TIMEOUT:
3864         case PCMK_LRM_OP_NOTSUPPORTED:
3865         case PCMK_LRM_OP_INVALID:
3866 
3867             failure_strategy = get_action_on_fail(rsc, task_key, task, data_set);
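            /* A failed stop with on-fail=restart-container is treated like
             * on-fail=ignore for this resource itself; the recovery happens
             * via the container restart requested through *on_fail below.
             */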
3868             if ((failure_strategy == action_fail_ignore)
3869                 || (failure_strategy == action_fail_restart_container
3870                     && !strcmp(task, CRMD_ACTION_STOP))) {
3871 
3872                 crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s "
3873                          "succeeded " CRM_XS " rc=%d id=%s",
3874                          task, services_ocf_exitcode_str(rc),
3875                          (*exit_reason? ": " : ""), exit_reason, rsc->id,
3876                          node->details->uname, last_change_str(xml_op), rc,
3877                          ID(xml_op));
3878 
3879                 update_resource_state(rsc, node, xml_op, task, target_rc, *last_failure, on_fail, data_set);
3880                 crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
3881                 pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
3882 
3883                 record_failed_op(xml_op, node, rsc, data_set);
3884 
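                /* Escalate *on_fail to restart-container only if the current
                 * setting is no more severe than an ordinary recover.
                 */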
3885                 if ((failure_strategy == action_fail_restart_container)
3886                     && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
3887                     *on_fail = failure_strategy;
3888                 }
3889 
3890             } else {
3891                 unpack_rsc_op_failure(rsc, node, rc, xml_op, last_failure, on_fail, data_set);
3892 
3893                 if (status == PCMK_LRM_OP_ERROR_HARD) {
3894                     do_crm_log((rc != PCMK_OCF_NOT_INSTALLED)? LOG_ERR : LOG_NOTICE,
3895                                "Preventing %s from restarting on %s because "
3896                                "of hard failure (%s%s%s)" CRM_XS " rc=%d id=%s",
3897                                parent->id, node->details->uname,
3898                                services_ocf_exitcode_str(rc),
3899                                (*exit_reason? ": " : ""), exit_reason,
3900                                rc, ID(xml_op));
3901                     resource_location(parent, node, -INFINITY, "hard-error", data_set);
3902 
3903                 } else if (status == PCMK_LRM_OP_ERROR_FATAL) {
3904                     crm_err("Preventing %s from restarting anywhere because "
3905                             "of fatal failure (%s%s%s) " CRM_XS " rc=%d id=%s",
3906                             parent->id, services_ocf_exitcode_str(rc),
3907                             (*exit_reason? ": " : ""), exit_reason,
3908                             rc, ID(xml_op));
3909                     resource_location(parent, NULL, -INFINITY, "fatal-error", data_set);
3910                 }
3911             }
3912             break;
3913     }
3914 
3915   done:
3916     pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
3917                  rsc->id, task, role2text(rsc->role),
3918                  role2text(rsc->next_role));
3919 }
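
/* For illustration only: unpack_rsc_op() consumes resource history entries
 * shaped roughly like the following (all attribute values are hypothetical):
 *
 *   <lrm_rsc_op id="myrsc_monitor_10000" operation_key="myrsc_monitor_10000"
 *               operation="monitor" call-id="12" rc-code="0" op-status="0"
 *               interval="10000" last-rc-change="1613491234"
 *               exit-reason=""/>
 *
 * where rc-code is the agent's exit status and op-status is the executor's
 * view of the operation (the PCMK_LRM_OP_* values handled above).
 */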
3920 
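/*!
 * \internal
 * \brief Set a node's built-in and configured attributes
 *
 * \param[in] xml_obj    Node's <node> entry from the CIB
 * \param[in] node       Node to add attributes to
 * \param[in] overwrite  Whether configured values may overwrite existing ones
 * \param[in] data_set   Cluster working set
 */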
3921 static void
3922 add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite,
3923                pe_working_set_t *data_set)
3924 {
3925     const char *cluster_name = NULL;
3926 
3927     pe_rule_eval_data_t rule_data = {
3928         .node_hash = NULL,
3929         .role = RSC_ROLE_UNKNOWN,
3930         .now = data_set->now,
3931         .match_data = NULL,
3932         .rsc_data = NULL,
3933         .op_data = NULL
3934     };
3935 
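    /* Set the built-in CRM_ATTR_UNAME, CRM_ATTR_ID and CRM_ATTR_IS_DC
     * attributes before unpacking any configured attribute sets.
     */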
3936     g_hash_table_insert(node->details->attrs,
3937                         strdup(CRM_ATTR_UNAME), strdup(node->details->uname));
3938 
3939     g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
3940                         strdup(node->details->id));
3941     if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
3942         data_set->dc_node = node;
3943         node->details->is_dc = TRUE;
3944         g_hash_table_insert(node->details->attrs,
3945                             strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
3946     } else {
3947         g_hash_table_insert(node->details->attrs,
3948                             strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
3949     }
3950 
3951     cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
3952     if (cluster_name) {
3953         g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
3954                             strdup(cluster_name));
3955     }
3956 
3957     pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
3958                                node->details->attrs, NULL, overwrite, data_set);
3959 
3960     if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
3961         const char *site_name = pe_node_attribute_raw(node, "site-name");
3962 
3963         if (site_name) {
3964             g_hash_table_insert(node->details->attrs,
3965                                 strdup(CRM_ATTR_SITE_NAME),
3966                                 strdup(site_name));
3967 
3968         } else if (cluster_name) {
3969             /* Default to cluster-name if unset */
3970             g_hash_table_insert(node->details->attrs,
3971                                 strdup(CRM_ATTR_SITE_NAME),
3972                                 strdup(cluster_name));
3973         }
3974     }
3975 }
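
/* A minimal usage sketch (the trace message is hypothetical): once
 * add_node_attrs() has run, attributes can be read back with the existing
 * helper, e.g.
 *
 *   const char *site = pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME);
 *   if (site != NULL) {
 *       crm_trace("Node %s is in site %s", node->details->uname, site);
 *   }
 */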
3976 
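/*!
 * \internal
 * \brief Extract a resource's operation history from an <lrm_resource> entry
 *
 * \param[in] node           Name of the node the history is for
 * \param[in] rsc            ID of the resource the history is for
 * \param[in] rsc_entry      Resource's <lrm_resource> XML from the CIB status
 * \param[in] active_filter  If TRUE, return only currently active operations
 *
 * \return List of matching <lrm_rsc_op> entries, sorted by call ID
 */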
3977 static GList *
3978 extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gboolean active_filter)
3979 {
3980     int counter = -1;
3981     int stop_index = -1;
3982     int start_index = -1;
3983 
3984     xmlNode *rsc_op = NULL;
3985 
3986     GList *gIter = NULL;
3987     GList *op_list = NULL;
3988     GList *sorted_op_list = NULL;
3989 
3990     /* extract operations */
3993 
3994     for (rsc_op = pcmk__xe_first_child(rsc_entry);
3995          rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
3996 
3997         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP,
3998                          pcmk__str_none)) {
3999             crm_xml_add(rsc_op, "resource", rsc);
4000             crm_xml_add(rsc_op, XML_ATTR_UNAME, node);
4001             op_list = g_list_prepend(op_list, rsc_op);
4002         }
4003     }
4004 
4005     if (op_list == NULL) {
4006         /* if there are no operations, there is nothing to do */
4007         return NULL;
4008     }
4009 
4010     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
4011 
4012     /* without the active filter, return all operations sorted by call ID */
4013     if (active_filter == FALSE) {
4014         return sorted_op_list;
4015     }
4016 
4017     op_list = NULL;
4018 
4019     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
4020 
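    /* calculate_active_ops() marks where the most recent start and stop fall
     * in the sorted history; the loop below keeps nothing if the resource was
     * last stopped (start_index < stop_index), and otherwise drops entries
     * older than the last start.
     */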
4021     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
4022         xmlNode *rsc_op = (xmlNode *) gIter->data;
4023 
4024         counter++;
4025 
4026         if (start_index < stop_index) {
4027             crm_trace("Skipping %s: not active", ID(rsc_entry));
4028             break;
4029 
4030         } else if (counter < start_index) {
4031             crm_trace("Skipping %s: old", ID(rsc_op));
4032             continue;
4033         }
4034         op_list = g_list_append(op_list, rsc_op);
4035     }
4036 
4037     g_list_free(sorted_op_list);
4038     return op_list;
4039 }
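
/* A hedged example of using extract_operations() within this file (the
 * variable names are hypothetical): given one node's <lrm_resource> entry,
 * collect only the operations behind the resource's current active state.
 *
 *   GList *ops = extract_operations("node1", "myrsc", lrm_resource_xml, TRUE);
 *
 *   for (GList *iter = ops; iter != NULL; iter = iter->next) {
 *       crm_debug("Active op: %s", ID((xmlNode *) iter->data));
 *   }
 *   g_list_free(ops);
 */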
4040 
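/*!
 * \brief Find operation history entries in the CIB status section
 *
 * \param[in] rsc            If not NULL, include only this resource's history
 * \param[in] node           If not NULL, include only this node's history
 * \param[in] active_filter  If TRUE, include only currently active operations
 * \param[in] data_set       Cluster working set
 *
 * \return List of matching <lrm_rsc_op> entries (the caller must free the
 *         list, but not the XML it points to)
 */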
4041 GList *
4042 find_operations(const char *rsc, const char *node, gboolean active_filter,
4043                 pe_working_set_t * data_set)
4044 {
4045     GList *output = NULL;
4046     GList *intermediate = NULL;
4047 
4048     xmlNode *tmp = NULL;
4049     xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
4050 
4051     pe_node_t *this_node = NULL;
4052 
4053     xmlNode *node_state = NULL;
4054 
4055     for (node_state = pcmk__xe_first_child(status); node_state != NULL;
4056          node_state = pcmk__xe_next(node_state)) {
4057 
4058         if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
4059             const char *uname = crm_element_value(node_state, XML_ATTR_UNAME);
4060 
4061             if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
4062                 continue;
4063             }
4064 
4065             this_node = pe_find_node(data_set->nodes, uname);
4066             if (this_node == NULL) {
4067                 CRM_LOG_ASSERT(this_node != NULL);
4068                 continue;
4069 
4070             } else if (pe__is_guest_or_remote_node(this_node)) {
4071                 determine_remote_online_status(data_set, this_node);
4072 
4073             } else {
4074                 determine_online_status(node_state, this_node, data_set);
4075             }
4076 
4077             if (this_node->details->online
4078                 || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
4079                 /* Offline nodes run no resources, unless fencing is
4080                  * enabled, in which case resource start events must still
4081                  * be created so they can be ordered after the fencing.
4082                  */
4083                 xmlNode *lrm_rsc = NULL;
4084 
4085                 tmp = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
4086                 tmp = find_xml_node(tmp, XML_LRM_TAG_RESOURCES, FALSE);
4087 
4088                 for (lrm_rsc = pcmk__xe_first_child(tmp); lrm_rsc != NULL;
4089                      lrm_rsc = pcmk__xe_next(lrm_rsc)) {
4090 
4091                     if (pcmk__str_eq((const char *)lrm_rsc->name,
4092                                      XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
4093 
4094                         const char *rsc_id = crm_element_value(lrm_rsc, XML_ATTR_ID);
4095 
4096                         if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
4097                             continue;
4098                         }
4099 
4100                         intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
4101                         output = g_list_concat(output, intermediate);
4102                     }
4103                 }
4104             }
4105         }
4106     }
4107 
4108     return output;
4109 }
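
/* A minimal usage sketch (names are hypothetical, and a fully unpacked
 * working set is assumed): list the complete operation history for one
 * resource on one node.
 *
 *   GList *ops = find_operations("myrsc", "node1", FALSE, data_set);
 *
 *   for (GList *iter = ops; iter != NULL; iter = iter->next) {
 *       xmlNode *op = (xmlNode *) iter->data;
 *
 *       crm_info("%s on %s", ID(op), crm_element_value(op, XML_ATTR_UNAME));
 *   }
 *   g_list_free(ops);
 */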
