static bool
check_health(const char *value)
{
    return pcmk__strcase_any_of(value, "none", "custom", "only-green",
                                "progressive", "migrate-on-red", NULL);
}

static bool
check_stonith_action(const char *value)
{
    return pcmk__strcase_any_of(value, "reboot", "poweroff", "off", NULL);
}

static bool
check_placement_strategy(const char *value)
{
    return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
                                "balanced", NULL);
}
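/* Illustrative usage only (not part of the original file): each validator
 * checks a candidate value against its option's allowed list, and
 * pcmk__strcase_any_of() compares case-insensitively:
 *
 *     check_health("only-green");   // true
 *     check_health("Only-Green");   // true (case-insensitive)
 *     check_health("purple");       // false
 */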
static pcmk__cluster_option_t pe_opts[] = {
    /* name, old name, type, allowed values,
     * default value, validator,
     * short description,
     * long description
     */
    {
        "no-quorum-policy", NULL, "select",
        "stop, freeze, ignore, demote, suicide",
        "stop", pcmk__valid_quorum,
        "What to do when the cluster does not have quorum",
        NULL
    },
    {
        "symmetric-cluster", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "Whether resources can run on any node by default",
        NULL
    },
    {
        "maintenance-mode", NULL, "boolean", NULL,
        "false", pcmk__valid_boolean,
        "Whether the cluster should refrain from monitoring, starting, "
            "and stopping resources",
        NULL
    },
    {
        "start-failure-is-fatal", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "Whether a start failure should prevent a resource from being "
            "recovered on the same node",
        "When true, the cluster will immediately ban a resource from a node "
            "if it fails to start there. When false, the cluster will instead "
            "check the resource's fail count against its migration-threshold."
    },
    {
        "enable-startup-probes", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "Whether the cluster should check for active resources during start-up",
        NULL
    },
    {
        XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
        "false", pcmk__valid_boolean,
        "Whether to lock resources to a cleanly shut down node",
        "When true, resources active on a node when it is cleanly shut down "
            "are kept \"locked\" to that node (not allowed to run elsewhere) "
            "until they start again on that node after it rejoins (or for at "
            "most shutdown-lock-limit, if set). Stonith resources and "
            "Pacemaker Remote connections are never locked. Clone and bundle "
            "instances and the promoted role of promotable clones are "
            "currently never locked, though support could be added in a "
            "future release."
    },
    {
        XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
        "0", pcmk__valid_interval_spec,
        "Do not lock resources to a cleanly shut down node longer than this",
        "If shutdown-lock is true and this is set to a nonzero time duration, "
            "shutdown locks will expire after this much time has passed since "
            "the shutdown was initiated, even if the node has not rejoined."
    },
    {
        "stonith-enabled", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "*** Advanced Use Only *** "
            "Whether nodes may be fenced as part of recovery",
        "If false, unresponsive nodes are immediately assumed to be harmless, "
            "and resources that were active on them may be recovered "
            "elsewhere. This can result in a \"split-brain\" situation, "
            "potentially leading to data loss and/or service unavailability."
    },
    {
        "stonith-action", NULL, "select", "reboot, off, poweroff",
        "reboot", check_stonith_action,
        "Action to send to fence device when a node needs to be fenced "
            "(\"poweroff\" is a deprecated alias for \"off\")",
        NULL
    },
    {
        "stonith-timeout", NULL, "time", NULL,
        "60s", pcmk__valid_interval_spec,
        "*** Advanced Use Only *** Unused by Pacemaker",
        "This value is not used by Pacemaker, but is kept for backward "
            "compatibility, and certain legacy fence agents might use it."
    },
    {
        XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL,
        "false", pcmk__valid_boolean,
        "Whether watchdog integration is enabled",
        "This is set automatically by the cluster according to whether SBD "
            "is detected to be in use. User-configured values are ignored. "
            "The value `true` is meaningful if diskless SBD is used and "
            "`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
            "is required, watchdog-based self-fencing will be performed via "
            "SBD without requiring a fencing resource explicitly configured."
    },
    {
        "concurrent-fencing", NULL, "boolean", NULL,
        PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
        "Allow performing fencing operations in parallel",
        NULL
    },
    {
        "startup-fencing", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "*** Advanced Use Only *** Whether to fence unseen nodes at start-up",
        "Setting this to false may lead to a \"split-brain\" situation, "
            "potentially leading to data loss and/or service unavailability."
    },
    {
        XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
        "0", pcmk__valid_interval_spec,
        "Apply fencing delay targeting the lost nodes with the highest total "
            "resource priority",
        "Apply specified delay for the fencings that are targeting the lost "
            "nodes with the highest total resource priority in case we don't "
            "have the majority of the nodes in our cluster partition, so that "
            "the more significant nodes potentially win any fencing match, "
            "which is especially meaningful under split-brain of 2-node "
            "cluster. A promoted resource instance takes the base priority + 1 "
            "on calculation if the base priority is not 0. Any static/random "
            "delays that are introduced by `pcmk_delay_base/max` configured "
            "for the corresponding fencing resources will be added to this "
            "delay. This delay should be significantly greater than, safely "
            "twice, the maximum `pcmk_delay_base/max`. By default, priority "
            "fencing delay is disabled."
    },
    {
        "cluster-delay", NULL, "time", NULL,
        "60s", pcmk__valid_interval_spec,
        "Maximum time for node-to-node communication",
        "The node elected Designated Controller (DC) will consider an action "
            "failed if it does not get a response from the node executing the "
            "action within this time (after considering the action's own "
            "timeout). The \"correct\" value will depend on the speed and "
            "load of your network and cluster nodes."
    },
    {
        "batch-limit", NULL, "integer", NULL,
        "0", pcmk__valid_number,
        "Maximum number of jobs that the cluster may execute in parallel "
            "across all nodes",
        "The \"correct\" value will depend on the speed and load of your "
            "network and cluster nodes. If set to 0, the cluster will "
            "impose a dynamically calculated limit when any node has a "
            "high load."
    },
    {
        "migration-limit", NULL, "integer", NULL,
        "-1", pcmk__valid_number,
        "The number of live migration actions that the cluster is allowed "
            "to execute in parallel on a node (-1 means no limit)",
        NULL
    },
    {
        "stop-all-resources", NULL, "boolean", NULL,
        "false", pcmk__valid_boolean,
        "Whether the cluster should stop all active resources",
        NULL
    },
    {
        "stop-orphan-resources", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "Whether to stop resources that were removed from the configuration",
        NULL
    },
    {
        "stop-orphan-actions", NULL, "boolean", NULL,
        "true", pcmk__valid_boolean,
        "Whether to cancel recurring actions removed from the configuration",
        NULL
    },
    {
        "remove-after-stop", NULL, "boolean", NULL,
        "false", pcmk__valid_boolean,
        "*** Deprecated *** Whether to remove stopped resources from "
            "the executor",
        "Values other than default are poorly tested and potentially "
            "dangerous. This option will be removed in a future release."
    },
    {
        "pe-error-series-max", NULL, "integer", NULL,
        "-1", pcmk__valid_number,
        "The number of scheduler inputs resulting in errors to save",
        "Zero to disable, -1 to store unlimited."
    },
    {
        "pe-warn-series-max", NULL, "integer", NULL,
        "5000", pcmk__valid_number,
        "The number of scheduler inputs resulting in warnings to save",
        "Zero to disable, -1 to store unlimited."
    },
    {
        "pe-input-series-max", NULL, "integer", NULL,
        "4000", pcmk__valid_number,
        "The number of scheduler inputs without errors or warnings to save",
        "Zero to disable, -1 to store unlimited."
    },
    {
        "node-health-strategy", NULL, "select",
        "none, migrate-on-red, only-green, progressive, custom",
        "none", check_health,
        "How cluster should react to node health attributes",
        "Requires external entities to create node attributes (named with "
            "the prefix \"#health\") with values \"red\", \"yellow\" or "
            "\"green\"."
    },
    {
        "node-health-base", NULL, "integer", NULL,
        "0", pcmk__valid_number,
        "Base health score assigned to a node",
        "Only used when node-health-strategy is set to progressive."
    },
    {
        "node-health-green", NULL, "integer", NULL,
        "0", pcmk__valid_number,
        "The score to use for a node health attribute whose value is \"green\"",
        "Only used when node-health-strategy is set to custom or progressive."
    },
    {
        "node-health-yellow", NULL, "integer", NULL,
        "0", pcmk__valid_number,
        "The score to use for a node health attribute whose value is \"yellow\"",
        "Only used when node-health-strategy is set to custom or progressive."
    },
    {
        "node-health-red", NULL, "integer", NULL,
        "-INFINITY", pcmk__valid_number,
        "The score to use for a node health attribute whose value is \"red\"",
        "Only used when node-health-strategy is set to custom or progressive."
    },
    {
        "placement-strategy", NULL, "select",
        "default, utilization, minimal, balanced",
        "default", check_placement_strategy,
        "How the cluster should allocate resources to nodes",
        NULL
    },
};

void
pe_metadata(void)
{
    pcmk__print_option_metadata("pacemaker-schedulerd",
                                "Pacemaker scheduler options",
                                "Cluster options used by Pacemaker's scheduler"
                                    " (formerly called pengine)",
                                pe_opts, PCMK__NELEM(pe_opts));
}
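/* Illustrative usage only (not part of the original file), assuming pe_pref()
 * wraps pcmk__cluster_option() over the pe_opts table above:
 *
 *     const char *policy = pe_pref(options, "no-quorum-policy");
 *     // returns the configured value, or the default ("stop") if unset
 */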
const char *
fail2text(enum action_fail_response fail)
{
    const char *result = "<unknown>";

    switch (fail) {
        /* ... */

        case action_fail_restart_container:
            result = "restart-container";
            break;

        case action_fail_reset_remote:
            result = "reset-remote";
            break;

        /* ... */
    }
    return result;
}

enum action_tasks
text2task(const char *task)
{
    /* ... */

    crm_trace("Unsupported action: %s", task);
    return no_action;
}
const char *
task2text(enum action_tasks task)
{
    const char *result = "<unknown>";

    switch (task) {
        case no_action:
            result = "no_action";
            break;

        /* ... */
    }
    return result;
}
const char *
role2text(enum rsc_role_e role)
{
    switch (role) {
        /* ... */

        case RSC_ROLE_UNPROMOTED:
#ifdef PCMK__COMPAT_2_0
            return RSC_ROLE_UNPROMOTED_LEGACY_S;
#else
            return RSC_ROLE_UNPROMOTED_S;
#endif

        case RSC_ROLE_PROMOTED:
#ifdef PCMK__COMPAT_2_0
            return RSC_ROLE_PROMOTED_LEGACY_S;
#else
            return RSC_ROLE_PROMOTED_S;
#endif
    }
    CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
    CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
    return RSC_ROLE_UNKNOWN_S;
}

enum rsc_role_e
text2role(const char *role)
{
    /* ... */

    crm_err("Unknown role: %s", role);
    return RSC_ROLE_UNKNOWN;
}
int
pe__add_scores(int score1, int score2)
{
    int result = score1 + score2;

    // First handle the cases where one or both is infinite

    if (score1 <= -CRM_SCORE_INFINITY) {
        if (score2 <= -CRM_SCORE_INFINITY) {
            crm_trace("-INFINITY + -INFINITY = -INFINITY");
        } else if (score2 >= CRM_SCORE_INFINITY) {
            crm_trace("-INFINITY + +INFINITY = -INFINITY");
        } else {
            crm_trace("-INFINITY + %d = -INFINITY", score2);
        }
        return -CRM_SCORE_INFINITY;
    }

    if (score2 <= -CRM_SCORE_INFINITY) {
        if (score1 >= CRM_SCORE_INFINITY) {
            crm_trace("+INFINITY + -INFINITY = -INFINITY");
        } else {
            crm_trace("%d + -INFINITY = -INFINITY", score1);
        }
        return -CRM_SCORE_INFINITY;
    }

    if (score1 >= CRM_SCORE_INFINITY) {
        if (score2 >= CRM_SCORE_INFINITY) {
            crm_trace("+INFINITY + +INFINITY = +INFINITY");
        } else {
            crm_trace("+INFINITY + %d = +INFINITY", score2);
        }
        return CRM_SCORE_INFINITY;
    }

    if (score2 >= CRM_SCORE_INFINITY) {
        crm_trace("%d + +INFINITY = +INFINITY", score1);
        return CRM_SCORE_INFINITY;
    }

    // Bound a finite result to infinity

    if (result >= CRM_SCORE_INFINITY) {
        crm_trace("%d + %d = +INFINITY", score1, score2);
        return CRM_SCORE_INFINITY;
    }

    if (result <= -CRM_SCORE_INFINITY) {
        crm_trace("%d + %d = -INFINITY", score1, score2);
        return -CRM_SCORE_INFINITY;
    }

    crm_trace("%d + %d = %d", score1, score2, result);
    return result;
}
void
add_hash_param(GHashTable *hash, const char *name, const char *value)
{
    /* ... */

    if (name == NULL || value == NULL) {
        return;

    } else if (g_hash_table_lookup(hash, name) == NULL) {
        g_hash_table_insert(hash, strdup(name), strdup(value));
    }
}
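/* Illustrative usage only (not part of the original file): existing keys are
 * left untouched; a name is inserted only if not already present:
 *
 *     GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
 *                                                free, free);
 *     add_hash_param(params, "interval", "10s");
 *     add_hash_param(params, "interval", "30s");  // no-op: key already set
 */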
const char *
pe_node_attribute_calculated(const pe_node_t *node, const char *name,
                             const pe_resource_t *rsc)
{
    if (node == NULL) {
        return NULL;

    } else if (rsc == NULL) {
        return g_hash_table_lookup(node->details->attrs, name);
    }

    /* ... remote nodes whose container resource is mapped to the host
     * (XML_RSC_ATTR_TARGET) take their attributes from the host that the
     * container is running on ... */

    if (node->details->remote_rsc->container->running_on != NULL) {
        pe_node_t *host = node->details->remote_rsc->container->running_on->data;

        return g_hash_table_lookup(host->details->attrs, name);
    }

    pe_rsc_trace(rsc,
                 "%s: Not looking for %s on the container host: %s is inactive",
                 rsc->id, name, node->details->remote_rsc->container->id);
    return NULL;
}
 