pacemaker  2.1.2-ada5c3b36
Scalable High-Availability cluster resource manager
pcmk_sched_allocate.c
1 /*
2  * Copyright 2004-2021 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <sys/param.h>
13 
14 #include <crm/crm.h>
15 #include <crm/cib.h>
16 #include <crm/msg_xml.h>
17 #include <crm/common/xml.h>
 18 #include <crm/common/xml_internal.h>
 19 
20 #include <glib.h>
21 
22 #include <crm/pengine/status.h>
23 #include <pacemaker-internal.h>
24 #include "libpacemaker_private.h"
25 
26 CRM_TRACE_INIT_DATA(pacemaker);
27 
28 extern bool pcmk__is_daemon;
29 
30 void set_alloc_actions(pe_working_set_t * data_set);
31 extern void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
32 extern gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
33 
 34 resource_alloc_functions_t resource_class_alloc_functions[] = {
 35  {
49  },
50  {
64  },
65  {
79  },
80  {
94  }
95 };
96 
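/*
 * \internal
 * \brief Check whether a resource's definition attributes changed on a node
 *
 * Compares key definition attributes of the resource's current configuration
 * against its lrm_resource history entry. Any change triggers unfencing and,
 * if the resource is active here, forces a restart.
 *
 * \return TRUE if the resource's history entry on the node should be deleted
 */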
97 static gboolean
98 check_rsc_parameters(pe_resource_t * rsc, pe_node_t * node, xmlNode * rsc_entry,
99  gboolean active_here, pe_working_set_t * data_set)
100 {
101  int attr_lpc = 0;
102  gboolean force_restart = FALSE;
103  gboolean delete_resource = FALSE;
104  gboolean changed = FALSE;
105 
106  const char *value = NULL;
107  const char *old_value = NULL;
108 
 109  const char *attr_list[] = {
 110  XML_ATTR_TYPE,
 111  XML_AGENT_ATTR_CLASS,
 112  XML_AGENT_ATTR_PROVIDER
 113  };
114 
115  for (; attr_lpc < PCMK__NELEM(attr_list); attr_lpc++) {
116  value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
117  old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
118  if (value == old_value /* i.e. NULL */
119  || pcmk__str_eq(value, old_value, pcmk__str_none)) {
120  continue;
121  }
122 
123  changed = TRUE;
124  trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
125  if (active_here) {
126  force_restart = TRUE;
127  crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
128  rsc->id, node->details->uname, attr_list[attr_lpc],
129  crm_str(old_value), crm_str(value));
130  }
131  }
132  if (force_restart) {
133  /* make sure the restart happens */
134  stop_action(rsc, node, FALSE);
 135  pe__set_resource_flags(rsc, pe_rsc_start_pending);
 136  delete_resource = TRUE;
137 
138  } else if (changed) {
139  delete_resource = TRUE;
140  }
141  return delete_resource;
142 }
143 
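/*
 * \internal
 * \brief Schedule cancellation of a recurring operation recorded in the CIB
 *
 * Creates a cancel action for the given operation history entry on the node
 * where it is active, and orders it relative to the resource's stop action.
 */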
144 static void
145 CancelXmlOp(pe_resource_t * rsc, xmlNode * xml_op, pe_node_t * active_node,
146  const char *reason, pe_working_set_t * data_set)
147 {
148  guint interval_ms = 0;
149  pe_action_t *cancel = NULL;
150 
151  const char *task = NULL;
152  const char *call_id = NULL;
153 
154  CRM_CHECK(xml_op != NULL, return);
155  CRM_CHECK(active_node != NULL, return);
156 
157  task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
158  call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
159  crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
160 
161  crm_info("Action " PCMK__OP_FMT " on %s will be stopped: %s",
162  rsc->id, task, interval_ms,
163  active_node->details->uname, (reason? reason : "unknown"));
164 
165  cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
166  add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
167  pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel,
168  pe_order_optional, data_set);
169 }
170 
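/*
 * \internal
 * \brief Check whether a recorded action still matches the current configuration
 *
 * Compares the digests stored with a resource history entry against freshly
 * calculated ones, cancelling orphaned recurring operations and scheduling a
 * restart or reload when the relevant parameters have changed.
 *
 * \return TRUE if the action definition changed, otherwise FALSE
 */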
171 static gboolean
172 check_action_definition(pe_resource_t * rsc, pe_node_t * active_node, xmlNode * xml_op,
173  pe_working_set_t * data_set)
174 {
175  char *key = NULL;
176  guint interval_ms = 0;
177  const op_digest_cache_t *digest_data = NULL;
178  gboolean did_change = FALSE;
179 
180  const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
181  const char *digest_secure = NULL;
182 
183  CRM_CHECK(active_node != NULL, return FALSE);
184 
185  crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
186  if (interval_ms > 0) {
187  xmlNode *op_match = NULL;
188 
189  /* we need to reconstruct the key because of the way we used to construct resource IDs */
190  key = pcmk__op_key(rsc->id, task, interval_ms);
191 
192  pe_rsc_trace(rsc, "Checking parameters for %s", key);
193  op_match = find_rsc_op_entry(rsc, key);
194 
195  if ((op_match == NULL)
 196  && pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)) {
 197  CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
198  free(key);
199  return TRUE;
200 
201  } else if (op_match == NULL) {
202  pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
203  free(key);
204  return TRUE;
205  }
206  free(key);
207  key = NULL;
208  }
209 
210  crm_trace("Testing " PCMK__OP_FMT " on %s",
211  rsc->id, task, interval_ms, active_node->details->uname);
212  if ((interval_ms == 0) && pcmk__str_eq(task, RSC_STATUS, pcmk__str_casei)) {
213  /* Reload based on the start action not a probe */
214  task = RSC_START;
215 
216  } else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_MIGRATED, pcmk__str_casei)) {
217  /* Reload based on the start action not a migrate */
218  task = RSC_START;
219  } else if ((interval_ms == 0) && pcmk__str_eq(task, RSC_PROMOTE, pcmk__str_casei)) {
220  /* Reload based on the start action not a promote */
221  task = RSC_START;
222  }
223 
224  digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
225 
226  if (pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
227  digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
228  }
229 
230  if(digest_data->rc != RSC_DIGEST_MATCH
231  && digest_secure
232  && digest_data->digest_secure_calc
233  && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
234  if (!pcmk__is_daemon && data_set->priv != NULL) {
235  pcmk__output_t *out = data_set->priv;
236  out->info(out, "Only 'private' parameters to "
237  PCMK__OP_FMT " on %s changed: %s", rsc->id, task,
238  interval_ms, active_node->details->uname,
 239  crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
 240  }
241 
242  } else if (digest_data->rc == RSC_DIGEST_RESTART) {
243  /* Changes that force a restart */
244  pe_action_t *required = NULL;
245 
246  did_change = TRUE;
247  key = pcmk__op_key(rsc->id, task, interval_ms);
248  crm_log_xml_info(digest_data->params_restart, "params:restart");
249  required = custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set);
250  pe_action_set_reason(required, "resource definition change", true);
251  trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
252 
253  } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
254  // Changes that can potentially be handled by an agent reload
255  const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
256 
257  did_change = TRUE;
258  trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
259  crm_log_xml_info(digest_data->params_all, "params:reload");
260  key = pcmk__op_key(rsc->id, task, interval_ms);
261 
262  if (interval_ms > 0) {
263  pe_action_t *op = NULL;
264 
265 #if 0
266  /* Always reload/restart the entire resource */
267  ReloadRsc(rsc, active_node, data_set);
268 #else
269  /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
270  op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
 271  pe__set_action_flags(op, pe_action_reschedule);
 272 #endif
273 
274  } else if (digest_restart) {
275  pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
276 
277  /* Reload this resource */
278  ReloadRsc(rsc, active_node, data_set);
279  free(key);
280 
281  } else {
282  pe_action_t *required = NULL;
283  pe_rsc_trace(rsc, "Resource %s doesn't support agent reloads",
284  rsc->id);
285 
286  /* Re-send the start/demote/promote op
287  * Recurring ops will be detected independently
288  */
289  required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
290  data_set);
291  pe_action_set_reason(required, "resource definition change", true);
292  }
293  }
294 
295  return did_change;
296 }
297 
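/*
 * \internal
 * \brief Check a resource's action parameters, clearing its fail count if needed
 *
 * Depending on the requested check type, either re-checks the action definition
 * or compares the recorded digests directly, and clears the resource's fail
 * count on the node when a relevant change is detected.
 */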
304 static void
305 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
306  enum pe_check_parameters check, pe_working_set_t *data_set)
307 {
308  const char *reason = NULL;
309  op_digest_cache_t *digest_data = NULL;
310 
311  switch (check) {
312  case pe_check_active:
313  if (check_action_definition(rsc, node, rsc_op, data_set)
314  && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
315  data_set)) {
316 
317  reason = "action definition changed";
318  }
319  break;
320 
 321  case pe_check_last_failure:
 322  digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
323  switch (digest_data->rc) {
324  case RSC_DIGEST_UNKNOWN:
325  crm_trace("Resource %s history entry %s on %s has no digest to compare",
326  rsc->id, ID(rsc_op), node->details->id);
327  break;
328  case RSC_DIGEST_MATCH:
329  break;
330  default:
331  reason = "resource parameters have changed";
332  break;
333  }
334  break;
335  }
336 
337  if (reason) {
338  pe__clear_failcount(rsc, node, reason, data_set);
339  }
340 }
341 
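/*
 * \internal
 * \brief Check a node's resource history for changed or orphaned actions
 *
 * Deletes history for orphaned resources, re-checks resource and action
 * definitions against the recorded operations, and cancels recurring
 * operations for resources or nodes in maintenance mode.
 */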
342 static void
343 check_actions_for(xmlNode * rsc_entry, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
344 {
345  GList *gIter = NULL;
346  int offset = -1;
347  int stop_index = 0;
348  int start_index = 0;
349 
350  const char *task = NULL;
351 
352  xmlNode *rsc_op = NULL;
353  GList *op_list = NULL;
354  GList *sorted_op_list = NULL;
355 
356  CRM_CHECK(node != NULL, return);
357 
358  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
359  pe_resource_t *parent = uber_parent(rsc);
360  if(parent == NULL
361  || pe_rsc_is_clone(parent) == FALSE
362  || pcmk_is_set(parent->flags, pe_rsc_unique)) {
363  pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
364  DeleteRsc(rsc, node, FALSE, data_set);
365  } else {
366  pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
367  }
368  return;
369 
370  } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
371  if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
372  DeleteRsc(rsc, node, FALSE, data_set);
373  }
374  pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
375  rsc->id, node->details->uname);
376  return;
377  }
378 
379  pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
380 
381  if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
382  DeleteRsc(rsc, node, FALSE, data_set);
383  }
384 
385  for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
386  rsc_op = pcmk__xe_next(rsc_op)) {
387 
388  if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
389  op_list = g_list_prepend(op_list, rsc_op);
390  }
391  }
392 
393  sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
394  calculate_active_ops(sorted_op_list, &start_index, &stop_index);
395 
396  for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
397  xmlNode *rsc_op = (xmlNode *) gIter->data;
398  guint interval_ms = 0;
399 
400  offset++;
401 
402  if (start_index < stop_index) {
403  /* stopped */
404  continue;
405  } else if (offset < start_index) {
406  /* action occurred prior to a start */
407  continue;
408  }
409 
410  task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
411  crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
412 
413  if ((interval_ms > 0) &&
 414  (pcmk_is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
 415  // Maintenance mode cancels recurring operations
416  CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
417 
418  } else if ((interval_ms > 0) || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
419  RSC_PROMOTE, RSC_MIGRATED, NULL)) {
420  /* If a resource operation failed, and the operation's definition
421  * has changed, clear any fail count so they can be retried fresh.
422  */
423 
424  if (pe__bundle_needs_remote_name(rsc, data_set)) {
425  /* We haven't allocated resources to nodes yet, so if the
426  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
427  * based on the literal "#uname" value rather than the properly
428  * substituted value. That would mistakenly make the action
429  * definition appear to have been changed. Defer the check until
430  * later in this case.
431  */
432  pe__add_param_check(rsc_op, rsc, node, pe_check_active,
433  data_set);
434 
435  } else if (check_action_definition(rsc, node, rsc_op, data_set)
436  && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
437  data_set)) {
438  pe__clear_failcount(rsc, node, "action definition changed",
439  data_set);
440  }
441  }
442  }
443  g_list_free(sorted_op_list);
444 }
445 
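/*
 * \internal
 * \brief Build a list of resources (and their descendants) matching an ID
 *
 * Matches either exactly or by substring, optionally against clone names as
 * well, and prepends each match to the given result list.
 *
 * \return Updated result list
 */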
446 static GList *
447 find_rsc_list(GList *result, pe_resource_t * rsc, const char *id, gboolean renamed_clones,
448  gboolean partial, pe_working_set_t * data_set)
449 {
450  GList *gIter = NULL;
451  gboolean match = FALSE;
452 
453  if (id == NULL) {
454  return NULL;
455  }
456 
457  if (rsc == NULL) {
458  if (data_set == NULL) {
459  return NULL;
460  }
461  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
462  pe_resource_t *child = (pe_resource_t *) gIter->data;
463 
464  result = find_rsc_list(result, child, id, renamed_clones, partial,
465  NULL);
466  }
467  return result;
468  }
469 
470  if (partial) {
471  if (strstr(rsc->id, id)) {
472  match = TRUE;
473 
474  } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
475  match = TRUE;
476  }
477 
478  } else {
479  if (strcmp(rsc->id, id) == 0) {
480  match = TRUE;
481 
482  } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
483  match = TRUE;
484  }
485  }
486 
487  if (match) {
488  result = g_list_prepend(result, rsc);
489  }
490 
491  if (rsc->children) {
492  gIter = rsc->children;
493  for (; gIter != NULL; gIter = gIter->next) {
494  pe_resource_t *child = (pe_resource_t *) gIter->data;
495 
496  result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
497  }
498  }
499 
500  return result;
501 }
502 
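/*
 * \internal
 * \brief Check all nodes' resource history for actions that must be redone
 *
 * Walks the status section of the CIB and calls check_actions_for() for every
 * primitive with recorded operations on a node that can run resources (or is
 * in maintenance mode, so stale recurring monitors can still be cancelled).
 */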
503 static void
504 check_actions(pe_working_set_t * data_set)
505 {
506  const char *id = NULL;
507  pe_node_t *node = NULL;
508  xmlNode *lrm_rscs = NULL;
509  xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
510 
511  xmlNode *node_state = NULL;
512 
513  for (node_state = pcmk__xe_first_child(status); node_state != NULL;
514  node_state = pcmk__xe_next(node_state)) {
515 
516  if (pcmk__str_eq((const char *)node_state->name, XML_CIB_TAG_STATE,
517  pcmk__str_none)) {
518  id = crm_element_value(node_state, XML_ATTR_ID);
519  lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
520  lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
521 
522  node = pe_find_node_id(data_set->nodes, id);
523 
524  if (node == NULL) {
525  continue;
526 
527  /* Still need to check actions for a maintenance node to cancel existing monitor operations */
528  } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
529  crm_trace("Skipping param check for %s: can't run resources",
530  node->details->uname);
531  continue;
532  }
533 
534  crm_trace("Processing node %s", node->details->uname);
535  if (node->details->online
536  || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
537  xmlNode *rsc_entry = NULL;
538 
539  for (rsc_entry = pcmk__xe_first_child(lrm_rscs);
540  rsc_entry != NULL;
541  rsc_entry = pcmk__xe_next(rsc_entry)) {
542 
543  if (pcmk__str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, pcmk__str_none)) {
544 
545  if (xml_has_children(rsc_entry)) {
546  GList *gIter = NULL;
547  GList *result = NULL;
548  const char *rsc_id = ID(rsc_entry);
549 
550  CRM_CHECK(rsc_id != NULL, return);
551 
552  result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
553  for (gIter = result; gIter != NULL; gIter = gIter->next) {
554  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
555 
556  if (rsc->variant != pe_native) {
557  continue;
558  }
559  check_actions_for(rsc_entry, rsc, node, data_set);
560  }
561  g_list_free(result);
562  }
563  }
564  }
565  }
566  }
567  }
568 }
569 
570 static gboolean
571 failcount_clear_action_exists(pe_node_t * node, pe_resource_t * rsc)
572 {
573  gboolean rc = FALSE;
574  GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
575 
576  if (list) {
577  rc = TRUE;
578  }
579  g_list_free(list);
580  return rc;
581 }
582 
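/*
 * \internal
 * \brief Apply stickiness and check the migration threshold for one node
 *
 * Recurses through collective resources. For a managed primitive active on
 * exactly one node, prefer the current location by the stickiness score, then
 * ban the resource from the node if its migration threshold has been reached
 * (unless a fail count clear action is already scheduled there).
 */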
583 static void
584 common_apply_stickiness(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set)
585 {
586  if (rsc->children) {
587  GList *gIter = rsc->children;
588 
589  for (; gIter != NULL; gIter = gIter->next) {
590  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
591 
592  common_apply_stickiness(child_rsc, node, data_set);
593  }
594  return;
595  }
596 
597  if (pcmk_is_set(rsc->flags, pe_rsc_managed)
598  && rsc->stickiness != 0 && pcmk__list_of_1(rsc->running_on)) {
599  pe_node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
600  pe_node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
601 
602  if (current == NULL) {
603 
604  } else if ((match != NULL)
 605  || pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
 606  pe_resource_t *sticky_rsc = rsc;
607 
608  resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
609  pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
610  " (node=%s, weight=%d)", sticky_rsc->id,
611  node->details->uname, rsc->stickiness);
612  } else {
613  GHashTableIter iter;
614  pe_node_t *nIter = NULL;
615 
616  pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
617  " and node %s is not explicitly allowed", rsc->id, node->details->uname);
618  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
619  while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
620  crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
621  }
622  }
623  }
624 
625  /* Check the migration threshold only if a failcount clear action
626  * has not already been placed for this resource on the node.
627  * There is no sense in potentially forcing the resource from this
628  * node if the failcount is being reset anyway.
629  *
630  * @TODO A clear_failcount operation can be scheduled in stage4() via
631  * check_actions_for(), or in stage5() via check_params(). This runs in
632  * stage2(), so it cannot detect those, meaning we might check the migration
633  * threshold when we shouldn't -- worst case, we stop or move the resource,
634  * then move it back next transition.
635  */
636  if (failcount_clear_action_exists(node, rsc) == FALSE) {
637  pe_resource_t *failed = NULL;
638 
639  if (pcmk__threshold_reached(rsc, node, data_set, &failed)) {
640  resource_location(failed, node, -INFINITY, "__fail_limit__",
641  data_set);
642  }
643  }
644 }
645 
646 void
 647 complex_set_cmds(pe_resource_t * rsc)
 648 {
649  GList *gIter = rsc->children;
650 
 651  rsc->cmds = &resource_class_alloc_functions[rsc->variant];
 652 
653  for (; gIter != NULL; gIter = gIter->next) {
654  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
655 
656  complex_set_cmds(child_rsc);
657  }
658 }
659 
660 void
 661 set_alloc_actions(pe_working_set_t * data_set)
 662 {
663 
664  GList *gIter = data_set->resources;
665 
666  for (; gIter != NULL; gIter = gIter->next) {
667  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
668 
669  complex_set_cmds(rsc);
670  }
671 }
672 
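/*
 * \internal
 * \brief Add a node attribute's health score to a running total
 *
 * Intended for use with g_hash_table_foreach(): any node attribute whose name
 * starts with "#health" is converted to a score and added to the total passed
 * via user_data.
 */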
673 static void
674 calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
675 {
676  const char *key = (const char *)gKey;
677  const char *value = (const char *)gValue;
678  int *system_health = (int *)user_data;
679 
680  if (!gKey || !gValue || !user_data) {
681  return;
682  }
683 
684  if (pcmk__starts_with(key, "#health")) {
685  int score;
686 
687  /* Convert the value into an integer */
688  score = char2score(value);
689 
690  /* Add it to the running total */
691  *system_health = pe__add_scores(score, *system_health);
692  }
693 }
694 
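/*
 * \internal
 * \brief Apply the configured node health strategy
 *
 * Translates node health attributes into scores according to
 * node-health-strategy, then creates a location constraint for every resource
 * on each node with a nonzero combined health score.
 *
 * \return FALSE if the strategy is unrecognized, otherwise TRUE
 */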
695 static gboolean
696 apply_system_health(pe_working_set_t * data_set)
697 {
698  GList *gIter = NULL;
699  const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
700  int base_health = 0;
701 
702  if (pcmk__str_eq(health_strategy, "none", pcmk__str_null_matches | pcmk__str_casei)) {
703  /* Prevent any accidental health -> score translation */
704  pcmk__score_red = 0;
705  pcmk__score_yellow = 0;
706  pcmk__score_green = 0;
707  return TRUE;
708 
709  } else if (pcmk__str_eq(health_strategy, "migrate-on-red", pcmk__str_casei)) {
710 
711  /* Resources on nodes which have health values of red are
712  * weighted away from that node.
713  */
 714  pcmk__score_red = -INFINITY;
 715  pcmk__score_yellow = 0;
716  pcmk__score_green = 0;
717 
718  } else if (pcmk__str_eq(health_strategy, "only-green", pcmk__str_casei)) {
719 
720  /* Resources on nodes which have health values of red or yellow
721  * are forced away from that node.
722  */
 723  pcmk__score_red = -INFINITY;
 724  pcmk__score_yellow = -INFINITY;
 725  pcmk__score_green = 0;
726 
727  } else if (pcmk__str_eq(health_strategy, "progressive", pcmk__str_casei)) {
728  /* Same as the above, but use the r/y/g scores provided by the user
729  * Defaults are provided by the pe_prefs table
730  * Also, custom health "base score" can be used
731  */
732  base_health = char2score(pe_pref(data_set->config_hash,
733  "node-health-base"));
734 
735  } else if (pcmk__str_eq(health_strategy, "custom", pcmk__str_casei)) {
736 
 737  /* Requires the admin to configure the rsc_location constraints for
738  * processing the stored health scores
739  */
740  /* TODO: Check for the existence of appropriate node health constraints */
741  return TRUE;
742 
743  } else {
744  crm_err("Unknown node health strategy: %s", health_strategy);
745  return FALSE;
746  }
747 
748  crm_info("Applying automated node health strategy: %s", health_strategy);
749 
750  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
751  int system_health = base_health;
752  pe_node_t *node = (pe_node_t *) gIter->data;
753 
754  /* Search through the node hash table for system health entries. */
755  g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
756 
 757  crm_info(" Node %s has a combined system health of %d",
758  node->details->uname, system_health);
759 
760  /* If the health is non-zero, then create a new location constraint so
761  * that the weight will be added later on.
762  */
763  if (system_health != 0) {
764 
765  GList *gIter2 = data_set->resources;
766 
767  for (; gIter2 != NULL; gIter2 = gIter2->next) {
768  pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
769 
770  pcmk__new_location(health_strategy, rsc, system_health, NULL,
771  node, data_set);
772  }
773  }
774  }
775 
776  return TRUE;
777 }
778 
779 gboolean
 780 stage0(pe_working_set_t * data_set)
 781 {
782  if (data_set->input == NULL) {
783  return FALSE;
784  }
785 
786  if (!pcmk_is_set(data_set->flags, pe_flag_have_status)) {
787  crm_trace("Calculating status");
788  cluster_status(data_set);
789  }
790 
791  set_alloc_actions(data_set);
792  apply_system_health(data_set);
793  pcmk__unpack_constraints(data_set);
794 
795  return TRUE;
796 }
797 
798 /*
799  * Check nodes for resources started outside of the LRM
800  */
801 gboolean
 802 probe_resources(pe_working_set_t * data_set)
 803 {
804  pe_action_t *probe_node_complete = NULL;
805 
806  for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
807  pe_node_t *node = (pe_node_t *) gIter->data;
808  const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
809 
810  if (node->details->online == FALSE) {
811 
812  if (pcmk__is_failed_remote_node(node)) {
813  pe_fence_node(data_set, node, "the connection is unrecoverable", FALSE);
814  }
815  continue;
816 
817  } else if (node->details->unclean) {
818  continue;
819 
820  } else if (node->details->rsc_discovery_enabled == FALSE) {
821  /* resource discovery is disabled for this node */
822  continue;
823  }
824 
825  if (probed != NULL && crm_is_true(probed) == FALSE) {
826  pe_action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
827  CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
828 
 829  add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 830  continue;
831  }
832 
833  for (GList *gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
834  pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
835 
836  rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
837  }
838  }
839  return TRUE;
840 }
841 
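/*
 * \internal
 * \brief Apply exclusive resource discovery to a resource on a node
 *
 * When a resource (or its parent) uses exclusive discovery, ban it from any
 * node whose discovery mode for the resource is not exclusive, by setting the
 * node's weight to -INFINITY in the resource's allowed nodes table.
 */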
842 static void
843 rsc_discover_filter(pe_resource_t *rsc, pe_node_t *node)
844 {
845  pe_resource_t *top = uber_parent(rsc);
846  pe_node_t *match;
847 
848  if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
849  return;
850  }
851 
852  g_list_foreach(rsc->children, (GFunc) rsc_discover_filter, node);
853 
854  match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
855  if (match && match->rsc_discover_mode != pe_discover_exclusive) {
856  match->weight = -INFINITY;
857  }
858 }
859 
860 static time_t
861 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
862 {
863  const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
864  time_t result = 0;
865 
866  if (shutdown) {
867  long long result_ll;
868 
869  if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
870  result = (time_t) result_ll;
871  }
872  }
873  return result? result : get_effective_time(data_set);
874 }
875 
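/*
 * \internal
 * \brief Lock a resource to the node it is active on, if that node is shutting down
 *
 * Implements shutdown-lock: primitives (and members of uncloned groups) active
 * on a single, cleanly shutting-down node are locked to that node and banned
 * from all others until the lock expires or is cleared. Fence devices and
 * remote connections are never locked.
 */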
876 static void
877 apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
878 {
879  const char *class;
880 
881  // Only primitives and (uncloned) groups may be locked
882  if (rsc->variant == pe_group) {
883  g_list_foreach(rsc->children, (GFunc) apply_shutdown_lock, data_set);
884  } else if (rsc->variant != pe_native) {
885  return;
886  }
887 
888  // Fence devices and remote connections can't be locked
889  class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
890  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
891  || pe__resource_is_remote_conn(rsc, data_set)) {
892  return;
893  }
894 
895  if (rsc->lock_node != NULL) {
896  // The lock was obtained from resource history
897 
898  if (rsc->running_on != NULL) {
899  /* The resource was started elsewhere even though it is now
900  * considered locked. This shouldn't be possible, but as a
901  * failsafe, we don't want to disturb the resource now.
902  */
903  pe_rsc_info(rsc,
904  "Cancelling shutdown lock because %s is already active",
905  rsc->id);
906  pe__clear_resource_history(rsc, rsc->lock_node, data_set);
907  rsc->lock_node = NULL;
908  rsc->lock_time = 0;
909  }
910 
911  // Only a resource active on exactly one node can be locked
912  } else if (pcmk__list_of_1(rsc->running_on)) {
913  pe_node_t *node = rsc->running_on->data;
914 
915  if (node->details->shutdown) {
916  if (node->details->unclean) {
917  pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
918  rsc->id, node->details->uname);
919  } else {
920  rsc->lock_node = node;
921  rsc->lock_time = shutdown_time(node, data_set);
922  }
923  }
924  }
925 
926  if (rsc->lock_node == NULL) {
927  // No lock needed
928  return;
929  }
930 
931  if (data_set->shutdown_lock > 0) {
932  time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
933 
934  pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
935  rsc->id, rsc->lock_node->details->uname,
936  (long long) lock_expiration);
937  pe__update_recheck_time(++lock_expiration, data_set);
938  } else {
939  pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
940  rsc->id, rsc->lock_node->details->uname);
941  }
942 
943  // If resource is locked to one node, ban it from all other nodes
944  for (GList *item = data_set->nodes; item != NULL; item = item->next) {
945  pe_node_t *node = item->data;
946 
947  if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
 948  resource_location(rsc, node, -CRM_SCORE_INFINITY,
 949  XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
 950  }
951  }
952 }
953 
954 /*
955  * \internal
956  * \brief Stage 2 of cluster status: apply node-specific criteria
957  *
958  * Count known nodes, and apply location constraints, stickiness, and exclusive
959  * resource discovery.
960  */
961 gboolean
 962 stage2(pe_working_set_t * data_set)
 963 {
964  GList *gIter = NULL;
965 
966  if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
967  g_list_foreach(data_set->resources, (GFunc) apply_shutdown_lock, data_set);
968  }
969 
970  if (!pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
971  // @COMPAT API backward compatibility
972  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
973  pe_node_t *node = (pe_node_t *) gIter->data;
974 
975  if (node && (node->weight >= 0) && node->details->online
976  && (node->details->type != node_ping)) {
977  data_set->max_valid_nodes++;
978  }
979  }
980  }
981 
982  pcmk__apply_locations(data_set);
983 
984  gIter = data_set->nodes;
985  for (; gIter != NULL; gIter = gIter->next) {
986  GList *gIter2 = NULL;
987  pe_node_t *node = (pe_node_t *) gIter->data;
988 
989  gIter2 = data_set->resources;
990  for (; gIter2 != NULL; gIter2 = gIter2->next) {
991  pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
992 
993  common_apply_stickiness(rsc, node, data_set);
994  rsc_discover_filter(rsc, node);
995  }
996  }
997 
998  return TRUE;
999 }
1000 
1001 /*
1002  * Check for orphaned or redefined actions
1003  */
1004 gboolean
 1005 stage4(pe_working_set_t * data_set)
 1006 {
1007  check_actions(data_set);
1008  return TRUE;
1009 }
1010 
1011 static void *
1012 convert_const_pointer(const void *ptr)
1013 {
1014  /* Worst function ever */
1015  return (void *)ptr;
1016 }
1017 
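/*
 * \internal
 * \brief Compare two resources for processing (allocation) order
 *
 * Sort comparator: orders resources by priority, then by the weight of their
 * current node after merging colocation weights, then by per-node weights
 * across the supplied node list.
 *
 * \return -1 if resource1 should be processed first, 1 if resource2 should,
 *         otherwise 0
 */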
1018 static gint
1019 sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
1020 {
1021  int rc = 0;
1022  int r1_weight = -INFINITY;
1023  int r2_weight = -INFINITY;
1024 
1025  const char *reason = "existence";
1026 
1027  GList *nodes = (GList *) data;
1028  const pe_resource_t *resource1 = a;
1029  const pe_resource_t *resource2 = b;
1030 
1031  pe_node_t *r1_node = NULL;
1032  pe_node_t *r2_node = NULL;
1033  GList *gIter = NULL;
1034  GHashTable *r1_nodes = NULL;
1035  GHashTable *r2_nodes = NULL;
1036 
1037  reason = "priority";
1038  r1_weight = resource1->priority;
1039  r2_weight = resource2->priority;
1040 
1041  if (r1_weight > r2_weight) {
1042  rc = -1;
1043  goto done;
1044  }
1045 
1046  if (r1_weight < r2_weight) {
1047  rc = 1;
1048  goto done;
1049  }
1050 
1051  reason = "no node list";
1052  if (nodes == NULL) {
1053  goto done;
1054  }
1055 
1056  r1_nodes = pcmk__native_merge_weights(convert_const_pointer(resource1),
1057  resource1->id, NULL, NULL, 1,
 1058  pe_weights_forward | pe_weights_init);
 1059  pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
1060  resource1->cluster);
1061 
1062  r2_nodes = pcmk__native_merge_weights(convert_const_pointer(resource2),
1063  resource2->id, NULL, NULL, 1,
 1064  pe_weights_forward | pe_weights_init);
 1065  pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
1066  resource2->cluster);
1067 
1068  /* Current location score */
1069  reason = "current location";
1070  r1_weight = -INFINITY;
1071  r2_weight = -INFINITY;
1072 
1073  if (resource1->running_on) {
1074  r1_node = pe__current_node(resource1);
1075  r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
1076  if (r1_node != NULL) {
1077  r1_weight = r1_node->weight;
1078  }
1079  }
1080  if (resource2->running_on) {
1081  r2_node = pe__current_node(resource2);
1082  r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
1083  if (r2_node != NULL) {
1084  r2_weight = r2_node->weight;
1085  }
1086  }
1087 
1088  if (r1_weight > r2_weight) {
1089  rc = -1;
1090  goto done;
1091  }
1092 
1093  if (r1_weight < r2_weight) {
1094  rc = 1;
1095  goto done;
1096  }
1097 
1098  reason = "score";
1099  for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
1100  pe_node_t *node = (pe_node_t *) gIter->data;
1101 
1102  r1_node = NULL;
1103  r2_node = NULL;
1104 
1105  r1_weight = -INFINITY;
1106  if (r1_nodes) {
1107  r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
1108  }
1109  if (r1_node) {
1110  r1_weight = r1_node->weight;
1111  }
1112 
1113  r2_weight = -INFINITY;
1114  if (r2_nodes) {
1115  r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
1116  }
1117  if (r2_node) {
1118  r2_weight = r2_node->weight;
1119  }
1120 
1121  if (r1_weight > r2_weight) {
1122  rc = -1;
1123  goto done;
1124  }
1125 
1126  if (r1_weight < r2_weight) {
1127  rc = 1;
1128  goto done;
1129  }
1130  }
1131 
1132  done:
1133  crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
1134  resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
1135  rc < 0 ? '>' : rc > 0 ? '<' : '=',
1136  resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
1137 
1138  if (r1_nodes) {
1139  g_hash_table_destroy(r1_nodes);
1140  }
1141  if (r2_nodes) {
1142  g_hash_table_destroy(r2_nodes);
1143  }
1144 
1145  return rc;
1146 }
1147 
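/*
 * \internal
 * \brief Allocate all resources to nodes
 *
 * Allocates remote connection resources first (preferring a partial migration
 * target if one exists), then all remaining resources.
 */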
1148 static void
1149 allocate_resources(pe_working_set_t * data_set)
1150 {
1151  GList *gIter = NULL;
1152 
1153  if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
1154  /* Allocate remote connection resources first (which will also allocate
1155  * any colocation dependencies). If the connection is migrating, always
1156  * prefer the partial migration target.
1157  */
1158  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1159  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1160  if (rsc->is_remote_node == FALSE) {
1161  continue;
1162  }
1163  pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
1164  rsc->id);
1165  rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
1166  }
1167  }
1168 
1169  /* now do the rest of the resources */
1170  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1171  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1172  if (rsc->is_remote_node == TRUE) {
1173  continue;
1174  }
1175  pe_rsc_trace(rsc, "Allocating %s resource '%s'",
1176  crm_element_name(rsc->xml), rsc->id);
1177  rsc->cmds->allocate(rsc, NULL, data_set);
1178  }
1179 }
1180 
1181 // Clear fail counts for orphaned rsc on all online nodes
1182 static void
1183 cleanup_orphans(pe_resource_t * rsc, pe_working_set_t * data_set)
1184 {
1185  GList *gIter = NULL;
1186 
1187  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
1188  pe_node_t *node = (pe_node_t *) gIter->data;
1189 
1190  if (node->details->online
1191  && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
1192  data_set)) {
1193 
1194  pe_action_t *clear_op = NULL;
1195 
1196  clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
1197  data_set);
1198 
1199  /* We can't use order_action_then_stop() here because its
1200  * pe_order_preserve breaks things
1201  */
1202  pcmk__new_ordering(clear_op->rsc, NULL, clear_op,
1203  rsc, stop_key(rsc), NULL,
1204  pe_order_optional, data_set);
1205  }
1206  }
1207 }
1208 
1209 gboolean
 1210 stage5(pe_working_set_t * data_set)
 1211 {
1212  pcmk__output_t *out = data_set->priv;
1213  GList *gIter = NULL;
1214 
1215  if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
1216  GList *nodes = g_list_copy(data_set->nodes);
1217 
1218  nodes = sort_nodes_by_weight(nodes, NULL, data_set);
1219  data_set->resources =
1220  g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
1221 
1222  g_list_free(nodes);
1223  }
1224 
1225  gIter = data_set->nodes;
1226  for (; gIter != NULL; gIter = gIter->next) {
1227  pe_node_t *node = (pe_node_t *) gIter->data;
1228 
1229  if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
1230  out->message(out, "node-capacity", node, "Original");
1231  }
1232  }
1233 
1234  crm_trace("Allocating services");
1235  /* Take (next) highest resource, assign it and create its actions */
1236 
1237  allocate_resources(data_set);
1238 
1239  gIter = data_set->nodes;
1240  for (; gIter != NULL; gIter = gIter->next) {
1241  pe_node_t *node = (pe_node_t *) gIter->data;
1242 
1243  if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
1244  out->message(out, "node-capacity", node, "Remaining");
1245  }
1246  }
1247 
1248  // Process deferred action checks
1249  pe__foreach_param_check(data_set, check_params);
1250  pe__free_param_checks(data_set);
1251 
1252  if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
1253  crm_trace("Calculating needed probes");
1254  /* This code probably needs optimization
1255  * ptest -x with 100 nodes, 100 clones and clone-max=100:
1256 
1257  With probes:
1258 
1259  ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
1260  ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
1261  ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
1262  ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
1263  ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
1264  ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
1265  ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
1266  ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
1267  ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
1268  ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
1269  ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
1270  36s
1271  ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
1272 
1273  Without probes:
1274 
1275  ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
1276  ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
1277  ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
1278  ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
1279  ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
1280  ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
1281  ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
1282  ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
1283  ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
1284  ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
1285  ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
1286  */
1287 
1288  probe_resources(data_set);
1289  }
1290 
1291  crm_trace("Handle orphans");
1292  if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
1293  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1294  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1295 
1296  /* There's no need to recurse into rsc->children because those
1297  * should just be unallocated clone instances.
1298  */
1299  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
1300  cleanup_orphans(rsc, data_set);
1301  }
1302  }
1303  }
1304 
1305  crm_trace("Creating actions");
1306 
1307  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1308  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1309 
1310  rsc->cmds->create_actions(rsc, data_set);
1311  }
1312 
1313  crm_trace("Creating done");
1314  return TRUE;
1315 }
1316 
1317 static gboolean
1318 is_managed(const pe_resource_t * rsc)
1319 {
1320  GList *gIter = rsc->children;
1321 
1322  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1323  return TRUE;
1324  }
1325 
1326  for (; gIter != NULL; gIter = gIter->next) {
1327  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1328 
1329  if (is_managed(child_rsc)) {
1330  return TRUE;
1331  }
1332  }
1333 
1334  return FALSE;
1335 }
1336 
1337 static gboolean
1338 any_managed_resources(pe_working_set_t * data_set)
1339 {
1340 
1341  GList *gIter = data_set->resources;
1342 
1343  for (; gIter != NULL; gIter = gIter->next) {
1344  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1345 
1346  if (is_managed(rsc)) {
1347  return TRUE;
1348  }
1349  }
1350  return FALSE;
1351 }
1352 
1353 /*
1354  * Create dependencies for stonith and shutdown operations
1355  */
1356 gboolean
 1357 stage6(pe_working_set_t * data_set)
 1358 {
1359  pe_action_t *dc_down = NULL;
1360  pe_action_t *stonith_op = NULL;
1361  gboolean integrity_lost = FALSE;
1362  gboolean need_stonith = TRUE;
1363  GList *gIter;
1364  GList *stonith_ops = NULL;
1365  GList *shutdown_ops = NULL;
1366 
1367  /* Remote ordering constraints need to happen prior to calculating fencing
1368  * because it is one more place we can mark nodes as needing fencing.
1369  */
 1370  pcmk__order_remote_connection_actions(data_set);
 1371 
1372  crm_trace("Processing fencing and shutdown cases");
1373  if (any_managed_resources(data_set) == FALSE) {
1374  crm_notice("Delaying fencing operations until there are resources to manage");
1375  need_stonith = FALSE;
1376  }
1377 
1378  /* Check each node for stonith/shutdown */
1379  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
1380  pe_node_t *node = (pe_node_t *) gIter->data;
1381 
1382  /* Guest nodes are "fenced" by recovering their container resource,
1383  * so handle them separately.
1384  */
1385  if (pe__is_guest_node(node)) {
1386  if (node->details->remote_requires_reset && need_stonith
1387  && pe_can_fence(data_set, node)) {
1388  pcmk__fence_guest(node, data_set);
1389  }
1390  continue;
1391  }
1392 
1393  stonith_op = NULL;
1394 
1395  if (node->details->unclean
1396  && need_stonith && pe_can_fence(data_set, node)) {
1397 
1398  stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", FALSE, data_set);
1399  pe_warn("Scheduling Node %s for STONITH", node->details->uname);
1400 
1401  pcmk__order_vs_fence(stonith_op, data_set);
1402 
1403  if (node->details->is_dc) {
1404  // Remember if the DC is being fenced
1405  dc_down = stonith_op;
1406 
1407  } else {
1408 
 1409  if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
 1410  && (stonith_ops != NULL)) {
1411  /* Concurrent fencing is disabled, so order each non-DC
1412  * fencing in a chain. If there is any DC fencing or
1413  * shutdown, it will be ordered after the last action in the
1414  * chain later.
1415  */
1416  order_actions((pe_action_t *) stonith_ops->data,
1417  stonith_op, pe_order_optional);
1418  }
1419 
1420  // Remember all non-DC fencing actions in a separate list
1421  stonith_ops = g_list_prepend(stonith_ops, stonith_op);
1422  }
1423 
1424  } else if (node->details->online && node->details->shutdown &&
1425  /* TODO define what a shutdown op means for a remote node.
1426  * For now we do not send shutdown operations for remote nodes, but
1427  * if we can come up with a good use for this in the future, we will. */
1428  pe__is_guest_or_remote_node(node) == FALSE) {
1429 
1430  pe_action_t *down_op = sched_shutdown_op(node, data_set);
1431 
1432  if (node->details->is_dc) {
1433  // Remember if the DC is being shut down
1434  dc_down = down_op;
1435  } else {
1436  // Remember non-DC shutdowns for later ordering
1437  shutdown_ops = g_list_prepend(shutdown_ops, down_op);
1438  }
1439  }
1440 
1441  if (node->details->unclean && stonith_op == NULL) {
1442  integrity_lost = TRUE;
1443  pe_warn("Node %s is unclean!", node->details->uname);
1444  }
1445  }
1446 
1447  if (integrity_lost) {
1448  if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
1449  pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
1450  pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
1451 
1452  } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
1453  crm_notice("Cannot fence unclean nodes until quorum is"
1454  " attained (or no-quorum-policy is set to ignore)");
1455  }
1456  }
1457 
1458  if (dc_down != NULL) {
1459  /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
1460  * DC elections. However, we don't want to order non-DC shutdowns before
1461  * a DC *fencing*, because even though we don't want a node that's
1462  * shutting down to become DC, the DC fencing could be ordered before a
1463  * clone stop that's also ordered before the shutdowns, thus leading to
1464  * a graph loop.
1465  */
1466  if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
1467  for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
1468  pe_action_t *node_stop = (pe_action_t *) gIter->data;
1469 
1470  crm_debug("Ordering shutdown on %s before %s on DC %s",
1471  node_stop->node->details->uname,
1472  dc_down->task, dc_down->node->details->uname);
1473 
1474  order_actions(node_stop, dc_down, pe_order_optional);
1475  }
1476  }
1477 
1478  // Order any non-DC fencing before any DC fencing or shutdown
1479 
1480  if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
1481  /* With concurrent fencing, order each non-DC fencing action
1482  * separately before any DC fencing or shutdown.
1483  */
1484  for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
1485  order_actions((pe_action_t *) gIter->data, dc_down,
 1486  pe_order_optional);
 1487  }
1488  } else if (stonith_ops) {
1489  /* Without concurrent fencing, the non-DC fencing actions are
1490  * already ordered relative to each other, so we just need to order
1491  * the DC fencing after the last action in the chain (which is the
1492  * first item in the list).
1493  */
1494  order_actions((pe_action_t *) stonith_ops->data, dc_down,
 1495  pe_order_optional);
 1496  }
1497  }
1498  g_list_free(stonith_ops);
1499  g_list_free(shutdown_ops);
1500  return TRUE;
1501 }
1502 
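/*
 * \internal
 * \brief Check whether ordering a probe before an action should be skipped
 *
 * A probe does not need to be ordered before unfencing of its own node, nor
 * before a shutdown of a different node.
 *
 * \return TRUE if the ordering is unnecessary, otherwise FALSE
 */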
1503 static gboolean
1504 order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
1505 {
1506  /* No need to probe the resource on the node that is being
 1507  * unfenced. Otherwise it might introduce a transition loop,
 1508  * since the probe will be performed after the node is
1509  * unfenced.
1510  */
1511  if (pcmk__str_eq(rh_action->task, CRM_OP_FENCE, pcmk__str_casei)
1512  && probe->node && rh_action->node
1513  && probe->node->details == rh_action->node->details) {
1514  const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
1515 
1516  if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
1517  return TRUE;
1518  }
1519  }
1520 
1521  // Shutdown waits for probe to complete only if it's on the same node
1522  if ((pcmk__str_eq(rh_action->task, CRM_OP_SHUTDOWN, pcmk__str_casei))
1523  && probe->node && rh_action->node
1524  && probe->node->details != rh_action->node->details) {
1525  return TRUE;
1526  }
1527  return FALSE;
1528 }
1529 
1530 static void
1531 order_first_probes_imply_stops(pe_working_set_t * data_set)
1532 {
1533  GList *gIter = NULL;
1534 
1535  for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
1536  pe__ordering_t *order = gIter->data;
1537  enum pe_ordering order_type = pe_order_optional;
1538 
1539  pe_resource_t *lh_rsc = order->lh_rsc;
1540  pe_resource_t *rh_rsc = order->rh_rsc;
1541  pe_action_t *lh_action = order->lh_action;
1542  pe_action_t *rh_action = order->rh_action;
1543  const char *lh_action_task = order->lh_action_task;
1544  const char *rh_action_task = order->rh_action_task;
1545 
1546  GList *probes = NULL;
1547  GList *rh_actions = NULL;
1548 
1549  GList *pIter = NULL;
1550 
1551  if (lh_rsc == NULL) {
1552  continue;
1553 
1554  } else if (rh_rsc && lh_rsc == rh_rsc) {
1555  continue;
1556  }
1557 
1558  if (lh_action == NULL && lh_action_task == NULL) {
1559  continue;
1560  }
1561 
1562  if (rh_action == NULL && rh_action_task == NULL) {
1563  continue;
1564  }
1565 
 1566  /* Technically, a probe is expected to return "not running", which could
 1567  * serve as an alternative to a stop action when the resource's status is
 1568  * not yet known.
1569  */
1570  if (lh_action && !pcmk__str_eq(lh_action->task, RSC_STOP, pcmk__str_casei)) {
1571  continue;
1572 
1573  } else if (lh_action == NULL
1574  && lh_action_task
1575  && !pcmk__ends_with(lh_action_task, "_" RSC_STOP "_0")) {
1576  continue;
1577  }
1578 
1579  /* Do not probe the resource inside of a stopping container. Otherwise
 1580  * it might introduce a transition loop, since the probe will be performed
1581  * after the container starts again.
1582  */
1583  if (rh_rsc && lh_rsc->container == rh_rsc) {
1584  if (rh_action && pcmk__str_eq(rh_action->task, RSC_STOP, pcmk__str_casei)) {
1585  continue;
1586 
1587  } else if (rh_action == NULL && rh_action_task
1588  && pcmk__ends_with(rh_action_task,"_" RSC_STOP "_0")) {
1589  continue;
1590  }
1591  }
1592 
1593  if (order->type == pe_order_none) {
1594  continue;
1595  }
1596 
1597  // Preserve the order options for future filtering
 1598  if (pcmk_is_set(order->type, pe_order_apply_first_non_migratable)) {
 1599  pe__set_order_flags(order_type,
 1600  pe_order_apply_first_non_migratable);
 1601  }
1602 
1603  if (pcmk_is_set(order->type, pe_order_same_node)) {
 1604  pe__set_order_flags(order_type, pe_order_same_node);
 1605  }
1606 
1607  // Keep the order types for future filtering
1608  if (order->type == pe_order_anti_colocation
1609  || order->type == pe_order_load) {
1610  order_type = order->type;
1611  }
1612 
1613  probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
1614  if (probes == NULL) {
1615  continue;
1616  }
1617 
1618  if (rh_action) {
1619  rh_actions = g_list_prepend(rh_actions, rh_action);
1620 
1621  } else if (rh_rsc && rh_action_task) {
1622  rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
1623  }
1624 
1625  if (rh_actions == NULL) {
1626  g_list_free(probes);
1627  continue;
1628  }
1629 
1630  crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
1631  " (id=%d, type=%.6x)",
1632  lh_action ? lh_action->uuid : lh_action_task,
1633  rh_action ? rh_action->uuid : rh_action_task,
1634  order->id, order->type);
1635 
1636  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1637  pe_action_t *probe = (pe_action_t *) pIter->data;
1638  GList *rIter = NULL;
1639 
1640  for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
1641  pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;
1642 
1643  if (order_first_probe_unneeded(probe, rh_action_iter)) {
1644  continue;
1645  }
1646  order_actions(probe, rh_action_iter, order_type);
1647  }
1648  }
1649 
1650  g_list_free(rh_actions);
1651  g_list_free(probes);
1652  }
1653 }
1654 
1655 static void
1656 order_first_probe_then_restart_repromote(pe_action_t * probe,
1657  pe_action_t * after,
1658  pe_working_set_t * data_set)
1659 {
1660  GList *gIter = NULL;
1661  bool interleave = FALSE;
1662  pe_resource_t *compatible_rsc = NULL;
1663 
1664  if (probe == NULL
1665  || probe->rsc == NULL
1666  || probe->rsc->variant != pe_native) {
1667  return;
1668  }
1669 
1670  if (after == NULL
1671  // Avoid running into any possible loop
1672  || pcmk_is_set(after->flags, pe_action_tracking)) {
1673  return;
1674  }
1675 
1676  if (!pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
1677  return;
1678  }
1679 
 1680  pe__set_action_flags(after, pe_action_tracking);
 1681 
1682  crm_trace("Processing based on %s %s -> %s %s",
1683  probe->uuid,
1684  probe->node ? probe->node->details->uname: "",
1685  after->uuid,
1686  after->node ? after->node->details->uname : "");
1687 
1688  if (after->rsc
1689  /* Better not build a dependency directly with a clone/group.
1690  * We are going to proceed through the ordering chain and build
1691  * dependencies with its children.
1692  */
1693  && after->rsc->variant == pe_native
1694  && probe->rsc != after->rsc) {
1695 
1696  GList *then_actions = NULL;
1697  enum pe_ordering probe_order_type = pe_order_optional;
1698 
1699  if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
1700  then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
1701 
1702  } else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
1703  then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
1704  }
1705 
1706  for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
1707  pe_action_t *then = (pe_action_t *) gIter->data;
1708 
1709  // Skip any pseudo action which for example is implied by fencing
1710  if (pcmk_is_set(then->flags, pe_action_pseudo)) {
1711  continue;
1712  }
1713 
1714  order_actions(probe, then, probe_order_type);
1715  }
1716  g_list_free(then_actions);
1717  }
1718 
1719  if (after->rsc
1720  && after->rsc->variant > pe_group) {
1721  const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
 1722  XML_RSC_ATTR_INTERLEAVE);
 1723 
1724  interleave = crm_is_true(interleave_s);
1725 
1726  if (interleave) {
1727  /* For an interleaved clone, we should build a dependency only
1728  * with the relevant clone child.
1729  */
1730  compatible_rsc = find_compatible_child(probe->rsc,
1731  after->rsc,
 1732  RSC_ROLE_UNKNOWN,
 1733  FALSE, data_set);
1734  }
1735  }
1736 
1737  for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
1738  pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
1739  /* pe_order_implies_then is the reason why a required A.start
1740  * implies/enforces B.start to be required too, which is the cause of
1741  * B.restart/re-promote.
1742  *
1743  * Not sure about pe_order_implies_then_on_node though. It's now only
 1744  * used for the unfencing case, which tends to introduce transition
1745  * loops...
1746  */
1747 
1748  if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
1749  /* The order type between a group/clone and its child such as
1750  * B.start-> B_child.start is:
1751  * pe_order_implies_first_printed | pe_order_runnable_left
1752  *
1753  * Proceed through the ordering chain and build dependencies with
1754  * its children.
1755  */
1756  if (after->rsc == NULL
1757  || after->rsc->variant < pe_group
1758  || probe->rsc->parent == after->rsc
1759  || after_wrapper->action->rsc == NULL
1760  || after_wrapper->action->rsc->variant > pe_group
1761  || after->rsc != after_wrapper->action->rsc->parent) {
1762  continue;
1763  }
1764 
1765  /* Proceed to the children of a group or a non-interleaved clone.
1766  * For an interleaved clone, proceed only to the relevant child.
1767  */
1768  if (after->rsc->variant > pe_group
1769  && interleave == TRUE
1770  && (compatible_rsc == NULL
1771  || compatible_rsc != after_wrapper->action->rsc)) {
1772  continue;
1773  }
1774  }
1775 
1776  crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
1777  after->uuid,
1778  after->node ? after->node->details->uname: "",
1779  after_wrapper->action->uuid,
1780  after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
1781  after_wrapper->type);
1782 
1783  order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
1784  }
1785 }
1786 
1787 static void clear_actions_tracking_flag(pe_working_set_t * data_set)
1788 {
1789  GList *gIter = NULL;
1790 
1791  for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
1792  pe_action_t *action = (pe_action_t *) gIter->data;
1793 
1794  if (pcmk_is_set(action->flags, pe_action_tracking)) {
 1795  pe__clear_action_flags(action, pe_action_tracking);
 1796  }
1797  }
1798 }
1799 
1800 static void
1801 order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
1802 {
1803  GList *gIter = NULL;
1804  GList *probes = NULL;
1805 
1806  g_list_foreach(rsc->children, (GFunc) order_first_rsc_probes, data_set);
1807 
1808  if (rsc->variant != pe_native) {
1809  return;
1810  }
1811 
1812  probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
1813 
1814  for (gIter = probes; gIter != NULL; gIter= gIter->next) {
1815  pe_action_t *probe = (pe_action_t *) gIter->data;
1816  GList *aIter = NULL;
1817 
1818  for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
1819  pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
1820 
1821  order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
1822  clear_actions_tracking_flag(data_set);
1823  }
1824  }
1825 
1826  g_list_free(probes);
1827 }
1828 
1829 static void
1830 order_first_probes(pe_working_set_t * data_set)
1831 {
1832  GList *gIter = NULL;
1833 
1834  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1835  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1836 
1837  order_first_rsc_probes(rsc, data_set);
1838  }
1839 
1840  order_first_probes_imply_stops(data_set);
1841 }
1842 
1843 static void
1844 order_then_probes(pe_working_set_t * data_set)
1845 {
1846 #if 0
1847  GList *gIter = NULL;
1848 
1849  for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
1850  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
1851 
1852  /* Given "A then B", we would prefer to wait for A to be
1853  * started before probing B.
1854  *
1855  * If A was a filesystem on which the binaries and data for B
1856  * lived, it would have been useful if the author of B's agent
1857  * could assume that A is running before B.monitor will be
1858  * called.
1859  *
1860  * However we can't _only_ probe once A is running, otherwise
1861  * we'd not detect the state of B if A could not be started
1862  * for some reason.
1863  *
1864  * In practice however, we cannot even do an opportunistic
1865  * version of this because B may be moving:
1866  *
1867  * B.probe -> B.start
1868  * B.probe -> B.stop
1869  * B.stop -> B.start
1870  * A.stop -> A.start
1871  * A.start -> B.probe
1872  *
1873  * So far so good, but if we add the result of this code:
1874  *
1875  * B.stop -> A.stop
1876  *
1877  * Then we get a loop:
1878  *
1879  * B.probe -> B.stop -> A.stop -> A.start -> B.probe
1880  *
1881  * We could kill the 'B.probe -> B.stop' dependency, but that
1882  * could mean stopping B "too" soon, because B.start must wait
1883  * for the probes to complete.
1884  *
1885  * Another option is to allow it only if A is a non-unique
1886  * clone with clone-max == node-max (since we'll never be
1887  * moving it). However, we could still be stopping one
1888  * instance at the same time as starting another.
1889 
1890  * The complexity of checking for allowed conditions combined
1891  * with the ever-narrowing use case suggests that this code
1892  * should remain disabled until someone gets smarter.
1893  */
1894  pe_action_t *start = NULL;
1895  GList *actions = NULL;
1896  GList *probes = NULL;
1897 
1898  actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
1899 
1900  if (actions) {
1901  start = actions->data;
1902  g_list_free(actions);
1903  }
1904 
1905  if(start == NULL) {
1906  crm_err("No start action for %s", rsc->id);
1907  continue;
1908  }
1909 
1910  probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
1911 
1912  for (actions = start->actions_before; actions != NULL; actions = actions->next) {
1913  pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
1914 
1915  GList *pIter = NULL;
1916  pe_action_t *first = before->action;
1917  pe_resource_t *first_rsc = first->rsc;
1918 
1919  if(first->required_runnable_before) {
1920  GList *clone_actions = NULL;
1921  for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
1922  before = (pe_action_wrapper_t *) clone_actions->data;
1923 
1924  crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
1925 
1926  CRM_ASSERT(before->action->rsc);
1927  first_rsc = before->action->rsc;
1928  break;
1929  }
1930 
1931  } else if(!pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
1932  crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
1933  }
1934 
1935  if(first_rsc == NULL) {
1936  continue;
1937 
1938  } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
1939  crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
1940  continue;
1941 
1942  } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
1943  crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
1944  continue;
1945  }
1946 
1947  crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
1948 
1949  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1950  pe_action_t *probe = (pe_action_t *) pIter->data;
1951 
1952  crm_err("Ordering %s before %s", first->uuid, probe->uuid);
1953  order_actions(first, probe, pe_order_optional);
1954  }
1955  }
1956  }
1957 #endif
1958 }
1959 
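 /* Apply both probe-ordering passes to the working set: the first-probe
  * orderings and the (currently no-op) then-probe orderings.
  */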
1960 void
1961 pcmk__order_probes(pe_working_set_t * data_set)
1962 {
1963  order_first_probes(data_set);
1964  order_then_probes(data_set);
1965 }
1966 
1967 static int transition_id = -1;
1968 
1975 void
1976 pcmk__log_transition_summary(const char *filename)
1977 {
1978  if (was_processing_error) {
1979  crm_err("Calculated transition %d (with errors)%s%s",
1980  transition_id,
1981  (filename == NULL)? "" : ", saving inputs in ",
1982  (filename == NULL)? "" : filename);
1983 
1984  } else if (was_processing_warning) {
1985  crm_warn("Calculated transition %d (with warnings)%s%s",
1986  transition_id,
1987  (filename == NULL)? "" : ", saving inputs in ",
1988  (filename == NULL)? "" : filename);
1989 
1990  } else {
1991  crm_notice("Calculated transition %d%s%s",
1992  transition_id,
1993  (filename == NULL)? "" : ", saving inputs in ",
1994  (filename == NULL)? "" : filename);
1995  }
1996  if (crm_config_error) {
1997  crm_notice("Configuration errors found during scheduler processing,"
1998  " please run \"crm_verify -L\" to identify issues");
1999  }
2000 }
2001 
2002 /*
2003  * Create a dependency graph to send to the transitioner (via the controller)
2004  */
2005 gboolean
2006 stage8(pe_working_set_t * data_set)
2007 {
2008  GList *gIter = NULL;
2009  const char *value = NULL;
2010  long long limit = 0LL;
2011 
2012  transition_id++;
2013  crm_trace("Creating transition graph %d.", transition_id);
2014 
2015  data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
2016 
2017  value = pe_pref(data_set->config_hash, "cluster-delay");
2018  crm_xml_add(data_set->graph, "cluster-delay", value);
2019 
2020  value = pe_pref(data_set->config_hash, "stonith-timeout");
2021  crm_xml_add(data_set->graph, "stonith-timeout", value);
2022 
2023  crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
2024 
2025  if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
2026  crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
2027  } else {
2028  crm_xml_add(data_set->graph, "failed-start-offset", "1");
2029  }
2030 
2031  value = pe_pref(data_set->config_hash, "batch-limit");
2032  crm_xml_add(data_set->graph, "batch-limit", value);
2033 
2034  crm_xml_add_int(data_set->graph, "transition_id", transition_id);
2035 
2036  value = pe_pref(data_set->config_hash, "migration-limit");
2037  if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
2038  crm_xml_add(data_set->graph, "migration-limit", value);
2039  }
2040 
2041  if (data_set->recheck_by > 0) {
2042  char *recheck_epoch = NULL;
2043 
2044  recheck_epoch = crm_strdup_printf("%llu",
2045  (long long) data_set->recheck_by);
2046  crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
2047  free(recheck_epoch);
2048  }
2049 
2050  /* The following code will de-duplicate action inputs, so nothing past this
2051  * should rely on the action input type flags retaining their original
2052  * values.
2053  */
2054 
2055  gIter = data_set->resources;
2056  for (; gIter != NULL; gIter = gIter->next) {
2057  pe_resource_t *rsc = (pe_resource_t *) gIter->data;
2058 
2059  pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
2060  rsc->cmds->expand(rsc, data_set);
2061  }
2062 
2063  crm_log_xml_trace(data_set->graph, "created resource-driven action list");
2064 
2065  /* pseudo action to distribute list of nodes with maintenance state update */
2066  add_maintenance_update(data_set);
2067 
2068  /* catch any non-resource specific actions */
2069  crm_trace("processing non-resource actions");
2070 
2071  gIter = data_set->actions;
2072  for (; gIter != NULL; gIter = gIter->next) {
2073  pe_action_t *action = (pe_action_t *) gIter->data;
2074 
2075  if (action->rsc
2076  && action->node
2077  && action->node->details->shutdown
2078  && !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
2079  && !pcmk_any_flags_set(action->flags,
2080  pe_action_optional|pe_action_runnable)
2081  && pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)
2082  ) {
2083  /* Eventually we should just ignore the 'fence' case
2084  * But for now it's the best way to detect (in CTS) when
2085  * CIB resource updates are being lost
2086  */
2087  if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
2088  || data_set->no_quorum_policy == no_quorum_ignore) {
2089  crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
2090  action->node->details->unclean ? "fence" : "shut down",
2091  action->node->details->uname, action->rsc->id,
2092  pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
2093  pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
2094  action->uuid);
2095  }
2096  }
2097 
2098  graph_element_from_action(action, data_set);
2099  }
2100 
2101  crm_log_xml_trace(data_set->graph, "created generic action list");
2102  crm_trace("Created transition graph %d.", transition_id);
2103 
2104  return TRUE;
2105 }
2106 
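 /* Send a formatted "node-action" message for every non-optional, node-level
  * action in the transition (shutdowns and fencing), via the working set's
  * output object.
  */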
2107 void
2108 LogNodeActions(pe_working_set_t * data_set)
2109 {
2110  pcmk__output_t *out = data_set->priv;
2111  GList *gIter = NULL;
2112 
2113  for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
2114  char *node_name = NULL;
2115  char *task = NULL;
2116  pe_action_t *action = (pe_action_t *) gIter->data;
2117 
2118  if (action->rsc != NULL) {
2119  continue;
2120  } else if (pcmk_is_set(action->flags, pe_action_optional)) {
2121  continue;
2122  }
2123 
2124  if (pe__is_guest_node(action->node)) {
2125  node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
2126  } else if(action->node) {
2127  node_name = crm_strdup_printf("%s", action->node->details->uname);
2128  }
2129 
2130 
2131  if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
2132  task = strdup("Shutdown");
2133  } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
2134  const char *op = g_hash_table_lookup(action->meta, "stonith_action");
2135  task = crm_strdup_printf("Fence (%s)", op);
2136  }
2137 
2138  out->message(out, "node-action", task, node_name, action->reason);
2139 
2140  free(node_name);
2141  free(task);
2142  }
2143 }