pacemaker  2.1.3-ea053b43a
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2022 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <stdbool.h>
13 
14 #include <crm/pengine/rules.h>
15 #include <crm/msg_xml.h>
17 #include <pacemaker-internal.h>
18 #include <crm/services.h>
19 
20 #include "libpacemaker_private.h"
21 
22 // The controller removes the resource from the CIB, making this redundant
23 // #define DELETE_THEN_REFRESH 1
24 
25 #define INFINITY_HACK (INFINITY * -100)
26 
27 #define VARIANT_NATIVE 1
28 #include <lib/pengine/variant.h>
29 
30 extern bool pcmk__is_daemon;
31 
32 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
34 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
35  xmlNode *operation, pe_working_set_t *data_set);
36 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
38 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
39  xmlNode *operation, pe_working_set_t *data_set);
40 
41 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
42 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
43 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
44 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
45 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
47 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
48 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
49 
50 /* This array says what the *next* role should be when transitioning from one
51  * role to another. For example going from Stopped to Promoted, the next role is
52  * RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
53  * The current state then becomes Started, which is fed into this array again,
54  * giving a next role of RSC_ROLE_PROMOTED.
55  */
56 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
57 /* Current state Next state*/
58 /* Unknown Stopped Started Unpromoted Promoted */
/* NOTE(review): the five initializer rows (original lines 59-63) were lost
 * when this listing was extracted -- restore them from upstream
 * pcmk_sched_native.c before compiling. */
64 };
65 
66 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
67  gboolean optional,
69 
70 // This array picks the function needed to transition from one role to another
/* Indexed as [current role][next role]; RoleError marks transitions that the
 * scheduler should never request directly. Multi-step transitions (such as
 * Stopped -> Promoted) are decomposed one step at a time via rsc_state_matrix
 * above, so each cell here only needs the single-step action. */
71 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
72 /* Current state Next state */
73 /* Unknown Stopped Started Unpromoted Promoted */
74 /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
75 /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
76 /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
77 /* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
78 /* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
79 };
80 
/*!
 * \internal
 * \brief Clear flags in a node-weight flag group, with trace logging
 *
 * \param[in,out] nw_flags        Flag-group variable to update
 * \param[in]     nw_rsc          Resource whose ID appears in trace output
 * \param[in]     flags_to_clear  Flag(s) to clear
 *
 * Bug fix: the macro previously ignored its \p nw_flags argument and
 * read/assigned a caller-local variable that had to be named "flags".
 * It now uses \p nw_flags consistently, which is behaviorally identical
 * for existing callers (they pass their local flag variable) and hygienic
 * for any new call site.
 */
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do {     \
        (nw_flags) = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,    \
                                          "Node weight", (nw_rsc)->id,      \
                                          (nw_flags), (flags_to_clear),     \
                                          #flags_to_clear);                 \
    } while (0)
86 
/*!
 * \internal
 * \brief Choose and assign a node for a primitive resource
 *
 * Sorts the resource's allowed nodes by weight, honors \p prefer when it is
 * allowed, available, and scores at least as well as the best candidate,
 * breaks score ties in favor of the node currently running the resource
 * (except for unique clone instances), then assigns via
 * pcmk__assign_primitive().
 *
 * \return true if a node was assigned, otherwise false
 *
 * NOTE(review): original source line 97 is missing from this extracted
 * listing (a statement between the declarations and the provisional check)
 * -- verify against upstream pcmk_sched_native.c.
 */
87 static bool
88 native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
89 {
90  GList *nodes = NULL;
91  pe_node_t *chosen = NULL;
92  pe_node_t *best = NULL;
93  int multiple = 1;
94  int length = 0;
95  bool result = false;
96 
98 
/* Already assigned (not provisional): report whether a node was chosen */
99  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
100  return rsc->allocated_to != NULL;
101  }
102 
103  // Sort allowed nodes by weight
104  if (rsc->allowed_nodes) {
105  length = g_hash_table_size(rsc->allowed_nodes);
106  }
107  if (length > 0) {
108  nodes = g_hash_table_get_values(rsc->allowed_nodes);
109  nodes = pcmk__sort_nodes(nodes, pe__current_node(rsc), data_set);
110 
111  // First node in sorted list has the best score
112  best = g_list_nth_data(nodes, 0);
113  }
114 
115  if (prefer && nodes) {
116  chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
117 
118  if (chosen == NULL) {
119  pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
120  prefer->details->uname, rsc->id);
121 
122  /* Favor the preferred node as long as its weight is at least as good as
123  * the best allowed node's.
124  *
125  * An alternative would be to favor the preferred node even if the best
126  * node is better, when the best node's weight is less than INFINITY.
127  */
128  } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
129  pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
130  chosen->details->uname, rsc->id);
131  chosen = NULL;
132 
133  } else if (!pcmk__node_available(chosen)) {
134  pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
135  chosen->details->uname, rsc->id);
136  chosen = NULL;
137 
138  } else {
139  pe_rsc_trace(rsc,
140  "Chose preferred node %s for %s (ignoring %d candidates)",
141  chosen->details->uname, rsc->id, length);
142  }
143  }
144 
145  if ((chosen == NULL) && nodes) {
146  /* Either there is no preferred node, or the preferred node is not
147  * available, but there are other nodes allowed to run the resource.
148  */
149 
150  chosen = best;
151  pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
152  chosen ? chosen->details->uname : "<none>", rsc->id, length);
153 
154  if (!pe_rsc_is_unique_clone(rsc->parent)
155  && chosen && (chosen->weight > 0) && pcmk__node_available(chosen)) {
156  /* If the resource is already running on a node, prefer that node if
157  * it is just as good as the chosen node.
158  *
159  * We don't do this for unique clone instances, because
160  * distribute_children() has already assigned instances to their
161  * running nodes when appropriate, and if we get here, we don't want
162  * remaining unallocated instances to prefer a node that's already
163  * running another instance.
164  */
165  pe_node_t *running = pe__current_node(rsc);
166 
167  if ((running != NULL) && !pcmk__node_available(running)) {
168  pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
169  rsc->id, running->details->uname);
170  } else if (running) {
/* Walk equal-weight candidates; swap to the current node if present */
171  for (GList *iter = nodes->next; iter; iter = iter->next) {
172  pe_node_t *tmp = (pe_node_t *) iter->data;
173 
174  if (tmp->weight != chosen->weight) {
175  // The nodes are sorted by weight, so no more are equal
176  break;
177  }
178  if (tmp->details == running->details) {
179  // Scores are equal, so prefer the current node
180  chosen = tmp;
181  }
182  multiple++;
183  }
184  }
185  }
186  }
187 
/* Multiple equally-scored candidates: log the (possibly tie-broken) choice */
188  if (multiple > 1) {
189  static char score[33];
190  int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
191 
192  score2char_stack(chosen->weight, score, sizeof(score));
193  do_crm_log(log_level,
194  "Chose node %s for %s from %d nodes with score %s",
195  chosen->details->uname, rsc->id, multiple, score);
196  }
197 
198  result = pcmk__assign_primitive(rsc, chosen, false);
199  g_list_free(nodes);
200  return result;
201 }
202 
211 static int
212 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
213  const char *value)
214 {
215  GHashTableIter iter;
216  pe_node_t *node = NULL;
217  int best_score = -INFINITY;
218  const char *best_node = NULL;
219 
220  // Find best allowed node with matching attribute
221  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
222  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
223 
224  if ((node->weight > best_score) && pcmk__node_available(node)
225  && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
226 
227  best_score = node->weight;
228  best_node = node->details->uname;
229  }
230  }
231 
232  if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
233  if (best_node == NULL) {
234  crm_info("No allowed node for %s matches node attribute %s=%s",
235  rsc->id, attr, value);
236  } else {
237  crm_info("Allowed node %s for %s had best score (%d) "
238  "of those matching node attribute %s=%s",
239  best_node, rsc->id, best_score, attr, value);
240  }
241  }
242  return best_score;
243 }
244 
259 static void
260 add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
261  const char *attr, float factor,
262  bool only_positive)
263 {
264  GHashTableIter iter;
265  pe_node_t *node = NULL;
266 
267  if (attr == NULL) {
268  attr = CRM_ATTR_UNAME;
269  }
270 
271  // Iterate through each node
272  g_hash_table_iter_init(&iter, nodes);
273  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
274  float weight_f = 0;
275  int weight = 0;
276  int score = 0;
277  int new_score = 0;
278 
279  score = best_node_score_matching_attr(rsc, attr,
280  pe_node_attribute_raw(node, attr));
281 
282  if ((factor < 0) && (score < 0)) {
283  /* Negative preference for a node with a negative score
284  * should not become a positive preference.
285  *
286  * @TODO Consider filtering only if weight is -INFINITY
287  */
288  crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
289  node->details->uname, node->weight, factor, score);
290  continue;
291  }
292 
293  if (node->weight == INFINITY_HACK) {
294  crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
295  node->details->uname, node->weight, factor, score);
296  continue;
297  }
298 
299  weight_f = factor * score;
300 
301  // Round the number; see http://c-faq.com/fp/round.html
302  weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
303 
304  /* Small factors can obliterate the small scores that are often actually
305  * used in configurations. If the score and factor are nonzero, ensure
306  * that the result is nonzero as well.
307  */
308  if ((weight == 0) && (score != 0)) {
309  if (factor > 0.0) {
310  weight = 1;
311  } else if (factor < 0.0) {
312  weight = -1;
313  }
314  }
315 
316  new_score = pcmk__add_scores(weight, node->weight);
317 
318  if (only_positive && (new_score < 0) && (node->weight > 0)) {
319  crm_trace("%s: Filtering %d + %f * %d = %d "
320  "(negative disallowed, marking node unusable)",
321  node->details->uname, node->weight, factor, score,
322  new_score);
323  node->weight = INFINITY_HACK;
324  continue;
325  }
326 
327  if (only_positive && (new_score < 0) && (node->weight == 0)) {
328  crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
329  node->details->uname, node->weight, factor, score,
330  new_score);
331  continue;
332  }
333 
334  crm_trace("%s: %d + %f * %d = %d", node->details->uname,
335  node->weight, factor, score, new_score);
336  node->weight = new_score;
337  }
338 }
339 
340 static inline bool
341 is_nonempty_group(pe_resource_t *rsc)
342 {
343  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
344 }
345 
/*!
 * \internal
 * \brief Merge colocation-derived node weights for a resource
 *
 * Recursively folds scores from this resource (or, for groups, from the
 * relevant member) and its optional colocation constraints into a working
 * copy of \p nodes, returning the merged table (the caller's \p nodes table
 * is destroyed on success).
 *
 * NOTE(review): this extracted listing is missing several original source
 * lines (374-376, 387, 389, 413, 420, 445, 459, 467, 472, 488), including
 * condition lines for the orphaned "} else" fragments below -- restore from
 * upstream pcmk_sched_native.c before compiling.
 */
361 GHashTable *
362 pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id,
363  GHashTable *nodes, const char *attr, float factor,
364  uint32_t flags)
365 {
366  GHashTable *work = NULL;
367 
368  // Avoid infinite recursion
369  if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
370  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
371  primary_id, rsc->id);
372  return nodes;
373  }
375 
/* NOTE(review): enclosing condition for this branch (original lines 374-376)
 * was lost in extraction */
377  if (is_nonempty_group(rsc)) {
378  GList *last = g_list_last(rsc->children);
379  pe_resource_t *last_rsc = last->data;
380 
381  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
382  "using last member %s (at %.6f)",
383  primary_id, rsc->id, last_rsc->id, factor);
384  work = pcmk__native_merge_weights(last_rsc, primary_id, NULL, attr,
385  factor, flags);
386  } else {
388  }
390 
391  } else if (is_nonempty_group(rsc)) {
392  /* The first member of the group will recursively incorporate any
393  * constraints involving other members (including the group internal
394  * colocation).
395  *
396  * @TODO The indirect colocations from the dependent group's other
397  * members will be incorporated at full strength rather than by
398  * factor, so the group's combined stickiness will be treated as
399  * (factor + (#members - 1)) * stickiness. It is questionable what
400  * the right approach should be.
401  */
402  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
403  "(at %.6f)", primary_id, rsc->id, factor);
404  work = pcmk__copy_node_table(nodes);
405  work = pcmk__native_merge_weights(rsc->children->data, primary_id, work,
406  attr, factor, flags);
407 
408  } else {
409  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
410  primary_id, rsc->id, factor);
411  work = pcmk__copy_node_table(nodes);
412  add_node_scores_matching_attr(work, rsc, attr, factor,
414  }
415 
/* Fold in optional colocation constraints when any node is still usable */
416  if (pcmk__any_node_available(work)) {
417  GList *gIter = NULL;
418  int multiplier = (factor < 0)? -1 : 1;
419 
421  gIter = rsc->rsc_cons;
422  pe_rsc_trace(rsc,
423  "Checking additional %d optional '%s with' constraints",
424  g_list_length(gIter), rsc->id);
425 
426  } else if (is_nonempty_group(rsc)) {
427  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
428 
429  gIter = last_rsc->rsc_cons_lhs;
430  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
431  "constraints using last member %s",
432  g_list_length(gIter), rsc->id, last_rsc->id);
433 
434  } else {
435  gIter = rsc->rsc_cons_lhs;
436  pe_rsc_trace(rsc,
437  "Checking additional %d optional 'with %s' constraints",
438  g_list_length(gIter), rsc->id);
439  }
440 
441  for (; gIter != NULL; gIter = gIter->next) {
442  pe_resource_t *other = NULL;
443  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
444 
/* NOTE(review): the condition line (original 445) selecting primary vs.
 * dependent was lost in extraction */
446  other = constraint->primary;
447  } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
448  continue;
449  } else {
450  other = constraint->dependent;
451  }
452 
453  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
454  constraint->id, constraint->dependent->id,
455  constraint->primary->id);
456  work = pcmk__native_merge_weights(other, primary_id, work,
457  constraint->node_attribute,
458  multiplier * constraint->score / (float) INFINITY,
460  pe__show_node_weights(true, NULL, primary_id, work, rsc->cluster);
461  }
462 
463  } else if (pcmk_is_set(flags, pe_weights_rollback)) {
464  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
465  primary_id, rsc->id);
466  g_hash_table_destroy(work);
468  return nodes;
469  }
470 
471 
/* NOTE(review): enclosing condition (original line 472) lost in extraction;
 * this loop restores filtered nodes to a usable weight */
473  pe_node_t *node = NULL;
474  GHashTableIter iter;
475 
476  g_hash_table_iter_init(&iter, work);
477  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
478  if (node->weight == INFINITY_HACK) {
479  node->weight = 1;
480  }
481  }
482  }
483 
484  if (nodes) {
485  g_hash_table_destroy(nodes);
486  }
487 
489  return work;
490 }
491 
/*!
 * \internal
 * \brief Allocate a primitive resource to a node
 *
 * Escalates to the parent when one exists, allocates colocation primaries
 * first (with rollback if that empties the allowed-node table), merges
 * dependent colocation scores, applies unmanaged/shutdown special cases,
 * chooses a node via native_choose_node(), and finally maintains the
 * online/shutdown state of a Pacemaker Remote node this resource connects to.
 *
 * \return Node the resource is allocated to, or NULL
 *
 * NOTE(review): the function's signature lines (original 493-494 -- name and
 * parameter list, presumably taking rsc/prefer/data_set) and several other
 * lines (514, 560, 566, 569-570, 575-580, 602, 623) were lost in extraction;
 * restore from upstream pcmk_sched_native.c before compiling.
 */
492 pe_node_t *
495 {
496  GList *gIter = NULL;
497 
498  if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
499  /* never allocate children on their own */
500  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
501  rsc->parent->id);
502  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
503  }
504 
505  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
506  return rsc->allocated_to;
507  }
508 
509  if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
510  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
511  return NULL;
512  }
513 
515  pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
516 
/* Allocate every colocation primary before this resource */
517  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
518  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
519 
520  GHashTable *archive = NULL;
521  pe_resource_t *primary = constraint->primary;
522 
/* Snapshot allowed nodes so the effect can be rolled back if it leaves
 * this resource with nowhere to run */
523  if ((constraint->dependent_role >= RSC_ROLE_PROMOTED)
524  || (constraint->score < 0 && constraint->score > -INFINITY)) {
525  archive = pcmk__copy_node_table(rsc->allowed_nodes);
526  }
527 
528  pe_rsc_trace(rsc,
529  "%s: Allocating %s first (constraint=%s score=%d role=%s)",
530  rsc->id, primary->id, constraint->id,
531  constraint->score, role2text(constraint->dependent_role));
532  primary->cmds->allocate(primary, NULL, data_set);
533  rsc->cmds->rsc_colocation_lh(rsc, primary, constraint, data_set);
534  if (archive && !pcmk__any_node_available(rsc->allowed_nodes)) {
535  pe_rsc_info(rsc, "%s: Rolling back scores from %s",
536  rsc->id, primary->id);
537  g_hash_table_destroy(rsc->allowed_nodes);
538  rsc->allowed_nodes = archive;
539  archive = NULL;
540  }
541  if (archive) {
542  g_hash_table_destroy(archive);
543  }
544  }
545 
546  pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
547 
/* Merge scores from resources colocated with this one */
548  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
549  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
550 
551  if (!pcmk__colocation_has_influence(constraint, NULL)) {
552  continue;
553  }
554  pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
555  constraint->id, constraint->dependent->id,
556  constraint->primary->id);
557  rsc->allowed_nodes = constraint->dependent->cmds->merge_weights(
558  constraint->dependent, rsc->id, rsc->allowed_nodes,
559  constraint->node_attribute, constraint->score / (float) INFINITY,
561  }
562 
563  if (rsc->next_role == RSC_ROLE_STOPPED) {
564  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
565  /* make sure it doesn't come up again */
567 
/* NOTE(review): remainder of this condition (original lines 569-570,
 * presumably the no-quorum-policy=freeze check) lost in extraction */
568  } else if(rsc->next_role > rsc->role
571  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
572  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
573  pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
574  }
575 
577  rsc, __func__, rsc->allowed_nodes, data_set);
581  }
582 
/* Unmanaged resources stay where they are (or nowhere, if inactive) */
583  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
584  const char *reason = NULL;
585  pe_node_t *assign_to = NULL;
586 
587  pe__set_next_role(rsc, rsc->role, "unmanaged");
588  assign_to = pe__current_node(rsc);
589  if (assign_to == NULL) {
590  reason = "inactive";
591  } else if (rsc->role == RSC_ROLE_PROMOTED) {
592  reason = "promoted";
593  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
594  reason = "failed";
595  } else {
596  reason = "active";
597  }
598  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
599  (assign_to? assign_to->details->uname : "no node"), reason);
600  pcmk__assign_primitive(rsc, assign_to, true);
601 
/* NOTE(review): the condition line (original 602) for this branch was lost
 * in extraction */
603  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
604  pcmk__assign_primitive(rsc, NULL, true);
605 
606  } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
607  && native_choose_node(rsc, prefer, data_set)) {
608  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
609  rsc->allocated_to->details->uname);
610 
611  } else if (rsc->allocated_to == NULL) {
612  if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
613  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
614  } else if (rsc->running_on != NULL) {
615  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
616  }
617 
618  } else {
619  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
620  rsc->allocated_to->details->uname);
621  }
622 
624 
/* Keep the Pacemaker Remote node this connection resource manages in the
 * right state: online when the connection will run, shutting down otherwise */
625  if (rsc->is_remote_node) {
626  pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
627 
628  CRM_ASSERT(remote_node != NULL);
629  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
630  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
631  remote_node->details->id);
632  remote_node->details->online = TRUE;
633  /* We shouldn't consider an unseen remote-node unclean if we are going
634  * to try and connect to it. Otherwise we get an unnecessary fence */
635  if (remote_node->details->unseen == TRUE) {
636  remote_node->details->unclean = FALSE;
637  }
638 
639  } else {
640  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
641  remote_node->details->id, role2text(rsc->next_role),
642  (rsc->allocated_to? "" : "un"));
643  remote_node->details->shutdown = TRUE;
644  }
645  }
646 
647  return rsc->allocated_to;
648 }
649 
650 static gboolean
651 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
652 {
653  gboolean dup = FALSE;
654  const char *id = NULL;
655  const char *value = NULL;
656  xmlNode *operation = NULL;
657  guint interval2_ms = 0;
658 
659  CRM_ASSERT(rsc);
660  for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
661  operation = pcmk__xe_next(operation)) {
662 
663  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
664  value = crm_element_value(operation, "name");
665  if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
666  continue;
667  }
668 
669  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
670  interval2_ms = crm_parse_interval_spec(value);
671  if (interval_ms != interval2_ms) {
672  continue;
673  }
674 
675  if (id == NULL) {
676  id = ID(operation);
677 
678  } else {
679  pcmk__config_err("Operation %s is duplicate of %s (do not use "
680  "same name and interval combination more "
681  "than once per resource)", ID(operation), id);
682  dup = TRUE;
683  }
684  }
685  }
686 
687  return dup;
688 }
689 
/*!
 * \internal
 * \brief Check whether an action name is one that may not be recurring
 *
 * NOTE(review): the function body (original line 693, the return expression)
 * was lost when this listing was extracted -- restore from upstream
 * pcmk_sched_native.c. Callers treat a true result as "reject a recurring
 * definition for this action".
 */
690 static bool
691 op_cannot_recur(const char *name)
692 {
694 }
695 
/*!
 * \internal
 * \brief Create a recurring monitor action for an active role
 *
 * Processes one <op> definition (skipping role="Stopped" definitions,
 * zero intervals, duplicates, non-recurrable actions, and disabled
 * operations), cancelling a monitor whose role no longer matches the
 * resource's next role, and otherwise creating the monitor action plus
 * its orderings relative to start/reload/promote/demote.
 *
 * NOTE(review): several original source lines are missing from this
 * extracted listing (771, 813, 834, 839, 855, 859, 864, 869 -- mostly
 * flag arguments and one condition) -- restore from upstream
 * pcmk_sched_native.c before compiling.
 */
696 static void
697 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
698  xmlNode * operation, pe_working_set_t * data_set)
699 {
700  char *key = NULL;
701  const char *name = NULL;
702  const char *role = NULL;
703  const char *interval_spec = NULL;
704  const char *node_uname = node? node->details->uname : "n/a";
705 
706  guint interval_ms = 0;
707  pe_action_t *mon = NULL;
708  gboolean is_optional = TRUE;
709  GList *possible_matches = NULL;
710 
711  CRM_ASSERT(rsc);
712 
713  /* Only process for the operations without role="Stopped" */
714  role = crm_element_value(operation, "role");
715  if (role && text2role(role) == RSC_ROLE_STOPPED) {
716  return;
717  }
718 
719  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
720  interval_ms = crm_parse_interval_spec(interval_spec);
721  if (interval_ms == 0) {
722  return;
723  }
724 
725  name = crm_element_value(operation, "name");
726  if (is_op_dup(rsc, name, interval_ms)) {
727  crm_trace("Not creating duplicate recurring action %s for %dms %s",
728  ID(operation), interval_ms, name);
729  return;
730  }
731 
732  if (op_cannot_recur(name)) {
733  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
734  ID(operation), name);
735  return;
736  }
737 
738  key = pcmk__op_key(rsc->id, name, interval_ms);
739  if (find_rsc_op_entry(rsc, key) == NULL) {
740  crm_trace("Not creating recurring action %s for disabled resource %s",
741  ID(operation), rsc->id);
742  free(key);
743  return;
744  }
745 
746  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
747  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
748 
/* The monitor is optional only when its start is optional */
749  if (start != NULL) {
750  pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
751  pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
752  start->uuid);
753  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
754  } else {
755  pe_rsc_trace(rsc, "Marking %s optional", key);
756  is_optional = TRUE;
757  }
758 
759  /* start a monitor for an already active resource */
760  possible_matches = find_actions_exact(rsc->actions, key, node);
761  if (possible_matches == NULL) {
762  is_optional = FALSE;
763  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
764 
765  } else {
766  GList *gIter = NULL;
767 
768  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
769  pe_action_t *op = (pe_action_t *) gIter->data;
770 
/* NOTE(review): the condition line (original 771) for this branch was
 * lost in extraction */
772  is_optional = FALSE;
773  break;
774  }
775  }
776  g_list_free(possible_matches);
777  }
778 
/* Monitor's configured role no longer matches the next role: cancel it if
 * it is running, otherwise just skip it */
779  if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
780  || (role != NULL && text2role(role) != rsc->next_role)) {
781  int log_level = LOG_TRACE;
782  const char *result = "Ignoring";
783 
784  if (is_optional) {
785  char *after_key = NULL;
786  pe_action_t *cancel_op = NULL;
787 
788  // It's running, so cancel it
789  log_level = LOG_INFO;
790  result = "Cancelling";
791  cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
792 
/* Order the cancellation before the role change that obsoletes it */
793  switch (rsc->role) {
794  case RSC_ROLE_UNPROMOTED:
795  case RSC_ROLE_STARTED:
796  if (rsc->next_role == RSC_ROLE_PROMOTED) {
797  after_key = promote_key(rsc);
798 
799  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
800  after_key = stop_key(rsc);
801  }
802 
803  break;
804  case RSC_ROLE_PROMOTED:
805  after_key = demote_key(rsc);
806  break;
807  default:
808  break;
809  }
810 
811  if (after_key) {
812  pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
814  }
815  }
816 
817  do_crm_log(log_level, "%s action %s (%s vs. %s)",
818  result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
819  role2text(rsc->next_role));
820 
821  free(key);
822  return;
823  }
824 
825  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
826  key = mon->uuid;
827  if (is_optional) {
828  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
829  }
830 
831  if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
832  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
833  node_uname, mon->uuid);
835 
836  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
837  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
838  node_uname, mon->uuid);
840 
841  } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
842  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
843  mon->task, interval_ms / 1000, rsc->id, node_uname);
844  }
845 
/* A promoted resource's monitor is expected to report "running promoted" */
846  if (rsc->next_role == RSC_ROLE_PROMOTED) {
847  char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
848 
849  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
850  free(running_promoted);
851  }
852 
/* Order the monitor after start/reload (and promote/demote as applicable) */
853  if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
854  pcmk__new_ordering(rsc, start_key(rsc), NULL, NULL, strdup(key), mon,
856  data_set);
857 
858  pcmk__new_ordering(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon,
860  data_set);
861 
862  if (rsc->next_role == RSC_ROLE_PROMOTED) {
863  pcmk__new_ordering(rsc, promote_key(rsc), NULL, rsc, NULL, mon,
865  data_set);
866 
867  } else if (rsc->role == RSC_ROLE_PROMOTED) {
868  pcmk__new_ordering(rsc, demote_key(rsc), NULL, rsc, NULL, mon,
870  data_set);
871  }
872  }
873 }
874 
875 static void
876 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
877 {
878  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
879  (node == NULL || node->details->maintenance == FALSE)) {
880  xmlNode *operation = NULL;
881 
882  for (operation = pcmk__xe_first_child(rsc->ops_xml);
883  operation != NULL;
884  operation = pcmk__xe_next(operation)) {
885 
886  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
887  RecurringOp(rsc, start, node, operation, data_set);
888  }
889  }
890  }
891 }
892 
893 static void
894 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
895  xmlNode * operation, pe_working_set_t * data_set)
896 {
897  char *key = NULL;
898  const char *name = NULL;
899  const char *role = NULL;
900  const char *interval_spec = NULL;
901  const char *node_uname = node? node->details->uname : "n/a";
902 
903  guint interval_ms = 0;
904  GList *possible_matches = NULL;
905  GList *gIter = NULL;
906 
907  /* Only process for the operations with role="Stopped" */
908  role = crm_element_value(operation, "role");
909  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
910  return;
911  }
912 
913  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
914  interval_ms = crm_parse_interval_spec(interval_spec);
915  if (interval_ms == 0) {
916  return;
917  }
918 
919  name = crm_element_value(operation, "name");
920  if (is_op_dup(rsc, name, interval_ms)) {
921  crm_trace("Not creating duplicate recurring action %s for %dms %s",
922  ID(operation), interval_ms, name);
923  return;
924  }
925 
926  if (op_cannot_recur(name)) {
927  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
928  ID(operation), name);
929  return;
930  }
931 
932  key = pcmk__op_key(rsc->id, name, interval_ms);
933  if (find_rsc_op_entry(rsc, key) == NULL) {
934  crm_trace("Not creating recurring action %s for disabled resource %s",
935  ID(operation), rsc->id);
936  free(key);
937  return;
938  }
939 
940  // @TODO add support
941  if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
942  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
943  "not supported for anonymous clones)",
944  ID(operation));
945  return;
946  }
947 
948  pe_rsc_trace(rsc,
949  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
950  ID(operation), rsc->id, role2text(rsc->next_role));
951 
952  /* if the monitor exists on the node where the resource will be running, cancel it */
953  if (node != NULL) {
954  possible_matches = find_actions_exact(rsc->actions, key, node);
955  if (possible_matches) {
956  pe_action_t *cancel_op = NULL;
957 
958  g_list_free(possible_matches);
959 
960  cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
961 
962  if ((rsc->next_role == RSC_ROLE_STARTED)
963  || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
964  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
965  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
966  pcmk__new_ordering(rsc, NULL, cancel_op, rsc, start_key(rsc),
968  }
969 
970  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
971  key, role, role2text(rsc->next_role), node_uname);
972  }
973  }
974 
975  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
976  pe_node_t *stop_node = (pe_node_t *) gIter->data;
977  const char *stop_node_uname = stop_node->details->uname;
978  gboolean is_optional = TRUE;
979  gboolean probe_is_optional = TRUE;
980  gboolean stop_is_optional = TRUE;
981  pe_action_t *stopped_mon = NULL;
982  char *rc_inactive = NULL;
983  GList *stop_ops = NULL;
984  GList *local_gIter = NULL;
985 
986  if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
987  continue;
988  }
989 
990  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
991  ID(operation), rsc->id, crm_str(stop_node_uname));
992 
993  /* start a monitor for an already stopped resource */
994  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
995  if (possible_matches == NULL) {
996  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
997  crm_str(stop_node_uname));
998  is_optional = FALSE;
999  } else {
1000  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1001  crm_str(stop_node_uname));
1002  is_optional = TRUE;
1003  g_list_free(possible_matches);
1004  }
1005 
1006  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1007 
1008  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
1009  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1010  free(rc_inactive);
1011 
1012  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1013  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1014  FALSE);
1015  GList *pIter = NULL;
1016 
1017  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1018  pe_action_t *probe = (pe_action_t *) pIter->data;
1019 
1020  order_actions(probe, stopped_mon, pe_order_runnable_left);
1021  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1022  }
1023 
1024  g_list_free(probes);
1025  }
1026 
1027  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1028 
1029  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1030  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1031 
1032  if (!pcmk_is_set(stop->flags, pe_action_optional)) {
1033  stop_is_optional = FALSE;
1034  }
1035 
1036  if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
1037  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1038  crm_str(stop_node_uname), stopped_mon->uuid);
1040  }
1041 
1042  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1043  pcmk__new_ordering(rsc, stop_key(rsc), stop, NULL, strdup(key),
1044  stopped_mon,
1046  data_set);
1047  }
1048 
1049  }
1050 
1051  if (stop_ops) {
1052  g_list_free(stop_ops);
1053  }
1054 
1055  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1056  && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1057  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1058  key, crm_str(stop_node_uname));
1060  }
1061 
1062  if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1063  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1064  }
1065 
1066  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1067  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1068  crm_str(stop_node_uname), stopped_mon->uuid);
1070  }
1071 
1072  if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
1073  && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1074  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1075  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1076  }
1077  }
1078 
1079  free(key);
1080 }
1081 
1082 static void
1083 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1084 {
1085  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
1086  (node == NULL || node->details->maintenance == FALSE)) {
1087  xmlNode *operation = NULL;
1088 
1089  for (operation = pcmk__xe_first_child(rsc->ops_xml);
1090  operation != NULL;
1091  operation = pcmk__xe_next(operation)) {
1092 
1093  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
1094  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1095  }
1096  }
1097  }
1098 }
1099 
/*!
 * \internal
 * \brief Create and order live-migration actions for a resource move
 *
 * Creates migrate_to (only when the migration is not already partially
 * complete) and migrate_from actions, orders probes and the migration pair
 * relative to each other and to the resource's stop/start, and records the
 * migration source/target node names as action meta-attributes.
 *
 * NOTE(review): several lines are missing from this extraction (e.g. the
 * flag arguments originally at 1124-1125, 1128, 1150, 1156, 1160) —
 * confirm the ordering flags against the upstream file before relying on
 * this copy.
 */
 1100 static void
 1101 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
 1102 {
 1103  pe_action_t *migrate_to = NULL;
 1104  pe_action_t *migrate_from = NULL;
 1105  pe_action_t *start = NULL;
 1106  pe_action_t *stop = NULL;
 1107  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
 1108 
 1109  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
 1110  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
 1111  start = start_action(rsc, chosen, TRUE);
 1112  stop = stop_action(rsc, current, TRUE);
 1113 
       /* A partial migration already has a migrate_to in flight; only the
        * migrate_from remains to be scheduled.
        */
 1114  if (partial == FALSE) {
 1115  migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
 1116  RSC_MIGRATE, current, TRUE, TRUE, data_set);
 1117  }
 1118 
 1119  migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
 1120  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
 1121 
 1122  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
 1123 
 1126 
 1127  // This is easier than trying to delete it from the graph
 1129 
 1130  /* order probes before migrations */
 1131  if (partial) {
 1133  migrate_from->needs = start->needs;
 1134 
 1135  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
 1136  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
 1137  NULL, pe_order_optional, data_set);
 1138 
 1139  } else {
 1142  migrate_to->needs = start->needs;
 1143 
 1144  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
 1145  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
 1146  NULL, pe_order_optional, data_set);
       /* migrate_to on the source must complete before migrate_from on the
        * target.
        */
 1147  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
 1148  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
 1149  NULL,
 1151  data_set);
 1152  }
 1153 
       // The migration must finish before the source-node stop and target start
 1154  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
 1155  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
 1157  data_set);
 1158  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
 1159  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
 1161  data_set);
 1162  }
 1163 
 1164  if (migrate_to) {
 1165  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
 1167 
 1168  /* Pacemaker Remote connections don't require pending to be recorded in
 1169  * the CIB. We can reduce CIB writes by not setting PENDING for them.
 1170  */
 1171  if (rsc->is_remote_node == FALSE) {
 1172  /* migrate_to takes place on the source node, but can
 1173  * have an effect on the target node depending on how
 1174  * the agent is written. Because of this, we have to maintain
 1175  * a record that the migrate_to occurred, in case the source node
 1176  * loses membership while the migrate_to action is still in-flight.
 1177  */
 1178  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
 1179  }
 1180  }
 1181 
 1182  if (migrate_from) {
 1183  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
 1184  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
 1185  }
 1186 }
1187 
/*!
 * \internal
 * \brief Schedule the actions needed to restart a resource
 *
 * Walks the role state matrix twice: first bringing the resource down from
 * its current role to stopped on its current node, then back up from stopped
 * to its current role on its chosen node. Each transition step is created by
 * the corresponding rsc_action_matrix[] callback; a step that reports failure
 * stops the walk.
 *
 * \param[in] rsc           Resource being restarted
 * \param[in] current       Node the resource is active on
 * \param[in] chosen        Node the resource will run on next
 * \param[in] need_stop     If true, the down-transition actions are mandatory
 * \param[in] need_promote  If true, a promote step (if reached) is mandatory
 *
 * NOTE(review): lines 1207 and 1241 are missing from this extraction —
 * presumably set/clear of a "restarting" resource flag bracketing the body;
 * confirm against upstream.
 */
 1200 static void
 1201 schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
 1202  pe_node_t *chosen, bool need_stop, bool need_promote)
 1203 {
 1204  enum rsc_role_e role = rsc->role;
 1205  enum rsc_role_e next_role;
 1206 
 1208 
 1209  // Bring resource down to a stop on its current node
 1210  while (role != RSC_ROLE_STOPPED) {
 1211  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
 1212  pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
 1213  (need_stop? "required" : "optional"), rsc->id,
 1214  role2text(role), role2text(next_role));
       // Third argument is "optional": mandatory only when need_stop is set
 1215  if (!rsc_action_matrix[role][next_role](rsc, current, !need_stop,
 1216  rsc->cluster)) {
 1217  break;
 1218  }
 1219  role = next_role;
 1220  }
 1221 
 1222  // Bring resource up to its next role on its next node
 1223  while ((rsc->role <= rsc->next_role) && (role != rsc->role)
 1224  && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
 1225  bool required = need_stop;
 1226 
 1227  next_role = rsc_state_matrix[role][rsc->role];
       // A promote step becomes mandatory when recovery-by-promote was requested
 1228  if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
 1229  required = true;
 1230  }
 1231  pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
 1232  (required? "required" : "optional"), rsc->id,
 1233  role2text(role), role2text(next_role));
 1234  if (!rsc_action_matrix[role][next_role](rsc, chosen, !required,
 1235  rsc->cluster)) {
 1236  break;
 1237  }
 1238  role = next_role;
 1239  }
 1240 
 1242 }
1243 
/*!
 * \internal
 * \brief Create all actions needed for a primitive's transition
 *
 * Determines the resource's current and chosen nodes, handles dangling
 * migrations, multiply-active recovery, restart scheduling, role
 * transitions, recurring monitors, and (when allowed) live migration.
 *
 * NOTE(review): the function-name line (originally 1245, presumably
 * "native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)")
 * and several condition/argument lines are missing from this extraction —
 * confirm against upstream before modifying.
 */
 1244 void
 1246 {
 1247  pe_action_t *start = NULL;
 1248  pe_node_t *chosen = NULL;
 1249  pe_node_t *current = NULL;
 1250  gboolean need_stop = FALSE;
 1251  bool need_promote = FALSE;
 1252  gboolean is_moving = FALSE;
 1253  gboolean allow_migrate = FALSE;
 1254 
 1255  GList *gIter = NULL;
 1256  unsigned int num_all_active = 0;
 1257  unsigned int num_clean_active = 0;
 1258  bool multiply_active = FALSE;
 1259  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
 1260  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
 1261 
 1262  CRM_ASSERT(rsc != NULL);
 1263  allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
 1264 
 1265  chosen = rsc->allocated_to;
 1266  next_role = rsc->next_role;
       // An unknown next role defaults from the allocation result
 1267  if (next_role == RSC_ROLE_UNKNOWN) {
 1268  pe__set_next_role(rsc,
 1269  (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
 1270  "allocation");
 1271  }
 1272  pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
 1273  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
 1274  ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
 1275  ((chosen == NULL)? "no node" : chosen->details->uname));
 1276 
 1277  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
 1278 
       /* Schedule stops (and possibly cleanup) on nodes where a migration was
        * left dangling
        */
 1279  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
 1280  pe_node_t *dangling_source = (pe_node_t *) gIter->data;
 1281 
 1282  pe_action_t *stop = NULL;
 1283 
 1284  pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
 1285  pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
 1286  rsc->id, dangling_source->details->uname);
 1287  stop = stop_action(rsc, dangling_source, FALSE);
 1290  DeleteRsc(rsc, dangling_source, FALSE, data_set);
 1291  }
 1292  }
 1293 
 1294  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
 1296  && (current->details == rsc->partial_migration_source->details)
 1297  && (chosen->details == rsc->partial_migration_target->details)) {
 1298 
 1299  /* The chosen node is still the migration target from a partial
 1300  * migration. Attempt to continue the migration instead of recovering
 1301  * by stopping the resource everywhere and starting it on a single node.
 1302  */
 1303  pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
 1304  "to target %s from %s",
 1307 
 1308  } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
 1309  /* If a resource has "requires" set to nothing or quorum, don't consider
 1310  * it active on unclean nodes (similar to how all resources behave when
 1311  * stonith-enabled is false). We can start such resources elsewhere
 1312  * before fencing completes, and if we considered the resource active on
 1313  * the failed node, we would attempt recovery for being active on
 1314  * multiple nodes.
 1315  */
 1316  multiply_active = (num_clean_active > 1);
 1317  } else {
 1318  multiply_active = (num_all_active > 1);
 1319  }
 1320 
 1321  if (multiply_active) {
 1323  // Migration was in progress, but we've chosen a different target
 1324  crm_notice("Resource %s can no longer migrate from %s to %s "
 1325  "(will stop on both nodes)",
 1328  multiply_active = false;
 1329 
 1330  } else {
 1331  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 1332 
 1333  // Resource was (possibly) incorrectly multiply active
 1334  pe_proc_err("%s resource %s might be active on %u nodes (%s)",
 1335  crm_str(class), rsc->id, num_all_active,
 1336  recovery2text(rsc->recovery_type));
 1337  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
 1338  }
 1339 
       // Decide recovery strategy for a multiply-active resource
 1340  switch (rsc->recovery_type) {
 1341  case recovery_stop_start:
 1342  need_stop = TRUE;
 1343  break;
 1345  need_stop = TRUE; // StopRsc() will skip expected node
 1347  break;
 1348  default:
 1349  break;
 1350  }
 1351 
 1352  /* If by chance a partial migration is in process, but the migration
 1353  * target is not chosen still, clear all partial migration data.
 1354  */
 1356  allow_migrate = FALSE;
 1357  }
 1358 
 1359  if (!multiply_active) {
 1361  }
 1362 
 1363  if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
 1364  pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
 1365  rsc->id);
 1366  start = start_action(rsc, chosen, TRUE);
 1368  }
 1369 
       // Decide whether a full stop is required before the next start
 1370  if (current && chosen && current->details != chosen->details) {
 1371  pe_rsc_trace(rsc, "Moving %s from %s to %s",
 1372  rsc->id, crm_str(current->details->uname),
 1373  crm_str(chosen->details->uname));
 1374  is_moving = TRUE;
 1375  need_stop = TRUE;
 1376 
 1377  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
 1378  if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
 1379  need_stop = TRUE;
 1380  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
 1381  } else {
 1382  pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
 1383  if (rsc->next_role == RSC_ROLE_PROMOTED) {
 1384  need_promote = TRUE;
 1385  }
 1386  }
 1387 
 1388  } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
 1389  pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
 1390  need_stop = TRUE;
 1391 
 1392  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
 1393  pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
 1394  rsc->id);
 1395  start = start_action(rsc, chosen, TRUE);
 1396  if (!pcmk_is_set(start->flags, pe_action_optional)) {
 1397  // Recovery of a promoted resource
 1398  pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
 1399  need_stop = TRUE;
 1400  }
 1401  }
 1402 
 1403  /* Create any additional actions required when bringing resource down and
 1404  * back up to same level.
 1405  */
 1406  schedule_restart_actions(rsc, current, chosen, need_stop, need_promote);
 1407 
 1408  /* Required steps from this role to the next */
 1409  role = rsc->role;
 1410  while (role != rsc->next_role) {
 1411  next_role = rsc_state_matrix[role][rsc->next_role];
 1412  pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
 1413  rsc->id, role2text(role), role2text(next_role),
 1414  role2text(rsc->next_role));
 1415  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
 1416  break;
 1417  }
 1418  role = next_role;
 1419  }
 1420 
       // Schedule recurring monitors as appropriate for the next role
 1421  if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
 1422  pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
 1423  rsc->id);
 1424 
 1425  } else if ((rsc->next_role != RSC_ROLE_STOPPED)
 1426  || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 1427  pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
 1428  ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
 1429  rsc->id);
 1430  start = start_action(rsc, chosen, TRUE);
 1431  Recurring(rsc, start, chosen, data_set);
 1432  Recurring_Stopped(rsc, start, chosen, data_set);
 1433 
 1434  } else {
 1435  pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
 1436  rsc->id);
 1437  Recurring_Stopped(rsc, NULL, NULL, data_set);
 1438  }
 1439 
 1440  /* if we are stuck in a partial migration, where the target
 1441  * of the partial migration no longer matches the chosen target.
 1442  * A full stop/start is required */
 1443  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
 1444  pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
 1445  rsc->id);
 1446  allow_migrate = FALSE;
 1447 
 1448  } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
 1449  || pcmk_any_flags_set(rsc->flags,
 1451  || (current && current->details->unclean)
 1452  || rsc->next_role < RSC_ROLE_STARTED) {
 1453 
 1454  allow_migrate = FALSE;
 1455  }
 1456 
 1457  if (allow_migrate) {
 1458  handle_migration_actions(rsc, current, chosen, data_set);
 1459  }
 1460 }
1461 
1462 static void
1463 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1464 {
1465  GHashTableIter iter;
1466  pe_node_t *node = NULL;
1467  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1468  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1469  if (node->details->remote_rsc) {
1470  node->weight = -INFINITY;
1471  }
1472  }
1473 }
1474 
1489 static GList *
1490 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1491 {
1492  GList *allowed_nodes = NULL;
1493 
1494  if (rsc->allowed_nodes) {
1495  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1496  }
1497 
1498  if (!pcmk__is_daemon) {
1499  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1500  }
1501 
1502  return allowed_nodes;
1503 }
1504 
/*!
 * \internal
 * \brief Create implicit constraints needed for a primitive resource
 *
 * Orders stop-before-start (restart), demote/promote orderings for
 * promotable resources, unfencing orderings, utilization constraints,
 * container/guest-node orderings and colocations, and remote-node bans.
 *
 * NOTE(review): the function-name line (originally 1506, presumably
 * "native_internal_constraints(pe_resource_t *rsc, pe_working_set_t
 * *data_set)") and several flag-argument lines are missing from this
 * extraction — confirm against upstream before modifying.
 */
 1505 void
 1507 {
 1508  /* This function is on the critical path and worth optimizing as much as possible */
 1509 
 1510  pe_resource_t *top = NULL;
 1511  GList *allowed_nodes = NULL;
 1512  bool check_unfencing = FALSE;
 1513  bool check_utilization = false;
 1514 
 1515  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 1516  pe_rsc_trace(rsc,
 1517  "Skipping native constraints for unmanaged resource: %s",
 1518  rsc->id);
 1519  return;
 1520  }
 1521 
 1522  top = uber_parent(rsc);
 1523 
 1524  // Whether resource requires unfencing
 1525  check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
 1528 
 1529  // Whether a non-default placement strategy is used
 1530  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
 1531  && !pcmk__str_eq(data_set->placement_strategy,
 1532  "default", pcmk__str_casei);
 1533 
 1534  // Order stops before starts (i.e. restart)
 1535  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
 1536  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
 1538  data_set);
 1539 
 1540  // Promotable ordering: demote before stop, start before promote
 1542  || (rsc->role > RSC_ROLE_UNPROMOTED)) {
 1543 
 1544  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
 1545  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
 1547 
 1548  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
 1549  rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
 1551  }
 1552 
 1553  // Don't clear resource history if probing on same node
 1555  NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
 1557  data_set);
 1558 
 1559  // Certain checks need allowed nodes
 1560  if (check_unfencing || check_utilization || rsc->container) {
 1561  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
 1562  }
 1563 
 1564  if (check_unfencing) {
 1565  /* Check if the node needs to be unfenced first */
 1566 
 1567  for (GList *item = allowed_nodes; item; item = item->next) {
 1568  pe_node_t *node = item->data;
 1569  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
 1570 
 1571  crm_debug("Ordering any stops of %s before %s, and any starts after",
 1572  rsc->id, unfence->uuid);
 1573 
 1574  /*
 1575  * It would be more efficient to order clone resources once,
 1576  * rather than order each instance, but ordering the instance
 1577  * allows us to avoid unnecessary dependencies that might conflict
 1578  * with user constraints.
 1579  *
 1580  * @TODO: This constraint can still produce a transition loop if the
 1581  * resource has a stop scheduled on the node being unfenced, and
 1582  * there is a user ordering constraint to start some other resource
 1583  * (which will be ordered after the unfence) before stopping this
 1584  * resource. An example is "start some slow-starting cloned service
 1585  * before stopping an associated virtual IP that may be moving to
 1586  * it":
 1587  * stop this -> unfencing -> start that -> stop this
 1588  */
 1589  pcmk__new_ordering(rsc, stop_key(rsc), NULL,
 1590  NULL, strdup(unfence->uuid), unfence,
 1592 
 1593  pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
 1594  rsc, start_key(rsc), NULL,
 1596  data_set);
 1597  }
 1598  }
 1599 
 1600  if (check_utilization) {
 1601  pcmk__create_utilization_constraints(rsc, allowed_nodes);
 1602  }
 1603 
 1604  if (rsc->container) {
 1605  pe_resource_t *remote_rsc = NULL;
 1606 
 1607  if (rsc->is_remote_node) {
 1608  // rsc is the implicit remote connection for a guest or bundle node
 1609 
 1610  /* Do not allow a guest resource to live on a Pacemaker Remote node,
 1611  * to avoid nesting remotes. However, allow bundles to run on remote
 1612  * nodes.
 1613  */
 1615  rsc_avoids_remote_nodes(rsc->container);
 1616  }
 1617 
 1618  /* If someone cleans up a guest or bundle node's container, we will
 1619  * likely schedule a (re-)probe of the container and recovery of the
 1620  * connection. Order the connection stop after the container probe,
 1621  * so that if we detect the container running, we will trigger a new
 1622  * transition and avoid the unnecessary recovery.
 1623  */
 1626 
 1627  /* A user can specify that a resource must start on a Pacemaker Remote
 1628  * node by explicitly configuring it with the container=NODENAME
 1629  * meta-attribute. This is of questionable merit, since location
 1630  * constraints can accomplish the same thing. But we support it, so here
 1631  * we check whether a resource (that is not itself a remote connection)
 1632  * has container set to a remote node or guest node resource.
 1633  */
 1634  } else if (rsc->container->is_remote_node) {
 1635  remote_rsc = rsc->container;
 1636  } else {
 1638  rsc->container);
 1639  }
 1640 
 1641  if (remote_rsc) {
 1642  /* Force the resource on the Pacemaker Remote node instead of
 1643  * colocating the resource with the container resource.
 1644  */
 1645  for (GList *item = allowed_nodes; item; item = item->next) {
 1646  pe_node_t *node = item->data;
 1647 
 1648  if (node->details->remote_rsc != remote_rsc) {
 1649  node->weight = -INFINITY;
 1650  }
 1651  }
 1652 
 1653  } else {
 1654  /* This resource is either a filler for a container that does NOT
 1655  * represent a Pacemaker Remote node, or a Pacemaker Remote
 1656  * connection resource for a guest node or bundle.
 1657  */
 1658  int score;
 1659 
 1660  crm_trace("Order and colocate %s relative to its container %s",
 1661  rsc->id, rsc->container->id);
 1662 
 1664  pcmk__op_key(rsc->container->id, RSC_START, 0),
 1665  NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
 1666  NULL,
 1668  data_set);
 1669 
 1670  pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
 1671  rsc->container,
 1672  pcmk__op_key(rsc->container->id, RSC_STOP, 0),
 1674 
 1676  score = 10000; /* Highly preferred but not essential */
 1677  } else {
 1678  score = INFINITY; /* Force them to run on the same host */
 1679  }
 1680  pcmk__new_colocation("resource-with-container", NULL, score, rsc,
 1681  rsc->container, NULL, NULL, true, data_set);
 1682  }
 1683  }
 1684 
 1685  if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
 1686  /* don't allow remote nodes to run stonith devices
 1687  * or remote connection resources.*/
 1688  rsc_avoids_remote_nodes(rsc);
 1689  }
 1690  g_list_free(allowed_nodes);
 1691 }
1692 
/*!
 * \internal
 * \brief Apply a colocation constraint from a primitive's point of view
 *
 * Validates that both sides of the constraint exist, then delegates to the
 * primary resource's rsc_colocation_rh() method.
 *
 * NOTE(review): the function-name and parameter lines (originally 1694 and
 * 1696) are missing from this extraction; the body references 'dependent',
 * 'primary', 'constraint', and 'data_set' parameters — confirm the exact
 * signature against upstream.
 */
 1693 void
 1695  pcmk__colocation_t *constraint,
 1697 {
 1698  if (dependent == NULL) {
 1699  pe_err("dependent was NULL for %s", constraint->id);
 1700  return;
 1701 
 1702  } else if (constraint->primary == NULL) {
 1703  pe_err("primary was NULL for %s", constraint->id);
 1704  return;
 1705  }
 1706 
 1707  pe_rsc_trace(dependent,
 1708  "Processing colocation constraint between %s and %s",
 1709  dependent->id, primary->id);
 1710 
 1711  primary->cmds->rsc_colocation_rh(dependent, primary, constraint, data_set);
 1712 }
1713 
/*!
 * \internal
 * \brief Apply a colocation's effect to the dependent resource
 *
 * Uses pcmk__colocation_affects() to decide whether the colocation should
 * adjust the dependent's priority, its node weights, or nothing.
 *
 * NOTE(review): the function-name/parameter lines (1715, 1717) and the
 * switch case labels (1730, 1733, 1736 — presumably the
 * pcmk__coloc_affects_* enumerators) are missing from this extraction;
 * confirm against upstream.
 */
 1714 void
 1716  pcmk__colocation_t *constraint,
 1718 {
 1719  enum pcmk__coloc_affects filter_results;
 1720 
 1721  CRM_ASSERT((dependent != NULL) && (primary != NULL));
 1722  filter_results = pcmk__colocation_affects(dependent, primary, constraint,
 1723  false);
 1724  pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
 1725  ((constraint->score > 0)? "Colocating" : "Anti-colocating"),
 1726  dependent->id, primary->id, constraint->id, constraint->score,
 1727  filter_results);
 1728 
 1729  switch (filter_results) {
 1731  pcmk__apply_coloc_to_priority(dependent, primary, constraint);
 1732  break;
 1734  pcmk__apply_coloc_to_weights(dependent, primary, constraint);
 1735  break;
 1737  default:
 1738  return;
 1739  }
 1740 }
1741 
/*!
 * \internal
 * \brief Return an action's flags unmodified (primitive implementation)
 *
 * NOTE(review): the function-name line (originally 1743) is missing from
 * this extraction — presumably "native_action_flags(pe_action_t *action,
 * pe_node_t *node)"; confirm against upstream.
 */
 1742 enum pe_action_flags
 1744 {
 1745  return action->flags;
 1746 }
1747 
1748 static inline bool
1749 is_primitive_action(pe_action_t *action)
1750 {
1751  return action && action->rsc && (action->rsc->variant == pe_native);
1752 }
1753 
/*!
 * \internal
 * \brief Clear a flag on an action, recording why, for tracing purposes
 *
 * If \p action currently has \p flag set, clear it; and if the reason action
 * belongs to a different resource, attach a human-readable reason string
 * derived from \p reason and \p flag (pe__action2reason()) to the action.
 * The reason is marked "overwrite" only for pe_action_migrate_runnable.
 * Standard do/while(0) multi-statement macro.
 */
 1762 #define clear_action_flag_because(action, flag, reason) do { \
 1763  if (pcmk_is_set((action)->flags, (flag))) { \
 1764  pe__clear_action_flags(action, flag); \
 1765  if ((action)->rsc != (reason)->rsc) { \
 1766  char *reason_text = pe__action2reason((reason), (flag)); \
 1767  pe_action_set_reason((action), reason_text, \
 1768  ((flag) == pe_action_migrate_runnable)); \
 1769  free(reason_text); \
 1770  } \
 1771  } \
 1772  } while (0)
1773 
/*!
 * \internal
 * \brief Update a 'first' stop and 'then' start of the same resource for
 *        restart-type ordering
 *
 * Determines whether the pair needs updating (then is required, or then is
 * an unrunnable action on the same resource), then propagates requiredness
 * and runnability between them via clear_action_flag_because().
 *
 * \param[in] first   'First' action in an ordering (a stop, per the checks)
 * \param[in] then    'Then' action in the ordering
 * \param[in] filter  Flags to limit the update to
 *
 * NOTE(review): several flag-clearing lines (1806, 1821, 1826, 1830-1831,
 * 1837 — the clear_action_flag_because() invocations) are missing from this
 * extraction; confirm against upstream.
 */
 1785 static void
 1786 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
 1787  enum pe_action_flags filter)
 1788 {
 1789  const char *reason = NULL;
 1790 
 1791  CRM_ASSERT(is_primitive_action(first));
 1792  CRM_ASSERT(is_primitive_action(then));
 1793 
 1794  // We need to update the action in two cases:
 1795 
 1796  // ... if 'then' is required
 1797  if (pcmk_is_set(filter, pe_action_optional)
 1798  && !pcmk_is_set(then->flags, pe_action_optional)) {
 1799  reason = "restart";
 1800  }
 1801 
 1802  /* ... if 'then' is unrunnable action on same resource (if a resource
 1803  * should restart but can't start, we still want to stop)
 1804  */
 1805  if (pcmk_is_set(filter, pe_action_runnable)
 1807  && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
 1808  && (first->rsc == then->rsc)) {
 1809  reason = "stop";
 1810  }
 1811 
 1812  if (reason == NULL) {
 1813  return;
 1814  }
 1815 
 1816  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
 1817  first->uuid, then->uuid, reason);
 1818 
 1819  // Make 'first' required if it is runnable
 1820  if (pcmk_is_set(first->flags, pe_action_runnable)) {
 1822  }
 1823 
 1824  // Make 'first' required if 'then' is required
 1825  if (!pcmk_is_set(then->flags, pe_action_optional)) {
 1827  }
 1828 
 1829  // Make 'first' unmigratable if 'then' is unmigratable
 1832  }
 1833 
 1834  // Make 'then' unrunnable if 'first' is required but unrunnable
 1835  if (!pcmk_is_set(first->flags, pe_action_optional)
 1836  && !pcmk_is_set(first->flags, pe_action_runnable)) {
 1838  }
 1839 }
1840 
/*!
 * \internal
 * \brief Update flags of a 'first'/'then' action pair for an ordering
 *
 * Applies the semantics of the ordering \p type (asymmetrical,
 * implies-first, restart, pseudo-left, etc.) to the two actions, clearing
 * optional/runnable/migratable flags as required, and returns graph-change
 * flags indicating which of the two actions was modified.
 *
 * NOTE(review): the function-name line (originally 1844, presumably
 * "native_update_actions(pe_action_t *first, pe_action_t *then, ...)") and
 * many flag-manipulation lines are missing from this extraction — confirm
 * against upstream before modifying.
 */
 1841 /* \param[in] flags Flags from action_flags_for_ordering()
 1842  */
 1843 enum pe_graph_flags
 1845  enum pe_action_flags flags, enum pe_action_flags filter,
 1847 {
 1848  enum pe_graph_flags changed = pe_graph_none;
 1849  enum pe_action_flags then_flags = then->flags;
 1850  enum pe_action_flags first_flags = first->flags;
 1851 
 1852  if (type & pe_order_asymmetrical) {
 1853  pe_resource_t *then_rsc = then->rsc;
 1854  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
 1855 
 1856  if (!then_rsc) {
 1857  /* ignore */
 1858  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
 1859  /* ignore... if 'then' is supposed to be stopped after 'first', but
 1860  * then is already stopped, there is nothing to be done when non-symmetrical. */
 1861  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
 1862  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
 1864  && then->node
 1865  && pcmk__list_of_1(then_rsc->running_on)
 1866  && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
 1867  /* Ignore. If 'then' is supposed to be started after 'first', but
 1868  * 'then' is already started, there is nothing to be done when
 1869  * asymmetrical -- unless the start is mandatory, which indicates
 1870  * the resource is restarting, and the ordering is still needed.
 1871  */
 1872  } else if (!(first->flags & pe_action_runnable)) {
 1873  /* prevent 'then' action from happening if 'first' is not runnable and
 1874  * 'then' has not yet occurred. */
 1877  } else {
 1878  /* ignore... then is allowed to start/stop if it wants to. */
 1879  }
 1880  }
 1881 
 1883  && !pcmk_is_set(then_flags, pe_action_optional)) {
 1884  // Then is required, and implies first should be, too
 1885 
 1886  if (pcmk_is_set(filter, pe_action_optional)
 1888  && pcmk_is_set(first_flags, pe_action_optional)) {
 1890  }
 1891 
 1895  }
 1896  }
 1897 
 1899  if ((filter & pe_action_optional) &&
 1900  ((then->flags & pe_action_optional) == FALSE) &&
 1901  (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
 1902 
 1904 
 1908  then);
 1909  }
 1910  }
 1911  }
 1912 
 1914  && pcmk_is_set(filter, pe_action_optional)) {
 1915 
 1916  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
 1917  ((then->flags & pe_action_runnable) == FALSE)) {
 1919  }
 1920 
 1921  if ((then->flags & pe_action_optional) == 0) {
 1923  }
 1924  }
 1925 
 1926  if ((type & pe_order_pseudo_left)
 1927  && pcmk_is_set(filter, pe_action_optional)) {
 1928 
 1929  if ((first->flags & pe_action_runnable) == FALSE) {
 1932  }
 1933  }
 1934 
 1936  && pcmk_is_set(filter, pe_action_runnable)
 1939 
 1942  }
 1943 
 1945  && pcmk_is_set(filter, pe_action_optional)
 1949 
 1951  }
 1952 
 1954  handle_restart_ordering(first, then, filter);
 1955  }
 1956 
       // Record which of the pair changed, for the transition-graph updater
 1957  if (then_flags != then->flags) {
 1958  pe__set_graph_flags(changed, first, pe_graph_updated_then);
 1959  pe_rsc_trace(then->rsc,
 1960  "%s on %s: flags are now %#.6x (was %#.6x) "
 1961  "because of 'first' %s (%#.6x)",
 1962  then->uuid,
 1963  then->node? then->node->details->uname : "no node",
 1964  then->flags, then_flags, first->uuid, first->flags);
 1965 
 1966  if(then->rsc && then->rsc->parent) {
 1967  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
 1969  }
 1970  }
 1971 
 1972  if (first_flags != first->flags) {
 1974  pe_rsc_trace(first->rsc,
 1975  "%s on %s: flags are now %#.6x (was %#.6x) "
 1976  "because of 'then' %s (%#.6x)",
 1977  first->uuid,
 1978  first->node? first->node->details->uname : "no node",
 1979  first->flags, first_flags, then->uuid, then->flags);
 1980  }
 1981 
 1982  return changed;
 1983 }
1984 
/*!
 * \internal
 * \brief Apply a location constraint to a primitive resource
 *
 * Thin wrapper around pcmk__apply_location().
 *
 * NOTE(review): the function-name line (originally 1986) is missing from
 * this extraction — presumably "native_rsc_location(pe_resource_t *rsc,
 * pe__location_t *constraint)"; confirm against upstream.
 */
 1985 void
 1987 {
 1988  pcmk__apply_location(constraint, rsc);
 1989 }
1990 
/*!
 * \internal
 * \brief Add a resource's actions to the transition graph, recursively
 *
 * Processes each of the resource's actions, then recurses into children via
 * each child's cmds->expand() method.
 *
 * NOTE(review): the function-name line (originally 1992, presumably
 * "native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)") and the
 * per-action graph call (originally 2003) are missing from this extraction;
 * confirm against upstream.
 */
 1991 void
 1993 {
 1994  GList *gIter = NULL;
 1995 
 1996  CRM_ASSERT(rsc);
 1997  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
 1998 
 1999  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
 2000  pe_action_t *action = (pe_action_t *) gIter->data;
 2001 
 2002  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
 2004  }
 2005 
       // Recurse into child resources (clones/groups)
 2006  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
 2007  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 2008 
 2009  child_rsc->cmds->expand(child_rsc, data_set);
 2010  }
 2011 }
2012 
/*!
 * \internal
 * \brief Check whether a node is where a multiply-active resource will stay
 *
 * True only when the required resource flags are all set (the flag list line
 * — originally 2030 — is missing from this extraction; confirm against
 * upstream), the resource will remain active, and the given node is the
 * node it was assigned to.
 *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to compare against the assignment (may be NULL)
 */
 2026 static bool
 2027 is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
 2028 {
 2029  return pcmk_all_flags_set(rsc->flags,
 2031  && (rsc->next_role > RSC_ROLE_STOPPED)
 2032  && (rsc->allocated_to != NULL) && (node != NULL)
 2033  && (rsc->allocated_to->details == node->details);
 2034 }
2035 
/*!
 * \internal
 * \brief Schedule stop actions for a resource on every node it is active on
 *
 * Skips the expected node of a multiply-active stop_unexpected resource and
 * the target of an in-progress partial migration; forces the stop when the
 * migration target has changed. Sets human-readable reasons on the stops and
 * orders stops before unfencing where needed.
 *
 * \param[in] rsc       Resource to stop
 * \param[in] next      Node the resource is moving to (used in trace output)
 * \param[in] optional  Whether the stops may be optional
 * \param[in] data_set  Cluster working set
 *
 * \return TRUE (always)
 *
 * NOTE(review): lines 2083, 2092, 2095, and 2099 are missing from this
 * extraction (condition continuations and flag operations) — confirm
 * against upstream before modifying.
 */
 2036 gboolean
 2037 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
 2038 {
 2039  GList *gIter = NULL;
 2040 
 2041  CRM_ASSERT(rsc);
 2042 
 2043  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
 2044  pe_node_t *current = (pe_node_t *) gIter->data;
 2045  pe_action_t *stop;
 2046 
 2047  if (is_expected_node(rsc, current)) {
 2048  /* We are scheduling restart actions for a multiply active resource
 2049  * with multiple-active=stop_unexpected, and this is where it should
 2050  * not be stopped.
 2051  */
 2052  pe_rsc_trace(rsc,
 2053  "Skipping stop of multiply active resource %s "
 2054  "on expected node %s",
 2055  rsc->id, current->details->uname);
 2056  continue;
 2057  }
 2058 
 2059  if (rsc->partial_migration_target) {
 2060  if (rsc->partial_migration_target->details == current->details) {
 2061  pe_rsc_trace(rsc,
 2062  "Skipping stop of %s on %s "
 2063  "because migration to %s in progress",
 2064  rsc->id, current->details->uname,
 2065  next->details->uname);
 2066  continue;
 2067  } else {
 2068  pe_rsc_trace(rsc,
 2069  "Forcing stop of %s on %s "
 2070  "because migration target changed",
 2071  rsc->id, current->details->uname);
 2072  optional = FALSE;
 2073  }
 2074  }
 2075 
 2076  pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
 2077  rsc->id, current->details->uname);
 2078  stop = stop_action(rsc, current, optional);
 2079 
       // Attach a reason so status output explains why the stop was scheduled
 2080  if(rsc->allocated_to == NULL) {
 2081  pe_action_set_reason(stop, "node availability", TRUE);
 2082  } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
 2084  /* We are stopping a multiply active resource on a node that is
 2085  * not its expected node, and we are still scheduling restart
 2086  * actions, so the stop is for being multiply active.
 2087  */
 2088  pe_action_set_reason(stop, "being multiply active", TRUE);
 2089  }
 2090 
 2091  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 2093  }
 2094 
 2096  DeleteRsc(rsc, current, optional, data_set);
 2097  }
 2098 
 2100  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
 2101 
 2102  order_actions(stop, unfence, pe_order_implies_first);
 2103  if (!pcmk__node_unfenced(current)) {
 2104  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
 2105  }
 2106  }
 2107  }
 2108 
 2109  return TRUE;
 2110 }
2111 
/* Schedule a start of @rsc on node @next.
 *
 * \param rsc       Resource to start (asserted non-NULL)
 * \param next      Node to start the resource on (may be NULL per the
 *                  trace formatting, though start_action() likely requires
 *                  a node — TODO confirm)
 * \param optional  Whether the start is optional (affects only flag
 *                  handling; the action itself is created with TRUE)
 * \param data_set  Cluster working set
 *
 * \return TRUE (always)
 *
 * NOTE(review): doxygen listing; original lines 2125, 2128 and 2139 were
 * dropped by the extractor, so two statement bodies below are incomplete.
 */
2112 gboolean
2113 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2114 {
2115  pe_action_t *start = NULL;
2116 
2117  CRM_ASSERT(rsc);
2118 
2119  pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (weight=%d)",
2120  (optional? "optional" : "required"), rsc->id,
2121  ((next == NULL)? "N/A" : next->details->uname),
2122  ((next == NULL)? 0 : next->weight));
2123  start = start_action(rsc, next, TRUE);
2124 
/* NOTE(review): dropped line 2125 — presumably orders the start relative to
 * unfencing (pcmk__order_vs_unfence); confirm upstream.
 */
2126 
/* NOTE(review): body on dropped line 2128 — presumably clears
 * pe_action_optional on the start; confirm upstream.
 */
2127  if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
2129  }
2130 
2131  if (is_expected_node(rsc, next)) {
2132  /* This could be a problem if the start becomes necessary for other
2133  * reasons later.
2134  */
/* NOTE(review): "resouce" typo below is in a runtime trace string; fixing it
 * would change log output, so it is only flagged here.
 */
2135  pe_rsc_trace(rsc,
2136  "Start of multiply active resouce %s "
2137  "on expected node %s will be a pseudo-action",
2138  rsc->id, next->details->uname);
/* NOTE(review): dropped line 2139 — presumably sets pe_action_pseudo on the
 * start; confirm upstream.
 */
2140  }
2141 
2142  return TRUE;
2143 }
2144 
/* Schedule a promote of @rsc on node @next, if all of its scheduled starts
 * on that node are runnable; otherwise cancel (mark unrunnable) any
 * scheduled promote actions there.
 *
 * \param rsc       Resource to promote (asserted non-NULL)
 * \param next      Node to promote on (checked non-NULL; returns FALSE)
 * \param optional  Whether the promote should be optional
 * \param data_set  Cluster working set
 *
 * \return TRUE on any scheduling outcome; FALSE only if @next is NULL
 *
 * NOTE(review): doxygen listing; original lines 2179 and 2192 were dropped
 * by the extractor, so two statement bodies below are incomplete.
 */
2145 gboolean
2146 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2147 {
2148  GList *gIter = NULL;
2149  gboolean runnable = TRUE;
2150  GList *action_list = NULL;
2151 
2152  CRM_ASSERT(rsc);
2153  CRM_CHECK(next != NULL, return FALSE);
2154 
2155  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2156 
/* A promote is only possible if every start scheduled on the node can run */
2157  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2158 
2159  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2160  pe_action_t *start = (pe_action_t *) gIter->data;
2161 
2162  if (!pcmk_is_set(start->flags, pe_action_runnable)) {
2163  runnable = FALSE;
2164  }
2165  }
2166  g_list_free(action_list);
2167 
2168  if (runnable) {
2169  pe_action_t *promote = promote_action(rsc, next, optional);
2170 
2171  if (is_expected_node(rsc, next)) {
2172  /* This could be a problem if the promote becomes necessary for
2173  * other reasons later.
2174  */
2175  pe_rsc_trace(rsc,
2176  "Promotion of multiply active resouce %s "
2177  "on expected node %s will be a pseudo-action",
2178  rsc->id, next->details->uname);
/* NOTE(review): dropped line 2179 — presumably sets pe_action_pseudo on
 * the promote; confirm upstream. ("resouce" typo above is in a runtime
 * string; flagged only.)
 */
2180  }
2181 
2182  return TRUE;
2183  }
2184 
2185  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2186 
/* Starts are not all runnable: mark any scheduled promotes unrunnable */
2187  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2188 
2189  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2190  pe_action_t *promote = (pe_action_t *) gIter->data;
2191 
/* NOTE(review): dropped line 2192 — presumably clears pe_action_runnable
 * on each promote; confirm upstream.
 */
2193  }
2194 
2195  g_list_free(action_list);
2196  return TRUE;
2197 }
2198 
2199 gboolean
2200 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2201 {
2202  GList *gIter = NULL;
2203 
2204  CRM_ASSERT(rsc);
2205 
2206  if (is_expected_node(rsc, next)) {
2207  pe_rsc_trace(rsc,
2208  "Skipping demote of multiply active resource %s "
2209  "on expected node %s",
2210  rsc->id, next->details->uname);
2211  return TRUE;
2212  }
2213 
2214  pe_rsc_trace(rsc, "%s", rsc->id);
2215 
2216  /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
2217  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2218  pe_node_t *current = (pe_node_t *) gIter->data;
2219 
2220  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2221  demote_action(rsc, current, optional);
2222  }
2223  return TRUE;
2224 }
2225 
2226 gboolean
2227 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2228 {
2229  CRM_ASSERT(rsc);
2230  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2231  CRM_CHECK(FALSE, return FALSE);
2232  return FALSE;
2233 }
2234 
2235 gboolean
2236 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2237 {
2238  CRM_ASSERT(rsc);
2239  pe_rsc_trace(rsc, "%s", rsc->id);
2240  return FALSE;
2241 }
2242 
/* Schedule removal of @rsc's definition/history from @node, unless the
 * resource is failed or the node is unusable.
 *
 * \param rsc       Resource to remove
 * \param node      Node to remove it from (NULL is rejected)
 * \param optional  Whether the delete action should be optional
 * \param data_set  Cluster working set
 *
 * \return TRUE if a delete was scheduled, else FALSE
 *
 * NOTE(review): the failed-resource check below dereferences
 * node->details->uname BEFORE the node == NULL guard — a latent NULL
 * dereference if called with a NULL node for a failed resource. Not fixed
 * here because the surrounding ordering calls are truncated in this
 * listing (dropped lines 2264-2265 and 2268-2269, presumably
 * pcmk__order_resource_actions() stop-before-delete and
 * delete-before-start orderings; confirm upstream).
 */
2243 gboolean
2244 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2245 {
2246  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2247  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2248  return FALSE;
2249 
2250  } else if (node == NULL) {
2251  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2252  return FALSE;
2253 
2254  } else if (node->details->unclean || node->details->online == FALSE) {
2255  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2256  node->details->uname);
2257  return FALSE;
2258  }
2259 
2260  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2261 
2262  delete_action(rsc, node, optional);
2263 
/* NOTE(review): first arguments of this ordering call were on dropped
 * lines 2264-2265.
 */
2266  data_set);
2267 
/* NOTE(review): first arguments of this ordering call were on dropped
 * lines 2268-2269.
 */
2270  data_set);
2271 
2272  return TRUE;
2273 }
2274 
/* native_create_probe: schedule a monitor (probe) of @rsc on @node to detect
 * whether it is already active there, unless probing is unnecessary or
 * impossible (remote/guest node restrictions, orphans, already-known status,
 * discovery settings, or a stopped/stopping container).
 *
 * NOTE(review): doxygen listing; the function-name line (original 2276) and
 * many hyperlinked lines were dropped by the extractor — per the index this
 * is native_create_probe(rsc, node, complete, force, data_set). Several
 * statements below are visibly incomplete (dropped lines noted inline).
 *
 * \return TRUE if a probe was created, else FALSE
 */
2275 gboolean
2277  gboolean force, pe_working_set_t * data_set)
2278 {
2280  char *key = NULL;
2281  pe_action_t *probe = NULL;
2282  pe_node_t *running = NULL;
2283  pe_node_t *allowed = NULL;
2284  pe_resource_t *top = uber_parent(rsc);
2285 
/* Expected-rc strings are computed once and reused across all probes */
2286  static const char *rc_promoted = NULL;
2287  static const char *rc_inactive = NULL;
2288 
2289  if (rc_inactive == NULL) {
2290  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
2291  rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
2292  }
2293 
2294  CRM_CHECK(node != NULL, return FALSE);
2295  if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
2296  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2297  return FALSE;
2298  }
2299 
/* Pacemaker Remote / guest nodes cannot probe certain resource types */
2300  if (pe__is_guest_or_remote_node(node)) {
2301  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2302 
2303  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
2304  pe_rsc_trace(rsc,
2305  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2306  rsc->id, node->details->id);
2307  return FALSE;
/* NOTE(review): second half of this condition was on dropped line 2309 —
 * presumably a pe__resource_contains_guest_node() check; confirm upstream.
 */
2308  } else if (pe__is_guest_node(node)
2310  pe_rsc_trace(rsc,
2311  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2312  rsc->id, node->details->id);
2313  return FALSE;
2314  } else if (rsc->is_remote_node) {
2315  pe_rsc_trace(rsc,
2316  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2317  rsc->id, node->details->id);
2318  return FALSE;
2319  }
2320  }
2321 
/* Collective resources delegate probing to each child */
2322  if (rsc->children) {
2323  GList *gIter = NULL;
2324  gboolean any_created = FALSE;
2325 
2326  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2327  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2328 
2329  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2330  || any_created;
2331  }
2332 
2333  return any_created;
2334 
2335  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2336  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2337  return FALSE;
2338  }
2339 
2340  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2341  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2342  return FALSE;
2343  }
2344 
2345  // Check whether resource is already known on node
2346  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2347  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2348  return FALSE;
2349  }
2350 
2351  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2352 
2353  if (rsc->exclusive_discover || top->exclusive_discover) {
2354  if (allowed == NULL) {
2355  /* exclusive discover is enabled and this node is not in the allowed list. */
2356  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2357  return FALSE;
2358  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2359  /* exclusive discover is enabled and this node is not marked
2360  * as a node this resource should be discovered on */
2361  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2362  return FALSE;
2363  }
2364  }
2365 
2366  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2367  /* If this node was allowed to host this resource it would
2368  * have been explicitly added to the 'allowed_nodes' list.
2369  * However it wasn't and the node has discovery disabled, so
2370  * no need to probe for this resource.
2371  */
2372  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2373  return FALSE;
2374  }
2375 
2376  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2377  /* this resource is marked as not needing to be discovered on this node */
2378  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2379  return FALSE;
2380  }
2381 
/* On a guest node, the container's state decides whether probing is needed */
2382  if (pe__is_guest_node(node)) {
2383  pe_resource_t *remote = node->details->remote_rsc->container;
2384 
2385  if(remote->role == RSC_ROLE_STOPPED) {
2386  /* If the container is stopped, then we know anything that
2387  * might have been inside it is also stopped and there is
2388  * no need to probe.
2389  *
2390  * If we don't know the container's state on the target
2391  * either:
2392  *
2393  * - the container is running, the transition will abort
2394  * and we'll end up in a different case next time, or
2395  *
2396  * - the container is stopped
2397  *
2398  * Either way there is no need to probe.
2399  *
2400  */
2401  if(remote->allocated_to
2402  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2403  /* For safety, we order the 'rsc' start after 'remote'
2404  * has been probed.
2405  *
2406  * Using 'top' helps for groups, but we may need to
2407  * follow the start's ordering chain backwards.
2408  */
/* NOTE(review): trailing ordering-type/data_set arguments were on dropped
 * line 2413.
 */
2409  pcmk__new_ordering(remote,
2410  pcmk__op_key(remote->id, RSC_STATUS, 0),
2411  NULL, top,
2412  pcmk__op_key(top->id, RSC_START, 0), NULL,
2414  }
2415  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2416  rsc->id, node->details->id, remote->id);
2417  return FALSE;
2418 
2419  /* Here we really we want to check if remote->stop is required,
2420  * but that information doesn't exist yet
2421  */
2422  } else if(node->details->remote_requires_reset
2423  || node->details->unclean
2424  || pcmk_is_set(remote->flags, pe_rsc_failed)
2425  || remote->next_role == RSC_ROLE_STOPPED
2426  || (remote->allocated_to
2427  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2428  ) {
2429  /* The container is stopping or restarting, don't start
2430  * 'rsc' until 'remote' stops as this also implies that
2431  * 'rsc' is stopped - avoiding the need to probe
2432  */
2433  pcmk__new_ordering(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
2434  NULL, top, pcmk__op_key(top->id, RSC_START, 0),
2435  NULL, pe_order_optional, data_set);
2436  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2437  rsc->id, node->details->id, remote->id);
2438  return FALSE;
2439 /* } else {
2440  * The container is running so there is no problem probing it
2441  */
2442  }
2443  }
2444 
/* All skip conditions passed: create the probe action itself */
2445  key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
2446  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
/* NOTE(review): dropped lines 2447 and 2449 — presumably clear
 * pe_action_optional on the probe and compute ordering flags; confirm.
 */
2448 
2450 
2451  /*
2452  * We need to know if it's running_on (not just known_on) this node
2453  * to correctly determine the target rc.
2454  */
2455  running = pe_find_node_id(rsc->running_on, node->details->id);
2456  if (running == NULL) {
2457  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2458 
2459  } else if (rsc->role == RSC_ROLE_PROMOTED) {
2460  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
2461  }
2462 
/* NOTE(review): trailing arguments of this crm_debug() were on dropped
 * line 2464.
 */
2463  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
2465 
2466  if (pcmk__is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2467  top = rsc;
2468  } else {
2469  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2470  }
2471 
2472  if (!pcmk_is_set(probe->flags, pe_action_runnable)
2473  && (rsc->running_on == NULL)) {
2474  /* Prevent the start from occurring if rsc isn't active, but
2475  * don't cause it to stop if it was active already
2476  */
/* NOTE(review): dropped line 2477 — presumably sets an ordering flag
 * (e.g. pe_order_runnable_left) in 'flags'; confirm upstream.
 */
2478  }
2479 
2480  pcmk__new_ordering(rsc, NULL, probe, top,
2481  pcmk__op_key(top->id, RSC_START, 0), NULL, flags,
2482  data_set);
2483 
2484  // Order the probe before any agent reload
/* NOTE(review): trailing ordering-type/data_set arguments were on dropped
 * line 2486.
 */
2485  pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
2487 
2488  return TRUE;
2489 }
2490 
/* Append primitive-specific meta-attributes (clone instance number, remote
 * node name, and any enclosing container id) to an XML element as CRM_META-
 * prefixed attributes.
 *
 * \param rsc  Resource whose meta-attributes to append
 * \param xml  XML element to add attributes to
 *
 * NOTE(review): doxygen listing; dropped lines 2495 (presumably the
 * declaration of 'parent'), 2500 and 2509 (presumably
 * name = crm_meta_name(...) calls that produce the attribute names freed
 * below); confirm upstream.
 */
2491 void
2492 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
2493 {
2494  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
2496 
2497  if (value) {
2498  char *name = NULL;
2499 
2501  crm_xml_add(xml, name, value);
2502  free(name);
2503  }
2504 
2505  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
2506  if (value) {
2507  char *name = NULL;
2508 
2510  crm_xml_add(xml, name, value);
2511  free(name);
2512  }
2513 
/* Walk up the parent chain so bundled/containerized ancestors are recorded */
2514  for (parent = rsc; parent != NULL; parent = parent->parent) {
2515  if (parent->container) {
2516  crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
2517  }
2518  }
2519 }
2520 
2521 // Primitive implementation of resource_alloc_functions_t:add_utilization()
/* Primitive implementation of resource_alloc_functions_t:add_utilization():
 * release this primitive's utilization into the running total for @orig_rsc,
 * but only if the primitive has not yet been allocated (pe_rsc_provisional).
 *
 * NOTE(review): doxygen listing; the first signature line (original 2523,
 * with the function name and first parameters) was dropped — per the index
 * this is pcmk__primitive_add_utilization(rsc, orig_rsc, all_rscs,
 * utilization). 'all_rscs' is unused in the visible body.
 */
2522 void
2524  GList *all_rscs, GHashTable *utilization)
2525 {
2526  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
2527  return;
2528  }
2529 
2530  pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
2531  orig_rsc->id, rsc->id);
2532  pcmk__release_node_capacity(utilization, rsc);
2533 }
2534 
2544 static time_t
2545 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
2546 {
2547  const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
2548  time_t result = 0;
2549 
2550  if (shutdown != NULL) {
2551  long long result_ll;
2552 
2553  if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
2554  result = (time_t) result_ll;
2555  }
2556  }
2557  return (result == 0)? get_effective_time(data_set) : result;
2558 }
2559 
/* Primitive implementation of resource_alloc_functions_t:shutdown_lock():
 * if shutdown-lock is in effect, keep a resource that was active on a
 * cleanly shutting-down node "locked" to that node (banned from all
 * others) until the node returns or the lock expires.
 *
 * NOTE(review): doxygen listing; the signature line (original 2562) was
 * dropped — per the index this is pcmk__primitive_shutdown_lock(rsc).
 * Line 2625 (the ban call inside the final loop, presumably
 * resource_location(rsc, node, -CRM_SCORE_INFINITY, ...)) was also
 * dropped; confirm upstream.
 */
2560 // Primitive implementation of resource_alloc_functions_t:shutdown_lock()
2561 void
2563 {
2564  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2565 
2566  // Fence devices and remote connections can't be locked
2567  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
2568  || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
2569  return;
2570  }
2571 
2572  if (rsc->lock_node != NULL) {
2573  // The lock was obtained from resource history
2574 
2575  if (rsc->running_on != NULL) {
2576  /* The resource was started elsewhere even though it is now
2577  * considered locked. This shouldn't be possible, but as a
2578  * failsafe, we don't want to disturb the resource now.
2579  */
2580  pe_rsc_info(rsc,
2581  "Cancelling shutdown lock because %s is already active",
2582  rsc->id);
2583  pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
2584  rsc->lock_node = NULL;
2585  rsc->lock_time = 0;
2586  }
2587 
2588  // Only a resource active on exactly one node can be locked
2589  } else if (pcmk__list_of_1(rsc->running_on)) {
2590  pe_node_t *node = rsc->running_on->data;
2591 
2592  if (node->details->shutdown) {
2593  if (node->details->unclean) {
2594  pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
2595  rsc->id, node->details->uname);
2596  } else {
2597  rsc->lock_node = node;
2598  rsc->lock_time = shutdown_time(node, rsc->cluster);
2599  }
2600  }
2601  }
2602 
2603  if (rsc->lock_node == NULL) {
2604  // No lock needed
2605  return;
2606  }
2607 
/* shutdown_lock > 0 means locks expire after that many seconds */
2608  if (rsc->cluster->shutdown_lock > 0) {
2609  time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
2610 
2611  pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
2612  rsc->id, rsc->lock_node->details->uname,
2613  (long long) lock_expiration);
/* Re-run the scheduler just after expiration so the lock gets lifted */
2614  pe__update_recheck_time(++lock_expiration, rsc->cluster);
2615  } else {
2616  pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
2617  rsc->id, rsc->lock_node->details->uname);
2618  }
2619 
2620  // If resource is locked to one node, ban it from all other nodes
2621  for (GList *item = rsc->cluster->nodes; item != NULL; item = item->next) {
2622  pe_node_t *node = item->data;
2623 
/* NOTE(review): the ban call's first line was on dropped line 2625. */
2624  if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
2626  XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
2627  }
2628  }
2629 }
Services API.
#define LOG_TRACE
Definition: logging.h:37
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:226
pe_node_t * pe_find_node(GList *node_list, const char *uname)
Definition: status.c:443
enum rsc_start_requirement needs
Definition: pe_types.h:434
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:156
#define RSC_STOP
Definition: crm.h:204
void native_rsc_colocation_rh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
#define crm_notice(fmt, args...)
Definition: logging.h:360
GHashTable * known_on
Definition: pe_types.h:381
xmlNode * ops_xml
Definition: pe_types.h:340
bool pe__is_guest_or_remote_node(const pe_node_t *node)
Definition: remote.c:41
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:21
gboolean unseen
Definition: pe_types.h:225
#define XML_CONFIG_ATTR_SHUTDOWN_LOCK
Definition: msg_xml.h:393
#define INFINITY
Definition: crm.h:99
GList * rsc_cons
Definition: pe_types.h:371
Service active and promoted.
Definition: results.h:170
#define pe__set_action_flags(action, flags_to_set)
Definition: internal.h:61
#define pe__show_node_weights(level, rsc, text, nodes, data_set)
Definition: internal.h:355
pcmk__coloc_affects
#define promote_action(rsc, node, optional)
Definition: internal.h:395
G_GNUC_INTERNAL bool pcmk__node_unfenced(pe_node_t *node)
#define stop_action(rsc, node, optional)
Definition: internal.h:379
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:1009
pe_resource_t * container
Definition: pe_types.h:394
const char * name
Definition: cib.c:24
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:931
pe_node_t * partial_migration_source
Definition: pe_types.h:379
G_GNUC_INTERNAL bool pcmk__node_available(const pe_node_t *node)
void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
Definition: utils.c:2393
enum rsc_role_e role
Definition: pe_types.h:384
G_GNUC_INTERNAL bool pcmk__any_node_available(GHashTable *nodes)
gboolean PromoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
GList * children
Definition: pe_types.h:391
resource_alloc_functions_t * cmds
Definition: pe_types.h:348
#define pe_rsc_stop
Definition: pe_types.h:271
#define delete_action(rsc, node, optional)
Definition: internal.h:369
#define pe_flag_remove_after_stop
Definition: pe_types.h:111
G_GNUC_INTERNAL void pcmk__new_ordering(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
pe_resource_t * rsc
Definition: pe_types.h:424
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:231
enum rsc_role_e next_role
Definition: pe_types.h:385
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:366
#define reload_key(rsc)
Definition: internal.h:383
#define pcmk__config_err(fmt...)
gboolean StartRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * remote_rsc
Definition: pe_types.h:237
GHashTable * meta
Definition: pe_types.h:387
#define pe_rsc_unique
Definition: pe_types.h:262
#define pe_rsc_restarting
Definition: pe_types.h:269
char * score2char_stack(int score, char *buf, size_t len)
Convert an integer score to a string, using a provided buffer.
Definition: scores.c:85
Service safely stopped.
Definition: results.h:169
resource_object_functions_t * fns
Definition: pe_types.h:347
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear)
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:297
#define RSC_DELETE
Definition: crm.h:195
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:323
pe_resource_t * dependent
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
time_t get_effective_time(pe_working_set_t *data_set)
Definition: utils.c:1853
void resource_location(pe_resource_t *rsc, pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1692
GList * rsc_cons_lhs
Definition: pe_types.h:370
enum crm_ais_msg_types type
Definition: cpg.c:48
#define demote_key(rsc)
Definition: internal.h:404
pe_node_t * partial_migration_target
Definition: pe_types.h:378
#define RSC_START
Definition: crm.h:201
pe_node_t *(* allocate)(pe_resource_t *, pe_node_t *, pe_working_set_t *)
#define pe_rsc_stop_unexpected
Definition: pe_types.h:286
gboolean RoleError(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_node_t * allocated_to
Definition: pe_types.h:377
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:273
#define pe_flag_have_quorum
Definition: pe_types.h:95
#define CRM_SCORE_INFINITY
Definition: crm.h:85
G_GNUC_INTERNAL void pcmk__apply_coloc_to_priority(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint)
#define pe_proc_err(fmt...)
Definition: internal.h:34
gboolean remote_requires_reset
Definition: pe_types.h:231
#define RSC_MIGRATE
Definition: crm.h:198
char * crm_meta_name(const char *field)
Definition: utils.c:439
const char * action
Definition: pcmk_fence.c:29
G_GNUC_INTERNAL void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *dependent, pe_resource_t *primary, const char *dependent_role, const char *primary_role, bool influence, pe_working_set_t *data_set)
#define pe__set_resource_flags(resource, flags_to_set)
Definition: internal.h:49
GList * nodes
Definition: pe_types.h:164
G_GNUC_INTERNAL void pcmk__create_utilization_constraints(pe_resource_t *rsc, GList *allowed_nodes)
#define clear_action_flag_because(action, flag, reason)
#define pe_flag_stop_everything
Definition: pe_types.h:106
G_GNUC_INTERNAL pe_action_t * pcmk__new_cancel_action(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node)
#define demote_action(rsc, node, optional)
Definition: internal.h:405
#define pe_rsc_provisional
Definition: pe_types.h:266
const char * role2text(enum rsc_role_e role)
Definition: common.c:454
int weight
Definition: pe_types.h:249
#define pe_rsc_merging
Definition: pe_types.h:268
int pcmk__scan_ll(const char *text, long long *result, long long default_value)
Definition: strings.c:97
GList * dangling_migrations
Definition: pe_types.h:392
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2349
gboolean(* create_probe)(pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *)
gboolean DemoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_action_flags
Definition: pe_types.h:305
#define pe_rsc_allow_migrate
Definition: pe_types.h:287
#define pe_rsc_failed
Definition: pe_types.h:276
#define crm_debug(fmt, args...)
Definition: logging.h:363
void native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean(* rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
G_GNUC_INTERNAL GList * pcmk__sort_nodes(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:913
#define XML_CIB_ATTR_SHUTDOWN
Definition: msg_xml.h:289
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:66
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:247
void native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:529
bool pe__is_guest_node(const pe_node_t *node)
Definition: remote.c:33
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
pe_resource_t * primary
#define stop_key(rsc)
Definition: internal.h:378
void native_append_meta(pe_resource_t *rsc, xmlNode *xml)
#define pe_rsc_start_pending
Definition: pe_types.h:278
char * task
Definition: pe_types.h:428
#define pe__clear_action_flags(action, flags_to_clear)
Definition: internal.h:70
#define CRM_ATTR_UNAME
Definition: crm.h:114
#define crm_trace(fmt, args...)
Definition: logging.h:364
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:167
#define promote_key(rsc)
Definition: internal.h:394
GHashTable * meta
Definition: pe_types.h:438
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition: util.h:122
struct pe_node_shared_s * details
Definition: pe_types.h:252
enum rsc_recovery_type recovery_type
Definition: pe_types.h:350
pe_node_t * node
Definition: pe_types.h:425
#define pe_rsc_needs_fencing
Definition: pe_types.h:294
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1906
unsigned long long flags
Definition: pe_types.h:362
const char * uname
Definition: pe_types.h:216
#define pe_rsc_promotable
Definition: pe_types.h:264
void(* expand)(pe_resource_t *, pe_working_set_t *)
pe_working_set_t * data_set
#define pcmk__order_resource_actions(lh_rsc, lh_task, rh_rsc, rh_task, flags, data_set)
xmlNode * find_rsc_op_entry(pe_resource_t *rsc, const char *key)
Definition: utils.c:1440
bool pcmk__is_daemon
Definition: logging.c:47
#define pe_flag_stonith_enabled
Definition: pe_types.h:99
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:560
#define pe__set_graph_flags(graph_flags, gr_action, flags_to_set)
Definition: internal.h:127
GList * actions
Definition: pe_types.h:373
pe_graph_flags
Definition: pe_types.h:297
GHashTable * utilization
Definition: pe_types.h:389
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:239
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:329
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:250
char * uuid
Definition: pe_types.h:429
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
enum pe_action_flags(* action_flags)(pe_action_t *, pe_node_t *)
#define pe_rsc_allocating
Definition: pe_types.h:267
enum rsc_role_e text2role(const char *role)
Definition: common.c:483
enum pe_obj_types variant
Definition: pe_types.h:345
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
const char * placement_strategy
Definition: pe_types.h:151
int rsc_discover_mode
Definition: pe_types.h:253
const char * id
Definition: pe_types.h:215
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition: operations.c:45
G_GNUC_INTERNAL void pcmk__add_action_to_graph(pe_action_t *action, pe_working_set_t *data_set)
#define pe_rsc_fence_device
Definition: pe_types.h:263
pe_node_t * pe_find_node_id(GList *node_list, const char *id)
Definition: status.c:427
void native_rsc_colocation_lh(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:45
enum pe_action_flags native_action_flags(pe_action_t *action, pe_node_t *node)
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:54
G_GNUC_INTERNAL void pcmk__update_action_for_orderings(pe_action_t *action, pe_working_set_t *data_set)
gboolean is_remote_node
Definition: pe_types.h:365
pcmk__action_result_t result
Definition: pcmk_fence.c:34
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:504
G_GNUC_INTERNAL bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force)
#define start_action(rsc, node, optional)
Definition: internal.h:385
G_GNUC_INTERNAL void pcmk__release_node_capacity(GHashTable *current_utilization, pe_resource_t *rsc)
G_GNUC_INTERNAL GHashTable * pcmk__copy_node_table(GHashTable *nodes)
#define CRM_META
Definition: crm.h:78
#define crm_err(fmt, args...)
Definition: logging.h:358
G_GNUC_INTERNAL void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action, enum pe_ordering order, pe_working_set_t *data_set)
void pcmk__primitive_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc, GList *all_rscs, GHashTable *utilization)
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:215
char guint crm_parse_interval_spec(const char *input)
Parse milliseconds from a Pacemaker interval specification.
Definition: utils.c:242
G_GNUC_INTERNAL void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer, pe_working_set_t *data_set)
#define RSC_PROMOTE
Definition: crm.h:207
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
Definition: complex.c:1126
gboolean StopRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
void pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set)
#define pe_rsc_needs_unfencing
Definition: pe_types.h:295
void native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean shutdown
Definition: pe_types.h:226
#define pe__clear_resource_flags(resource, flags_to_clear)
Definition: internal.h:55
#define crm_str(x)
Definition: logging.h:384
rsc_role_e
Possible roles that a resource can be in.
Definition: common.h:92
GList * running_on
Definition: pe_types.h:380
#define pe_rsc_block
Definition: pe_types.h:258
enum pe_action_flags flags
Definition: pe_types.h:433
gboolean maintenance
Definition: pe_types.h:229
#define pe_rsc_maintenance
Definition: pe_types.h:290
pe_working_set_t * cluster
Definition: pe_types.h:342
const char * node_attribute
int pcmk__add_scores(int score1, int score2)
Definition: scores.c:140
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:264
gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set)
bool pe__resource_is_remote_conn(const pe_resource_t *rsc, const pe_working_set_t *data_set)
Definition: remote.c:17
#define pe_flag_have_stonith_resource
Definition: pe_types.h:100
#define RSC_ROLE_MAX
Definition: common.h:108
G_GNUC_INTERNAL enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint, bool preview)
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1605
#define pe_flag_enable_unfencing
Definition: pe_types.h:101
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:22
G_GNUC_INTERNAL void pcmk__apply_coloc_to_weights(pe_resource_t *dependent, pe_resource_t *primary, pcmk__colocation_t *constraint)
#define pe__set_order_flags(order_flags, flags_to_set)
Definition: internal.h:113
#define start_key(rsc)
Definition: internal.h:384
#define ID(x)
Definition: msg_xml.h:460
unsigned long long flags
Definition: pe_types.h:153
#define pe_err(fmt...)
Definition: internal.h:24
const char * parent
Definition: cib.c:25
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1644
pe_action_t * pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
Definition: utils.c:2453
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:328
gboolean native_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set)
#define CRM_OP_LRM_DELETE
Definition: crm.h:151
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:217
gboolean unclean
Definition: pe_types.h:224
#define pe_flag_show_scores
Definition: pe_types.h:134
#define crm_info(fmt, args...)
Definition: logging.h:361
#define pe_rsc_managed
Definition: pe_types.h:257
#define pe_rsc_orphan
Definition: pe_types.h:256
pe_ordering
Definition: pe_types.h:497
gboolean online
Definition: pe_types.h:220
G_GNUC_INTERNAL void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc)
uint64_t flags
Definition: remote.c:149
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:408
G_GNUC_INTERNAL bool pcmk__is_unfence_device(const pe_resource_t *rsc, const pe_working_set_t *data_set)
pe_resource_t * parent
Definition: pe_types.h:343
pe_action_t * pe_fence_op(pe_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t *data_set)
Definition: utils.c:2136
GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *primary_id, GHashTable *nodes, const char *attr, float factor, uint32_t flags)
GHashTable *(* merge_weights)(pe_resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
#define RSC_DEMOTE
Definition: crm.h:209
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:20
gboolean NullOp(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:272
char * id
Definition: pe_types.h:336
pe_action_t * custom_action(pe_resource_t *rsc, char *key, const char *task, pe_node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Create or update an action object.
Definition: utils.c:730
GHashTable * allowed_nodes
Definition: pe_types.h:382
#define RSC_MIGRATED
Definition: crm.h:199
#define pe_flag_startup_probes
Definition: pe_types.h:116