pacemaker  2.1.1-52dc28db4
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2021 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <stdbool.h>
13 
14 #include <crm/pengine/rules.h>
15 #include <crm/msg_xml.h>
17 #include <pacemaker-internal.h>
18 #include <crm/services.h>
19 
20 // The controller removes the resource from the CIB, making this redundant
21 // #define DELETE_THEN_REFRESH 1
22 
23 #define INFINITY_HACK (INFINITY * -100)
24 
25 #define VARIANT_NATIVE 1
26 #include <lib/pengine/variant.h>
27 
28 extern bool pcmk__is_daemon;
29 
30 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
31  pe_working_set_t *data_set);
32 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
33  xmlNode *operation, pe_working_set_t *data_set);
34 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
35  pe_working_set_t *data_set);
36 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
37  xmlNode *operation, pe_working_set_t *data_set);
38 
39 void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
40 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
41 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
42 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
43 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
44 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
45  pe_working_set_t * data_set);
46 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
47 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
48 
49 /* This array says what the *next* role should be when transitioning from one
50  * role to another. For example going from Stopped to Promoted, the next role is
51  * RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
52  * The current state then becomes Started, which is fed into this array again,
53  * giving a next role of RSC_ROLE_PROMOTED.
54  */
55 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
56 /* Current state Next state*/
57 /* Unknown Stopped Started Unpromoted Promoted */
63 };
64 
65 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
66  gboolean optional,
67  pe_working_set_t *data_set);
68 
/* This array picks the function needed to transition from one role to another.
 * It is indexed as [current role][next role]; each entry is the function that
 * creates the action needed for that single transition step. Multi-step
 * transitions (e.g. Stopped -> Promoted) are handled by repeatedly consulting
 * rsc_state_matrix for the intermediate role and this table for each step.
 * RoleError marks transitions that should never be requested directly.
 */
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* Current state Next state */
/* Unknown Stopped Started Unpromoted Promoted */
/* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
/* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
/* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
/* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
/* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
};
79 
/* Clear the given node-weight flag(s), logging the change at trace level.
 *
 * NOTE(review): the nw_flags parameter is not expanded in the body — the macro
 * reads and reassigns a variable literally named "flags" in the caller's
 * scope, so it only works when callers pass (and have) "flags". Confirm all
 * call sites before renaming or tightening this.
 */
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \
 flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
 "Node weight", (nw_rsc)->id, (flags), \
 (flags_to_clear), #flags_to_clear); \
 } while (0)
85 
/*!
 * \internal
 * \brief Choose a node for a resource, taking utilization and preference
 *        into account, and assign the resource to it
 *
 * \param[in,out] rsc       Resource to assign
 * \param[in]     prefer    Node to prefer, if any (may be overridden by
 *                          process_utilization())
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE if the resource ends up assigned to a node, else FALSE
 */
static gboolean
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
    GList *nodes = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *best = NULL;
    int multiple = 1;           // Number of candidates tied for the best score
    int length = 0;             // Number of allowed nodes
    gboolean result = FALSE;

    // May replace prefer based on utilization (capacity) considerations
    process_utilization(rsc, &prefer, data_set);

    // If the resource has already been assigned, report that outcome
    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to ? TRUE : FALSE;
    }

    // Sort allowed nodes by weight
    if (rsc->allowed_nodes) {
        length = g_hash_table_size(rsc->allowed_nodes);
    }
    if (length > 0) {
        nodes = g_hash_table_get_values(rsc->allowed_nodes);
        nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);

        // First node in sorted list has the best score
        best = g_list_nth_data(nodes, 0);
    }

    // Try the preferred node first, if it is allowed and good enough
    if (prefer && nodes) {
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         prefer->details->uname, rsc->id);

        /* Favor the preferred node as long as its weight is at least as good as
         * the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else if (!can_run_resources(chosen)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         chosen->details->uname, rsc->id, length);
        }
    }

    if ((chosen == NULL) && nodes) {
        /* Either there is no preferred node, or the preferred node is not
         * available, but there are other nodes allowed to run the resource.
         */

        chosen = best;
        pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                     chosen ? chosen->details->uname : "<none>", rsc->id, length);

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
            /* If the resource is already running on a node, prefer that node if
             * it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't want
             * remaining unallocated instances to prefer a node that's already
             * running another instance.
             */
            pe_node_t *running = pe__current_node(rsc);

            if (running && (can_run_resources(running) == FALSE)) {
                pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                             rsc->id, running->details->uname);
            } else if (running) {
                // Walk the remaining sorted candidates tied with the best
                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    pe_node_t *tmp = (pe_node_t *) iter->data;

                    if (tmp->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (tmp->details == running->details) {
                        // Scores are equal, so prefer the current node
                        chosen = tmp;
                    }
                    multiple++;
                }
            }
        }
    }

    // Log when the choice among equally weighted nodes was arbitrary
    if (multiple > 1) {
        static char score[33];
        int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;

        score2char_stack(chosen->weight, score, sizeof(score));
        do_crm_log(log_level,
                   "Chose node %s for %s from %d nodes with score %s",
                   chosen->details->uname, rsc->id, multiple, score);
    }

    result = native_assign_node(rsc, chosen, FALSE);
    g_list_free(nodes);
    return result;
}
201 
210 static int
211 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
212  const char *value)
213 {
214  GHashTableIter iter;
215  pe_node_t *node = NULL;
216  int best_score = -INFINITY;
217  const char *best_node = NULL;
218 
219  // Find best allowed node with matching attribute
220  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
221  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
222 
223  if ((node->weight > best_score) && can_run_resources(node)
224  && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
225 
226  best_score = node->weight;
227  best_node = node->details->uname;
228  }
229  }
230 
231  if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
232  if (best_node == NULL) {
233  crm_info("No allowed node for %s matches node attribute %s=%s",
234  rsc->id, attr, value);
235  } else {
236  crm_info("Allowed node %s for %s had best score (%d) "
237  "of those matching node attribute %s=%s",
238  best_node, rsc->id, best_score, attr, value);
239  }
240  }
241  return best_score;
242 }
243 
/*!
 * \internal
 * \brief Update each node's weight based on \p rsc's best score among nodes
 *        sharing that node's attribute value, scaled by \p factor
 *
 * \param[in,out] nodes          Table of nodes whose weights are updated
 * \param[in]     rsc            Resource whose allowed nodes supply the scores
 * \param[in]     attr           Node attribute to match on (NULL = node name)
 * \param[in]     factor         Multiplier applied to the matched score
 * \param[in]     only_positive  If true, do not let updates drive a
 *                               nonnegative weight negative (mark such nodes
 *                               unusable instead)
 */
static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
                              const char *attr, float factor,
                              bool only_positive)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    // Iterate through each node
    g_hash_table_iter_init(&iter, nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        float weight_f = 0;
        int weight = 0;
        int score = 0;
        int new_score = 0;

        // Best score of rsc's allowed nodes sharing this node's attribute value
        score = best_node_score_matching_attr(rsc, attr,
                                              pe_node_attribute_raw(node, attr));

        if ((factor < 0) && (score < 0)) {
            /* Negative preference for a node with a negative score
             * should not become a positive preference.
             *
             * @TODO Consider filtering only if weight is -INFINITY
             */
            crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        // Nodes previously marked unusable by this function are skipped
        if (node->weight == INFINITY_HACK) {
            crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        weight_f = factor * score;

        // Round the number; see http://c-faq.com/fp/round.html
        weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));

        /* Small factors can obliterate the small scores that are often actually
         * used in configurations. If the score and factor are nonzero, ensure
         * that the result is nonzero as well.
         */
        if ((weight == 0) && (score != 0)) {
            if (factor > 0.0) {
                weight = 1;
            } else if (factor < 0.0) {
                weight = -1;
            }
        }

        new_score = pe__add_scores(weight, node->weight);

        if (only_positive && (new_score < 0) && (node->weight > 0)) {
            crm_trace("%s: Filtering %d + %f * %d = %d "
                      "(negative disallowed, marking node unusable)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            node->weight = INFINITY_HACK;
            continue;
        }

        if (only_positive && (new_score < 0) && (node->weight == 0)) {
            crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            continue;
        }

        crm_trace("%s: %d + %f * %d = %d", node->details->uname,
                  node->weight, factor, score, new_score);
        node->weight = new_score;
    }
}
338 
339 static inline bool
340 is_nonempty_group(pe_resource_t *rsc)
341 {
342  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
343 }
344 
360 GHashTable *
362  GHashTable *nodes, const char *attr, float factor,
363  uint32_t flags)
364 {
365  GHashTable *work = NULL;
366 
367  // Avoid infinite recursion
368  if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
369  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
370  return nodes;
371  }
373 
375  if (is_nonempty_group(rsc)) {
376  GList *last = g_list_last(rsc->children);
377  pe_resource_t *last_rsc = last->data;
378 
379  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
380  "using last member %s (at %.6f)",
381  rhs, rsc->id, last_rsc->id, factor);
382  work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
383  flags);
384  } else {
386  }
388 
389  } else if (is_nonempty_group(rsc)) {
390  /* The first member of the group will recursively incorporate any
391  * constraints involving other members (including the group internal
392  * colocation).
393  *
394  * @TODO The indirect colocations from the dependent group's other
395  * members will be incorporated at full strength rather than by
396  * factor, so the group's combined stickiness will be treated as
397  * (factor + (#members - 1)) * stickiness. It is questionable what
398  * the right approach should be.
399  */
400  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
401  "(at %.6f)", rhs, rsc->id, factor);
402  work = pcmk__copy_node_table(nodes);
403  work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
404  factor, flags);
405 
406  } else {
407  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
408  rhs, rsc->id, factor);
409  work = pcmk__copy_node_table(nodes);
410  add_node_scores_matching_attr(work, rsc, attr, factor,
412  }
413 
414  if (can_run_any(work)) {
415  GList *gIter = NULL;
416  int multiplier = (factor < 0)? -1 : 1;
417 
419  gIter = rsc->rsc_cons;
420  pe_rsc_trace(rsc,
421  "Checking additional %d optional '%s with' constraints",
422  g_list_length(gIter), rsc->id);
423 
424  } else if (is_nonempty_group(rsc)) {
425  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
426 
427  gIter = last_rsc->rsc_cons_lhs;
428  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
429  "constraints using last member %s",
430  g_list_length(gIter), rsc->id, last_rsc->id);
431 
432  } else {
433  gIter = rsc->rsc_cons_lhs;
434  pe_rsc_trace(rsc,
435  "Checking additional %d optional 'with %s' constraints",
436  g_list_length(gIter), rsc->id);
437  }
438 
439  for (; gIter != NULL; gIter = gIter->next) {
440  pe_resource_t *other = NULL;
441  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
442 
444  other = constraint->rsc_rh;
445  } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
446  continue;
447  } else {
448  other = constraint->rsc_lh;
449  }
450 
451  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
452  constraint->id, constraint->rsc_lh->id,
453  constraint->rsc_rh->id);
454  work = pcmk__native_merge_weights(other, rhs, work,
455  constraint->node_attribute,
456  multiplier * constraint->score / (float) INFINITY,
458  pe__show_node_weights(true, NULL, rhs, work, rsc->cluster);
459  }
460 
461  } else if (pcmk_is_set(flags, pe_weights_rollback)) {
462  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
463  rhs, rsc->id);
464  g_hash_table_destroy(work);
466  return nodes;
467  }
468 
469 
471  pe_node_t *node = NULL;
472  GHashTableIter iter;
473 
474  g_hash_table_iter_init(&iter, work);
475  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
476  if (node->weight == INFINITY_HACK) {
477  node->weight = 1;
478  }
479  }
480  }
481 
482  if (nodes) {
483  g_hash_table_destroy(nodes);
484  }
485 
487  return work;
488 }
489 
490 static inline bool
491 node_has_been_unfenced(pe_node_t *node)
492 {
493  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
494 
495  return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
496 }
497 
498 static inline bool
499 is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
500 {
503 }
504 
505 pe_node_t *
507  pe_working_set_t *data_set)
508 {
509  GList *gIter = NULL;
510 
511  if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
512  /* never allocate children on their own */
513  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
514  rsc->parent->id);
515  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
516  }
517 
518  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
519  return rsc->allocated_to;
520  }
521 
522  if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
523  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
524  return NULL;
525  }
526 
528  pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
529 
530  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
531  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
532 
533  GHashTable *archive = NULL;
534  pe_resource_t *rsc_rh = constraint->rsc_rh;
535 
536  if ((constraint->role_lh >= RSC_ROLE_PROMOTED)
537  || (constraint->score < 0 && constraint->score > -INFINITY)) {
538  archive = pcmk__copy_node_table(rsc->allowed_nodes);
539  }
540 
541  pe_rsc_trace(rsc,
542  "%s: Allocating %s first (constraint=%s score=%d role=%s)",
543  rsc->id, rsc_rh->id, constraint->id,
544  constraint->score, role2text(constraint->role_lh));
545  rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
546  rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
547  if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
548  pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
549  g_hash_table_destroy(rsc->allowed_nodes);
550  rsc->allowed_nodes = archive;
551  archive = NULL;
552  }
553  if (archive) {
554  g_hash_table_destroy(archive);
555  }
556  }
557 
558  pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
559 
560  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
561  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
562 
563  if (!pcmk__colocation_has_influence(constraint, NULL)) {
564  continue;
565  }
566  pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
567  constraint->id, constraint->rsc_lh->id,
568  constraint->rsc_rh->id);
569  rsc->allowed_nodes =
570  constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
571  constraint->node_attribute,
572  (float)constraint->score / INFINITY,
574  }
575 
576  if (rsc->next_role == RSC_ROLE_STOPPED) {
577  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
578  /* make sure it doesn't come up again */
579  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
580 
581  } else if(rsc->next_role > rsc->role
582  && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
583  && data_set->no_quorum_policy == no_quorum_freeze) {
584  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
585  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
586  pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
587  }
588 
590  rsc, __func__, rsc->allowed_nodes, data_set);
594  }
595 
596  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
597  const char *reason = NULL;
598  pe_node_t *assign_to = NULL;
599 
600  pe__set_next_role(rsc, rsc->role, "unmanaged");
601  assign_to = pe__current_node(rsc);
602  if (assign_to == NULL) {
603  reason = "inactive";
604  } else if (rsc->role == RSC_ROLE_PROMOTED) {
605  reason = "promoted";
606  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
607  reason = "failed";
608  } else {
609  reason = "active";
610  }
611  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
612  (assign_to? assign_to->details->uname : "no node"), reason);
613  native_assign_node(rsc, assign_to, TRUE);
614 
615  } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
616  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
617  native_assign_node(rsc, NULL, TRUE);
618 
619  } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
620  && native_choose_node(rsc, prefer, data_set)) {
621  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
622  rsc->allocated_to->details->uname);
623 
624  } else if (rsc->allocated_to == NULL) {
625  if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
626  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
627  } else if (rsc->running_on != NULL) {
628  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
629  }
630 
631  } else {
632  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
633  rsc->allocated_to->details->uname);
634  }
635 
637 
638  if (rsc->is_remote_node) {
639  pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
640 
641  CRM_ASSERT(remote_node != NULL);
642  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
643  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
644  remote_node->details->id);
645  remote_node->details->online = TRUE;
646  /* We shouldn't consider an unseen remote-node unclean if we are going
647  * to try and connect to it. Otherwise we get an unnecessary fence */
648  if (remote_node->details->unseen == TRUE) {
649  remote_node->details->unclean = FALSE;
650  }
651 
652  } else {
653  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
654  remote_node->details->id, role2text(rsc->next_role),
655  (rsc->allocated_to? "" : "un"));
656  remote_node->details->shutdown = TRUE;
657  }
658  }
659 
660  return rsc->allocated_to;
661 }
662 
663 static gboolean
664 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
665 {
666  gboolean dup = FALSE;
667  const char *id = NULL;
668  const char *value = NULL;
669  xmlNode *operation = NULL;
670  guint interval2_ms = 0;
671 
672  CRM_ASSERT(rsc);
673  for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
674  operation = pcmk__xe_next(operation)) {
675 
676  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
677  value = crm_element_value(operation, "name");
678  if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
679  continue;
680  }
681 
682  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
683  interval2_ms = crm_parse_interval_spec(value);
684  if (interval_ms != interval2_ms) {
685  continue;
686  }
687 
688  if (id == NULL) {
689  id = ID(operation);
690 
691  } else {
692  pcmk__config_err("Operation %s is duplicate of %s (do not use "
693  "same name and interval combination more "
694  "than once per resource)", ID(operation), id);
695  dup = TRUE;
696  }
697  }
698  }
699 
700  return dup;
701 }
702 
703 static bool
704 op_cannot_recur(const char *name)
705 {
707 }
708 
709 static void
710 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
711  xmlNode * operation, pe_working_set_t * data_set)
712 {
713  char *key = NULL;
714  const char *name = NULL;
715  const char *role = NULL;
716  const char *interval_spec = NULL;
717  const char *node_uname = node? node->details->uname : "n/a";
718 
719  guint interval_ms = 0;
720  pe_action_t *mon = NULL;
721  gboolean is_optional = TRUE;
722  GList *possible_matches = NULL;
723 
724  CRM_ASSERT(rsc);
725 
726  /* Only process for the operations without role="Stopped" */
727  role = crm_element_value(operation, "role");
728  if (role && text2role(role) == RSC_ROLE_STOPPED) {
729  return;
730  }
731 
732  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
733  interval_ms = crm_parse_interval_spec(interval_spec);
734  if (interval_ms == 0) {
735  return;
736  }
737 
738  name = crm_element_value(operation, "name");
739  if (is_op_dup(rsc, name, interval_ms)) {
740  crm_trace("Not creating duplicate recurring action %s for %dms %s",
741  ID(operation), interval_ms, name);
742  return;
743  }
744 
745  if (op_cannot_recur(name)) {
746  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
747  ID(operation), name);
748  return;
749  }
750 
751  key = pcmk__op_key(rsc->id, name, interval_ms);
752  if (find_rsc_op_entry(rsc, key) == NULL) {
753  crm_trace("Not creating recurring action %s for disabled resource %s",
754  ID(operation), rsc->id);
755  free(key);
756  return;
757  }
758 
759  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
760  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
761 
762  if (start != NULL) {
763  pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
764  pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
765  start->uuid);
766  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
767  } else {
768  pe_rsc_trace(rsc, "Marking %s optional", key);
769  is_optional = TRUE;
770  }
771 
772  /* start a monitor for an already active resource */
773  possible_matches = find_actions_exact(rsc->actions, key, node);
774  if (possible_matches == NULL) {
775  is_optional = FALSE;
776  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
777 
778  } else {
779  GList *gIter = NULL;
780 
781  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
782  pe_action_t *op = (pe_action_t *) gIter->data;
783 
785  is_optional = FALSE;
786  break;
787  }
788  }
789  g_list_free(possible_matches);
790  }
791 
792  if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
793  || (role != NULL && text2role(role) != rsc->next_role)) {
794  int log_level = LOG_TRACE;
795  const char *result = "Ignoring";
796 
797  if (is_optional) {
798  char *after_key = NULL;
799  pe_action_t *cancel_op = NULL;
800 
801  // It's running, so cancel it
802  log_level = LOG_INFO;
803  result = "Cancelling";
804  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
805 
806  switch (rsc->role) {
807  case RSC_ROLE_UNPROMOTED:
808  case RSC_ROLE_STARTED:
809  if (rsc->next_role == RSC_ROLE_PROMOTED) {
810  after_key = promote_key(rsc);
811 
812  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
813  after_key = stop_key(rsc);
814  }
815 
816  break;
817  case RSC_ROLE_PROMOTED:
818  after_key = demote_key(rsc);
819  break;
820  default:
821  break;
822  }
823 
824  if (after_key) {
825  custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
826  pe_order_runnable_left, data_set);
827  }
828  }
829 
830  do_crm_log(log_level, "%s action %s (%s vs. %s)",
831  result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
832  role2text(rsc->next_role));
833 
834  free(key);
835  return;
836  }
837 
838  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
839  key = mon->uuid;
840  if (is_optional) {
841  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
842  }
843 
844  if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
845  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
846  node_uname, mon->uuid);
848 
849  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
850  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
851  node_uname, mon->uuid);
853 
854  } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
855  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
856  mon->task, interval_ms / 1000, rsc->id, node_uname);
857  }
858 
859  if (rsc->next_role == RSC_ROLE_PROMOTED) {
860  char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
861 
862  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
863  free(running_promoted);
864  }
865 
866  if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
867  custom_action_order(rsc, start_key(rsc), NULL,
868  NULL, strdup(key), mon,
870 
871  custom_action_order(rsc, reload_key(rsc), NULL,
872  NULL, strdup(key), mon,
874 
875  if (rsc->next_role == RSC_ROLE_PROMOTED) {
876  custom_action_order(rsc, promote_key(rsc), NULL,
877  rsc, NULL, mon,
879 
880  } else if (rsc->role == RSC_ROLE_PROMOTED) {
881  custom_action_order(rsc, demote_key(rsc), NULL,
882  rsc, NULL, mon,
884  }
885  }
886 }
887 
888 static void
889 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
890 {
891  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
892  (node == NULL || node->details->maintenance == FALSE)) {
893  xmlNode *operation = NULL;
894 
895  for (operation = pcmk__xe_first_child(rsc->ops_xml);
896  operation != NULL;
897  operation = pcmk__xe_next(operation)) {
898 
899  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
900  RecurringOp(rsc, start, node, operation, data_set);
901  }
902  }
903  }
904 }
905 
906 static void
907 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
908  xmlNode * operation, pe_working_set_t * data_set)
909 {
910  char *key = NULL;
911  const char *name = NULL;
912  const char *role = NULL;
913  const char *interval_spec = NULL;
914  const char *node_uname = node? node->details->uname : "n/a";
915 
916  guint interval_ms = 0;
917  GList *possible_matches = NULL;
918  GList *gIter = NULL;
919 
920  /* Only process for the operations with role="Stopped" */
921  role = crm_element_value(operation, "role");
922  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
923  return;
924  }
925 
926  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
927  interval_ms = crm_parse_interval_spec(interval_spec);
928  if (interval_ms == 0) {
929  return;
930  }
931 
932  name = crm_element_value(operation, "name");
933  if (is_op_dup(rsc, name, interval_ms)) {
934  crm_trace("Not creating duplicate recurring action %s for %dms %s",
935  ID(operation), interval_ms, name);
936  return;
937  }
938 
939  if (op_cannot_recur(name)) {
940  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
941  ID(operation), name);
942  return;
943  }
944 
945  key = pcmk__op_key(rsc->id, name, interval_ms);
946  if (find_rsc_op_entry(rsc, key) == NULL) {
947  crm_trace("Not creating recurring action %s for disabled resource %s",
948  ID(operation), rsc->id);
949  free(key);
950  return;
951  }
952 
953  // @TODO add support
954  if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
955  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
956  "not supported for anonymous clones)",
957  ID(operation));
958  return;
959  }
960 
961  pe_rsc_trace(rsc,
962  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
963  ID(operation), rsc->id, role2text(rsc->next_role));
964 
965  /* if the monitor exists on the node where the resource will be running, cancel it */
966  if (node != NULL) {
967  possible_matches = find_actions_exact(rsc->actions, key, node);
968  if (possible_matches) {
969  pe_action_t *cancel_op = NULL;
970 
971  g_list_free(possible_matches);
972 
973  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
974 
975  if ((rsc->next_role == RSC_ROLE_STARTED)
976  || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
977  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
978  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
979  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
980  pe_order_runnable_left, data_set);
981  }
982 
983  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
984  key, role, role2text(rsc->next_role), node_uname);
985  }
986  }
987 
988  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
989  pe_node_t *stop_node = (pe_node_t *) gIter->data;
990  const char *stop_node_uname = stop_node->details->uname;
991  gboolean is_optional = TRUE;
992  gboolean probe_is_optional = TRUE;
993  gboolean stop_is_optional = TRUE;
994  pe_action_t *stopped_mon = NULL;
995  char *rc_inactive = NULL;
996  GList *probe_complete_ops = NULL;
997  GList *stop_ops = NULL;
998  GList *local_gIter = NULL;
999 
1000  if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
1001  continue;
1002  }
1003 
1004  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
1005  ID(operation), rsc->id, crm_str(stop_node_uname));
1006 
1007  /* start a monitor for an already stopped resource */
1008  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
1009  if (possible_matches == NULL) {
1010  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
1011  crm_str(stop_node_uname));
1012  is_optional = FALSE;
1013  } else {
1014  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1015  crm_str(stop_node_uname));
1016  is_optional = TRUE;
1017  g_list_free(possible_matches);
1018  }
1019 
1020  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1021 
1022  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
1023  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1024  free(rc_inactive);
1025 
1026  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1027  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1028  FALSE);
1029  GList *pIter = NULL;
1030 
1031  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1032  pe_action_t *probe = (pe_action_t *) pIter->data;
1033 
1034  order_actions(probe, stopped_mon, pe_order_runnable_left);
1035  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1036  }
1037 
1038  g_list_free(probes);
1039  }
1040 
1041  if (probe_complete_ops) {
1042  g_list_free(probe_complete_ops);
1043  }
1044 
1045  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1046 
1047  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1048  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1049 
1050  if (!pcmk_is_set(stop->flags, pe_action_optional)) {
1051  stop_is_optional = FALSE;
1052  }
1053 
1054  if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
1055  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1056  crm_str(stop_node_uname), stopped_mon->uuid);
1058  }
1059 
1060  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1061  custom_action_order(rsc, stop_key(rsc), stop,
1062  NULL, strdup(key), stopped_mon,
1064  }
1065 
1066  }
1067 
1068  if (stop_ops) {
1069  g_list_free(stop_ops);
1070  }
1071 
1072  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1073  && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1074  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1075  key, crm_str(stop_node_uname));
1077  }
1078 
1079  if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1080  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1081  }
1082 
1083  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1084  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1085  crm_str(stop_node_uname), stopped_mon->uuid);
1087  }
1088 
1089  if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
1090  && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1091  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1092  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1093  }
1094  }
1095 
1096  free(key);
1097 }
1098 
1099 static void
1100 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1101 {
1102  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
1103  (node == NULL || node->details->maintenance == FALSE)) {
1104  xmlNode *operation = NULL;
1105 
1106  for (operation = pcmk__xe_first_child(rsc->ops_xml);
1107  operation != NULL;
1108  operation = pcmk__xe_next(operation)) {
1109 
1110  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
1111  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1112  }
1113  }
1114  }
1115 }
1116 
/*!
 * \internal
 * \brief Create and order the actions needed for a live migration
 *
 * Creates migrate_to (skipped when resuming a partial migration that already
 * ran it) and migrate_from actions, orders them relative to probes, stop and
 * start, and records source/target node names in the actions' meta-attributes.
 *
 * NOTE(review): this extraction has dropped several lines (rendered numbers
 * 1141-1142, 1145, 1149, 1157-1158, 1164, 1167, 1173, 1177 and 1184 are
 * missing), so some flag-setting calls and ordering-flag arguments are absent
 * below — confirm against upstream pcmk_sched_native.c before editing.
 */
1117 static void
1118 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
1119 {
1120  pe_action_t *migrate_to = NULL;
1121  pe_action_t *migrate_from = NULL;
1122  pe_action_t *start = NULL;
1123  pe_action_t *stop = NULL;
     /* TRUE when resuming a migration whose migrate_to already completed */
1124  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
1125 
1126  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
1127  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
1128  start = start_action(rsc, chosen, TRUE);
1129  stop = stop_action(rsc, current, TRUE);
1130 
     /* A partial migration has already performed migrate_to on the source */
1131  if (partial == FALSE) {
1132  migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1133  RSC_MIGRATE, current, TRUE, TRUE, data_set);
1134  }
1135 
1136  migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1137  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
1138 
1139  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
1140 
1143 
1144  // This is easier than trying to delete it from the graph
1146 
1147  /* order probes before migrations */
1148  if (partial) {
1150  migrate_from->needs = start->needs;
1151 
1152  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1153  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1154  NULL, pe_order_optional, data_set);
1155 
1156  } else {
1159  migrate_to->needs = start->needs;
1160 
1161  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1162  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1163  NULL, pe_order_optional, data_set);
     /* NOTE(review): the first arguments of this ordering (rendered line
      * 1164) were dropped by the extraction */
1165  NULL, rsc,
1166  pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1168  data_set);
1169  }
1170 
     /* migrate_from must precede both the stop on the source and the
      * (pseudo) start on the target */
1171  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1172  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1174  data_set);
1175  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1176  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1178  data_set);
1179 
1180  }
1181 
1182  if (migrate_to) {
     /* Record the endpoints so the executor can pass them to the agent */
1183  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1185 
1186  /* Pacemaker Remote connections don't require pending to be recorded in
1187  * the CIB. We can reduce CIB writes by not setting PENDING for them.
1188  */
1189  if (rsc->is_remote_node == FALSE) {
1190  /* migrate_to takes place on the source node, but can
1191  * have an effect on the target node depending on how
1192  * the agent is written. Because of this, we have to maintain
1193  * a record that the migrate_to occurred, in case the source node
1194  * loses membership while the migrate_to action is still in-flight.
1195  */
1196  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
1197  }
1198  }
1199 
1200  if (migrate_from) {
1201  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1202  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1203  }
1204 }
1205 
/*!
 * \internal
 * \brief Create all actions needed for a primitive resource's transition
 *
 * Determines the chosen node and next role, schedules stops for dangling
 * migrations and multiply-active instances, walks the role state matrix to
 * create the stop/start/demote/promote actions that take the resource from
 * its current role to its next role, creates recurring monitors, and finally
 * decides whether a live migration is allowed.
 *
 * NOTE(review): the signature line (rendered 1207) was dropped by this
 * extraction — presumably native_create_actions(pe_resource_t *,
 * pe_working_set_t *); confirm against upstream. Several interior lines are
 * also missing (e.g. 1248, 1255, 1265-1266, 1282, 1286-1287, 1306, 1314,
 * 1426), so some conditions and arguments below are incomplete.
 */
1206 void
1208 {
1209  pe_action_t *start = NULL;
1210  pe_node_t *chosen = NULL;
1211  pe_node_t *current = NULL;
1212  gboolean need_stop = FALSE;
1213  bool need_promote = FALSE;
1214  gboolean is_moving = FALSE;
1215  gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
1216 
1217  GList *gIter = NULL;
1218  unsigned int num_all_active = 0;
1219  unsigned int num_clean_active = 0;
1220  bool multiply_active = FALSE;
1221  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
1222  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
1223 
1224  CRM_ASSERT(rsc);
1225  chosen = rsc->allocated_to;
1226  next_role = rsc->next_role;
     /* Derive an implicit next role from whether a node was allocated */
1227  if (next_role == RSC_ROLE_UNKNOWN) {
1228  pe__set_next_role(rsc,
1229  (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
1230  "allocation");
1231  }
1232  pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
1233  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
1234  ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
1235  ((chosen == NULL)? "no node" : chosen->details->uname));
1236 
1237  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
1238 
     /* Stop (and optionally clean up) on any node left over from an
      * interrupted migration */
1239  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
1240  pe_node_t *dangling_source = (pe_node_t *) gIter->data;
1241 
1242  pe_action_t *stop = NULL;
1243 
1244  pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
1245  pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
1246  rsc->id, dangling_source->details->uname);
1247  stop = stop_action(rsc, dangling_source, FALSE);
1249  if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
1250  DeleteRsc(rsc, dangling_source, FALSE, data_set);
1251  }
1252  }
1253 
     /* NOTE(review): a condition line (rendered 1255) is missing here —
      * likely a check that a partial migration is in progress */
1254  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
1256  && (current->details == rsc->partial_migration_source->details)
1257  && (chosen->details == rsc->partial_migration_target->details)) {
1258 
1259  /* The chosen node is still the migration target from a partial
1260  * migration. Attempt to continue the migration instead of recovering
1261  * by stopping the resource everywhere and starting it on a single node.
1262  */
1263  pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
1264  "to target %s from %s",
1267 
1268  } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
1269  /* If a resource has "requires" set to nothing or quorum, don't consider
1270  * it active on unclean nodes (similar to how all resources behave when
1271  * stonith-enabled is false). We can start such resources elsewhere
1272  * before fencing completes, and if we considered the resource active on
1273  * the failed node, we would attempt recovery for being active on
1274  * multiple nodes.
1275  */
1276  multiply_active = (num_clean_active > 1);
1277  } else {
1278  multiply_active = (num_all_active > 1);
1279  }
1280 
1281  if (multiply_active) {
1283  // Migration was in progress, but we've chosen a different target
1284  crm_notice("Resource %s can no longer migrate from %s to %s "
1285  "(will stop on both nodes)",
1288 
1289  } else {
1290  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
1291 
1292  // Resource was incorrectly multiply active
1293  pe_proc_err("%s resource %s is active on %u nodes (%s)",
1294  crm_str(class), rsc->id, num_all_active,
1295  recovery2text(rsc->recovery_type));
1296  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
1297  }
1298 
1299  if (rsc->recovery_type == recovery_stop_start) {
1300  need_stop = TRUE;
1301  }
1302 
1303  /* If by chance a partial migration is in process, but the migration
1304  * target is not chosen still, clear all partial migration data.
1305  */
1307  allow_migrate = FALSE;
1308  }
1309 
1310  if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
1311  pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
1312  rsc->id);
1313  start = start_action(rsc, chosen, TRUE);
1315  }
1316 
     /* Decide whether a stop is required before the resource can reach its
      * chosen node / next role */
1317  if (current && chosen && current->details != chosen->details) {
1318  pe_rsc_trace(rsc, "Moving %s from %s to %s",
1319  rsc->id, crm_str(current->details->uname),
1320  crm_str(chosen->details->uname));
1321  is_moving = TRUE;
1322  need_stop = TRUE;
1323 
1324  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
1325  if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
1326  need_stop = TRUE;
1327  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
1328  } else {
1329  pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
1330  if (rsc->next_role == RSC_ROLE_PROMOTED) {
1331  need_promote = TRUE;
1332  }
1333  }
1334 
1335  } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
1336  pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
1337  need_stop = TRUE;
1338 
1339  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
1340  pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
1341  rsc->id);
1342  start = start_action(rsc, chosen, TRUE);
1343  if (!pcmk_is_set(start->flags, pe_action_optional)) {
1344  // Recovery of a promoted resource
1345  pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
1346  need_stop = TRUE;
1347  }
1348  }
1349 
1350  /* Create any additional actions required when bringing resource down and
1351  * back up to same level.
1352  */
1353  role = rsc->role;
     /* Walk the state matrix downward to STOPPED */
1354  while (role != RSC_ROLE_STOPPED) {
1355  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
1356  pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
1357  (need_stop? "required" : "optional"), rsc->id,
1358  role2text(role), role2text(next_role));
1359  if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
1360  break;
1361  }
1362  role = next_role;
1363  }
1364 
1365 
     /* Walk back up to the current role on the chosen node */
1366  while ((rsc->role <= rsc->next_role) && (role != rsc->role)
1367  && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
1368  bool required = need_stop;
1369 
1370  next_role = rsc_state_matrix[role][rsc->role];
1371  if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
1372  required = true;
1373  }
1374  pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
1375  (required? "required" : "optional"), rsc->id,
1376  role2text(role), role2text(next_role));
1377  if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
1378  data_set) == FALSE) {
1379  break;
1380  }
1381  role = next_role;
1382  }
1383  role = rsc->role;
1384 
1385  /* Required steps from this role to the next */
1386  while (role != rsc->next_role) {
1387  next_role = rsc_state_matrix[role][rsc->next_role];
1388  pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
1389  rsc->id, role2text(role), role2text(next_role),
1390  role2text(rsc->next_role));
1391  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
1392  break;
1393  }
1394  role = next_role;
1395  }
1396 
     /* Schedule recurring monitors (active, stopped-role, or none) */
1397  if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
1398  pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
1399  rsc->id);
1400 
1401  } else if ((rsc->next_role != RSC_ROLE_STOPPED)
1402  || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1403  pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
1404  ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
1405  rsc->id);
1406  start = start_action(rsc, chosen, TRUE);
1407  Recurring(rsc, start, chosen, data_set);
1408  Recurring_Stopped(rsc, start, chosen, data_set);
1409 
1410  } else {
1411  pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
1412  rsc->id);
1413  Recurring_Stopped(rsc, NULL, NULL, data_set);
1414  }
1415 
1416  /* if we are stuck in a partial migration, where the target
1417  * of the partial migration no longer matches the chosen target.
1418  * A full stop/start is required */
1419  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
1420  pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
1421  rsc->id);
1422  allow_migrate = FALSE;
1423 
1424  } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
1425  || pcmk_any_flags_set(rsc->flags,
1427  || (current && current->details->unclean)
1428  || rsc->next_role < RSC_ROLE_STARTED) {
1429 
1430  allow_migrate = FALSE;
1431  }
1432 
1433  if (allow_migrate) {
1434  handle_migration_actions(rsc, current, chosen, data_set);
1435  }
1436 }
1437 
1438 static void
1439 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1440 {
1441  GHashTableIter iter;
1442  pe_node_t *node = NULL;
1443  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1444  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1445  if (node->details->remote_rsc) {
1446  node->weight = -INFINITY;
1447  }
1448  }
1449 }
1450 
1465 static GList *
1466 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1467 {
1468  GList *allowed_nodes = NULL;
1469 
1470  if (rsc->allowed_nodes) {
1471  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1472  }
1473 
1474  if (!pcmk__is_daemon) {
1475  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1476  }
1477 
1478  return allowed_nodes;
1479 }
1480 
/*!
 * \internal
 * \brief Create implicit constraints needed by a primitive resource
 *
 * Orders stop before start, demote/promote around stop/start for promotable
 * clones, probes before history cleanup, and adds unfencing, utilization
 * (placement-strategy) and container/remote-node constraints.
 *
 * NOTE(review): the signature line (rendered 1482) was dropped by this
 * extraction — presumably native_internal_constraints(pe_resource_t *,
 * pe_working_set_t *); confirm against upstream. Several interior lines
 * (e.g. 1502-1503, 1513, 1517, 1522, 1530, 1532, 1567, 1571, 1630, 1640,
 * 1679, 1683, 1691) are also missing, so some conditions, ordering flags
 * and call arguments below are incomplete.
 */
1481 void
1483 {
1484  /* This function is on the critical path and worth optimizing as much as possible */
1485 
1486  pe_resource_t *top = NULL;
1487  GList *allowed_nodes = NULL;
1488  bool check_unfencing = FALSE;
1489  bool check_utilization = FALSE;
1490 
     /* Unmanaged resources get no implicit constraints at all */
1491  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1492  pe_rsc_trace(rsc,
1493  "Skipping native constraints for unmanaged resource: %s",
1494  rsc->id);
1495  return;
1496  }
1497 
1498  top = uber_parent(rsc);
1499 
1500  // Whether resource requires unfencing
1501  check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
1504 
1505  // Whether a non-default placement strategy is used
1506  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
1507  && !pcmk__str_eq(data_set->placement_strategy,
1508  "default", pcmk__str_casei);
1509 
1510  // Order stops before starts (i.e. restart)
1511  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1512  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1514  data_set);
1515 
1516  // Promotable ordering: demote before stop, start before promote
1518  || (rsc->role > RSC_ROLE_UNPROMOTED)) {
1519 
1520  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
1521  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1523 
1524  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1525  rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
1526  pe_order_runnable_left, data_set);
1527  }
1528 
1529  // Don't clear resource history if probing on same node
1531  NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
1533  data_set);
1534 
1535  // Certain checks need allowed nodes
1536  if (check_unfencing || check_utilization || rsc->container) {
1537  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
1538  }
1539 
1540  if (check_unfencing) {
1541  /* Check if the node needs to be unfenced first */
1542 
1543  for (GList *item = allowed_nodes; item; item = item->next) {
1544  pe_node_t *node = item->data;
1545  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
1546 
1547  crm_debug("Ordering any stops of %s before %s, and any starts after",
1548  rsc->id, unfence->uuid);
1549 
1550  /*
1551  * It would be more efficient to order clone resources once,
1552  * rather than order each instance, but ordering the instance
1553  * allows us to avoid unnecessary dependencies that might conflict
1554  * with user constraints.
1555  *
1556  * @TODO: This constraint can still produce a transition loop if the
1557  * resource has a stop scheduled on the node being unfenced, and
1558  * there is a user ordering constraint to start some other resource
1559  * (which will be ordered after the unfence) before stopping this
1560  * resource. An example is "start some slow-starting cloned service
1561  * before stopping an associated virtual IP that may be moving to
1562  * it":
1563  * stop this -> unfencing -> start that -> stop this
1564  */
1565  custom_action_order(rsc, stop_key(rsc), NULL,
1566  NULL, strdup(unfence->uuid), unfence,
1568 
1569  custom_action_order(NULL, strdup(unfence->uuid), unfence,
1570  rsc, start_key(rsc), NULL,
1572  data_set);
1573  }
1574  }
1575 
1576  if (check_utilization) {
1577  GList *gIter = NULL;
1578 
1579  pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
1580  rsc->id, data_set->placement_strategy);
1581 
     /* Order this resource's stops before each current node's
      * load_stopped pseudo-op */
1582  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
1583  pe_node_t *current = (pe_node_t *) gIter->data;
1584 
1585  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1586  current->details->uname);
1587  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1588 
1589  if (load_stopped->node == NULL) {
1590  load_stopped->node = pe__copy_node(current);
1592  }
1593 
1594  custom_action_order(rsc, stop_key(rsc), NULL,
1595  NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
1596  }
1597 
     /* Order each allowed node's load_stopped pseudo-op before this
      * resource's start/migrate there */
1598  for (GList *item = allowed_nodes; item; item = item->next) {
1599  pe_node_t *next = item->data;
1600  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1601  next->details->uname);
1602  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1603 
1604  if (load_stopped->node == NULL) {
1605  load_stopped->node = pe__copy_node(next);
1607  }
1608 
1609  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1610  rsc, start_key(rsc), NULL, pe_order_load, data_set);
1611 
1612  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1613  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1614  NULL, pe_order_load, data_set);
1615 
1616  free(load_stopped_task);
1617  }
1618  }
1619 
1620  if (rsc->container) {
1621  pe_resource_t *remote_rsc = NULL;
1622 
1623  if (rsc->is_remote_node) {
1624  // rsc is the implicit remote connection for a guest or bundle node
1625 
1626  /* Do not allow a guest resource to live on a Pacemaker Remote node,
1627  * to avoid nesting remotes. However, allow bundles to run on remote
1628  * nodes.
1629  */
1631  rsc_avoids_remote_nodes(rsc->container);
1632  }
1633 
1634  /* If someone cleans up a guest or bundle node's container, we will
1635  * likely schedule a (re-)probe of the container and recovery of the
1636  * connection. Order the connection stop after the container probe,
1637  * so that if we detect the container running, we will trigger a new
1638  * transition and avoid the unnecessary recovery.
1639  */
1641  pe_order_optional, data_set);
1642 
1643  /* A user can specify that a resource must start on a Pacemaker Remote
1644  * node by explicitly configuring it with the container=NODENAME
1645  * meta-attribute. This is of questionable merit, since location
1646  * constraints can accomplish the same thing. But we support it, so here
1647  * we check whether a resource (that is not itself a remote connection)
1648  * has container set to a remote node or guest node resource.
1649  */
1650  } else if (rsc->container->is_remote_node) {
1651  remote_rsc = rsc->container;
1652  } else {
1653  remote_rsc = pe__resource_contains_guest_node(data_set,
1654  rsc->container);
1655  }
1656 
1657  if (remote_rsc) {
1658  /* Force the resource on the Pacemaker Remote node instead of
1659  * colocating the resource with the container resource.
1660  */
1661  for (GList *item = allowed_nodes; item; item = item->next) {
1662  pe_node_t *node = item->data;
1663 
1664  if (node->details->remote_rsc != remote_rsc) {
1665  node->weight = -INFINITY;
1666  }
1667  }
1668 
1669  } else {
1670  /* This resource is either a filler for a container that does NOT
1671  * represent a Pacemaker Remote node, or a Pacemaker Remote
1672  * connection resource for a guest node or bundle.
1673  */
1674  int score;
1675 
1676  crm_trace("Order and colocate %s relative to its container %s",
1677  rsc->id, rsc->container->id);
1678 
1680  pcmk__op_key(rsc->container->id, RSC_START, 0),
1681  NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
1682  NULL,
1684  data_set);
1685 
1686  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1687  rsc->container,
1688  pcmk__op_key(rsc->container->id, RSC_STOP, 0),
1689  NULL, pe_order_implies_first, data_set);
1690 
1692  score = 10000; /* Highly preferred but not essential */
1693  } else {
1694  score = INFINITY; /* Force them to run on the same host */
1695  }
1696  pcmk__new_colocation("resource-with-container", NULL, score, rsc,
1697  rsc->container, NULL, NULL, true, data_set);
1698  }
1699  }
1700 
1701  if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
1702  /* don't allow remote nodes to run stonith devices
1703  * or remote connection resources.*/
1704  rsc_avoids_remote_nodes(rsc);
1705  }
1706  g_list_free(allowed_nodes);
1707 }
1708 
/*!
 * \internal
 * \brief Dispatch a colocation constraint to the primary's variant handler
 *
 * Validates that both sides of the constraint exist, then delegates to the
 * primary (RH) resource's rsc_colocation_rh method.
 *
 * NOTE(review): the signature line (rendered 1710) was dropped by this
 * extraction — presumably native_rsc_colocation_lh(pe_resource_t *rsc_lh,
 * pe_resource_t *rsc_rh, ...); confirm against upstream.
 */
1709 void
1711  pcmk__colocation_t *constraint,
1712  pe_working_set_t *data_set)
1713 {
1714  if (rsc_lh == NULL) {
1715  pe_err("rsc_lh was NULL for %s", constraint->id);
1716  return;
1717 
1718  } else if (constraint->rsc_rh == NULL) {
1719  pe_err("rsc_rh was NULL for %s", constraint->id);
1720  return;
1721  }
1722 
1723  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1724  rsc_rh->id);
1725 
     /* Delegate to the primary resource's variant-specific implementation */
1726  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1727 }
1728 
/*!
 * \internal
 * \brief Decide what effect (if any) a colocation constraint should have
 *
 * \return influence_nothing, influence_rsc_priority (adjust promotion
 *         priority of an already-allocated promotable instance), or
 *         influence_rsc_location (adjust the dependent's node weights)
 *
 * NOTE(review): the return-type and name lines (rendered 1729-1730) were
 * dropped by this extraction — presumably
 * "static enum filter_colocation_res filter_colocation_constraint(
 *  pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, ...)"; confirm upstream.
 */
1731  pcmk__colocation_t *constraint, gboolean preview)
1732 {
1733  /* rh side must be allocated before we can process constraint */
1734  if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) {
1735  return influence_nothing;
1736  }
1737 
1738  if ((constraint->role_lh >= RSC_ROLE_UNPROMOTED) &&
1739  rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1740  && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
1741 
1742  /* LH and RH resources have already been allocated, place the correct
1743  * priority on LH rsc for the given promotable clone resource role */
1744  return influence_rsc_priority;
1745  }
1746 
1747  if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
1748  // Log an error if we violated a mandatory colocation constraint
1749  const pe_node_t *rh_node = rsc_rh->allocated_to;
1750 
1751  if (rsc_lh->allocated_to == NULL) {
1752  // Dependent resource isn't allocated, so constraint doesn't matter
1753  return influence_nothing;
1754  }
1755 
1756  if (constraint->score >= INFINITY) {
1757  // Dependent resource must colocate with rh_node
1758 
1759  if ((rh_node == NULL)
1760  || (rh_node->details != rsc_lh->allocated_to->details)) {
1761  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1762  rsc_lh->id, rsc_rh->id,
1763  rsc_lh->allocated_to->details->uname,
1764  (rh_node? rh_node->details->uname : "unallocated"));
1765  }
1766 
1767  } else if (constraint->score <= -INFINITY) {
1768  // Dependent resource must anti-colocate with rh_node
1769 
1770  if ((rh_node != NULL)
1771  && (rsc_lh->allocated_to->details == rh_node->details)) {
1772  crm_err("%s and %s must be anti-colocated but are allocated "
1773  "to the same node (%s)",
1774  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1775  }
1776  }
     /* Both sides already allocated: only the above diagnostics apply */
1777  return influence_nothing;
1778  }
1779 
     /* Role filters: a positive constraint is ignored unless both sides
      * will be in the constrained roles; a negative constraint is ignored
      * when a side will be in the constrained role */
1780  if (constraint->score > 0
1781  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1782  crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
1783  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1784  return influence_nothing;
1785  }
1786 
1787  if (constraint->score > 0
1788  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1789  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1790  return influence_nothing;
1791  }
1792 
1793  if (constraint->score < 0
1794  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1795  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1796  role2text(constraint->role_lh));
1797  return influence_nothing;
1798  }
1799 
1800  if (constraint->score < 0
1801  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1802  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1803  role2text(constraint->role_rh));
1804  return influence_nothing;
1805  }
1806 
1807  return influence_rsc_location;
1808 }
1809 
1810 static void
1811 influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1812  pcmk__colocation_t *constraint)
1813 {
1814  const char *rh_value = NULL;
1815  const char *lh_value = NULL;
1816  const char *attribute = CRM_ATTR_ID;
1817  int score_multiplier = 1;
1818 
1819  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1820  return;
1821  }
1822 
1823  if (constraint->node_attribute != NULL) {
1824  attribute = constraint->node_attribute;
1825  }
1826 
1827  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1828  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1829 
1830  if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) {
1831  if ((constraint->score == INFINITY)
1832  && (constraint->role_lh == RSC_ROLE_PROMOTED)) {
1833  rsc_lh->priority = -INFINITY;
1834  }
1835  return;
1836  }
1837 
1838  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1839  return;
1840  }
1841 
1842  if (constraint->role_lh == RSC_ROLE_UNPROMOTED) {
1843  score_multiplier = -1;
1844  }
1845 
1846  rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
1847  rsc_lh->priority);
1848 }
1849 
1850 static void
1851 colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1852  pcmk__colocation_t *constraint)
1853 {
1854  const char *attribute = CRM_ATTR_ID;
1855  const char *value = NULL;
1856  GHashTable *work = NULL;
1857  GHashTableIter iter;
1858  pe_node_t *node = NULL;
1859 
1860  if (constraint->node_attribute != NULL) {
1861  attribute = constraint->node_attribute;
1862  }
1863 
1864  if (rsc_rh->allocated_to) {
1865  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1866 
1867  } else if (constraint->score < 0) {
1868  // Nothing to do (anti-colocation with something that is not running)
1869  return;
1870  }
1871 
1872  work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
1873 
1874  g_hash_table_iter_init(&iter, work);
1875  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1876  if (rsc_rh->allocated_to == NULL) {
1877  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
1878  constraint->id, rsc_lh->id, node->details->uname,
1879  constraint->score, rsc_rh->id);
1880  node->weight = pe__add_scores(-constraint->score, node->weight);
1881 
1882  } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) {
1883  if (constraint->score < CRM_SCORE_INFINITY) {
1884  pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
1885  constraint->id, rsc_lh->id,
1886  node->details->uname, constraint->score);
1887  node->weight = pe__add_scores(constraint->score, node->weight);
1888  }
1889 
1890  } else if (constraint->score >= CRM_SCORE_INFINITY) {
1891  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
1892  constraint->id, rsc_lh->id, node->details->uname,
1893  constraint->score, attribute);
1894  node->weight = pe__add_scores(-constraint->score, node->weight);
1895  }
1896  }
1897 
1898  if (can_run_any(work)
1899  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1900  g_hash_table_destroy(rsc_lh->allowed_nodes);
1901  rsc_lh->allowed_nodes = work;
1902  work = NULL;
1903 
1904  } else {
1905  pe_rsc_info(rsc_lh,
1906  "%s: Rolling back scores from %s (no available nodes)",
1907  rsc_lh->id, rsc_rh->id);
1908  }
1909 
1910  if (work) {
1911  g_hash_table_destroy(work);
1912  }
1913 }
1914 
/*!
 * \internal
 * \brief Apply a colocation constraint on behalf of the primary resource
 *
 * Filters the constraint, then either adjusts the dependent's promotion
 * priority or its allowed-node weights, or does nothing.
 *
 * NOTE(review): the signature line (rendered 1916) was dropped by this
 * extraction — presumably native_rsc_colocation_rh(pe_resource_t *rsc_lh,
 * pe_resource_t *rsc_rh, ...). The switch's case labels (rendered 1930 and
 * 1933, presumably influence_rsc_priority and influence_rsc_location) are
 * also missing; confirm against upstream before editing.
 */
1915 void
1917  pcmk__colocation_t *constraint,
1918  pe_working_set_t *data_set)
1919 {
1920  enum filter_colocation_res filter_results;
1921 
1922  CRM_ASSERT(rsc_lh);
1923  CRM_ASSERT(rsc_rh);
1924  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1925  pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
1926  ((constraint->score > 0)? "Colocating" : "Anti-colocating"),
1927  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1928 
1929  switch (filter_results) {
1931  influence_priority(rsc_lh, rsc_rh, constraint);
1932  break;
1934  colocation_match(rsc_lh, rsc_rh, constraint);
1935  break;
1936  case influence_nothing:
1937  default:
1938  return;
1939  }
1940 }
1941 
1942 static gboolean
1943 filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1944 {
1945  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1946  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1947  role2text(rsc_ticket->role_lh));
1948  return FALSE;
1949  }
1950 
1951  return TRUE;
1952 }
1953 
1954 void
 /* Apply a ticket constraint to a resource: if the ticket is revoked or in
  * standby, restrict where the resource may run (and possibly fence nodes),
  * according to the constraint's loss policy. Recurses into children for
  * collective resources.
  *
  * NOTE(review): this listing is missing original line 1955 (the line naming
  * the function and its parameters — the body uses rsc_lh, rsc_ticket, and
  * data_set) and lines 2024-2025 (the statement(s) inside the
  * loss_ticket_freeze branch). Confirm against the upstream file.
  */
1956 {
1957  if (rsc_ticket == NULL) {
1958  pe_err("rsc_ticket was NULL");
1959  return;
1960  }
1961 
1962  if (rsc_lh == NULL) {
1963  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1964  return;
1965  }
1966 
 /* Granted, non-standby tickets impose no restrictions */
1967  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1968  return;
1969  }
1970 
 /* Collective resource: apply the constraint to each child instead */
1971  if (rsc_lh->children) {
1972  GList *gIter = rsc_lh->children;
1973 
1974  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1975 
1976  for (; gIter != NULL; gIter = gIter->next) {
1977  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1978 
1979  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1980  }
1981  return;
1982  }
1983 
1984  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1985  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1986  role2text(rsc_ticket->role_lh));
1987 
 /* Ticket revoked while the resource is active: act per loss policy */
1988  if ((rsc_ticket->ticket->granted == FALSE)
1989  && (rsc_lh->running_on != NULL)) {
1990 
1991  GList *gIter = NULL;
1992 
1993  switch (rsc_ticket->loss_policy) {
1994  case loss_ticket_stop:
1995  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1996  break;
1997 
1998  case loss_ticket_demote:
1999  // Promotion score will be set to -INFINITY in promotion_order()
2000  if (rsc_ticket->role_lh != RSC_ROLE_PROMOTED) {
2001  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2002  }
2003  break;
2004 
2005  case loss_ticket_fence:
2006  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2007  return;
2008  }
2009 
2010  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2011 
 /* Fence every node currently running the resource */
2012  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
2013  pe_node_t *node = (pe_node_t *) gIter->data;
2014 
2015  pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
2016  }
2017  break;
2018 
2019  case loss_ticket_freeze:
2020  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2021  return;
2022  }
2023  if (rsc_lh->running_on != NULL) {
 /* (statement lines 2024-2025 missing from this listing —
  * presumably the "freeze" handling; verify upstream) */
2026  }
2027  break;
2028  }
2029 
 /* Ticket revoked but resource not active: keep it from starting */
2030  } else if (rsc_ticket->ticket->granted == FALSE) {
2031 
2032  if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
2033  || (rsc_ticket->loss_policy == loss_ticket_stop)) {
2034  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
2035  }
2036 
 /* Ticket granted but in standby: likewise restrict placement */
2037  } else if (rsc_ticket->ticket->standby) {
2038 
2039  if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
2040  || (rsc_ticket->loss_policy == loss_ticket_stop)) {
2041  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
2042  }
2043  }
2044 }
2045 
2046 enum pe_action_flags
 /* Return the action's current flag set unchanged (the primitive resource's
  * implementation of the action_flags resource method).
  * NOTE(review): original line 2047 — the line naming this function and its
  * parameters (an action pointer, and per the method signature presumably a
  * node) — is missing from this listing; verify upstream.
  */
2048 {
2049  return action->flags;
2050 }
2051 
2052 static inline bool
2053 is_primitive_action(pe_action_t *action)
2054 {
2055  return action && action->rsc && (action->rsc->variant == pe_native);
2056 }
2057 
 /* Propagate flags between the two actions of a restart ordering
  * ("first" stop then "then" start of the same primitive), making the stop
  * mandatory when the start is required or can't run.
  *
  * NOTE(review): original lines 2090 (part of the second if-condition) and
  * 2114-2115 (the condition/statement for the migrate-runnable implication)
  * are missing from this listing; verify upstream before editing.
  */
2069 static void
2070 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
2071  enum pe_action_flags filter)
2072 {
2073  const char *reason = NULL;
2074 
 /* Restart orderings are only created between primitives */
2075  CRM_ASSERT(is_primitive_action(first));
2076  CRM_ASSERT(is_primitive_action(then));
2077 
2078  // We need to update the action in two cases:
2079 
2080  // ... if 'then' is required
2081  if (pcmk_is_set(filter, pe_action_optional)
2082  && !pcmk_is_set(then->flags, pe_action_optional)) {
2083  reason = "restart";
2084  }
2085 
2086  /* ... if 'then' is unrunnable start of managed resource (if a resource
2087  * should restart but can't start, we still want to stop)
2088  */
2089  if (pcmk_is_set(filter, pe_action_runnable)
 /* (condition line 2090 missing from this listing — presumably a check
  * that 'then' is NOT runnable) */
2091  && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
2092  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
2093  reason = "stop";
2094  }
2095 
2096  if (reason == NULL) {
2097  return;
2098  }
2099 
2100  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
2101  first->uuid, then->uuid, reason);
2102 
2103  // Make 'first' required if it is runnable
2104  if (pcmk_is_set(first->flags, pe_action_runnable)) {
2105  pe_action_implies(first, then, pe_action_optional);
2106  }
2107 
2108  // Make 'first' required if 'then' is required
2109  if (!pcmk_is_set(then->flags, pe_action_optional)) {
2110  pe_action_implies(first, then, pe_action_optional);
2111  }
2112 
2113  // Make 'first' unmigratable if 'then' is unmigratable
 /* (lines 2114-2115 missing from this listing — presumably the condition
  * and pe_action_implies() call for pe_action_migrate_runnable) */
2116  }
2117 
2118  // Make 'then' unrunnable if 'first' is required but unrunnable
2119  if (!pcmk_is_set(first->flags, pe_action_optional)
2120  && !pcmk_is_set(first->flags, pe_action_runnable)) {
2121  pe_action_implies(then, first, pe_action_runnable);
2122  }
2123 }
2124 
2125 enum pe_graph_flags
 /* Update a pair of ordered actions' flags ("first" before "then") according
  * to the ordering type, and report which of the two changed so the caller
  * (update_action()) can re-process dependents.
  *
  * NOTE(review): this listing is missing original line 2126 (the line naming
  * this function and its first parameters — the body uses first, then, and
  * node) and several condition/continuation lines (2171, 2176, 2184-2185,
  * 2189, 2193, 2199-2200, 2203, 2211, 2231-2232, 2238, 2240-2241, 2244,
  * 2247, 2249-2251, 2259, 2277). Verify upstream before editing.
  */
2127  enum pe_action_flags flags, enum pe_action_flags filter,
2128  enum pe_ordering type, pe_working_set_t *data_set)
2129 {
2130  /* flags == get_action_flags(first, then_node) called from update_action() */
2131  enum pe_graph_flags changed = pe_graph_none;
 /* Snapshot the incoming flags so we can detect changes at the end */
2132  enum pe_action_flags then_flags = then->flags;
2133  enum pe_action_flags first_flags = first->flags;
2134 
2135  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2136  first->uuid, first->node ? first->node->details->uname : "[none]",
2137  first->flags, then->uuid, then->flags);
2138 
2139  if (type & pe_order_asymmetrical) {
2140  pe_resource_t *then_rsc = then->rsc;
2141  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2142 
2143  if (!then_rsc) {
2144  /* ignore */
2145  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
2146  /* ignore... if 'then' is supposed to be stopped after 'first', but
2147  * then is already stopped, there is nothing to be done when non-symmetrical. */
2148  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2149  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
 /* (condition line 2150 missing from this listing) */
2151  && then->node
2152  && pcmk__list_of_1(then_rsc->running_on)
2153  && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
2154  /* Ignore. If 'then' is supposed to be started after 'first', but
2155  * 'then' is already started, there is nothing to be done when
2156  * asymmetrical -- unless the start is mandatory, which indicates
2157  * the resource is restarting, and the ordering is still needed.
2158  */
2159  } else if (!(first->flags & pe_action_runnable)) {
2160  /* prevent 'then' action from happening if 'first' is not runnable and
2161  * 'then' has not yet occurred. */
2162  pe_action_implies(then, first, pe_action_optional);
2163  pe_action_implies(then, first, pe_action_runnable);
2164 
2165  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2166  } else {
2167  /* ignore... then is allowed to start/stop if it wants to. */
2168  }
2169  }
2170 
 /* (if-condition line 2171 missing from this listing) */
2172  && !pcmk_is_set(then_flags, pe_action_optional)) {
2173  // Then is required, and implies first should be, too
2174 
2175  if (pcmk_is_set(filter, pe_action_optional)
 /* (condition line 2176 missing from this listing) */
2177  && pcmk_is_set(first_flags, pe_action_optional)) {
2178  pe_rsc_trace(first->rsc,
2179  "Unset optional on %s because %s implies first",
2180  first->uuid, then->uuid);
2181  pe_action_implies(first, then, pe_action_optional);
2182  }
2183 
 /* (lines 2184-2185 missing from this listing) */
2186 
2187  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2188  first->uuid, then->uuid);
 /* (line 2189 missing from this listing) */
2190  }
2191  }
2192 
 /* (line 2193 missing from this listing) */
2194  if ((filter & pe_action_optional) &&
2195  ((then->flags & pe_action_optional) == FALSE) &&
2196  (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
2197  pe_action_implies(first, then, pe_action_optional);
2198 
 /* (lines 2199-2200 missing from this listing) */
2201 
2202  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
 /* (line 2203 missing from this listing) */
2204  }
2205  pe_rsc_trace(then->rsc,
2206  "Unset optional on %s because %s (promoted) implies first",
2207  first->uuid, then->uuid);
2208  }
2209  }
2210 
 /* (if-condition line 2211 missing from this listing) */
2212  && pcmk_is_set(filter, pe_action_optional)) {
2213 
2214  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2215  ((then->flags & pe_action_runnable) == FALSE)) {
2216 
2217  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
2218  pe_action_implies(first, then, pe_action_runnable);
2219  }
2220 
2221  if ((then->flags & pe_action_optional) == 0) {
2222  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2223  pe_action_implies(first, then, pe_action_optional);
2224  }
2225  }
2226 
2227  if ((type & pe_order_pseudo_left)
2228  && pcmk_is_set(filter, pe_action_optional)) {
2229 
2230  if ((first->flags & pe_action_runnable) == FALSE) {
 /* (lines 2231-2232 missing from this listing) */
2233  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2234  }
2235 
2236  }
2237 
 /* (if-condition line 2238 missing from this listing) */
2239  && pcmk_is_set(filter, pe_action_runnable)
 /* (lines 2240-2241 missing from this listing) */
2242  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2243  pe_action_implies(then, first, pe_action_runnable);
 /* (line 2244 missing from this listing) */
2245  }
2246 
 /* (if-condition line 2247 missing from this listing) */
2248  && pcmk_is_set(filter, pe_action_optional)
 /* (lines 2249-2251 missing from this listing) */
2252 
2253  pe_rsc_trace(then->rsc,
2254  "Unset optional on %s because %s implies 'then'",
2255  then->uuid, first->uuid);
2256  pe_action_implies(then, first, pe_action_optional);
2257  }
2258 
 /* (if-condition line 2259 missing from this listing — presumably the
  * pe_order_restart check guarding the call below) */
2260  handle_restart_ordering(first, then, filter);
2261  }
2262 
 /* Report which action(s) changed so dependents get re-processed */
2263  if (then_flags != then->flags) {
2264  pe__set_graph_flags(changed, first, pe_graph_updated_then);
2265  pe_rsc_trace(then->rsc,
2266  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2267  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2268  then_flags, first->uuid, first->flags);
2269 
2270  if(then->rsc && then->rsc->parent) {
2271  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2272  update_action(then, data_set);
2273  }
2274  }
2275 
2276  if (first_flags != first->flags) {
 /* (line 2277 missing from this listing — presumably sets
  * pe_graph_updated_first in 'changed') */
2278  pe_rsc_trace(first->rsc,
2279  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2280  first->uuid, first->node ? first->node->details->uname : "[none]",
2281  first->flags, first_flags, then->uuid, then->flags);
2282  }
2283 
2284  return changed;
2285 }
2286 
2287 void
 /* Apply a location constraint to a primitive resource by merging the
  * constraint's node weights into the resource's allowed-node table, and
  * raising each node's probe/discovery mode where the constraint demands it.
  *
  * NOTE(review): original line 2288 — the line naming this function and its
  * parameters (the body uses rsc and constraint) — is missing from this
  * listing; verify upstream.
  */
2289 {
2290  GList *gIter = NULL;
2291  bool need_role = false;
2292 
2293  CRM_CHECK((constraint != NULL) && (rsc != NULL), return);
2294 
2295  // If a role was specified, ensure constraint is applicable
2296  need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN);
2297  if (need_role && (constraint->role_filter != rsc->next_role)) {
2298  pe_rsc_trace(rsc,
2299  "Not applying %s to %s because role will be %s not %s",
2300  constraint->id, rsc->id, role2text(rsc->next_role),
2301  role2text(constraint->role_filter));
2302  return;
2303  }
2304 
2305  if (constraint->node_list_rh == NULL) {
2306  pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
2307  constraint->id, rsc->id);
2308  return;
2309  }
2310 
2311  pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id,
2312  (need_role? " for role " : ""),
2313  (need_role? role2text(constraint->role_filter) : ""), rsc->id);
2314 
2315  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2316  pe_node_t *node = (pe_node_t *) gIter->data;
2317  pe_node_t *other_node = NULL;
2318 
2319  other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2320 
 /* Node already allowed: merge the scores; otherwise add a copy */
2321  if (other_node != NULL) {
2322  pe_rsc_trace(rsc, "* + %d on %s",
2323  node->weight, node->details->uname);
2324  other_node->weight = pe__add_scores(other_node->weight,
2325  node->weight);
2326 
2327  } else {
2328  pe_rsc_trace(rsc, "* = %d on %s",
2329  node->weight, node->details->uname);
2330  other_node = pe__copy_node(node);
2331  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2332  }
2333 
 /* Only ever raise the discovery mode, never lower it */
2334  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2335  if (constraint->discover_mode == pe_discover_exclusive) {
2336  rsc->exclusive_discover = TRUE;
2337  }
2338  /* exclusive > never > always... always is default */
2339  other_node->rsc_discover_mode = constraint->discover_mode;
2340  }
2341  }
2342 }
2343 
2344 void
 /* Add the resource's scheduled actions to the transition graph, then recurse
  * into children (the primitive "expand" resource method).
  *
  * NOTE(review): original line 2345 — the line naming this function and its
  * parameters (the body uses rsc and data_set) — is missing from this
  * listing; verify upstream.
  */
2346 {
2347  GList *gIter = NULL;
2348 
2349  CRM_ASSERT(rsc);
2350  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2351 
2352  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2353  pe_action_t *action = (pe_action_t *) gIter->data;
2354 
2355  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2356  graph_element_from_action(action, data_set);
2357  }
2358 
 /* Delegate to each child's own expand method */
2359  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2360  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2361 
2362  child_rsc->cmds->expand(child_rsc, data_set);
2363  }
2364 }
2365 
 /* Sanity guard for stop scheduling: unless the current node is unclean (in
  * which case the stop becomes a pseudo-op), the computed stop action must
  * exist and must no longer be optional — log and assert otherwise.
  * Relies on variables named `current`, `stop`, and `rsc` being in scope at
  * the expansion site; `lineno` identifies the call site in the log.
  */
2366 #define STOP_SANITY_ASSERT(lineno) do { \
2367  if(current && current->details->unclean) { \
2368  /* It will be a pseudo op */ \
2369  } else if(stop == NULL) { \
2370  crm_err("%s:%d: No stop action exists for %s", \
2371  __func__, lineno, rsc->id); \
2372  CRM_ASSERT(stop != NULL); \
2373  } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
2374  crm_err("%s:%d: Action %s is still optional", \
2375  __func__, lineno, stop->uuid); \
2376  CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
2377  } \
2378  } while(0)
2379 
2380 void
 /* Emit a human-readable summary of what the scheduler intends to do with a
  * resource, delegating to bundle-specific logging for containers and
  * recursing into children for other collective resources.
  *
  * NOTE(review): original line 2381 — the line naming this function and its
  * parameters (the body uses rsc and data_set) — is missing from this
  * listing; verify upstream.
  */
2382 {
2383  pcmk__output_t *out = data_set->priv;
2384 
2385  pe_node_t *next = NULL;
2386  pe_node_t *current = NULL;
2387 
2388  gboolean moving = FALSE;
2389 
2390  if(rsc->variant == pe_container) {
2391  pcmk__bundle_log_actions(rsc, data_set);
2392  return;
2393  }
2394 
2395  if (rsc->children) {
2396  GList *gIter = NULL;
2397 
2398  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2399  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2400 
2401  LogActions(child_rsc, data_set);
2402  }
2403  return;
2404  }
2405 
2406  next = rsc->allocated_to;
2407  if (rsc->running_on) {
2408  current = pe__current_node(rsc);
2409  if (rsc->role == RSC_ROLE_STOPPED) {
2410  /*
2411  * This can occur when resources are being recovered
2412  * We fiddle with the current role in native_create_actions()
2413  */
2414  rsc->role = RSC_ROLE_STARTED;
2415  }
2416  }
2417 
2418  if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2419  /* Don't log stopped orphans */
2420  return;
2421  }
2422 
2423  out->message(out, "rsc-action", rsc, current, next, moving);
2424 }
2425 
 /* Schedule a stop of the resource on every node it is currently running on,
  * forcing the stop to be mandatory when a partial migration must be cleaned
  * up, and optionally scheduling deletion/unfencing-related ordering.
  * Always returns TRUE.
  *
  * NOTE(review): original lines 2457 (the statement inside the unmanaged
  * branch) and 2464 (the if-condition guarding the unfencing block below)
  * are missing from this listing; verify upstream before editing.
  */
2426 gboolean
2427 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2428 {
2429  GList *gIter = NULL;
2430 
2431  CRM_ASSERT(rsc);
2432  pe_rsc_trace(rsc, "%s", rsc->id);
2433 
2434  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2435  pe_node_t *current = (pe_node_t *) gIter->data;
2436  pe_action_t *stop;
2437 
 /* A partially migrated resource: skip stopping on the migration
  * target, and force the stop everywhere else */
2438  if (rsc->partial_migration_target) {
2439  if (rsc->partial_migration_target->details == current->details) {
2440  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2441  next->details->uname, rsc->id);
2442  continue;
2443  } else {
2444  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2445  optional = FALSE;
2446  }
2447  }
2448 
2449  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2450  stop = stop_action(rsc, current, optional);
2451 
2452  if(rsc->allocated_to == NULL) {
2453  pe_action_set_reason(stop, "node availability", TRUE);
2454  }
2455 
2456  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 /* (statement line 2457 missing from this listing — presumably makes
  * the stop a non-executed/pseudo action for unmanaged resources) */
2458  }
2459 
2460  if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
2461  DeleteRsc(rsc, current, optional, data_set);
2462  }
2463 
 /* (if-condition line 2464 missing from this listing — presumably a
  * check that the resource requires unfencing) */
2465  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
2466 
2467  order_actions(stop, unfence, pe_order_implies_first);
2468  if (!node_has_been_unfenced(current)) {
2469  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2470  }
2471  }
2472  }
2473 
2474  return TRUE;
2475 }
2476 
 /* Order a probe/start action after the node's unfence operation when the
  * resource needs it, and trigger unfencing if the node has never been
  * unfenced.
  *
  * NOTE(review): original line 2489 — the continuation of the if-condition
  * below (presumably "|| resource requires unfencing") — is missing from
  * this listing; verify upstream.
  */
2477 static void
2478 order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
2479  enum pe_ordering order, pe_working_set_t *data_set)
2480 {
2481  /* When unfencing is in use, we order unfence actions before any probe or
2482  * start of resources that require unfencing, and also of fence devices.
2483  *
2484  * This might seem to violate the principle that fence devices require
2485  * only quorum. However, fence agents that unfence often don't have enough
2486  * information to even probe or start unless the node is first unfenced.
2487  */
2488  if (is_unfence_device(rsc, data_set)
 /* (condition line 2489 missing from this listing) */
2490 
2491  /* Start with an optional ordering. Requiring unfencing would result in
2492  * the node being unfenced, and all its resources being stopped,
2493  * whenever a new resource is added -- which would be highly suboptimal.
2494  */
2495  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
2496 
2497  order_actions(unfence, action, order);
2498 
2499  if (!node_has_been_unfenced(node)) {
2500  // But unfencing is required if it has never been done
2501  char *reason = crm_strdup_printf("required by %s %s",
2502  rsc->id, action->task);
2503 
2504  trigger_unfencing(NULL, node, reason, NULL, data_set);
2505  free(reason);
2506  }
2507  }
2508 }
2509 
 /* Schedule a start of the resource on its chosen node, ordered after any
  * required unfencing. Always returns TRUE.
  *
  * NOTE(review): original line 2522 — the statement inside the if below
  * (presumably clearing the start's optional flag when the caller requires
  * a mandatory start) — is missing from this listing; verify upstream.
  */
2510 gboolean
2511 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2512 {
2513  pe_action_t *start = NULL;
2514 
2515  CRM_ASSERT(rsc);
2516  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2517  start = start_action(rsc, next, TRUE);
2518 
2519  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2520 
2521  if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
 /* (statement line 2522 missing from this listing) */
2523  }
2524 
2525 
2526  return TRUE;
2527 }
2528 
 /* Schedule a promote of the resource on its chosen node, but only if all of
  * the resource's start actions on that node are runnable; otherwise mark
  * the promotion as canceled. Always returns TRUE.
  *
  * NOTE(review): original line 2563 — the statement inside the final loop
  * (presumably marking each promote action unrunnable) — is missing from
  * this listing; verify upstream.
  */
2529 gboolean
2530 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2531 {
2532  GList *gIter = NULL;
2533  gboolean runnable = TRUE;
2534  GList *action_list = NULL;
2535 
2536  CRM_ASSERT(rsc);
2537  CRM_CHECK(next != NULL, return FALSE);
2538  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2539 
 /* The promotion depends on every start on this node being runnable */
2540  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2541 
2542  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2543  pe_action_t *start = (pe_action_t *) gIter->data;
2544 
2545  if (!pcmk_is_set(start->flags, pe_action_runnable)) {
2546  runnable = FALSE;
2547  }
2548  }
2549  g_list_free(action_list);
2550 
2551  if (runnable) {
2552  promote_action(rsc, next, optional);
2553  return TRUE;
2554  }
2555 
2556  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2557 
2558  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2559 
2560  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2561  pe_action_t *promote = (pe_action_t *) gIter->data;
2562 
 /* (statement line 2563 missing from this listing) */
2564  }
2565 
2566  g_list_free(action_list);
2567  return TRUE;
2568 }
2569 
2570 gboolean
2571 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2572 {
2573  GList *gIter = NULL;
2574 
2575  CRM_ASSERT(rsc);
2576  pe_rsc_trace(rsc, "%s", rsc->id);
2577 
2578  /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
2579  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2580  pe_node_t *current = (pe_node_t *) gIter->data;
2581 
2582  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2583  demote_action(rsc, current, optional);
2584  }
2585  return TRUE;
2586 }
2587 
2588 gboolean
2589 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2590 {
2591  CRM_ASSERT(rsc);
2592  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2593  CRM_CHECK(FALSE, return FALSE);
2594  return FALSE;
2595 }
2596 
2597 gboolean
2598 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2599 {
2600  CRM_ASSERT(rsc);
2601  pe_rsc_trace(rsc, "%s", rsc->id);
2602  return FALSE;
2603 }
2604 
2605 gboolean
2606 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2607 {
2608  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2609  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2610  return FALSE;
2611 
2612  } else if (node == NULL) {
2613  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2614  return FALSE;
2615 
2616  } else if (node->details->unclean || node->details->online == FALSE) {
2617  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2618  node->details->uname);
2619  return FALSE;
2620  }
2621 
2622  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2623 
2624  delete_action(rsc, node, optional);
2625 
2626  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2627  optional ? pe_order_implies_then : pe_order_optional, data_set);
2628 
2629  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2630  optional ? pe_order_implies_then : pe_order_optional, data_set);
2631 
2632  return TRUE;
2633 }
2634 
2635 gboolean
 /* Create a probe (one-shot monitor) of the resource on the given node when
  * its state there is unknown, respecting discovery settings, remote/guest
  * node limitations, and container state; also adds orderings so the probe
  * completes before any start or agent reload.
  *
  * NOTE(review): this listing is missing original line 2636 (the line naming
  * this function and its first parameters — the body uses rsc, node, and
  * complete), line 2639 (presumably declaring the `flags` ordering variable
  * used near the end), line 2807 (presumably clearing the probe's optional
  * flag), line 2824 (the continuation of the crm_debug() call below), and
  * line 2837 (the statement adjusting `flags` when the probe is unrunnable).
  * Verify upstream before editing.
  */
2637  gboolean force, pe_working_set_t * data_set)
2638 {
2640  char *key = NULL;
2641  pe_action_t *probe = NULL;
2642  pe_node_t *running = NULL;
2643  pe_node_t *allowed = NULL;
2644  pe_resource_t *top = uber_parent(rsc);
2645 
 /* Cached string forms of the expected probe return codes */
2646  static const char *rc_promoted = NULL;
2647  static const char *rc_inactive = NULL;
2648 
2649  if (rc_inactive == NULL) {
2650  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
2651  rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
2652  }
2653 
2654  CRM_CHECK(node != NULL, return FALSE);
2655  if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
2656  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2657  return FALSE;
2658  }
2659 
 /* Some probes are impossible on Pacemaker Remote / guest nodes */
2660  if (pe__is_guest_or_remote_node(node)) {
2661  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2662 
2663  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
2664  pe_rsc_trace(rsc,
2665  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2666  rsc->id, node->details->id);
2667  return FALSE;
2668  } else if (pe__is_guest_node(node)
2669  && pe__resource_contains_guest_node(data_set, rsc)) {
2670  pe_rsc_trace(rsc,
2671  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2672  rsc->id, node->details->id);
2673  return FALSE;
2674  } else if (rsc->is_remote_node) {
2675  pe_rsc_trace(rsc,
2676  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2677  rsc->id, node->details->id);
2678  return FALSE;
2679  }
2680  }
2681 
 /* Collective resource: probe each child instead */
2682  if (rsc->children) {
2683  GList *gIter = NULL;
2684  gboolean any_created = FALSE;
2685 
2686  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2687  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2688 
2689  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2690  || any_created;
2691  }
2692 
2693  return any_created;
2694 
2695  } else if ((rsc->container) && (!rsc->is_remote_node) {
2696  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2697  return FALSE;
2698  }
2699 
2700  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2701  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2702  return FALSE;
2703  }
2704 
2705  // Check whether resource is already known on node
2706  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2707  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2708  return FALSE;
2709  }
2710 
2711  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2712 
 /* Honor exclusive-discovery settings */
2713  if (rsc->exclusive_discover || top->exclusive_discover) {
2714  if (allowed == NULL) {
2715  /* exclusive discover is enabled and this node is not in the allowed list. */
2716  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2717  return FALSE;
2718  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2719  /* exclusive discover is enabled and this node is not marked
2720  * as a node this resource should be discovered on */
2721  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2722  return FALSE;
2723  }
2724  }
2725 
2726  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2727  /* If this node was allowed to host this resource it would
2728  * have been explicitly added to the 'allowed_nodes' list.
2729  * However it wasn't and the node has discovery disabled, so
2730  * no need to probe for this resource.
2731  */
2732  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2733  return FALSE;
2734  }
2735 
2736  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2737  /* this resource is marked as not needing to be discovered on this node */
2738  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2739  return FALSE;
2740  }
2741 
 /* On guest nodes, whether to probe depends on the container's state */
2742  if (pe__is_guest_node(node)) {
2743  pe_resource_t *remote = node->details->remote_rsc->container;
2744 
2745  if(remote->role == RSC_ROLE_STOPPED) {
2746  /* If the container is stopped, then we know anything that
2747  * might have been inside it is also stopped and there is
2748  * no need to probe.
2749  *
2750  * If we don't know the container's state on the target
2751  * either:
2752  *
2753  * - the container is running, the transition will abort
2754  * and we'll end up in a different case next time, or
2755  *
2756  * - the container is stopped
2757  *
2758  * Either way there is no need to probe.
2759  *
2760  */
2761  if(remote->allocated_to
2762  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2763  /* For safety, we order the 'rsc' start after 'remote'
2764  * has been probed.
2765  *
2766  * Using 'top' helps for groups, but we may need to
2767  * follow the start's ordering chain backwards.
2768  */
2769  custom_action_order(remote,
2770  pcmk__op_key(remote->id, RSC_STATUS, 0),
2771  NULL, top,
2772  pcmk__op_key(top->id, RSC_START, 0), NULL,
2773  pe_order_optional, data_set);
2774  }
2775  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2776  rsc->id, node->details->id, remote->id);
2777  return FALSE;
2778 
2779  /* Here we really we want to check if remote->stop is required,
2780  * but that information doesn't exist yet
2781  */
2782  } else if(node->details->remote_requires_reset
2783  || node->details->unclean
2784  || pcmk_is_set(remote->flags, pe_rsc_failed)
2785  || remote->next_role == RSC_ROLE_STOPPED
2786  || (remote->allocated_to
2787  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2788  ) {
2789  /* The container is stopping or restarting, don't start
2790  * 'rsc' until 'remote' stops as this also implies that
2791  * 'rsc' is stopped - avoiding the need to probe
2792  */
2793  custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
2794  NULL, top, pcmk__op_key(top->id, RSC_START, 0),
2795  NULL, pe_order_optional, data_set);
2796  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2797  rsc->id, node->details->id, remote->id);
2798  return FALSE;
2799 /* } else {
2800  * The container is running so there is no problem probing it
2801  */
2802  }
2803  }
2804 
2805  key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
2806  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
 /* (statement line 2807 missing from this listing — presumably clears the
  * probe's optional flag) */
2808 
2809  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
2810 
2811  /*
2812  * We need to know if it's running_on (not just known_on) this node
2813  * to correctly determine the target rc.
2814  */
2815  running = pe_find_node_id(rsc->running_on, node->details->id);
2816  if (running == NULL) {
2817  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2818 
2819  } else if (rsc->role == RSC_ROLE_PROMOTED) {
2820  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
2821  }
2822 
2823  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
 /* (continuation line 2824 of the crm_debug() call missing from this
  * listing) */
2825 
2826  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2827  top = rsc;
2828  } else {
2829  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2830  }
2831 
2832  if (!pcmk_is_set(probe->flags, pe_action_runnable)
2833  && (rsc->running_on == NULL)) {
2834  /* Prevent the start from occurring if rsc isn't active, but
2835  * don't cause it to stop if it was active already
2836  */
 /* (statement line 2837 missing from this listing — presumably adjusts
  * the 'flags' ordering used below) */
2838  }
2839 
2840  custom_action_order(rsc, NULL, probe,
2841  top, pcmk__op_key(top->id, RSC_START, 0), NULL,
2842  flags, data_set);
2843 
2844  // Order the probe before any agent reload
2845  custom_action_order(rsc, NULL, probe,
2846  top, reload_key(rsc), NULL,
2847  pe_order_optional, data_set);
2848 
2849 #if 0
2850  // complete is always null currently
2851  if (!is_unfence_device(rsc, data_set)) {
2852  /* Normally rsc.start depends on probe complete which depends
2853  * on rsc.probe. But this can't be the case for fence devices
2854  * with unfencing, as it would create graph loops.
2855  *
2856  * So instead we explicitly order 'rsc.probe then rsc.start'
2857  */
2858  order_actions(probe, complete, pe_order_implies_then);
2859  }
2860 #endif
2861  return TRUE;
2862 }
2863 
2873 static bool
2874 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
2875 {
2876  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
2877  return TRUE;
2878 
2879  } else if ((rsc->variant == pe_native)
2880  && pe_rsc_is_anon_clone(rsc->parent)
2881  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
2882  /* We check only the parent, not the uber-parent, because we cannot
2883  * assume that the resource is known if it is in an anonymously cloned
2884  * group (which may be only partially known).
2885  */
2886  return TRUE;
2887  }
2888  return FALSE;
2889 }
2890 
 /* Order a resource's start-type actions relative to a node's fencing,
  * based on each action's "needs" requirement: actions needing fencing are
  * ordered after it, and starts needing quorum are ordered after fencing
  * when the resource's state on the fenced node is unknown.
  *
  * NOTE(review): original line 2937 — the final argument line of the
  * order_actions() call near the end (presumably the ordering flags) — is
  * missing from this listing; verify upstream.
  */
2899 static void
2900 native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
2901 {
2902  pe_node_t *target;
2903  GList *gIter = NULL;
2904 
2905  CRM_CHECK(stonith_op && stonith_op->node, return);
2906  target = stonith_op->node;
2907 
2908  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2909  pe_action_t *action = (pe_action_t *) gIter->data;
2910 
2911  switch (action->needs) {
2912  case rsc_req_nothing:
2913  // Anything other than start or promote requires nothing
2914  break;
2915 
2916  case rsc_req_stonith:
2917  order_actions(stonith_op, action, pe_order_optional);
2918  break;
2919 
2920  case rsc_req_quorum:
2921  if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
2922  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
2923  && !rsc_is_known_on(rsc, target)) {
2924 
2925  /* If we don't know the status of the resource on the node
2926  * we're about to shoot, we have to assume it may be active
2927  * there. Order the resource start after the fencing. This
2928  * is analogous to waiting for all the probes for a resource
2929  * to complete before starting it.
2930  *
2931  * The most likely explanation is that the DC died and took
2932  * its status with it.
2933  */
2934  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
2935  target->details->uname);
2936  order_actions(stonith_op, action,
 /* (ordering-flags line 2937 missing from this listing) */
2938  }
2939  break;
2940  }
2941  }
2942 }
2943 
2944 static void
2945 native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
2946 {
2947  GList *gIter = NULL;
2948  GList *action_list = NULL;
2949  bool order_implicit = false;
2950 
2951  pe_resource_t *top = uber_parent(rsc);
2952  pe_action_t *parent_stop = NULL;
2953  pe_node_t *target;
2954 
2955  CRM_CHECK(stonith_op && stonith_op->node, return);
2956  target = stonith_op->node;
2957 
2958  /* Get a list of stop actions potentially implied by the fencing */
2959  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
2960 
2961  /* If resource requires fencing, implicit actions must occur after fencing.
2962  *
2963  * Implied stops and demotes of resources running on guest nodes are always
2964  * ordered after fencing, even if the resource does not require fencing,
2965  * because guest node "fencing" is actually just a resource stop.
2966  */
2968  || pe__is_guest_node(target)) {
2969 
2970  order_implicit = true;
2971  }
2972 
2973  if (action_list && order_implicit) {
2974  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
2975  }
2976 
2977  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2978  pe_action_t *action = (pe_action_t *) gIter->data;
2979 
2980  // The stop would never complete, so convert it into a pseudo-action.
2982 
2983  if (order_implicit) {
2985 
2986  /* Order the stonith before the parent stop (if any).
2987  *
2988  * Also order the stonith before the resource stop, unless the
2989  * resource is inside a bundle -- that would cause a graph loop.
2990  * We can rely on the parent stop's ordering instead.
2991  *
2992  * User constraints must not order a resource in a guest node
2993  * relative to the guest node container resource. The
2994  * pe_order_preserve flag marks constraints as generated by the
2995  * cluster and thus immune to that check (and is irrelevant if
2996  * target is not a guest).
2997  */
2998  if (!pe_rsc_is_bundled(rsc)) {
2999  order_actions(stonith_op, action, pe_order_preserve);
3000  }
3001  order_actions(stonith_op, parent_stop, pe_order_preserve);
3002  }
3003 
3004  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3005  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3006  rsc->id, (order_implicit? "after" : "because"),
3007  target->details->uname);
3008  } else {
3009  crm_info("%s is implicit %s %s is fenced",
3010  action->uuid, (order_implicit? "after" : "because"),
3011  target->details->uname);
3012  }
3013 
3014  if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
3015  /* Create a second notification that will be delivered
3016  * immediately after the node is fenced
3017  *
3018  * Basic problem:
3019  * - C is a clone active on the node to be shot and stopping on another
3020  * - R is a resource that depends on C
3021  *
3022  * + C.stop depends on R.stop
3023  * + C.stopped depends on STONITH
3024  * + C.notify depends on C.stopped
3025  * + C.healthy depends on C.notify
3026  * + R.stop depends on C.healthy
3027  *
3028  * The extra notification here changes
3029  * + C.healthy depends on C.notify
3030  * into:
3031  * + C.healthy depends on C.notify'
3032  * + C.notify' depends on STONITH'
3033  * thus breaking the loop
3034  */
3035  create_secondary_notification(action, rsc, stonith_op, data_set);
3036  }
3037 
3038 /* From Bug #1601, successful fencing must be an input to a failed resources stop action.
3039 
3040  However given group(rA, rB) running on nodeX and B.stop has failed,
3041  A := stop healthy resource (rA.stop)
3042  B := stop failed resource (pseudo operation B.stop)
3043  C := stonith nodeX
3044  A requires B, B requires C, C requires A
3045  This loop would prevent the cluster from making progress.
3046 
3047  This block creates the "C requires A" dependency and therefore must (at least
3048  for now) be disabled.
3049 
3050  Instead, run the block above and treat all resources on nodeX as B would be
3051  (marked as a pseudo op depending on the STONITH).
3052 
3053  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3054 
3055  } else if(is_stonith == FALSE) {
3056  crm_info("Moving healthy resource %s"
3057  " off %s before fencing",
3058  rsc->id, node->details->uname);
3059 
3060  * stop healthy resources before the
3061  * stonith op
3062  *
3063  custom_action_order(
3064  rsc, stop_key(rsc), NULL,
3065  NULL,strdup(CRM_OP_FENCE),stonith_op,
3066  pe_order_optional, data_set);
3067 */
3068  }
3069 
3070  g_list_free(action_list);
3071 
3072  /* Get a list of demote actions potentially implied by the fencing */
3073  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3074 
3075  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3076  pe_action_t *action = (pe_action_t *) gIter->data;
3077 
3078  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3079  || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3080 
3081  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3082  pe_rsc_info(rsc,
3083  "Demote of failed resource %s is implicit after %s is fenced",
3084  rsc->id, target->details->uname);
3085  } else {
3086  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3087  action->uuid, target->details->uname);
3088  }
3089 
3090  /* The demote would never complete and is now implied by the
3091  * fencing, so convert it into a pseudo-action.
3092  */
3094 
3095  if (pe_rsc_is_bundled(rsc)) {
3096  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3097 
3098  } else if (order_implicit) {
3100  }
3101  }
3102  }
3103 
3104  g_list_free(action_list);
3105 }
3106 
3107 void
3109 {
3110  if (rsc->children) {
3111  GList *gIter = NULL;
3112 
3113  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3114  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3115 
3116  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3117  }
3118 
3119  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3120  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3121 
3122  } else {
3123  native_start_constraints(rsc, stonith_op, data_set);
3124  native_stop_constraints(rsc, stonith_op, data_set);
3125  }
3126 }
3127 
3128 void
3130 {
3131  GList *gIter = NULL;
3132  pe_action_t *reload = NULL;
3133 
3134  if (rsc->children) {
3135  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3136  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3137 
3138  ReloadRsc(child_rsc, node, data_set);
3139  }
3140  return;
3141 
3142  } else if (rsc->variant > pe_native) {
3143  /* Complex resource with no children */
3144  return;
3145 
3146  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3147  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3148  return;
3149 
3150  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3151  /* We don't need to specify any particular actions here, normal failure
3152  * recovery will apply.
3153  */
3154  pe_rsc_trace(rsc, "%s: preventing agent reload because failed",
3155  rsc->id);
3156  return;
3157 
3158  } else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
3159  /* If a resource's configuration changed while a start was pending,
3160  * force a full restart.
3161  */
3162  pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
3163  rsc->id);
3164  stop_action(rsc, node, FALSE);
3165  return;
3166 
3167  } else if (node == NULL) {
3168  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3169  return;
3170  }
3171 
3172  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3174 
3175  reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
3176  FALSE, TRUE, data_set);
3177  pe_action_set_reason(reload, "resource definition change", FALSE);
3178 
3179  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3181  data_set);
3182  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3184  data_set);
3185 }
3186 
3187 void
3188 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
3189 {
3190  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3191  pe_resource_t *parent;
3192 
3193  if (value) {
3194  char *name = NULL;
3195 
3197  crm_xml_add(xml, name, value);
3198  free(name);
3199  }
3200 
3201  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3202  if (value) {
3203  char *name = NULL;
3204 
3206  crm_xml_add(xml, name, value);
3207  free(name);
3208  }
3209 
3210  for (parent = rsc; parent != NULL; parent = parent->parent) {
3211  if (parent->container) {
3213  }
3214  }
3215 }
Services API.
pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set)
#define LOG_TRACE
Definition: logging.h:36
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:218
pe_node_t * pe_find_node(GList *node_list, const char *uname)
Definition: status.c:434
enum rsc_role_e role_filter
Definition: internal.h:171
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
enum rsc_start_requirement needs
Definition: pe_types.h:421
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:149
#define RSC_STOP
Definition: crm.h:204
#define crm_notice(fmt, args...)
Definition: logging.h:352
GHashTable * known_on
Definition: pe_types.h:368
xmlNode * ops_xml
Definition: pe_types.h:326
bool pe__is_guest_or_remote_node(const pe_node_t *node)
Definition: remote.c:41
pe_resource_t * rsc_lh
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:19
gboolean unseen
Definition: pe_types.h:218
#define INFINITY
Definition: crm.h:99
GList * rsc_cons
Definition: pe_types.h:358
#define LOAD_STOPPED
#define pe__set_action_flags(action, flags_to_set)
Definition: internal.h:59
#define pe__show_node_weights(level, rsc, text, nodes, data_set)
Definition: internal.h:353
#define promote_action(rsc, node, optional)
Definition: internal.h:393
GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
#define stop_action(rsc, node, optional)
Definition: internal.h:377
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:999
pe_resource_t * container
Definition: pe_types.h:381
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:955
pe_node_t * partial_migration_source
Definition: pe_types.h:366
int(* message)(pcmk__output_t *out, const char *message_id,...)
enum rsc_role_e role
Definition: pe_types.h:371
gboolean PromoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
GList * children
Definition: pe_types.h:378
resource_alloc_functions_t * cmds
Definition: pe_types.h:334
void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, const char *state_lh, const char *state_rh, bool influence, pe_working_set_t *data_set)
gboolean standby
Definition: pe_types.h:461
#define pe_action_implies(action, reason, flag)
Definition: internal.h:508
#define pe_rsc_stop
Definition: pe_types.h:262
#define delete_action(rsc, node, optional)
Definition: internal.h:367
#define pe_flag_remove_after_stop
Definition: pe_types.h:110
pe_resource_t * rsc
Definition: pe_types.h:411
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:225
enum rsc_role_e next_role
Definition: pe_types.h:372
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:353
#define reload_key(rsc)
Definition: internal.h:381
#define pcmk__config_err(fmt...)
gboolean StartRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * remote_rsc
Definition: pe_types.h:230
pe_resource_t * rsc_rh
GHashTable * meta
Definition: pe_types.h:374
gboolean native_assign_node(pe_resource_t *rsc, pe_node_t *chosen, gboolean force)
#define pe_rsc_unique
Definition: pe_types.h:254
#define pe_rsc_notify
Definition: pe_types.h:253
void LogActions(pe_resource_t *rsc, pe_working_set_t *data_set)
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:101
resource_object_functions_t * fns
Definition: pe_types.h:333
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear)
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:291
#define RSC_DELETE
Definition: crm.h:195
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:324
pe_node_t * pe__copy_node(const pe_node_t *this_node)
Definition: utils.c:142
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
void resource_location(pe_resource_t *rsc, pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1594
GList * rsc_cons_lhs
Definition: pe_types.h:357
pe_ticket_t * ticket
enum crm_ais_msg_types type
Definition: cpg.c:48
#define demote_key(rsc)
Definition: internal.h:402
pe_node_t * partial_migration_target
Definition: pe_types.h:365
#define RSC_START
Definition: crm.h:201
pe_node_t *(* allocate)(pe_resource_t *, pe_node_t *, pe_working_set_t *)
gboolean RoleError(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_node_t * allocated_to
Definition: pe_types.h:364
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:264
gboolean can_run_resources(const pe_node_t *node)
#define pe_flag_have_quorum
Definition: pe_types.h:94
#define CRM_SCORE_INFINITY
Definition: crm.h:85
#define pe_proc_err(fmt...)
Definition: internal.h:32
gboolean remote_requires_reset
Definition: pe_types.h:224
#define RSC_MIGRATE
Definition: crm.h:198
char * crm_meta_name(const char *field)
Definition: utils.c:511
const char * action
Definition: pcmk_fence.c:30
#define pe__set_resource_flags(resource, flags_to_set)
Definition: internal.h:47
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t *data_set)
Definition: utils.c:2149
GList * nodes
Definition: pe_types.h:157
#define pe_flag_stop_everything
Definition: pe_types.h:105
#define demote_action(rsc, node, optional)
Definition: internal.h:403
#define pe_rsc_provisional
Definition: pe_types.h:258
const char * role2text(enum rsc_role_e role)
Definition: common.c:459
#define CRM_ATTR_UNFENCED
Definition: crm.h:121
int weight
Definition: pe_types.h:241
#define pe_rsc_merging
Definition: pe_types.h:260
GList * dangling_migrations
Definition: pe_types.h:379
enum pe_discover_e discover_mode
Definition: internal.h:172
#define CRMD_ACTION_RELOAD_AGENT
Definition: crm.h:172
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2278
gboolean(* create_probe)(pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *)
gboolean DemoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_action_flags
Definition: pe_types.h:291
#define pe_rsc_allow_migrate
Definition: pe_types.h:273
#define pe_rsc_failed
Definition: pe_types.h:267
pe_action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1854
#define crm_debug(fmt, args...)
Definition: logging.h:355
void native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean(* rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:903
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:66
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:241
void native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
void native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:530
bool pe__is_guest_node(const pe_node_t *node)
Definition: remote.c:33
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
#define stop_key(rsc)
Definition: internal.h:376
void native_append_meta(pe_resource_t *rsc, xmlNode *xml)
#define pe_rsc_start_pending
Definition: pe_types.h:269
char * task
Definition: pe_types.h:415
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
#define pe__clear_action_flags(action, flags_to_clear)
Definition: internal.h:68
#define CRM_ATTR_UNAME
Definition: crm.h:114
int custom_action_order(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
#define crm_trace(fmt, args...)
Definition: logging.h:356
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:159
void pe_fence_node(pe_working_set_t *data_set, pe_node_t *node, const char *reason, bool priority_delay)
Schedule a fence action for a node.
Definition: unpack.c:97
#define promote_key(rsc)
Definition: internal.h:392
char * crm_strdup_printf(char const *format,...) G_GNUC_PRINTF(1
void process_utilization(pe_resource_t *rsc, pe_node_t **prefer, pe_working_set_t *data_set)
GHashTable * meta
Definition: pe_types.h:425
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition: util.h:114
struct pe_node_shared_s * details
Definition: pe_types.h:244
enum rsc_recovery_type recovery_type
Definition: pe_types.h:336
pe_node_t * node
Definition: pe_types.h:412
filter_colocation_res
enum loss_ticket_policy_e loss_policy
#define pe_rsc_needs_fencing
Definition: pe_types.h:280
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1808
unsigned long long flags
Definition: pe_types.h:349
const char * uname
Definition: pe_types.h:209
#define pe_rsc_promotable
Definition: pe_types.h:256
void(* expand)(pe_resource_t *, pe_working_set_t *)
xmlNode * find_rsc_op_entry(pe_resource_t *rsc, const char *key)
Definition: utils.c:1327
bool pcmk__is_daemon
Definition: logging.c:47
#define pe_flag_stonith_enabled
Definition: pe_types.h:98
void native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:635
#define pe__set_graph_flags(graph_flags, gr_action, flags_to_set)
Definition: internal.h:125
GList * actions
Definition: pe_types.h:360
pe_graph_flags
Definition: pe_types.h:283
GHashTable * utilization
Definition: pe_types.h:376
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:233
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:323
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:244
char * uuid
Definition: pe_types.h:416
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
enum pe_action_flags(* action_flags)(pe_action_t *, pe_node_t *)
#define pe_rsc_allocating
Definition: pe_types.h:259
enum rsc_role_e text2role(const char *role)
Definition: common.c:488
enum pe_obj_types variant
Definition: pe_types.h:331
gboolean granted
Definition: pe_types.h:459
int new_rsc_order(pe_resource_t *lh_rsc, const char *lh_task, pe_resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set)
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
const char * placement_strategy
Definition: pe_types.h:144
int rsc_discover_mode
Definition: pe_types.h:245
enum filter_colocation_res filter_colocation_constraint(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, gboolean preview)
gboolean can_run_any(GHashTable *nodes)
const char * id
Definition: pe_types.h:208
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition: operations.c:45
char * id
Definition: pe_types.h:458
#define pe_rsc_fence_device
Definition: pe_types.h:255
GHashTable * pcmk__copy_node_table(GHashTable *nodes)
pe_node_t * pe_find_node_id(GList *node_list, const char *id)
Definition: status.c:418
const char * target
Definition: pcmk_fence.c:29
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:49
enum pe_action_flags native_action_flags(pe_action_t *action, pe_node_t *node)
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:53
void ReloadRsc(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
gboolean is_remote_node
Definition: pe_types.h:352
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:579
#define start_action(rsc, node, optional)
Definition: internal.h:383
#define CRM_META
Definition: crm.h:78
int pe__add_scores(int score1, int score2)
Definition: common.c:516
#define crm_err(fmt, args...)
Definition: logging.h:350
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:215
char guint crm_parse_interval_spec(const char *input)
Parse milliseconds from a Pacemaker interval specification.
Definition: utils.c:314
#define pe_rsc_reload
Definition: pe_types.h:263
#define RSC_PROMOTE
Definition: crm.h:207
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
Definition: complex.c:1116
gboolean StopRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
This structure contains everything that makes up a single output formatter.
pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer, pe_working_set_t *data_set)
void rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
#define pe_rsc_needs_unfencing
Definition: pe_types.h:281
void native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
gboolean shutdown
Definition: pe_types.h:219
#define pe__clear_resource_flags(resource, flags_to_clear)
Definition: internal.h:53
#define crm_str(x)
Definition: logging.h:376
GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, uint32_t flags)
rsc_role_e
Possible roles that a resource can be in.
Definition: common.h:92
GList * running_on
Definition: pe_types.h:367
#define pe_rsc_block
Definition: pe_types.h:250
enum pe_action_flags flags
Definition: pe_types.h:420
gboolean maintenance
Definition: pe_types.h:222
#define pe_rsc_maintenance
Definition: pe_types.h:276
pe_working_set_t * cluster
Definition: pe_types.h:328
const char * node_attribute
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:258
gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set)
const char * id
#define pe_flag_have_stonith_resource
Definition: pe_types.h:99
#define RSC_ROLE_MAX
Definition: common.h:108
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1498
#define pe_flag_enable_unfencing
Definition: pe_types.h:100
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:20
#define pe__set_order_flags(order_flags, flags_to_set)
Definition: internal.h:111
#define start_key(rsc)
Definition: internal.h:382
void rsc_ticket_constraint(pe_resource_t *rsc_lh, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set)
#define ID(x)
Definition: msg_xml.h:456
unsigned long long flags
Definition: pe_types.h:146
#define pe_err(fmt...)
Definition: internal.h:22
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1546
char * name
Definition: pcmk_fence.c:31
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:322
gboolean native_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set)
#define CRM_OP_LRM_DELETE
Definition: crm.h:151
#define CRM_ATTR_ID
Definition: crm.h:115
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:218
gboolean unclean
Definition: pe_types.h:217
#define pe_flag_show_scores
Definition: pe_types.h:133
void graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
#define crm_info(fmt, args...)
Definition: logging.h:353
#define pe_rsc_managed
Definition: pe_types.h:249
#define pe_rsc_orphan
Definition: pe_types.h:248
pe_action_t * find_first_action(GList *input, const char *uuid, const char *task, pe_node_t *on_node)
Definition: utils.c:1428
pe_ordering
Definition: pe_types.h:484
gboolean online
Definition: pe_types.h:213
uint64_t flags
Definition: remote.c:149
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:402
pe_resource_t * parent
Definition: pe_types.h:329
pe_action_t * pe_fence_op(pe_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t *data_set)
Definition: utils.c:2041
GHashTable *(* merge_weights)(pe_resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
#define RSC_DEMOTE
Definition: crm.h:209
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:18
gboolean NullOp(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
void create_secondary_notification(pe_action_t *action, pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:266
char * id
Definition: pe_types.h:322
pe_action_t * custom_action(pe_resource_t *rsc, char *key, const char *task, pe_node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Definition: utils.c:415
GHashTable * allowed_nodes
Definition: pe_types.h:369
#define RSC_MIGRATED
Definition: crm.h:199
#define pe_flag_startup_probes
Definition: pe_types.h:115