pacemaker  2.1.0-7c3f660
Scalable High-Availability cluster resource manager
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2021 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <stdbool.h>
13 
14 #include <crm/pengine/rules.h>
15 #include <crm/msg_xml.h>
17 #include <pacemaker-internal.h>
18 #include <crm/services.h>
19 
20 // The controller removes the resource from the CIB, making this redundant
21 // #define DELETE_THEN_REFRESH 1
22 
23 #define INFINITY_HACK (INFINITY * -100)
24 
25 #define VARIANT_NATIVE 1
26 #include <lib/pengine/variant.h>
27 
28 extern bool pcmk__is_daemon;
29 
/* NOTE(review): this listing is a Doxygen text dump; the leading integer on
 * each line is the upstream source line number, not code.
 */

/* Forward declarations for the recurring-monitor scheduling helpers defined
 * later in this file (Recurring/RecurringOp handle non-Stopped roles,
 * the *_Stopped variants handle role="Stopped" monitors).
 */
30 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
31  pe_working_set_t *data_set);
32 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
33  xmlNode *operation, pe_working_set_t *data_set);
34 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
35  pe_working_set_t *data_set);
36 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
37  xmlNode *operation, pe_working_set_t *data_set);
38 
/* Role-transition action generators; all but ReloadRsc share the
 * rsc_transition_fn signature and are referenced by rsc_action_matrix below.
 */
39 void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
40 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
41 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
42 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
43 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
44 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
45  pe_working_set_t * data_set);
46 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
47 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
48 
49 /* This array says what the *next* role should be when transitioning from one
50  * role to another. For example going from Stopped to Promoted, the next role is
51  * RSC_ROLE_UNPROMOTED, because the resource must be started before being promoted.
52  * The current state then becomes Started, which is fed into this array again,
53  * giving a next role of RSC_ROLE_PROMOTED.
54  */
55 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
56 /* Current state Next state*/
57 /* Unknown Stopped Started Unpromoted Promoted */
58 /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED },
59 /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED },
63 };
64 
/* Signature shared by every role-transition action generator
 * (StartRsc, StopRsc, PromoteRsc, DemoteRsc, RoleError, NullOp).
 */
65 typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
66  gboolean optional,
67  pe_working_set_t *data_set);
68 
69 // This array picks the function needed to transition from one role to another
// Indexed as [current role][next role]; RoleError marks invalid transitions,
// NullOp marks transitions that need no action.
70 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
71 /* Current state Next state */
72 /* Unknown Stopped Started Unpromoted Promoted */
73 /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
74 /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
75 /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
76 /* Unpromoted */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
77 /* Promoted */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
78 };
79 
/* Clear node-weight flag bits with trace logging.
 * NOTE(review): despite taking an nw_flags parameter, the expansion assigns
 * to (and reads) a variable literally named `flags` in the caller's scope;
 * nw_flags itself is never referenced. Callers must therefore have a local
 * `flags` variable. This matches the dumped text -- confirm against upstream.
 */
80 #define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do { \
81  flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
82  "Node weight", (nw_rsc)->id, (flags), \
83  (flags_to_clear), #flags_to_clear); \
84  } while (0)
85 
/* Choose a node for a resource and assign it via native_assign_node().
 *
 * rsc      - resource to assign (may be assigned NULL if no node is suitable)
 * prefer   - node to prefer if its weight is at least as good as the best
 *            allowed node's (may be overridden by process_utilization())
 * data_set - cluster working set
 *
 * Returns TRUE if the resource ends up assigned to a node, else FALSE.
 * If the resource is not provisional, no new decision is made and the
 * existing assignment (if any) determines the return value.
 */
86 static gboolean
87 native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
88 {
89  GList *nodes = NULL;
90  pe_node_t *chosen = NULL;
91  pe_node_t *best = NULL;
92  int multiple = 1;
93  int length = 0;
94  gboolean result = FALSE;
95 
// May replace *prefer based on utilization placement strategy
96  process_utilization(rsc, &prefer, data_set);
97 
98  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
99  return rsc->allocated_to ? TRUE : FALSE;
100  }
101 
102  // Sort allowed nodes by weight
103  if (rsc->allowed_nodes) {
104  length = g_hash_table_size(rsc->allowed_nodes);
105  }
106  if (length > 0) {
107  nodes = g_hash_table_get_values(rsc->allowed_nodes);
108  nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
109 
110  // First node in sorted list has the best score
111  best = g_list_nth_data(nodes, 0);
112  }
113 
// Honor the preferred node only if it is known, allowed, available, and at
// least as good as the best-scoring allowed node
114  if (prefer && nodes) {
115  chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
116 
117  if (chosen == NULL) {
118  pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
119  prefer->details->uname, rsc->id);
120 
121  /* Favor the preferred node as long as its weight is at least as good as
122  * the best allowed node's.
123  *
124  * An alternative would be to favor the preferred node even if the best
125  * node is better, when the best node's weight is less than INFINITY.
126  */
127  } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
128  pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
129  chosen->details->uname, rsc->id);
130  chosen = NULL;
131 
132  } else if (!can_run_resources(chosen)) {
133  pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
134  chosen->details->uname, rsc->id);
135  chosen = NULL;
136 
137  } else {
138  pe_rsc_trace(rsc,
139  "Chose preferred node %s for %s (ignoring %d candidates)",
140  chosen->details->uname, rsc->id, length);
141  }
142  }
143 
144  if ((chosen == NULL) && nodes) {
145  /* Either there is no preferred node, or the preferred node is not
146  * available, but there are other nodes allowed to run the resource.
147  */
148 
149  chosen = best;
150  pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
151  chosen ? chosen->details->uname : "<none>", rsc->id, length);
152 
153  if (!pe_rsc_is_unique_clone(rsc->parent)
154  && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
155  /* If the resource is already running on a node, prefer that node if
156  * it is just as good as the chosen node.
157  *
158  * We don't do this for unique clone instances, because
159  * distribute_children() has already assigned instances to their
160  * running nodes when appropriate, and if we get here, we don't want
161  * remaining unallocated instances to prefer a node that's already
162  * running another instance.
163  */
164  pe_node_t *running = pe__current_node(rsc);
165 
166  if (running && (can_run_resources(running) == FALSE)) {
167  pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
168  rsc->id, running->details->uname);
169  } else if (running) {
// Walk the rest of the sorted list; multiple counts nodes tied with chosen
170  for (GList *iter = nodes->next; iter; iter = iter->next) {
171  pe_node_t *tmp = (pe_node_t *) iter->data;
172 
173  if (tmp->weight != chosen->weight) {
174  // The nodes are sorted by weight, so no more are equal
175  break;
176  }
177  if (tmp->details == running->details) {
178  // Scores are equal, so prefer the current node
179  chosen = tmp;
180  }
181  multiple++;
182  }
183  }
184  }
185  }
186 
// Log when the choice was a tie-break among several equally scored nodes.
// (chosen is non-NULL here: multiple is only incremented when chosen is set.)
187  if (multiple > 1) {
188  static char score[33];
189  int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
190 
191  score2char_stack(chosen->weight, score, sizeof(score));
192  do_crm_log(log_level,
193  "Chose node %s for %s from %d nodes with score %s",
194  chosen->details->uname, rsc->id, multiple, score);
195  }
196 
197  result = native_assign_node(rsc, chosen, FALSE);
198  g_list_free(nodes);
199  return result;
200 }
201 
210 static int
211 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
212  const char *value)
213 {
214  GHashTableIter iter;
215  pe_node_t *node = NULL;
216  int best_score = -INFINITY;
217  const char *best_node = NULL;
218 
219  // Find best allowed node with matching attribute
220  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
221  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
222 
223  if ((node->weight > best_score) && can_run_resources(node)
224  && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
225 
226  best_score = node->weight;
227  best_node = node->details->uname;
228  }
229  }
230 
231  if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
232  if (best_node == NULL) {
233  crm_info("No allowed node for %s matches node attribute %s=%s",
234  rsc->id, attr, value);
235  } else {
236  crm_info("Allowed node %s for %s had best score (%d) "
237  "of those matching node attribute %s=%s",
238  best_node, rsc->id, best_score, attr, value);
239  }
240  }
241  return best_score;
242 }
243 
/* Fold colocation scores into a node table, keyed by a node attribute.
 *
 * For each node in \p nodes, look up the best score among \p rsc's allowed
 * nodes sharing the same value of \p attr (via best_node_score_matching_attr),
 * scale it by \p factor, and add it to the node's weight. Several filters
 * skip or veto the update; their order matters because each logs a distinct
 * trace message and `continue`s.
 *
 * nodes         - table of nodes to update in place
 * rsc           - resource whose allowed nodes supply the matched scores
 * attr          - node attribute to match on (NULL means node name)
 * factor        - multiplier applied to the matched score
 * only_positive - if true, updates that would drive a positive/zero weight
 *                 negative are filtered (and positive nodes are marked
 *                 unusable with the INFINITY_HACK sentinel)
 */
258 static void
259 add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
260  const char *attr, float factor,
261  bool only_positive)
262 {
263  GHashTableIter iter;
264  pe_node_t *node = NULL;
265 
266  if (attr == NULL) {
267  attr = CRM_ATTR_UNAME;
268  }
269 
270  // Iterate through each node
271  g_hash_table_iter_init(&iter, nodes);
272  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
273  float weight_f = 0;
274  int weight = 0;
275  int score = 0;
276  int new_score = 0;
277 
278  score = best_node_score_matching_attr(rsc, attr,
279  pe_node_attribute_raw(node, attr));
280 
281  if ((factor < 0) && (score < 0)) {
282  /* Negative preference for a node with a negative score
283  * should not become a positive preference.
284  *
285  * @TODO Consider filtering only if weight is -INFINITY
286  */
287  crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
288  node->details->uname, node->weight, factor, score);
289  continue;
290  }
291 
// INFINITY_HACK marks nodes vetoed by an earlier only_positive pass
292  if (node->weight == INFINITY_HACK) {
293  crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
294  node->details->uname, node->weight, factor, score);
295  continue;
296  }
297 
298  weight_f = factor * score;
299 
300  // Round the number; see http://c-faq.com/fp/round.html
301  weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
302 
303  /* Small factors can obliterate the small scores that are often actually
304  * used in configurations. If the score and factor are nonzero, ensure
305  * that the result is nonzero as well.
306  */
307  if ((weight == 0) && (score != 0)) {
308  if (factor > 0.0) {
309  weight = 1;
310  } else if (factor < 0.0) {
311  weight = -1;
312  }
313  }
314 
// pe__add_scores() handles INFINITY saturation when combining scores
315  new_score = pe__add_scores(weight, node->weight);
316 
317  if (only_positive && (new_score < 0) && (node->weight > 0)) {
318  crm_trace("%s: Filtering %d + %f * %d = %d "
319  "(negative disallowed, marking node unusable)",
320  node->details->uname, node->weight, factor, score,
321  new_score);
322  node->weight = INFINITY_HACK;
323  continue;
324  }
325 
326  if (only_positive && (new_score < 0) && (node->weight == 0)) {
327  crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
328  node->details->uname, node->weight, factor, score,
329  new_score);
330  continue;
331  }
332 
333  crm_trace("%s: %d + %f * %d = %d", node->details->uname,
334  node->weight, factor, score, new_score);
335  node->weight = new_score;
336  }
337 }
338 
339 static inline bool
340 is_nonempty_group(pe_resource_t *rsc)
341 {
342  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
343 }
344 
/* Merge a resource's node weights into a working node table, recursively
 * following its colocation constraints (scaled by factor). Returns the new
 * table; ownership of `nodes` is consumed (destroyed) on the success path,
 * or returned unchanged on recursion-break / rollback paths.
 *
 * NOTE(review): this listing lost several upstream lines in extraction --
 * visible as jumps in the embedded line numbers. In particular line 361
 * (the function name and first parameters, presumably
 * `pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,` given
 * the recursive calls below), line 372 (presumably setting pe_rsc_merging),
 * lines 385/387, 411 (trailing argument of add_node_scores_matching_attr),
 * 465, and 486 (presumably clearing pe_rsc_merging) are missing. Verify
 * against upstream before building.
 */
360 GHashTable *
362  GHashTable *nodes, const char *attr, float factor,
363  uint32_t flags)
364 {
365  GHashTable *work = NULL;
366 
367  // Avoid infinite recursion
368  if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
369  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
370  return nodes;
371  }
// NOTE(review): upstream line 372 missing here (pe_rsc_merging is checked
// above and relied on below, so it is presumably set at this point).
373 
374  if (pcmk_is_set(flags, pe_weights_init)) {
375  if (is_nonempty_group(rsc)) {
// A group's weights are those of its last member
376  GList *last = g_list_last(rsc->children);
377  pe_resource_t *last_rsc = last->data;
378 
379  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
380  "using last member %s (at %.6f)",
381  rhs, rsc->id, last_rsc->id, factor);
382  work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
383  flags);
384  } else {
// NOTE(review): upstream line 385 missing (non-group init case body).
386  }
// NOTE(review): upstream line 387 missing.
388 
389  } else if (is_nonempty_group(rsc)) {
390  /* The first member of the group will recursively incorporate any
391  * constraints involving other members (including the group internal
392  * colocation).
393  *
394  * @TODO The indirect colocations from the dependent group's other
395  * members will be incorporated at full strength rather than by
396  * factor, so the group's combined stickiness will be treated as
397  * (factor + (#members - 1)) * stickiness. It is questionable what
398  * the right approach should be.
399  */
400  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
401  "(at %.6f)", rhs, rsc->id, factor);
402  work = pcmk__copy_node_table(nodes);
403  work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
404  factor, flags);
405 
406  } else {
407  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
408  rhs, rsc->id, factor);
409  work = pcmk__copy_node_table(nodes);
410  add_node_scores_matching_attr(work, rsc, attr, factor,
// NOTE(review): upstream line 411 missing (remaining argument(s) of the
// add_node_scores_matching_attr() call, presumably the only_positive flag).
412  }
413 
// If any node is still runnable, recurse into this resource's own optional
// colocation constraints, scaling each by its score relative to INFINITY
414  if (can_run_any(work)) {
415  GList *gIter = NULL;
416  int multiplier = (factor < 0)? -1 : 1;
417 
418  if (pcmk_is_set(flags, pe_weights_forward)) {
419  gIter = rsc->rsc_cons;
420  pe_rsc_trace(rsc,
421  "Checking additional %d optional '%s with' constraints",
422  g_list_length(gIter), rsc->id);
423 
424  } else if (is_nonempty_group(rsc)) {
425  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
426 
427  gIter = last_rsc->rsc_cons_lhs;
428  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
429  "constraints using last member %s",
430  g_list_length(gIter), rsc->id, last_rsc->id);
431 
432  } else {
433  gIter = rsc->rsc_cons_lhs;
434  pe_rsc_trace(rsc,
435  "Checking additional %d optional 'with %s' constraints",
436  g_list_length(gIter), rsc->id);
437  }
438 
439  for (; gIter != NULL; gIter = gIter->next) {
440  pe_resource_t *other = NULL;
441  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
442 
443  if (pcmk_is_set(flags, pe_weights_forward)) {
444  other = constraint->rsc_rh;
445  } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
446  continue;
447  } else {
448  other = constraint->rsc_lh;
449  }
450 
451  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
452  constraint->id, constraint->rsc_lh->id,
453  constraint->rsc_rh->id);
454  work = pcmk__native_merge_weights(other, rhs, work,
455  constraint->node_attribute,
456  multiplier * constraint->score / (float) INFINITY,
457  flags|pe_weights_rollback);
458  pe__show_node_weights(true, NULL, rhs, work, rsc->cluster);
459  }
460 
461  } else if (pcmk_is_set(flags, pe_weights_rollback)) {
// Nothing can run: discard this merge and hand back the caller's table
462  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
463  rhs, rsc->id);
464  g_hash_table_destroy(work);
// NOTE(review): upstream line 465 missing (presumably clears pe_rsc_merging
// before returning).
466  return nodes;
467  }
468 
469 
470  if (pcmk_is_set(flags, pe_weights_positive)) {
// Demote the INFINITY_HACK "unusable" sentinel to a small positive weight
471  pe_node_t *node = NULL;
472  GHashTableIter iter;
473 
474  g_hash_table_iter_init(&iter, work);
475  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
476  if (node->weight == INFINITY_HACK) {
477  node->weight = 1;
478  }
479  }
480  }
481 
482  if (nodes) {
483  g_hash_table_destroy(nodes);
484  }
485 
// NOTE(review): upstream line 486 missing (presumably clears pe_rsc_merging).
487  return work;
488 }
489 
490 static inline bool
491 node_has_been_unfenced(pe_node_t *node)
492 {
493  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
494 
495  return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
496 }
497 
/* Predicate over a resource and the working set.
 * NOTE(review): the function body (upstream lines 500-502) was lost when this
 * listing was extracted -- only the signature and closing brace remain.
 * Presumably it tests whether rsc is a fencing device that requires
 * unfencing; restore from upstream before building.
 */
498 static inline bool
499 is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
500 {
503 }
504 
/* Allocate a primitive (native) resource to a node.
 *
 * Applies colocation constraints (allocating colocated resources first,
 * with rollback if a mandatory colocation empties the allowed-node table),
 * handles target-role Stopped and no-quorum freeze, then assigns a node --
 * or records why no node could be chosen. Also flips the corresponding
 * Pacemaker Remote node online/shutdown when the resource is a remote
 * connection. Returns the node the resource is allocated to (or NULL).
 *
 * NOTE(review): extraction dropped upstream lines here -- line 506 (the
 * function name and first parameters; the body uses `rsc`, `prefer`, and
 * `data_set`), 527, 573 (trailing argument(s) of the merge_weights call),
 * 589-593, and 636. Verify against upstream before building.
 */
505 pe_node_t *
507  pe_working_set_t *data_set)
508 {
509  GList *gIter = NULL;
510 
511  if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
512  /* never allocate children on their own */
513  pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
514  rsc->parent->id);
515  rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
516  }
517 
518  if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
519  return rsc->allocated_to;
520  }
521 
522  if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
523  pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
524  return NULL;
525  }
526 
// NOTE(review): upstream line 527 missing (pe_rsc_allocating is checked
// above, so it is presumably set here).
528  pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes, data_set);
529 
// Allocate "this with other" colocation partners first, archiving our
// allowed-node table so risky constraints can be rolled back
530  for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
531  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
532 
533  GHashTable *archive = NULL;
534  pe_resource_t *rsc_rh = constraint->rsc_rh;
535 
// Archive only for promoted-role or finite-negative constraints
536  if ((constraint->role_lh >= RSC_ROLE_PROMOTED)
537  || (constraint->score < 0 && constraint->score > -INFINITY)) {
538  archive = pcmk__copy_node_table(rsc->allowed_nodes);
539  }
540 
541  pe_rsc_trace(rsc,
542  "%s: Allocating %s first (constraint=%s score=%d role=%s)",
543  rsc->id, rsc_rh->id, constraint->id,
544  constraint->score, role2text(constraint->role_lh));
545  rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
546  rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
547  if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
548  pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
549  g_hash_table_destroy(rsc->allowed_nodes);
550  rsc->allowed_nodes = archive;
551  archive = NULL;
552  }
553  if (archive) {
554  g_hash_table_destroy(archive);
555  }
556  }
557 
558  pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes, data_set);
559 
// Merge in weights from "other with this" colocations that have influence
560  for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
561  pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
562 
563  if (!pcmk__colocation_has_influence(constraint, NULL)) {
564  continue;
565  }
566  pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
567  constraint->id, constraint->rsc_lh->id,
568  constraint->rsc_rh->id);
569  rsc->allowed_nodes =
570  constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
571  constraint->node_attribute,
572  (float)constraint->score / INFINITY,
// NOTE(review): upstream line 573 missing (remaining flag argument(s) and
// closing of the merge_weights call).
574  }
575 
576  if (rsc->next_role == RSC_ROLE_STOPPED) {
577  pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
578  /* make sure it doesn't come up again */
579  resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);
580 
581  } else if(rsc->next_role > rsc->role
582  && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
583  && data_set->no_quorum_policy == no_quorum_freeze) {
584  crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
585  rsc->id, role2text(rsc->role), role2text(rsc->next_role));
586  pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
587  }
588 
// NOTE(review): upstream line 589 (and the surrounding lines 591-593 of this
// statement/block) missing; only this argument list fragment remains.
590  rsc, __func__, rsc->allowed_nodes, data_set);
594  }
595 
596  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
// Unmanaged resources stay wherever they are (or nowhere)
597  const char *reason = NULL;
598  pe_node_t *assign_to = NULL;
599 
600  pe__set_next_role(rsc, rsc->role, "unmanaged");
601  assign_to = pe__current_node(rsc);
602  if (assign_to == NULL) {
603  reason = "inactive";
604  } else if (rsc->role == RSC_ROLE_PROMOTED) {
605  reason = "promoted";
606  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
607  reason = "failed";
608  } else {
609  reason = "active";
610  }
611  pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
612  (assign_to? assign_to->details->uname : "no node"), reason);
613  native_assign_node(rsc, assign_to, TRUE);
614 
615  } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
616  pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
617  native_assign_node(rsc, NULL, TRUE);
618 
619  } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
620  && native_choose_node(rsc, prefer, data_set)) {
621  pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
622  rsc->allocated_to->details->uname);
623 
624  } else if (rsc->allocated_to == NULL) {
625  if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
626  pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
627  } else if (rsc->running_on != NULL) {
628  pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
629  }
630 
631  } else {
632  pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
633  rsc->allocated_to->details->uname);
634  }
635 
// NOTE(review): upstream line 636 missing (presumably clears
// pe_rsc_allocating, which was set near the top of this function).
637 
638  if (rsc->is_remote_node) {
639  pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);
640 
641  CRM_ASSERT(remote_node != NULL);
642  if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
643  crm_trace("Setting Pacemaker Remote node %s to ONLINE",
644  remote_node->details->id);
645  remote_node->details->online = TRUE;
646  /* We shouldn't consider an unseen remote-node unclean if we are going
647  * to try and connect to it. Otherwise we get an unnecessary fence */
648  if (remote_node->details->unseen == TRUE) {
649  remote_node->details->unclean = FALSE;
650  }
651 
652  } else {
653  crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
654  remote_node->details->id, role2text(rsc->next_role),
655  (rsc->allocated_to? "" : "un"));
656  remote_node->details->shutdown = TRUE;
657  }
658  }
659 
660  return rsc->allocated_to;
661 }
662 
663 static gboolean
664 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
665 {
666  gboolean dup = FALSE;
667  const char *id = NULL;
668  const char *value = NULL;
669  xmlNode *operation = NULL;
670  guint interval2_ms = 0;
671 
672  CRM_ASSERT(rsc);
673  for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
674  operation = pcmk__xe_next(operation)) {
675 
676  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
677  value = crm_element_value(operation, "name");
678  if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
679  continue;
680  }
681 
682  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
683  interval2_ms = crm_parse_interval_spec(value);
684  if (interval_ms != interval2_ms) {
685  continue;
686  }
687 
688  if (id == NULL) {
689  id = ID(operation);
690 
691  } else {
692  pcmk__config_err("Operation %s is duplicate of %s (do not use "
693  "same name and interval combination more "
694  "than once per resource)", ID(operation), id);
695  dup = TRUE;
696  }
697  }
698  }
699 
700  return dup;
701 }
702 
/* Check whether an action name is one that must not be configured as a
 * recurring operation (callers treat a true result as a configuration error).
 * NOTE(review): the function body (upstream line 706) was lost when this
 * listing was extracted -- presumably a membership test of `name` against
 * the non-recurrable action names; restore from upstream before building.
 */
703 static bool
704 op_cannot_recur(const char *name)
705 {
707 }
708 
/* Schedule one recurring monitor (non-Stopped role) for a resource.
 *
 * rsc       - resource the <op> belongs to
 * start     - resource's start action, if any (controls whether the monitor
 *             is optional, and the monitor is cancelled if start is unrunnable)
 * node      - node the resource will be active on (may be NULL)
 * operation - the <op> XML definition being processed
 * data_set  - cluster working set
 *
 * Skips role="Stopped" ops (handled by RecurringOp_Stopped), zero intervals,
 * duplicates, non-recurrable actions, and disabled ops. If the op's role does
 * not match the resource's next role, any active monitor is cancelled instead.
 *
 * NOTE(review): extraction dropped upstream lines 784, 847, 853, 871, 875,
 * 880, and 885 from this listing (noted inline below). Verify against
 * upstream before building.
 */
709 static void
710 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
711  xmlNode * operation, pe_working_set_t * data_set)
712 {
713  char *key = NULL;
714  const char *name = NULL;
715  const char *role = NULL;
716  const char *interval_spec = NULL;
717  const char *node_uname = node? node->details->uname : "n/a";
718 
719  guint interval_ms = 0;
720  pe_action_t *mon = NULL;
721  gboolean is_optional = TRUE;
722  GList *possible_matches = NULL;
723 
724  CRM_ASSERT(rsc);
725 
726  /* Only process for the operations without role="Stopped" */
727  role = crm_element_value(operation, "role");
728  if (role && text2role(role) == RSC_ROLE_STOPPED) {
729  return;
730  }
731 
732  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
733  interval_ms = crm_parse_interval_spec(interval_spec);
734  if (interval_ms == 0) {
735  return;
736  }
737 
738  name = crm_element_value(operation, "name");
739  if (is_op_dup(rsc, name, interval_ms)) {
740  crm_trace("Not creating duplicate recurring action %s for %dms %s",
741  ID(operation), interval_ms, name);
742  return;
743  }
744 
745  if (op_cannot_recur(name)) {
746  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
747  ID(operation), name);
748  return;
749  }
750 
751  key = pcmk__op_key(rsc->id, name, interval_ms);
752  if (find_rsc_op_entry(rsc, key) == NULL) {
753  crm_trace("Not creating recurring action %s for disabled resource %s",
754  ID(operation), rsc->id);
755  free(key);
756  return;
757  }
758 
759  pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
760  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
761 
// Monitor is optional only if the start action is optional
762  if (start != NULL) {
763  pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
764  pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
765  start->uuid);
766  is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
767  } else {
768  pe_rsc_trace(rsc, "Marking %s optional", key);
769  is_optional = TRUE;
770  }
771 
772  /* start a monitor for an already active resource */
773  possible_matches = find_actions_exact(rsc->actions, key, node);
774  if (possible_matches == NULL) {
775  is_optional = FALSE;
776  pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
777 
778  } else {
779  GList *gIter = NULL;
780 
781  for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
782  pe_action_t *op = (pe_action_t *) gIter->data;
783 
// NOTE(review): upstream line 784 missing -- the condition guarding this
// branch (presumably a test on op's flags that forces the monitor mandatory).
785  is_optional = FALSE;
786  break;
787  }
788  }
789  g_list_free(possible_matches);
790  }
791 
// The op's configured role does not match the resource's next role:
// cancel a running monitor, otherwise just ignore the op
792  if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
793  || (role != NULL && text2role(role) != rsc->next_role)) {
794  int log_level = LOG_TRACE;
795  const char *result = "Ignoring";
796 
797  if (is_optional) {
798  char *after_key = NULL;
799  pe_action_t *cancel_op = NULL;
800 
801  // It's running, so cancel it
802  log_level = LOG_INFO;
803  result = "Cancelling";
804  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
805 
// Order the cancellation before the role transition it conflicts with
806  switch (rsc->role) {
807  case RSC_ROLE_UNPROMOTED:
808  case RSC_ROLE_STARTED:
809  if (rsc->next_role == RSC_ROLE_PROMOTED) {
810  after_key = promote_key(rsc);
811 
812  } else if (rsc->next_role == RSC_ROLE_STOPPED) {
813  after_key = stop_key(rsc);
814  }
815 
816  break;
817  case RSC_ROLE_PROMOTED:
818  after_key = demote_key(rsc);
819  break;
820  default:
821  break;
822  }
823 
824  if (after_key) {
825  custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
826  pe_order_runnable_left, data_set);
827  }
828  }
829 
830  do_crm_log(log_level, "%s action %s (%s vs. %s)",
831  result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
832  role2text(rsc->next_role));
833 
834  free(key);
835  return;
836  }
837 
// custom_action() takes ownership of key; mon->uuid is borrowed afterwards
838  mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
839  key = mon->uuid;
840  if (is_optional) {
841  pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
842  }
843 
844  if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
845  pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
846  node_uname, mon->uuid);
// NOTE(review): upstream line 847 missing -- the start of the call whose
// trailing arguments appear on the next line (presumably an
// update_action_flags() call clearing pe_action_runnable on mon).
848  __func__, __LINE__);
849 
850  } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
851  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
852  node_uname, mon->uuid);
// NOTE(review): upstream line 853 missing -- same as above for this branch.
854  __func__, __LINE__);
855 
856  } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
857  pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
858  mon->task, interval_ms / 1000, rsc->id, node_uname);
859  }
860 
// A monitor of a promoted resource must expect the "running promoted" rc
861  if (rsc->next_role == RSC_ROLE_PROMOTED) {
862  char *running_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
863 
864  add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_promoted);
865  free(running_promoted);
866  }
867 
// Order the monitor after start/reload (and promote/demote as applicable).
// NOTE(review): upstream lines 871, 875, 880, and 885 missing -- the flag and
// data_set arguments closing each custom_action_order() call below.
868  if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
869  custom_action_order(rsc, start_key(rsc), NULL,
870  NULL, strdup(key), mon,
872 
873  custom_action_order(rsc, reload_key(rsc), NULL,
874  NULL, strdup(key), mon,
876 
877  if (rsc->next_role == RSC_ROLE_PROMOTED) {
878  custom_action_order(rsc, promote_key(rsc), NULL,
879  rsc, NULL, mon,
881 
882  } else if (rsc->role == RSC_ROLE_PROMOTED) {
883  custom_action_order(rsc, demote_key(rsc), NULL,
884  rsc, NULL, mon,
886  }
887  }
888 }
889 
890 static void
891 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
892 {
893  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
894  (node == NULL || node->details->maintenance == FALSE)) {
895  xmlNode *operation = NULL;
896 
897  for (operation = pcmk__xe_first_child(rsc->ops_xml);
898  operation != NULL;
899  operation = pcmk__xe_next(operation)) {
900 
901  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
902  RecurringOp(rsc, start, node, operation, data_set);
903  }
904  }
905  }
906 }
907 
908 static void
909 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
910  xmlNode * operation, pe_working_set_t * data_set)
911 {
912  char *key = NULL;
913  const char *name = NULL;
914  const char *role = NULL;
915  const char *interval_spec = NULL;
916  const char *node_uname = node? node->details->uname : "n/a";
917 
918  guint interval_ms = 0;
919  GList *possible_matches = NULL;
920  GList *gIter = NULL;
921 
922  /* Only process for the operations with role="Stopped" */
923  role = crm_element_value(operation, "role");
924  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
925  return;
926  }
927 
928  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
929  interval_ms = crm_parse_interval_spec(interval_spec);
930  if (interval_ms == 0) {
931  return;
932  }
933 
934  name = crm_element_value(operation, "name");
935  if (is_op_dup(rsc, name, interval_ms)) {
936  crm_trace("Not creating duplicate recurring action %s for %dms %s",
937  ID(operation), interval_ms, name);
938  return;
939  }
940 
941  if (op_cannot_recur(name)) {
942  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
943  ID(operation), name);
944  return;
945  }
946 
947  key = pcmk__op_key(rsc->id, name, interval_ms);
948  if (find_rsc_op_entry(rsc, key) == NULL) {
949  crm_trace("Not creating recurring action %s for disabled resource %s",
950  ID(operation), rsc->id);
951  free(key);
952  return;
953  }
954 
955  // @TODO add support
956  if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
957  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
958  "not supported for anonymous clones)",
959  ID(operation));
960  return;
961  }
962 
963  pe_rsc_trace(rsc,
964  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
965  ID(operation), rsc->id, role2text(rsc->next_role));
966 
967  /* if the monitor exists on the node where the resource will be running, cancel it */
968  if (node != NULL) {
969  possible_matches = find_actions_exact(rsc->actions, key, node);
970  if (possible_matches) {
971  pe_action_t *cancel_op = NULL;
972 
973  g_list_free(possible_matches);
974 
975  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
976 
977  if ((rsc->next_role == RSC_ROLE_STARTED)
978  || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
979  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
980  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
981  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
982  pe_order_runnable_left, data_set);
983  }
984 
985  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
986  key, role, role2text(rsc->next_role), node_uname);
987  }
988  }
989 
990  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
991  pe_node_t *stop_node = (pe_node_t *) gIter->data;
992  const char *stop_node_uname = stop_node->details->uname;
993  gboolean is_optional = TRUE;
994  gboolean probe_is_optional = TRUE;
995  gboolean stop_is_optional = TRUE;
996  pe_action_t *stopped_mon = NULL;
997  char *rc_inactive = NULL;
998  GList *probe_complete_ops = NULL;
999  GList *stop_ops = NULL;
1000  GList *local_gIter = NULL;
1001 
1002  if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
1003  continue;
1004  }
1005 
1006  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
1007  ID(operation), rsc->id, crm_str(stop_node_uname));
1008 
1009  /* start a monitor for an already stopped resource */
1010  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
1011  if (possible_matches == NULL) {
1012  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
1013  crm_str(stop_node_uname));
1014  is_optional = FALSE;
1015  } else {
1016  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1017  crm_str(stop_node_uname));
1018  is_optional = TRUE;
1019  g_list_free(possible_matches);
1020  }
1021 
1022  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1023 
1024  rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
1025  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1026  free(rc_inactive);
1027 
1028  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1029  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1030  FALSE);
1031  GList *pIter = NULL;
1032 
1033  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1034  pe_action_t *probe = (pe_action_t *) pIter->data;
1035 
1036  order_actions(probe, stopped_mon, pe_order_runnable_left);
1037  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1038  }
1039 
1040  g_list_free(probes);
1041  }
1042 
1043  if (probe_complete_ops) {
1044  g_list_free(probe_complete_ops);
1045  }
1046 
1047  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1048 
1049  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1050  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1051 
1052  if (!pcmk_is_set(stop->flags, pe_action_optional)) {
1053  stop_is_optional = FALSE;
1054  }
1055 
1056  if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
1057  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1058  crm_str(stop_node_uname), stopped_mon->uuid);
1060  __func__, __LINE__);
1061  }
1062 
1063  if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1064  custom_action_order(rsc, stop_key(rsc), stop,
1065  NULL, strdup(key), stopped_mon,
1067  }
1068 
1069  }
1070 
1071  if (stop_ops) {
1072  g_list_free(stop_ops);
1073  }
1074 
1075  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1076  && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1077  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1078  key, crm_str(stop_node_uname));
1079  update_action_flags(stopped_mon, pe_action_optional, __func__,
1080  __LINE__);
1081  }
1082 
1083  if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1084  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1085  }
1086 
1087  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1088  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1089  crm_str(stop_node_uname), stopped_mon->uuid);
1091  __func__, __LINE__);
1092  }
1093 
1094  if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
1095  && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
1096  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1097  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1098  }
1099  }
1100 
1101  free(key);
1102 }
1103 
1104 static void
1105 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1106 {
1107  if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
1108  (node == NULL || node->details->maintenance == FALSE)) {
1109  xmlNode *operation = NULL;
1110 
1111  for (operation = pcmk__xe_first_child(rsc->ops_xml);
1112  operation != NULL;
1113  operation = pcmk__xe_next(operation)) {
1114 
1115  if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
1116  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1117  }
1118  }
1119  }
1120 }
1121 
/*!
 * \internal
 * \brief Create migrate_to/migrate_from actions (plus their orderings and
 *        meta-attributes) for a resource moving from \p current to \p chosen
 *
 * NOTE(review): this listing is missing several source lines (the embedded
 * line numbers jump, e.g. 1145-1147, 1154, 1169, 1178, 1182, 1189), so the
 * flag arguments of some orderings are not visible here — confirm against
 * the original source file before relying on the exact ordering flags.
 */
1122 static void
1123 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
1124 {
1125  pe_action_t *migrate_to = NULL;
1126  pe_action_t *migrate_from = NULL;
1127  pe_action_t *start = NULL;
1128  pe_action_t *stop = NULL;
1129  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
1130 
1131  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
1132  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
1133  start = start_action(rsc, chosen, TRUE);
1134  stop = stop_action(rsc, current, TRUE);
1135 
     /* A partial migration already has migrate_to done; only create it for
      * a fresh migration.
      */
1136  if (partial == FALSE) {
1137  migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1138  RSC_MIGRATE, current, TRUE, TRUE, data_set);
1139  }
1140 
1141  migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1142  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
1143 
1144  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
1145 
1148 
1149  // This is easier than trying to delete it from the graph
1150  update_action_flags(start, pe_action_pseudo, __func__, __LINE__);
1151 
1152  /* order probes before migrations */
1153  if (partial) {
1155  migrate_from->needs = start->needs;
1156 
1157  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1158  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1159  NULL, pe_order_optional, data_set);
1160 
1161  } else {
1164  migrate_to->needs = start->needs;
1165 
1166  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1167  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1168  NULL, pe_order_optional, data_set);
     /* NOTE(review): the first arguments of this ordering (line 1169) are
      * missing from the listing.
      */
1170  NULL, rsc,
1171  pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1173  data_set);
1174  }
1175 
     // Order migrate_from before both the stop and the (pseudo) start
1176  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1177  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1179  data_set);
1180  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1181  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1183  data_set);
1184 
1185  }
1186 
1187  if (migrate_to) {
1188  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1190 
1191  /* Pacemaker Remote connections don't require pending to be recorded in
1192  * the CIB. We can reduce CIB writes by not setting PENDING for them.
1193  */
1194  if (rsc->is_remote_node == FALSE) {
1195  /* migrate_to takes place on the source node, but can
1196  * have an effect on the target node depending on how
1197  * the agent is written. Because of this, we have to maintain
1198  * a record that the migrate_to occurred, in case the source node
1199  * loses membership while the migrate_to action is still in-flight.
1200  */
1201  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
1202  }
1203  }
1204 
     // migrate_from carries both source and target so the agent can use them
1205  if (migrate_from) {
1206  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1207  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1208  }
1209 }
1210 
/*!
 * \internal
 * \brief Create all actions needed to take a primitive resource from its
 *        current role/location to its allocated next role: dangling-migration
 *        stops, multiply-active recovery, stop/start/demote/promote steps via
 *        the role matrices, recurring monitors, and (if allowed) migration.
 *
 * NOTE(review): the function's own signature line (rendered line 1212) and
 * several interior lines are missing from this listing; upstream this is
 * native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set) —
 * confirm against the original source file.
 */
1211 void
1213 {
1214  pe_action_t *start = NULL;
1215  pe_node_t *chosen = NULL;
1216  pe_node_t *current = NULL;
1217  gboolean need_stop = FALSE;
1218  bool need_promote = FALSE;
1219  gboolean is_moving = FALSE;
1220  gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
1221 
1222  GList *gIter = NULL;
1223  unsigned int num_all_active = 0;
1224  unsigned int num_clean_active = 0;
1225  bool multiply_active = FALSE;
1226  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
1227  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
1228 
1229  CRM_ASSERT(rsc);
1230  chosen = rsc->allocated_to;
1231  next_role = rsc->next_role;
     // If allocation did not set a next role, derive one from the placement
1232  if (next_role == RSC_ROLE_UNKNOWN) {
1233  pe__set_next_role(rsc,
1234  (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
1235  "allocation");
1236  }
1237  pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
1238  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
1239  ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
1240  ((chosen == NULL)? "no node" : chosen->details->uname));
1241 
1242  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
1243 
     // Schedule stops for nodes where a migrate_to succeeded but left residue
1244  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
1245  pe_node_t *dangling_source = (pe_node_t *) gIter->data;
1246 
1247  pe_action_t *stop = NULL;
1248 
1249  pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
1250  pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
1251  rsc->id, dangling_source->details->uname);
1252  stop = stop_action(rsc, dangling_source, FALSE);
1254  if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
1255  DeleteRsc(rsc, dangling_source, FALSE, data_set);
1256  }
1257  }
1258 
     /* NOTE(review): a condition line (1260) is missing here; the visible
      * checks are for an in-progress partial migration whose target is still
      * the chosen node.
      */
1259  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
1261  && (current->details == rsc->partial_migration_source->details)
1262  && (chosen->details == rsc->partial_migration_target->details)) {
1263 
1264  /* The chosen node is still the migration target from a partial
1265  * migration. Attempt to continue the migration instead of recovering
1266  * by stopping the resource everywhere and starting it on a single node.
1267  */
1268  pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
1269  "to target %s from %s",
1272 
1273  } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
1274  /* If a resource has "requires" set to nothing or quorum, don't consider
1275  * it active on unclean nodes (similar to how all resources behave when
1276  * stonith-enabled is false). We can start such resources elsewhere
1277  * before fencing completes, and if we considered the resource active on
1278  * the failed node, we would attempt recovery for being active on
1279  * multiple nodes.
1280  */
1281  multiply_active = (num_clean_active > 1);
1282  } else {
1283  multiply_active = (num_all_active > 1);
1284  }
1285 
1286  if (multiply_active) {
1288  // Migration was in progress, but we've chosen a different target
1289  crm_notice("Resource %s can no longer migrate from %s to %s "
1290  "(will stop on both nodes)",
1293 
1294  } else {
1295  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
1296 
1297  // Resource was incorrectly multiply active
1298  pe_proc_err("%s resource %s is active on %u nodes (%s)",
1299  crm_str(class), rsc->id, num_all_active,
1300  recovery2text(rsc->recovery_type));
1301  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
1302  }
1303 
1304  if (rsc->recovery_type == recovery_stop_start) {
1305  need_stop = TRUE;
1306  }
1307 
1308  /* If by chance a partial migration is in process, but the migration
1309  * target is not chosen still, clear all partial migration data.
1310  */
1312  allow_migrate = FALSE;
1313  }
1314 
1315  if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
1316  pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
1317  rsc->id);
1318  start = start_action(rsc, chosen, TRUE);
1320  }
1321 
     // Decide whether a full stop is needed (move, recovery, block, etc.)
1322  if (current && chosen && current->details != chosen->details) {
1323  pe_rsc_trace(rsc, "Moving %s from %s to %s",
1324  rsc->id, crm_str(current->details->uname),
1325  crm_str(chosen->details->uname));
1326  is_moving = TRUE;
1327  need_stop = TRUE;
1328 
1329  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
1330  if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
1331  need_stop = TRUE;
1332  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
1333  } else {
1334  pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
1335  if (rsc->next_role == RSC_ROLE_PROMOTED) {
1336  need_promote = TRUE;
1337  }
1338  }
1339 
1340  } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
1341  pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
1342  need_stop = TRUE;
1343 
1344  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
1345  pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
1346  rsc->id);
1347  start = start_action(rsc, chosen, TRUE);
1348  if (!pcmk_is_set(start->flags, pe_action_optional)) {
1349  // Recovery of a promoted resource
1350  pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
1351  need_stop = TRUE;
1352  }
1353  }
1354 
1355  /* Create any additional actions required when bringing resource down and
1356  * back up to same level.
1357  */
1358  role = rsc->role;
     // Walk the role matrix down to Stopped
1359  while (role != RSC_ROLE_STOPPED) {
1360  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
1361  pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
1362  (need_stop? "required" : "optional"), rsc->id,
1363  role2text(role), role2text(next_role));
1364  if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
1365  break;
1366  }
1367  role = next_role;
1368  }
1369 
1370 
     // Walk back up to the current role (restart-in-place case)
1371  while ((rsc->role <= rsc->next_role) && (role != rsc->role)
1372  && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
1373  bool required = need_stop;
1374 
1375  next_role = rsc_state_matrix[role][rsc->role];
1376  if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
1377  required = true;
1378  }
1379  pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
1380  (required? "required" : "optional"), rsc->id,
1381  role2text(role), role2text(next_role));
1382  if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
1383  data_set) == FALSE) {
1384  break;
1385  }
1386  role = next_role;
1387  }
1388  role = rsc->role;
1389 
1390  /* Required steps from this role to the next */
1391  while (role != rsc->next_role) {
1392  next_role = rsc_state_matrix[role][rsc->next_role];
1393  pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
1394  rsc->id, role2text(role), role2text(next_role),
1395  role2text(rsc->next_role));
1396  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
1397  break;
1398  }
1399  role = next_role;
1400  }
1401 
     // Recurring monitors: skipped for blocked resources
1402  if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
1403  pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
1404  rsc->id);
1405 
1406  } else if ((rsc->next_role != RSC_ROLE_STOPPED)
1407  || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1408  pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
1409  ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
1410  rsc->id);
1411  start = start_action(rsc, chosen, TRUE);
1412  Recurring(rsc, start, chosen, data_set);
1413  Recurring_Stopped(rsc, start, chosen, data_set);
1414 
1415  } else {
1416  pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
1417  rsc->id);
1418  Recurring_Stopped(rsc, NULL, NULL, data_set);
1419  }
1420 
1421  /* if we are stuck in a partial migration, where the target
1422  * of the partial migration no longer matches the chosen target.
1423  * A full stop/start is required */
1424  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
1425  pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
1426  rsc->id);
1427  allow_migrate = FALSE;
1428 
1429  } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
1430  || pcmk_any_flags_set(rsc->flags,
1432  || (current && current->details->unclean)
1433  || rsc->next_role < RSC_ROLE_STARTED) {
1434 
1435  allow_migrate = FALSE;
1436  }
1437 
1438  if (allow_migrate) {
1439  handle_migration_actions(rsc, current, chosen, data_set);
1440  }
1441 }
1442 
1443 static void
1444 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1445 {
1446  GHashTableIter iter;
1447  pe_node_t *node = NULL;
1448  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1449  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1450  if (node->details->remote_rsc) {
1451  node->weight = -INFINITY;
1452  }
1453  }
1454 }
1455 
1470 static GList *
1471 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1472 {
1473  GList *allowed_nodes = NULL;
1474 
1475  if (rsc->allowed_nodes) {
1476  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1477  }
1478 
1479  if (!pcmk__is_daemon) {
1480  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1481  }
1482 
1483  return allowed_nodes;
1484 }
1485 
/*!
 * \internal
 * \brief Create the implicit constraints a primitive resource needs:
 *        stop-before-start ordering, promotable-role orderings, unfencing
 *        orderings, utilization (load) orderings, and container/guest-node
 *        orderings and colocations.
 *
 * NOTE(review): the function's signature line (rendered line 1487) and a
 * number of flag/condition lines are missing from this listing; upstream
 * this is native_internal_constraints(pe_resource_t *rsc,
 * pe_working_set_t *data_set) — confirm against the original source file.
 */
1486 void
1488 {
1489  /* This function is on the critical path and worth optimizing as much as possible */
1490 
1491  pe_resource_t *top = NULL;
1492  GList *allowed_nodes = NULL;
1493  bool check_unfencing = FALSE;
1494  bool check_utilization = FALSE;
1495 
1496  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
1497  pe_rsc_trace(rsc,
1498  "Skipping native constraints for unmanaged resource: %s",
1499  rsc->id);
1500  return;
1501  }
1502 
1503  top = uber_parent(rsc);
1504 
1505  // Whether resource requires unfencing
     /* NOTE(review): the remaining conditions of this assignment (lines
      * 1507-1508) are missing from the listing.
      */
1506  check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
1509 
1510  // Whether a non-default placement strategy is used
1511  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
1512  && !pcmk__str_eq(data_set->placement_strategy,
1513  "default", pcmk__str_casei);
1514 
1515  // Order stops before starts (i.e. restart)
1516  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1517  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1519  data_set);
1520 
1521  // Promotable ordering: demote before stop, start before promote
1523  || (rsc->role > RSC_ROLE_UNPROMOTED)) {
1524 
1525  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
1526  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1528 
1529  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1530  rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
1531  pe_order_runnable_left, data_set);
1532  }
1533 
1534  // Don't clear resource history if probing on same node
1536  NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
1538  data_set);
1539 
1540  // Certain checks need allowed nodes
1541  if (check_unfencing || check_utilization || rsc->container) {
1542  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
1543  }
1544 
1545  if (check_unfencing) {
1546  /* Check if the node needs to be unfenced first */
1547 
1548  for (GList *item = allowed_nodes; item; item = item->next) {
1549  pe_node_t *node = item->data;
1550  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
1551 
1552  crm_debug("Ordering any stops of %s before %s, and any starts after",
1553  rsc->id, unfence->uuid);
1554 
1555  /*
1556  * It would be more efficient to order clone resources once,
1557  * rather than order each instance, but ordering the instance
1558  * allows us to avoid unnecessary dependencies that might conflict
1559  * with user constraints.
1560  *
1561  * @TODO: This constraint can still produce a transition loop if the
1562  * resource has a stop scheduled on the node being unfenced, and
1563  * there is a user ordering constraint to start some other resource
1564  * (which will be ordered after the unfence) before stopping this
1565  * resource. An example is "start some slow-starting cloned service
1566  * before stopping an associated virtual IP that may be moving to
1567  * it":
1568  * stop this -> unfencing -> start that -> stop this
1569  */
1570  custom_action_order(rsc, stop_key(rsc), NULL,
1571  NULL, strdup(unfence->uuid), unfence,
1573 
1574  custom_action_order(NULL, strdup(unfence->uuid), unfence,
1575  rsc, start_key(rsc), NULL,
1577  data_set);
1578  }
1579  }
1580 
1581  if (check_utilization) {
1582  GList *gIter = NULL;
1583 
1584  pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
1585  rsc->id, data_set->placement_strategy);
1586 
     // Order this resource's stop before each current node's load_stopped
1587  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
1588  pe_node_t *current = (pe_node_t *) gIter->data;
1589 
1590  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1591  current->details->uname);
1592  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1593 
1594  if (load_stopped->node == NULL) {
1595  load_stopped->node = pe__copy_node(current);
1597  __func__, __LINE__);
1598  }
1599 
1600  custom_action_order(rsc, stop_key(rsc), NULL,
1601  NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
1602  }
1603 
     // Order each allowed node's load_stopped before start/migrate_to
1604  for (GList *item = allowed_nodes; item; item = item->next) {
1605  pe_node_t *next = item->data;
1606  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1607  next->details->uname);
1608  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1609 
1610  if (load_stopped->node == NULL) {
1611  load_stopped->node = pe__copy_node(next);
1613  __func__, __LINE__);
1614  }
1615 
1616  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1617  rsc, start_key(rsc), NULL, pe_order_load, data_set);
1618 
1619  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1620  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1621  NULL, pe_order_load, data_set);
1622 
1623  free(load_stopped_task);
1624  }
1625  }
1626 
1627  if (rsc->container) {
1628  pe_resource_t *remote_rsc = NULL;
1629 
1630  if (rsc->is_remote_node) {
1631  // rsc is the implicit remote connection for a guest or bundle node
1632 
1633  /* Do not allow a guest resource to live on a Pacemaker Remote node,
1634  * to avoid nesting remotes. However, allow bundles to run on remote
1635  * nodes.
1636  */
1638  rsc_avoids_remote_nodes(rsc->container);
1639  }
1640 
1641  /* If someone cleans up a guest or bundle node's container, we will
1642  * likely schedule a (re-)probe of the container and recovery of the
1643  * connection. Order the connection stop after the container probe,
1644  * so that if we detect the container running, we will trigger a new
1645  * transition and avoid the unnecessary recovery.
1646  */
1648  pe_order_optional, data_set);
1649 
1650  /* A user can specify that a resource must start on a Pacemaker Remote
1651  * node by explicitly configuring it with the container=NODENAME
1652  * meta-attribute. This is of questionable merit, since location
1653  * constraints can accomplish the same thing. But we support it, so here
1654  * we check whether a resource (that is not itself a remote connection)
1655  * has container set to a remote node or guest node resource.
1656  */
1657  } else if (rsc->container->is_remote_node) {
1658  remote_rsc = rsc->container;
1659  } else {
1660  remote_rsc = pe__resource_contains_guest_node(data_set,
1661  rsc->container);
1662  }
1663 
1664  if (remote_rsc) {
1665  /* Force the resource on the Pacemaker Remote node instead of
1666  * colocating the resource with the container resource.
1667  */
1668  for (GList *item = allowed_nodes; item; item = item->next) {
1669  pe_node_t *node = item->data;
1670 
1671  if (node->details->remote_rsc != remote_rsc) {
1672  node->weight = -INFINITY;
1673  }
1674  }
1675 
1676  } else {
1677  /* This resource is either a filler for a container that does NOT
1678  * represent a Pacemaker Remote node, or a Pacemaker Remote
1679  * connection resource for a guest node or bundle.
1680  */
1681  int score;
1682 
1683  crm_trace("Order and colocate %s relative to its container %s",
1684  rsc->id, rsc->container->id);
1685 
1687  pcmk__op_key(rsc->container->id, RSC_START, 0),
1688  NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
1689  NULL,
1691  data_set);
1692 
1693  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1694  rsc->container,
1695  pcmk__op_key(rsc->container->id, RSC_STOP, 0),
1696  NULL, pe_order_implies_first, data_set);
1697 
     /* NOTE(review): the condition line (1698) selecting between these two
      * scores is missing from the listing.
      */
1699  score = 10000; /* Highly preferred but not essential */
1700  } else {
1701  score = INFINITY; /* Force them to run on the same host */
1702  }
1703  pcmk__new_colocation("resource-with-container", NULL, score, rsc,
1704  rsc->container, NULL, NULL, true, data_set);
1705  }
1706  }
1707 
1708  if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
1709  /* don't allow remote nodes to run stonith devices
1710  * or remote connection resources.*/
1711  rsc_avoids_remote_nodes(rsc);
1712  }
1713  g_list_free(allowed_nodes);
1714 }
1715 
/*!
 * \internal
 * \brief Dispatch a colocation constraint to the "right-hand" resource's
 *        variant-specific rsc_colocation_rh method, after validating that
 *        both sides of the constraint are present.
 *
 * NOTE(review): the signature line (rendered line 1717) is missing from this
 * listing; upstream this is native_rsc_colocation_lh(pe_resource_t *rsc_lh,
 * pe_resource_t *rsc_rh, ...) — confirm against the original source file.
 */
1716 void
1718  pcmk__colocation_t *constraint,
1719  pe_working_set_t *data_set)
1720 {
1721  if (rsc_lh == NULL) {
1722  pe_err("rsc_lh was NULL for %s", constraint->id);
1723  return;
1724 
1725  } else if (constraint->rsc_rh == NULL) {
1726  pe_err("rsc_rh was NULL for %s", constraint->id);
1727  return;
1728  }
1729 
1730  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1731  rsc_rh->id);
1732 
     // Delegate to the RH resource's variant implementation
1733  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1734 }
1735 
/*!
 * \internal
 * \brief Decide how a colocation constraint should influence the dependent
 *        (LH) resource: its node scores, its promotion priority, or nothing.
 *
 * Returns influence_nothing when the constraint cannot apply yet (RH side not
 * allocated), when both sides are already allocated (only logging violations
 * of mandatory constraints), or when role filters rule it out; returns
 * influence_rsc_priority when the LH side belongs to an allocated promotable
 * clone; otherwise influence_rsc_location.
 *
 * NOTE(review): the return-type and name lines (rendered lines 1736-1737)
 * are missing from this listing; upstream this is
 * static enum filter_colocation_res filter_colocation_constraint(...) —
 * confirm against the original source file.
 */
1738  pcmk__colocation_t *constraint, gboolean preview)
1739 {
1740  /* rh side must be allocated before we can process constraint */
1741  if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) {
1742  return influence_nothing;
1743  }
1744 
1745  if ((constraint->role_lh >= RSC_ROLE_UNPROMOTED) &&
1746  rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1747  && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
1748 
1749  /* LH and RH resources have already been allocated, place the correct
1750  * priority on LH rsc for the given promotable clone resource role */
1751  return influence_rsc_priority;
1752  }
1753 
1754  if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
1755  // Log an error if we violated a mandatory colocation constraint
1756  const pe_node_t *rh_node = rsc_rh->allocated_to;
1757 
1758  if (rsc_lh->allocated_to == NULL) {
1759  // Dependent resource isn't allocated, so constraint doesn't matter
1760  return influence_nothing;
1761  }
1762 
1763  if (constraint->score >= INFINITY) {
1764  // Dependent resource must colocate with rh_node
1765 
1766  if ((rh_node == NULL)
1767  || (rh_node->details != rsc_lh->allocated_to->details)) {
1768  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1769  rsc_lh->id, rsc_rh->id,
1770  rsc_lh->allocated_to->details->uname,
1771  (rh_node? rh_node->details->uname : "unallocated"));
1772  }
1773 
1774  } else if (constraint->score <= -INFINITY) {
1775  // Dependent resource must anti-colocate with rh_node
1776 
1777  if ((rh_node != NULL)
1778  && (rsc_lh->allocated_to->details == rh_node->details)) {
1779  crm_err("%s and %s must be anti-colocated but are allocated "
1780  "to the same node (%s)",
1781  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1782  }
1783  }
1784  return influence_nothing;
1785  }
1786 
     // Role filters: positive constraints apply only to the configured roles
1787  if (constraint->score > 0
1788  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1789  crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
1790  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1791  return influence_nothing;
1792  }
1793 
1794  if (constraint->score > 0
1795  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1796  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1797  return influence_nothing;
1798  }
1799 
     // Negative constraints are skipped when the role WILL match
1800  if (constraint->score < 0
1801  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1802  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1803  role2text(constraint->role_lh));
1804  return influence_nothing;
1805  }
1806 
1807  if (constraint->score < 0
1808  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1809  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1810  role2text(constraint->role_rh));
1811  return influence_nothing;
1812  }
1813 
1814  return influence_rsc_location;
1815 }
1816 
1817 static void
1818 influence_priority(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1819  pcmk__colocation_t *constraint)
1820 {
1821  const char *rh_value = NULL;
1822  const char *lh_value = NULL;
1823  const char *attribute = CRM_ATTR_ID;
1824  int score_multiplier = 1;
1825 
1826  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1827  return;
1828  }
1829 
1830  if (constraint->node_attribute != NULL) {
1831  attribute = constraint->node_attribute;
1832  }
1833 
1834  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1835  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1836 
1837  if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) {
1838  if ((constraint->score == INFINITY)
1839  && (constraint->role_lh == RSC_ROLE_PROMOTED)) {
1840  rsc_lh->priority = -INFINITY;
1841  }
1842  return;
1843  }
1844 
1845  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1846  return;
1847  }
1848 
1849  if (constraint->role_lh == RSC_ROLE_UNPROMOTED) {
1850  score_multiplier = -1;
1851  }
1852 
1853  rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
1854  rsc_lh->priority);
1855 }
1856 
1857 static void
1858 colocation_match(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1859  pcmk__colocation_t *constraint)
1860 {
1861  const char *attribute = CRM_ATTR_ID;
1862  const char *value = NULL;
1863  GHashTable *work = NULL;
1864  GHashTableIter iter;
1865  pe_node_t *node = NULL;
1866 
1867  if (constraint->node_attribute != NULL) {
1868  attribute = constraint->node_attribute;
1869  }
1870 
1871  if (rsc_rh->allocated_to) {
1872  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1873 
1874  } else if (constraint->score < 0) {
1875  // Nothing to do (anti-colocation with something that is not running)
1876  return;
1877  }
1878 
1879  work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
1880 
1881  g_hash_table_iter_init(&iter, work);
1882  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1883  if (rsc_rh->allocated_to == NULL) {
1884  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
1885  constraint->id, rsc_lh->id, node->details->uname,
1886  constraint->score, rsc_rh->id);
1887  node->weight = pe__add_scores(-constraint->score, node->weight);
1888 
1889  } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) {
1890  if (constraint->score < CRM_SCORE_INFINITY) {
1891  pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
1892  constraint->id, rsc_lh->id,
1893  node->details->uname, constraint->score);
1894  node->weight = pe__add_scores(constraint->score, node->weight);
1895  }
1896 
1897  } else if (constraint->score >= CRM_SCORE_INFINITY) {
1898  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
1899  constraint->id, rsc_lh->id, node->details->uname,
1900  constraint->score, attribute);
1901  node->weight = pe__add_scores(-constraint->score, node->weight);
1902  }
1903  }
1904 
1905  if (can_run_any(work)
1906  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1907  g_hash_table_destroy(rsc_lh->allowed_nodes);
1908  rsc_lh->allowed_nodes = work;
1909  work = NULL;
1910 
1911  } else {
1912  pe_rsc_info(rsc_lh,
1913  "%s: Rolling back scores from %s (no available nodes)",
1914  rsc_lh->id, rsc_rh->id);
1915  }
1916 
1917  if (work) {
1918  g_hash_table_destroy(work);
1919  }
1920 }
1921 
/*!
 * \internal
 * \brief Apply a colocation constraint to a primitive dependent: filter the
 *        constraint, then either adjust the dependent's promotion priority,
 *        adjust its allowed-node scores, or do nothing.
 *
 * NOTE(review): the signature line (rendered line 1923) and the two case
 * labels (lines 1937 and 1940, presumably influence_rsc_priority and
 * influence_rsc_location) are missing from this listing — confirm against
 * the original source file.
 */
1922 void
1924  pcmk__colocation_t *constraint,
1925  pe_working_set_t *data_set)
1926 {
1927  enum filter_colocation_res filter_results;
1928 
1929  CRM_ASSERT(rsc_lh);
1930  CRM_ASSERT(rsc_rh);
1931  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1932  pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
1933  ((constraint->score > 0)? "Colocating" : "Anti-colocating"),
1934  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1935 
1936  switch (filter_results) {
1938  influence_priority(rsc_lh, rsc_rh, constraint);
1939  break;
1941  colocation_match(rsc_lh, rsc_rh, constraint);
1942  break;
1943  case influence_nothing:
1944  default:
1945  return;
1946  }
1947 }
1948 
1949 static gboolean
1950 filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1951 {
1952  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1953  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1954  role2text(rsc_ticket->role_lh));
1955  return FALSE;
1956  }
1957 
1958  return TRUE;
1959 }
1960 
1961 void
1963 {
1964  if (rsc_ticket == NULL) {
1965  pe_err("rsc_ticket was NULL");
1966  return;
1967  }
1968 
1969  if (rsc_lh == NULL) {
1970  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1971  return;
1972  }
1973 
1974  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1975  return;
1976  }
1977 
1978  if (rsc_lh->children) {
1979  GList *gIter = rsc_lh->children;
1980 
1981  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1982 
1983  for (; gIter != NULL; gIter = gIter->next) {
1984  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1985 
1986  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1987  }
1988  return;
1989  }
1990 
1991  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1992  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1993  role2text(rsc_ticket->role_lh));
1994 
1995  if ((rsc_ticket->ticket->granted == FALSE)
1996  && (rsc_lh->running_on != NULL)) {
1997 
1998  GList *gIter = NULL;
1999 
2000  switch (rsc_ticket->loss_policy) {
2001  case loss_ticket_stop:
2002  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2003  break;
2004 
2005  case loss_ticket_demote:
2006  // Promotion score will be set to -INFINITY in promotion_order()
2007  if (rsc_ticket->role_lh != RSC_ROLE_PROMOTED) {
2008  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2009  }
2010  break;
2011 
2012  case loss_ticket_fence:
2013  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2014  return;
2015  }
2016 
2017  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2018 
2019  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
2020  pe_node_t *node = (pe_node_t *) gIter->data;
2021 
2022  pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
2023  }
2024  break;
2025 
2026  case loss_ticket_freeze:
2027  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2028  return;
2029  }
2030  if (rsc_lh->running_on != NULL) {
2033  }
2034  break;
2035  }
2036 
2037  } else if (rsc_ticket->ticket->granted == FALSE) {
2038 
2039  if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
2040  || (rsc_ticket->loss_policy == loss_ticket_stop)) {
2041  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
2042  }
2043 
2044  } else if (rsc_ticket->ticket->standby) {
2045 
2046  if ((rsc_ticket->role_lh != RSC_ROLE_PROMOTED)
2047  || (rsc_ticket->loss_policy == loss_ticket_stop)) {
2048  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
2049  }
2050  }
2051 }
2052 
2053 enum pe_action_flags
2055 {
2056  return action->flags;
2057 }
2058 
2059 static inline bool
2060 is_primitive_action(pe_action_t *action)
2061 {
2062  return action && action->rsc && (action->rsc->variant == pe_native);
2063 }
2064 
2076 static void
2077 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
2078  enum pe_action_flags filter)
2079 {
2080  const char *reason = NULL;
2081 
2082  CRM_ASSERT(is_primitive_action(first));
2083  CRM_ASSERT(is_primitive_action(then));
2084 
2085  // We need to update the action in two cases:
2086 
2087  // ... if 'then' is required
2088  if (pcmk_is_set(filter, pe_action_optional)
2089  && !pcmk_is_set(then->flags, pe_action_optional)) {
2090  reason = "restart";
2091  }
2092 
2093  /* ... if 'then' is unrunnable start of managed resource (if a resource
2094  * should restart but can't start, we still want to stop)
2095  */
2096  if (pcmk_is_set(filter, pe_action_runnable)
2098  && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
2099  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
2100  reason = "stop";
2101  }
2102 
2103  if (reason == NULL) {
2104  return;
2105  }
2106 
2107  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
2108  first->uuid, then->uuid, reason);
2109 
2110  // Make 'first' required if it is runnable
2111  if (pcmk_is_set(first->flags, pe_action_runnable)) {
2112  pe_action_implies(first, then, pe_action_optional);
2113  }
2114 
2115  // Make 'first' required if 'then' is required
2116  if (!pcmk_is_set(then->flags, pe_action_optional)) {
2117  pe_action_implies(first, then, pe_action_optional);
2118  }
2119 
2120  // Make 'first' unmigratable if 'then' is unmigratable
2123  }
2124 
2125  // Make 'then' unrunnable if 'first' is required but unrunnable
2126  if (!pcmk_is_set(first->flags, pe_action_optional)
2127  && !pcmk_is_set(first->flags, pe_action_runnable)) {
2128  pe_action_implies(then, first, pe_action_runnable);
2129  }
2130 }
2131 
/* Update flags of the 'first' and 'then' actions of an ordering according to
 * the ordering type (the pe_native implementation of the update_actions()
 * resource method). Returns pe_graph_* flags indicating which of the two
 * actions changed, so the caller can re-process their dependents.
 *
 * NOTE(review): several original source lines were dropped from this extract
 * during conversion (they held hyperlinked identifiers). Each gap is marked
 * with a NOTE(review) comment below; restore them from the upstream source
 * before compiling.
 */
enum pe_graph_flags
/* NOTE(review): dropped line (orig 2133) -- the signature, presumably
 * native_update_actions(pe_action_t *first, pe_action_t *then,
 * pe_node_t *node, ...) -- confirm against upstream */
                      enum pe_action_flags flags, enum pe_action_flags filter,
                      enum pe_ordering type, pe_working_set_t *data_set)
{
    /* flags == get_action_flags(first, then_node) called from update_action() */
    enum pe_graph_flags changed = pe_graph_none;
    enum pe_action_flags then_flags = then->flags;   // snapshot, compared at the end
    enum pe_action_flags first_flags = first->flags; // snapshot, compared at the end

    crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
               first->uuid, first->node ? first->node->details->uname : "[none]",
               first->flags, then->uuid, then->flags);

    if (type & pe_order_asymmetrical) {
        pe_resource_t *then_rsc = then->rsc;
        enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;

        if (!then_rsc) {
            /* ignore */
        } else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
            /* ignore... if 'then' is supposed to be stopped after 'first', but
             * then is already stopped, there is nothing to be done when non-symmetrical. */
        } else if ((then_rsc_role >= RSC_ROLE_STARTED)
                   && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
                   /* NOTE(review): dropped condition line here (orig 2157) --
                    * confirm against upstream */
                   && then->node
                   && pcmk__list_of_1(then_rsc->running_on)
                   && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
            /* Ignore. If 'then' is supposed to be started after 'first', but
             * 'then' is already started, there is nothing to be done when
             * asymmetrical -- unless the start is mandatory, which indicates
             * the resource is restarting, and the ordering is still needed.
             */
        } else if (!(first->flags & pe_action_runnable)) {
            /* prevent 'then' action from happening if 'first' is not runnable and
             * 'then' has not yet occurred. */
            pe_action_implies(then, first, pe_action_optional);
            pe_action_implies(then, first, pe_action_runnable);

            pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
        } else {
            /* ignore... then is allowed to start/stop if it wants to. */
        }
    }

    if (type & pe_order_implies_first) {
        if (pcmk_is_set(filter, pe_action_optional)
            && !pcmk_is_set(flags /* Should be then_flags? */, pe_action_optional)) {
            // Needs pcmk_is_set(first_flags, pe_action_optional) too?
            pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_optional);
        }

        /* NOTE(review): dropped lines here (orig 2186-2188) -- presumably the
         * condition guarding the migrate-runnable implication below --
         * confirm against upstream */

            pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
                         first->uuid, then->uuid);
            /* NOTE(review): dropped line (orig 2192) -- presumably the
             * pe_action_implies() call for pe_action_migrate_runnable */
        }
    }

    if (type & pe_order_promoted_implies_first) {
        if ((filter & pe_action_optional) &&
            ((then->flags & pe_action_optional) == FALSE) &&
            (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)) {
            pe_action_implies(first, then, pe_action_optional);

            /* NOTE(review): dropped lines here (orig 2202-2203) -- presumably
             * the condition guarding the migrate-runnable implication below */

            pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
            /* NOTE(review): dropped line (orig 2206) -- presumably the
             * pe_action_implies() call for pe_action_migrate_runnable */
            }
            pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
        }
    }

    /* NOTE(review): dropped line (orig 2212) -- the opening condition of this
     * block (an ordering-type test) -- confirm against upstream */
        && pcmk_is_set(filter, pe_action_optional)) {

        if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
            ((then->flags & pe_action_runnable) == FALSE)) {

            pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_runnable);
        }

        if ((then->flags & pe_action_optional) == 0) {
            pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
            pe_action_implies(first, then, pe_action_optional);
        }
    }

    if ((type & pe_order_pseudo_left)
        && pcmk_is_set(filter, pe_action_optional)) {

        if ((first->flags & pe_action_runnable) == FALSE) {
            /* NOTE(review): dropped lines here (orig 2232-2233) -- presumably
             * pe_action_implies() calls on 'then' -- confirm against upstream */
            pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
        }

    }

    /* NOTE(review): dropped line (orig 2239) -- the opening condition of this
     * block (an ordering-type test) -- confirm against upstream */
        && pcmk_is_set(filter, pe_action_runnable)
        /* NOTE(review): dropped condition line here (orig 2241) */
        && !pcmk_is_set(flags, pe_action_runnable)) {
        pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
        pe_action_implies(then, first, pe_action_runnable);
        /* NOTE(review): dropped line here (orig 2245) -- confirm upstream */
    }

    /* NOTE(review): dropped line (orig 2248) -- the opening condition of this
     * block (an ordering-type test) -- confirm against upstream */
        && pcmk_is_set(filter, pe_action_optional)
        /* NOTE(review): dropped condition line here (orig 2250) */
        && !pcmk_is_set(flags, pe_action_optional)) {

        /* in this case, treat migrate_runnable as if first is optional */
        /* NOTE(review): dropped line (orig 2254) -- presumably an inner
         * condition opening the block below -- confirm against upstream */
            pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
            pe_action_implies(then, first, pe_action_optional);
        }
    }

    if (pcmk_is_set(type, pe_order_restart)) {
        handle_restart_ordering(first, then, filter);
    }

    if (then_flags != then->flags) {
        // 'then' changed: flag the graph update and re-process it
        pe__set_graph_flags(changed, first, pe_graph_updated_then);
        pe_rsc_trace(then->rsc,
                     "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
                     then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
                     then_flags, first->uuid, first->flags);

        if(then->rsc && then->rsc->parent) {
            /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
            update_action(then, data_set);
        }
    }

    if (first_flags != first->flags) {
        /* NOTE(review): dropped line (orig 2278) -- presumably
         * pe__set_graph_flags(changed, first, pe_graph_updated_first) */
        pe_rsc_trace(first->rsc,
                     "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
                     first->uuid, first->node ? first->node->details->uname : "[none]",
                     first->flags, first_flags, then->uuid, then->flags);
    }

    return changed;
}
2287 
2288 void
2290 {
2291  GList *gIter = NULL;
2292  bool need_role = false;
2293 
2294  CRM_CHECK((constraint != NULL) && (rsc != NULL), return);
2295 
2296  // If a role was specified, ensure constraint is applicable
2297  need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN);
2298  if (need_role && (constraint->role_filter != rsc->next_role)) {
2299  pe_rsc_trace(rsc,
2300  "Not applying %s to %s because role will be %s not %s",
2301  constraint->id, rsc->id, role2text(rsc->next_role),
2302  role2text(constraint->role_filter));
2303  return;
2304  }
2305 
2306  if (constraint->node_list_rh == NULL) {
2307  pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
2308  constraint->id, rsc->id);
2309  return;
2310  }
2311 
2312  pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id,
2313  (need_role? " for role " : ""),
2314  (need_role? role2text(constraint->role_filter) : ""), rsc->id);
2315 
2316  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2317  pe_node_t *node = (pe_node_t *) gIter->data;
2318  pe_node_t *other_node = NULL;
2319 
2320  other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2321 
2322  if (other_node != NULL) {
2323  pe_rsc_trace(rsc, "* + %d on %s",
2324  node->weight, node->details->uname);
2325  other_node->weight = pe__add_scores(other_node->weight,
2326  node->weight);
2327 
2328  } else {
2329  pe_rsc_trace(rsc, "* = %d on %s",
2330  node->weight, node->details->uname);
2331  other_node = pe__copy_node(node);
2332  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2333  }
2334 
2335  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2336  if (constraint->discover_mode == pe_discover_exclusive) {
2337  rsc->exclusive_discover = TRUE;
2338  }
2339  /* exclusive > never > always... always is default */
2340  other_node->rsc_discover_mode = constraint->discover_mode;
2341  }
2342  }
2343 }
2344 
2345 void
2347 {
2348  GList *gIter = NULL;
2349 
2350  CRM_ASSERT(rsc);
2351  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2352 
2353  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2354  pe_action_t *action = (pe_action_t *) gIter->data;
2355 
2356  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2357  graph_element_from_action(action, data_set);
2358  }
2359 
2360  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2361  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2362 
2363  child_rsc->cmds->expand(child_rsc, data_set);
2364  }
2365 }
2366 
/* Assert that the in-scope 'stop' action exists and has been made mandatory.
 * Skipped when the in-scope 'current' node is unclean (the stop will then be
 * a pseudo-op and may legitimately be absent or optional). Relies on 'rsc',
 * 'stop' and 'current' variables being in scope at the expansion site.
 */
#define STOP_SANITY_ASSERT(lineno) do { \
    if(current && current->details->unclean) { \
        /* It will be a pseudo op */ \
    } else if(stop == NULL) { \
        crm_err("%s:%d: No stop action exists for %s", \
                __func__, lineno, rsc->id); \
        CRM_ASSERT(stop != NULL); \
    } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
        crm_err("%s:%d: Action %s is still optional", \
                __func__, lineno, stop->uuid); \
        CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
    } \
    } while(0)
2380 
2381 void
2383 {
2384  pcmk__output_t *out = data_set->priv;
2385 
2386  pe_node_t *next = NULL;
2387  pe_node_t *current = NULL;
2388 
2389  gboolean moving = FALSE;
2390 
2391  if(rsc->variant == pe_container) {
2392  pcmk__bundle_log_actions(rsc, data_set);
2393  return;
2394  }
2395 
2396  if (rsc->children) {
2397  GList *gIter = NULL;
2398 
2399  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2400  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2401 
2402  LogActions(child_rsc, data_set);
2403  }
2404  return;
2405  }
2406 
2407  next = rsc->allocated_to;
2408  if (rsc->running_on) {
2409  current = pe__current_node(rsc);
2410  if (rsc->role == RSC_ROLE_STOPPED) {
2411  /*
2412  * This can occur when resources are being recovered
2413  * We fiddle with the current role in native_create_actions()
2414  */
2415  rsc->role = RSC_ROLE_STARTED;
2416  }
2417  }
2418 
2419  if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2420  /* Don't log stopped orphans */
2421  return;
2422  }
2423 
2424  out->message(out, "rsc-action", rsc, current, next, moving);
2425 }
2426 
/* Schedule stop actions for a resource on every node where it is running.
 * Always returns TRUE. Partially migrated resources are not stopped on their
 * migration target; elsewhere the stop is forced mandatory.
 *
 * NOTE(review): two original lines were dropped from this extract; each gap
 * is marked below and must be restored from upstream before compiling.
 */
gboolean
StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
    GList *gIter = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "%s", rsc->id);

    for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
        pe_node_t *current = (pe_node_t *) gIter->data;
        pe_action_t *stop;

        if (rsc->partial_migration_target) {
            // Mid-migration: don't stop on the target, force a stop elsewhere
            if (rsc->partial_migration_target->details == current->details) {
                pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
                             next->details->uname, rsc->id);
                continue;
            } else {
                pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
                optional = FALSE;
            }
        }

        pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
        stop = stop_action(rsc, current, optional);

        if(rsc->allocated_to == NULL) {
            // Resource has nowhere to go: record why it is being stopped
            pe_action_set_reason(stop, "node availability", TRUE);
        }

        if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            /* NOTE(review): dropped line (orig 2458) -- presumably the call
             * that clears pe_action_runnable on 'stop' for unmanaged
             * resources, continued by the next line -- confirm upstream */
            __func__, __LINE__);
        }

        if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, current, optional, data_set);
        }

        /* NOTE(review): dropped line (orig 2466) -- presumably a condition
         * checking whether the resource requires unfencing, opening the
         * block below -- confirm against upstream */
            pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);

            // Unfencing invalidates the device's registration: stop first
            order_actions(stop, unfence, pe_order_implies_first);
            if (!node_has_been_unfenced(current)) {
                pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
            }
        }
    }

    return TRUE;
}
2478 
2479 static void
2480 order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
2481  enum pe_ordering order, pe_working_set_t *data_set)
2482 {
2483  /* When unfencing is in use, we order unfence actions before any probe or
2484  * start of resources that require unfencing, and also of fence devices.
2485  *
2486  * This might seem to violate the principle that fence devices require
2487  * only quorum. However, fence agents that unfence often don't have enough
2488  * information to even probe or start unless the node is first unfenced.
2489  */
2490  if (is_unfence_device(rsc, data_set)
2492 
2493  /* Start with an optional ordering. Requiring unfencing would result in
2494  * the node being unfenced, and all its resources being stopped,
2495  * whenever a new resource is added -- which would be highly suboptimal.
2496  */
2497  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
2498 
2499  order_actions(unfence, action, order);
2500 
2501  if (!node_has_been_unfenced(node)) {
2502  // But unfencing is required if it has never been done
2503  char *reason = crm_strdup_printf("required by %s %s",
2504  rsc->id, action->task);
2505 
2506  trigger_unfencing(NULL, node, reason, NULL, data_set);
2507  free(reason);
2508  }
2509  }
2510 }
2511 
/* Schedule a start action for a resource on a node. The start is created
 * optional and then made mandatory when appropriate. Always returns TRUE.
 *
 * NOTE(review): one original line was dropped from this extract; the gap is
 * marked below and must be restored from upstream before compiling.
 */
gboolean
StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
    pe_action_t *start = NULL;

    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
    start = start_action(rsc, next, TRUE);

    // If the node needs unfencing, it must happen before this start
    order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);

    if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
        /* NOTE(review): dropped line (orig 2524) -- presumably the call that
         * clears pe_action_optional on 'start', continued by the next line --
         * confirm against upstream */
        __func__, __LINE__);
    }


    return TRUE;
}
2531 
/* Schedule a promote action for a resource on a node, if every start
 * scheduled there is runnable; otherwise cancel any scheduled promotes by
 * making them unrunnable. Always returns TRUE.
 *
 * NOTE(review): one original line was dropped from this extract; the gap is
 * marked below and must be restored from upstream before compiling.
 */
gboolean
PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
    GList *gIter = NULL;
    gboolean runnable = TRUE;
    GList *action_list = NULL;

    CRM_ASSERT(rsc);
    CRM_CHECK(next != NULL, return FALSE);
    pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);

    // Promotion is only possible if every start on this node can run
    action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *start = (pe_action_t *) gIter->data;

        if (!pcmk_is_set(start->flags, pe_action_runnable)) {
            runnable = FALSE;
        }
    }
    g_list_free(action_list);

    if (runnable) {
        promote_action(rsc, next, optional);
        return TRUE;
    }

    pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);

    // Cancel any scheduled promotes on this node
    action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *promote = (pe_action_t *) gIter->data;

        /* NOTE(review): dropped line (orig 2566) -- presumably the call that
         * clears pe_action_runnable on 'promote', continued by the next
         * line -- confirm against upstream */
        __func__, __LINE__);
    }

    g_list_free(action_list);
    return TRUE;
}
2573 
2574 gboolean
2575 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2576 {
2577  GList *gIter = NULL;
2578 
2579  CRM_ASSERT(rsc);
2580  pe_rsc_trace(rsc, "%s", rsc->id);
2581 
2582  /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
2583  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2584  pe_node_t *current = (pe_node_t *) gIter->data;
2585 
2586  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2587  demote_action(rsc, current, optional);
2588  }
2589  return TRUE;
2590 }
2591 
2592 gboolean
2593 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2594 {
2595  CRM_ASSERT(rsc);
2596  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2597  CRM_CHECK(FALSE, return FALSE);
2598  return FALSE;
2599 }
2600 
2601 gboolean
2602 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2603 {
2604  CRM_ASSERT(rsc);
2605  pe_rsc_trace(rsc, "%s", rsc->id);
2606  return FALSE;
2607 }
2608 
2609 gboolean
2610 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2611 {
2612  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2613  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2614  return FALSE;
2615 
2616  } else if (node == NULL) {
2617  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2618  return FALSE;
2619 
2620  } else if (node->details->unclean || node->details->online == FALSE) {
2621  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2622  node->details->uname);
2623  return FALSE;
2624  }
2625 
2626  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2627 
2628  delete_action(rsc, node, optional);
2629 
2630  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2631  optional ? pe_order_implies_then : pe_order_optional, data_set);
2632 
2633  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2634  optional ? pe_order_implies_then : pe_order_optional, data_set);
2635 
2636  return TRUE;
2637 }
2638 
/* Schedule a probe (status operation) for a resource on a node, if needed.
 * Returns TRUE if any probe was created (for collective resources, if any
 * child created one), otherwise FALSE.
 *
 * NOTE(review): several original lines were dropped from this extract during
 * conversion; each gap is marked below and must be restored from the
 * upstream source before compiling.
 */
gboolean
/* NOTE(review): dropped line (orig 2640) -- the function signature,
 * presumably: native_create_probe(pe_resource_t *rsc, pe_node_t *node,
 * pe_action_t *complete, gboolean force, pe_working_set_t *data_set) --
 * confirm against upstream */
{
    /* NOTE(review): dropped line (orig 2643) -- presumably the declaration
     * of the ordering-flags variable ('flags') used near the end -- confirm
     * against upstream */
    char *key = NULL;
    pe_action_t *probe = NULL;
    pe_node_t *running = NULL;
    pe_node_t *allowed = NULL;
    pe_resource_t *top = uber_parent(rsc);

    // Cached string forms of expected "not running" / "promoted" return codes
    static const char *rc_promoted = NULL;
    static const char *rc_inactive = NULL;

    if (rc_inactive == NULL) {
        rc_inactive = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
        rc_promoted = pcmk__itoa(PCMK_OCF_RUNNING_PROMOTED);
    }

    CRM_CHECK(node != NULL, return FALSE);
    if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
        pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
        return FALSE;
    }

    // Some resource types can never run on remote or guest nodes
    if (pe__is_guest_or_remote_node(node)) {
        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

        if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
                         rsc->id, node->details->id);
            return FALSE;
        } else if (pe__is_guest_node(node)
                   && pe__resource_contains_guest_node(data_set, rsc)) {
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
                         rsc->id, node->details->id);
            return FALSE;
        } else if (rsc->is_remote_node) {
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
                         rsc->id, node->details->id);
            return FALSE;
        }
    }

    // Collective resources: probe each child instead
    if (rsc->children) {
        GList *gIter = NULL;
        gboolean any_created = FALSE;

        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
                          || any_created;
        }

        return any_created;

    } else if ((rsc->container) && (!rsc->is_remote_node)) {
        pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
        return FALSE;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
        pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
        return FALSE;
    }

    // Check whether resource is already known on node
    if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
        pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
        return FALSE;
    }

    allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);

    // Honor exclusive discovery: only probe nodes explicitly marked for it
    if (rsc->exclusive_discover || top->exclusive_discover) {
        if (allowed == NULL) {
            /* exclusive discover is enabled and this node is not in the allowed list. */
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
            return FALSE;
        } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
            /* exclusive discover is enabled and this node is not marked
             * as a node this resource should be discovered on */
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
            return FALSE;
        }
    }

    if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
        /* If this node was allowed to host this resource it would
         * have been explicitly added to the 'allowed_nodes' list.
         * However it wasn't and the node has discovery disabled, so
         * no need to probe for this resource.
         */
        pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
        return FALSE;
    }

    if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
        /* this resource is marked as not needing to be discovered on this node */
        pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
        return FALSE;
    }

    if (pe__is_guest_node(node)) {
        pe_resource_t *remote = node->details->remote_rsc->container;

        if(remote->role == RSC_ROLE_STOPPED) {
            /* If the container is stopped, then we know anything that
             * might have been inside it is also stopped and there is
             * no need to probe.
             *
             * If we don't know the container's state on the target
             * either:
             *
             * - the container is running, the transition will abort
             * and we'll end up in a different case next time, or
             *
             * - the container is stopped
             *
             * Either way there is no need to probe.
             *
             */
            if(remote->allocated_to
               && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
                /* For safety, we order the 'rsc' start after 'remote'
                 * has been probed.
                 *
                 * Using 'top' helps for groups, but we may need to
                 * follow the start's ordering chain backwards.
                 */
                custom_action_order(remote,
                                    pcmk__op_key(remote->id, RSC_STATUS, 0),
                                    NULL, top,
                                    pcmk__op_key(top->id, RSC_START, 0), NULL,
                                    pe_order_optional, data_set);
            }
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
                         rsc->id, node->details->id, remote->id);
            return FALSE;

            /* Here we really we want to check if remote->stop is required,
             * but that information doesn't exist yet
             */
        } else if(node->details->remote_requires_reset
                  || node->details->unclean
                  || pcmk_is_set(remote->flags, pe_rsc_failed)
                  || remote->next_role == RSC_ROLE_STOPPED
                  || (remote->allocated_to
                      && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
            ) {
            /* The container is stopping or restarting, don't start
             * 'rsc' until 'remote' stops as this also implies that
             * 'rsc' is stopped - avoiding the need to probe
             */
            custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
                                NULL, top, pcmk__op_key(top->id, RSC_START, 0),
                                NULL, pe_order_optional, data_set);
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
                         rsc->id, node->details->id, remote->id);
            return FALSE;
/* } else {
 * The container is running so there is no problem probing it
 */
        }
    }

    key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
    probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
    /* NOTE(review): dropped line (orig 2811) -- presumably the call that
     * clears pe_action_optional on 'probe', continued by the next line --
     * confirm against upstream */
    __LINE__);

    // Unfencing (if needed) must happen before probing
    order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);

    /*
     * We need to know if it's running_on (not just known_on) this node
     * to correctly determine the target rc.
     */
    running = pe_find_node_id(rsc->running_on, node->details->id);
    if (running == NULL) {
        add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);

    } else if (rsc->role == RSC_ROLE_PROMOTED) {
        add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_promoted);
    }

    crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
    /* NOTE(review): dropped line (orig 2829) -- the remaining arguments of
     * the crm_debug() call above -- confirm against upstream */

    if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
        top = rsc;
    } else {
        crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
    }

    if (!pcmk_is_set(probe->flags, pe_action_runnable)
        && (rsc->running_on == NULL)) {
        /* Prevent the start from occurring if rsc isn't active, but
         * don't cause it to stop if it was active already
         */
        /* NOTE(review): dropped line (orig 2842) -- presumably sets an
         * ordering bit (e.g. pe_order_runnable_left) in 'flags' -- confirm
         * against upstream */
    }

    custom_action_order(rsc, NULL, probe,
                        top, pcmk__op_key(top->id, RSC_START, 0), NULL,
                        flags, data_set);

    // Order the probe before any agent reload
    custom_action_order(rsc, NULL, probe,
                        top, reload_key(rsc), NULL,
                        pe_order_optional, data_set);

#if 0
    // complete is always null currently
    if (!is_unfence_device(rsc, data_set)) {
        /* Normally rsc.start depends on probe complete which depends
         * on rsc.probe. But this can't be the case for fence devices
         * with unfencing, as it would create graph loops.
         *
         * So instead we explicitly order 'rsc.probe then rsc.start'
         */
        order_actions(probe, complete, pe_order_implies_then);
    }
#endif
    return TRUE;
}
2868 
2878 static bool
2879 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
2880 {
2881  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
2882  return TRUE;
2883 
2884  } else if ((rsc->variant == pe_native)
2885  && pe_rsc_is_anon_clone(rsc->parent)
2886  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
2887  /* We check only the parent, not the uber-parent, because we cannot
2888  * assume that the resource is known if it is in an anonymously cloned
2889  * group (which may be only partially known).
2890  */
2891  return TRUE;
2892  }
2893  return FALSE;
2894 }
2895 
/* Order a resource's actions relative to a pending fencing operation,
 * according to each action's "needs" requirement (nothing, quorum, or
 * fencing).
 *
 * NOTE(review): one original line was dropped from this extract; the gap is
 * marked below and must be restored from upstream before compiling.
 */
static void
native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
    pe_node_t *target;
    GList *gIter = NULL;

    CRM_CHECK(stonith_op && stonith_op->node, return);
    target = stonith_op->node;

    for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        switch (action->needs) {
            case rsc_req_nothing:
                // Anything other than start or promote requires nothing
                break;

            case rsc_req_stonith:
                // Action explicitly requires fencing to have completed
                order_actions(stonith_op, action, pe_order_optional);
                break;

            case rsc_req_quorum:
                if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
                    && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
                    && !rsc_is_known_on(rsc, target)) {

                    /* If we don't know the status of the resource on the node
                     * we're about to shoot, we have to assume it may be active
                     * there. Order the resource start after the fencing. This
                     * is analogous to waiting for all the probes for a resource
                     * to complete before starting it.
                     *
                     * The most likely explanation is that the DC died and took
                     * its status with it.
                     */
                    pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
                                 target->details->uname);
                    order_actions(stonith_op, action,
                    /* NOTE(review): dropped line (orig 2942) -- the ordering
                     * flags argument closing this call -- confirm upstream */
                }
                break;
        }
    }
}
2948 
2949 static void
2950 native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
2951 {
2952  GList *gIter = NULL;
2953  GList *action_list = NULL;
2954  bool order_implicit = false;
2955 
2956  pe_resource_t *top = uber_parent(rsc);
2957  pe_action_t *parent_stop = NULL;
2958  pe_node_t *target;
2959 
2960  CRM_CHECK(stonith_op && stonith_op->node, return);
2961  target = stonith_op->node;
2962 
2963  /* Get a list of stop actions potentially implied by the fencing */
2964  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
2965 
2966  /* If resource requires fencing, implicit actions must occur after fencing.
2967  *
2968  * Implied stops and demotes of resources running on guest nodes are always
2969  * ordered after fencing, even if the resource does not require fencing,
2970  * because guest node "fencing" is actually just a resource stop.
2971  */
2973  || pe__is_guest_node(target)) {
2974 
2975  order_implicit = true;
2976  }
2977 
2978  if (action_list && order_implicit) {
2979  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
2980  }
2981 
2982  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2983  pe_action_t *action = (pe_action_t *) gIter->data;
2984 
2985  // The stop would never complete, so convert it into a pseudo-action.
2987  __func__, __LINE__);
2988 
2989  if (order_implicit) {
2991  __func__, __LINE__);
2992 
2993  /* Order the stonith before the parent stop (if any).
2994  *
2995  * Also order the stonith before the resource stop, unless the
2996  * resource is inside a bundle -- that would cause a graph loop.
2997  * We can rely on the parent stop's ordering instead.
2998  *
2999  * User constraints must not order a resource in a guest node
3000  * relative to the guest node container resource. The
3001  * pe_order_preserve flag marks constraints as generated by the
3002  * cluster and thus immune to that check (and is irrelevant if
3003  * target is not a guest).
3004  */
3005  if (!pe_rsc_is_bundled(rsc)) {
3006  order_actions(stonith_op, action, pe_order_preserve);
3007  }
3008  order_actions(stonith_op, parent_stop, pe_order_preserve);
3009  }
3010 
3011  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3012  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3013  rsc->id, (order_implicit? "after" : "because"),
3014  target->details->uname);
3015  } else {
3016  crm_info("%s is implicit %s %s is fenced",
3017  action->uuid, (order_implicit? "after" : "because"),
3018  target->details->uname);
3019  }
3020 
3021  if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
3022  /* Create a second notification that will be delivered
3023  * immediately after the node is fenced
3024  *
3025  * Basic problem:
3026  * - C is a clone active on the node to be shot and stopping on another
3027  * - R is a resource that depends on C
3028  *
3029  * + C.stop depends on R.stop
3030  * + C.stopped depends on STONITH
3031  * + C.notify depends on C.stopped
3032  * + C.healthy depends on C.notify
3033  * + R.stop depends on C.healthy
3034  *
3035  * The extra notification here changes
3036  * + C.healthy depends on C.notify
3037  * into:
3038  * + C.healthy depends on C.notify'
3039  * + C.notify' depends on STONITH'
3040  * thus breaking the loop
3041  */
3042  create_secondary_notification(action, rsc, stonith_op, data_set);
3043  }
3044 
3045 /* From Bug #1601, successful fencing must be an input to a failed resources stop action.
3046 
3047  However given group(rA, rB) running on nodeX and B.stop has failed,
3048  A := stop healthy resource (rA.stop)
3049  B := stop failed resource (pseudo operation B.stop)
3050  C := stonith nodeX
3051  A requires B, B requires C, C requires A
3052  This loop would prevent the cluster from making progress.
3053 
3054  This block creates the "C requires A" dependency and therefore must (at least
3055  for now) be disabled.
3056 
3057  Instead, run the block above and treat all resources on nodeX as B would be
3058  (marked as a pseudo op depending on the STONITH).
3059 
3060  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3061 
3062  } else if(is_stonith == FALSE) {
3063  crm_info("Moving healthy resource %s"
3064  " off %s before fencing",
3065  rsc->id, node->details->uname);
3066 
3067  * stop healthy resources before the
3068  * stonith op
3069  *
3070  custom_action_order(
3071  rsc, stop_key(rsc), NULL,
3072  NULL,strdup(CRM_OP_FENCE),stonith_op,
3073  pe_order_optional, data_set);
3074 */
3075  }
3076 
3077  g_list_free(action_list);
3078 
3079  /* Get a list of demote actions potentially implied by the fencing */
3080  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3081 
3082  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3083  pe_action_t *action = (pe_action_t *) gIter->data;
3084 
3085  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3086  || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3087 
3088  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3089  pe_rsc_info(rsc,
3090  "Demote of failed resource %s is implicit after %s is fenced",
3091  rsc->id, target->details->uname);
3092  } else {
3093  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3094  action->uuid, target->details->uname);
3095  }
3096 
3097  /* The demote would never complete and is now implied by the
3098  * fencing, so convert it into a pseudo-action.
3099  */
3101  __func__, __LINE__);
3102 
3103  if (pe_rsc_is_bundled(rsc)) {
3104  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3105 
3106  } else if (order_implicit) {
3107  order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
3108  }
3109  }
3110  }
3111 
3112  g_list_free(action_list);
3113 }
3114 
3115 void
3117 {
3118  if (rsc->children) {
3119  GList *gIter = NULL;
3120 
3121  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3122  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3123 
3124  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3125  }
3126 
3127  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3128  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3129 
3130  } else {
3131  native_start_constraints(rsc, stonith_op, data_set);
3132  native_stop_constraints(rsc, stonith_op, data_set);
3133  }
3134 }
3135 
3136 void
3138 {
3139  GList *gIter = NULL;
3140  pe_action_t *reload = NULL;
3141 
3142  if (rsc->children) {
3143  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3144  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3145 
3146  ReloadRsc(child_rsc, node, data_set);
3147  }
3148  return;
3149 
3150  } else if (rsc->variant > pe_native) {
3151  /* Complex resource with no children */
3152  return;
3153 
3154  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3155  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3156  return;
3157 
3158  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3159  /* We don't need to specify any particular actions here, normal failure
3160  * recovery will apply.
3161  */
3162  pe_rsc_trace(rsc, "%s: preventing agent reload because failed",
3163  rsc->id);
3164  return;
3165 
3166  } else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
3167  /* If a resource's configuration changed while a start was pending,
3168  * force a full restart.
3169  */
3170  pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
3171  rsc->id);
3172  stop_action(rsc, node, FALSE);
3173  return;
3174 
3175  } else if (node == NULL) {
3176  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3177  return;
3178  }
3179 
3180  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3182 
3183  reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
3184  FALSE, TRUE, data_set);
3185  pe_action_set_reason(reload, "resource definition change", FALSE);
3186 
3187  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3189  data_set);
3190  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3192  data_set);
3193 }
3194 
3195 void
3196 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
3197 {
3198  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3199  pe_resource_t *parent;
3200 
3201  if (value) {
3202  char *name = NULL;
3203 
3205  crm_xml_add(xml, name, value);
3206  free(name);
3207  }
3208 
3209  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3210  if (value) {
3211  char *name = NULL;
3212 
3214  crm_xml_add(xml, name, value);
3215  free(name);
3216  }
3217 
3218  for (parent = rsc; parent != NULL; parent = parent->parent) {
3219  if (parent->container) {
3221  }
3222  }
3223 }
Services API.
pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set)
#define LOG_TRACE
Definition: logging.h:36
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:218
pe_node_t * pe_find_node(GList *node_list, const char *uname)
Definition: status.c:434
enum rsc_role_e role_filter
Definition: internal.h:171
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
enum rsc_start_requirement needs
Definition: pe_types.h:419
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:149
#define RSC_STOP
Definition: crm.h:204
#define crm_notice(fmt, args...)
Definition: logging.h:352
GHashTable * known_on
Definition: pe_types.h:366
xmlNode * ops_xml
Definition: pe_types.h:324
bool pe__is_guest_or_remote_node(const pe_node_t *node)
Definition: remote.c:41
gboolean(* create_probe)(pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *)
pe_resource_t * rsc_lh
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:19
gboolean unseen
Definition: pe_types.h:218
#define INFINITY
Definition: crm.h:99
GList * rsc_cons
Definition: pe_types.h:356
#define LOAD_STOPPED
#define pe__set_action_flags(action, flags_to_set)
Definition: internal.h:59
#define pe__show_node_weights(level, rsc, text, nodes, data_set)
Definition: internal.h:371
#define promote_action(rsc, node, optional)
Definition: internal.h:411
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:53
GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
#define stop_action(rsc, node, optional)
Definition: internal.h:395
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:999
pe_resource_t * container
Definition: pe_types.h:379
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:929
pe_node_t * partial_migration_source
Definition: pe_types.h:364
enum rsc_role_e role
Definition: pe_types.h:369
gboolean PromoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
GList * children
Definition: pe_types.h:376
resource_alloc_functions_t * cmds
Definition: pe_types.h:332
void pcmk__new_colocation(const char *id, const char *node_attr, int score, pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, const char *state_lh, const char *state_rh, bool influence, pe_working_set_t *data_set)
void native_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
gboolean standby
Definition: pe_types.h:459
#define pe_action_implies(action, reason, flag)
Definition: internal.h:526
#define pe_rsc_stop
Definition: pe_types.h:262
void rsc_ticket_constraint(pe_resource_t *lh_rsc, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set)
#define delete_action(rsc, node, optional)
Definition: internal.h:385
#define pe_flag_remove_after_stop
Definition: pe_types.h:110
enum pe_action_flags(* action_flags)(pe_action_t *, pe_node_t *)
pe_resource_t * rsc
Definition: pe_types.h:409
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:225
enum rsc_role_e next_role
Definition: pe_types.h:370
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:351
#define reload_key(rsc)
Definition: internal.h:399
#define pcmk__config_err(fmt...)
gboolean StartRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * remote_rsc
Definition: pe_types.h:230
pe_resource_t * rsc_rh
GHashTable * meta
Definition: pe_types.h:372
gboolean native_assign_node(pe_resource_t *rsc, pe_node_t *chosen, gboolean force)
#define pe_rsc_unique
Definition: pe_types.h:254
#define pe_rsc_notify
Definition: pe_types.h:253
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:101
resource_object_functions_t * fns
Definition: pe_types.h:331
#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear)
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:291
#define RSC_DELETE
Definition: crm.h:195
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:324
pe_node_t * pe__copy_node(const pe_node_t *this_node)
Definition: utils.c:142
void ReloadRsc(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
int(* message)(pcmk__output_t *out, const char *message_id,...)
GHashTable *(* merge_weights)(pe_resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
void resource_location(pe_resource_t *rsc, pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1588
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
GList * rsc_cons_lhs
Definition: pe_types.h:355
void native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_ticket_t * ticket
enum crm_ais_msg_types type
Definition: cpg.c:48
#define demote_key(rsc)
Definition: internal.h:420
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
pe_node_t * partial_migration_target
Definition: pe_types.h:363
#define RSC_START
Definition: crm.h:201
gboolean RoleError(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_node_t * allocated_to
Definition: pe_types.h:362
GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, uint32_t flags)
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:264
gboolean can_run_resources(const pe_node_t *node)
#define pe_flag_have_quorum
Definition: pe_types.h:94
#define CRM_SCORE_INFINITY
Definition: crm.h:85
#define pe_proc_err(fmt...)
Definition: internal.h:32
gboolean remote_requires_reset
Definition: pe_types.h:224
#define RSC_MIGRATE
Definition: crm.h:198
char * crm_meta_name(const char *field)
Definition: utils.c:511
const char * action
Definition: pcmk_fence.c:30
#define pe__set_resource_flags(resource, flags_to_set)
Definition: internal.h:47
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t *data_set)
Definition: utils.c:2143
GList * nodes
Definition: pe_types.h:157
#define pe_flag_stop_everything
Definition: pe_types.h:105
#define demote_action(rsc, node, optional)
Definition: internal.h:421
#define pe_rsc_provisional
Definition: pe_types.h:258
const char * role2text(enum rsc_role_e role)
Definition: common.c:459
void native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
#define CRM_ATTR_UNFENCED
Definition: crm.h:121
int weight
Definition: pe_types.h:241
#define pe_rsc_merging
Definition: pe_types.h:260
GList * dangling_migrations
Definition: pe_types.h:377
enum pe_discover_e discover_mode
Definition: internal.h:172
#define CRMD_ACTION_RELOAD_AGENT
Definition: crm.h:172
void native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2272
gboolean DemoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_action_flags
Definition: pe_types.h:291
void native_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, pcmk__colocation_t *constraint, pe_working_set_t *data_set)
#define pe_rsc_allow_migrate
Definition: pe_types.h:273
#define pe_rsc_failed
Definition: pe_types.h:267
pe_action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1848
#define crm_debug(fmt, args...)
Definition: logging.h:355
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:903
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:66
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:241
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:530
bool pe__is_guest_node(const pe_node_t *node)
Definition: remote.c:33
#define stop_key(rsc)
Definition: internal.h:394
#define pe_rsc_start_pending
Definition: pe_types.h:269
char * task
Definition: pe_types.h:413
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
#define pe__clear_action_flags(action, flags_to_clear)
Definition: internal.h:68
#define CRM_ATTR_UNAME
Definition: crm.h:114
int custom_action_order(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
#define crm_trace(fmt, args...)
Definition: logging.h:356
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:159
void pe_fence_node(pe_working_set_t *data_set, pe_node_t *node, const char *reason, bool priority_delay)
Schedule a fence action for a node.
Definition: unpack.c:97
#define promote_key(rsc)
Definition: internal.h:410
char * crm_strdup_printf(char const *format,...) G_GNUC_PRINTF(1
void process_utilization(pe_resource_t *rsc, pe_node_t **prefer, pe_working_set_t *data_set)
GHashTable * meta
Definition: pe_types.h:423
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition: util.h:114
void rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
struct pe_node_shared_s * details
Definition: pe_types.h:244
enum rsc_recovery_type recovery_type
Definition: pe_types.h:334
pe_node_t * node
Definition: pe_types.h:410
filter_colocation_res
enum loss_ticket_policy_e loss_policy
#define pe_rsc_needs_fencing
Definition: pe_types.h:280
bool pcmk__is_daemon
Definition: logging.c:47
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1802
unsigned long long flags
Definition: pe_types.h:347
const char * uname
Definition: pe_types.h:209
#define pe_rsc_promotable
Definition: pe_types.h:256
xmlNode * find_rsc_op_entry(pe_resource_t *rsc, const char *key)
Definition: utils.c:1321
void(* expand)(pe_resource_t *, pe_working_set_t *)
void LogActions(pe_resource_t *rsc, pe_working_set_t *data_set)
#define pe_flag_stonith_enabled
Definition: pe_types.h:98
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:635
#define pe__set_graph_flags(graph_flags, gr_action, flags_to_set)
Definition: internal.h:125
GList * actions
Definition: pe_types.h:358
pe_graph_flags
Definition: pe_types.h:283
GHashTable * utilization
Definition: pe_types.h:374
gboolean native_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set)
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:233
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:323
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:244
char * uuid
Definition: pe_types.h:414
gboolean update_action_flags(pe_action_t *action, enum pe_action_flags flags, const char *source, int line)
#define pe_rsc_allocating
Definition: pe_types.h:259
enum rsc_role_e text2role(const char *role)
Definition: common.c:488
enum pe_obj_types variant
Definition: pe_types.h:329
gboolean granted
Definition: pe_types.h:457
int new_rsc_order(pe_resource_t *lh_rsc, const char *lh_task, pe_resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set)
const char * placement_strategy
Definition: pe_types.h:144
int rsc_discover_mode
Definition: pe_types.h:245
gboolean can_run_any(GHashTable *nodes)
const char * id
Definition: pe_types.h:208
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition: operations.c:45
char * id
Definition: pe_types.h:456
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
#define pe_rsc_fence_device
Definition: pe_types.h:255
GHashTable * pcmk__copy_node_table(GHashTable *nodes)
pe_node_t * pe_find_node_id(GList *node_list, const char *id)
Definition: status.c:418
const char * target
Definition: pcmk_fence.c:29
enum filter_colocation_res filter_colocation_constraint(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, pcmk__colocation_t *constraint, gboolean preview)
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:49
gboolean(* rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
gboolean is_remote_node
Definition: pe_types.h:350
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:579
#define start_action(rsc, node, optional)
Definition: internal.h:401
#define CRM_META
Definition: crm.h:78
int pe__add_scores(int score1, int score2)
Definition: common.c:516
#define crm_err(fmt, args...)
Definition: logging.h:350
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:215
char guint crm_parse_interval_spec(const char *input)
Parse milliseconds from a Pacemaker interval specification.
Definition: utils.c:314
#define pe_rsc_reload
Definition: pe_types.h:263
#define RSC_PROMOTE
Definition: crm.h:207
void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
Definition: complex.c:1116
gboolean StopRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
This structure contains everything that makes up a single output formatter.
#define pe_rsc_needs_unfencing
Definition: pe_types.h:281
gboolean shutdown
Definition: pe_types.h:219
#define pe__clear_resource_flags(resource, flags_to_clear)
Definition: internal.h:53
#define crm_str(x)
Definition: logging.h:376
rsc_role_e
Possible roles that a resource can be in.
Definition: common.h:92
void native_append_meta(pe_resource_t *rsc, xmlNode *xml)
GList * running_on
Definition: pe_types.h:365
#define pe_rsc_block
Definition: pe_types.h:250
enum pe_action_flags flags
Definition: pe_types.h:418
gboolean maintenance
Definition: pe_types.h:222
#define pe_rsc_maintenance
Definition: pe_types.h:276
pe_working_set_t * cluster
Definition: pe_types.h:326
const char * node_attribute
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:258
const char * id
#define pe_flag_have_stonith_resource
Definition: pe_types.h:99
#define RSC_ROLE_MAX
Definition: common.h:108
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1492
#define pe_flag_enable_unfencing
Definition: pe_types.h:100
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:20
#define pe__set_order_flags(order_flags, flags_to_set)
Definition: internal.h:111
#define start_key(rsc)
Definition: internal.h:400
#define ID(x)
Definition: msg_xml.h:456
unsigned long long flags
Definition: pe_types.h:146
#define pe_err(fmt...)
Definition: internal.h:22
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1540
char * name
Definition: pcmk_fence.c:31
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:322
#define CRM_OP_LRM_DELETE
Definition: crm.h:151
#define CRM_ATTR_ID
Definition: crm.h:115
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:218
gboolean unclean
Definition: pe_types.h:217
#define pe_flag_show_scores
Definition: pe_types.h:133
gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set)
void graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
#define crm_info(fmt, args...)
Definition: logging.h:353
enum pe_action_flags native_action_flags(pe_action_t *action, pe_node_t *node)
#define pe_rsc_managed
Definition: pe_types.h:249
#define pe_rsc_orphan
Definition: pe_types.h:248
pe_action_t * find_first_action(GList *input, const char *uuid, const char *task, pe_node_t *on_node)
Definition: utils.c:1422
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, pcmk__colocation_t *, pe_working_set_t *)
pe_ordering
Definition: pe_types.h:482
gboolean online
Definition: pe_types.h:213
uint64_t flags
Definition: remote.c:149
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:402
pe_resource_t * parent
Definition: pe_types.h:327
pe_action_t * pe_fence_op(pe_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t *data_set)
Definition: utils.c:2035
#define RSC_DEMOTE
Definition: crm.h:209
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:18
gboolean NullOp(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
void create_secondary_notification(pe_action_t *action, pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *preferred, pe_working_set_t *data_set)
char * id
Definition: pe_types.h:320
pe_action_t * custom_action(pe_resource_t *rsc, char *key, const char *task, pe_node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Definition: utils.c:415
GHashTable * allowed_nodes
Definition: pe_types.h:367
#define RSC_MIGRATED
Definition: crm.h:199
#define pe_flag_startup_probes
Definition: pe_types.h:115
pe_node_t *(* allocate)(pe_resource_t *, pe_node_t *, pe_working_set_t *)