pacemaker  2.0.5-ba59be712
Scalable High-Availability cluster resource manager
pcmk_sched_native.c
/*
 * Copyright 2004-2020 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <crm/pengine/rules.h>
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include <crm/services.h>

// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1

#define INFINITY_HACK   (INFINITY * -100)

#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>
static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                      pe_working_set_t *data_set);
static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                        xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                              pe_working_set_t *data_set);
static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                                xmlNode *operation, pe_working_set_t *data_set);

void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
                    pe_working_set_t * data_set);
gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);

/* This array says what the *next* role should be when transitioning from one
 * role to another. For example, when going from Stopped to Master, the next
 * role is RSC_ROLE_SLAVE, because the resource must be started before it can
 * be promoted. That intermediate role is then fed into this array again,
 * giving a next role of RSC_ROLE_MASTER.
 */
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
    /* Current state  Next state */
    /*                 Unknown           Stopped           Started           Slave             Master */
    /* Unknown */    { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED },
    /* Stopped */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE },
    /* Started */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
    /* Slave */      { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
    /* Master */     { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_SLAVE,   RSC_ROLE_MASTER },
};

typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
                                      gboolean optional,
                                      pe_working_set_t *data_set);

// This array picks the function needed to transition from one role to another
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
    /* Current state   Next state */
    /*                 Unknown    Stopped    Started    Slave      Master */
    /* Unknown */    { RoleError, StopRsc,   RoleError, RoleError, RoleError, },
    /* Stopped */    { RoleError, NullOp,    StartRsc,  StartRsc,  RoleError, },
    /* Started */    { RoleError, StopRsc,   NullOp,    NullOp,    PromoteRsc, },
    /* Slave */      { RoleError, StopRsc,   StopRsc,   NullOp,    PromoteRsc, },
    /* Master */     { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp,    },
};
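
/* Editor's note: an illustrative sketch (not in the original file) of how the
 * two matrices above combine. Taking a resource from Stopped to Master first
 * consults rsc_state_matrix[Stopped][Master], which yields Slave (the resource
 * must be started before it can be promoted), then
 * rsc_state_matrix[Slave][Master], which yields Master. The matching
 * rsc_action_matrix entries schedule StartRsc and then PromoteRsc:
 */
#if 0
static void
demo_stopped_to_master(pe_resource_t *rsc, pe_node_t *node,
                       pe_working_set_t *data_set)
{
    enum rsc_role_e role = RSC_ROLE_STOPPED;

    while (role != RSC_ROLE_MASTER) {
        // First pass: Stopped -> Slave via StartRsc;
        // second pass: Slave -> Master via PromoteRsc
        enum rsc_role_e next_role = rsc_state_matrix[role][RSC_ROLE_MASTER];

        rsc_action_matrix[role][next_role](rsc, node, FALSE, data_set);
        role = next_role;
    }
}
#endif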

#define clear_node_weights_flags(nw_flags, nw_rsc, flags_to_clear) do {     \
        (nw_flags) = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,    \
                                          "Node weight", (nw_rsc)->id,      \
                                          (nw_flags), (flags_to_clear),     \
                                          #flags_to_clear);                 \
    } while (0)
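
/* Editor's note: a sketch (not in the original file) of what a call to the
 * macro above expands to, using the call sites later in this file. The #
 * operator stringifies the flag name for the trace log:
 */
#if 0
// clear_node_weights_flags(flags, rsc, pe_weights_init) expands to roughly:
flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,
                             "Node weight", rsc->id, flags,
                             pe_weights_init, "pe_weights_init");
#endif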

static gboolean
native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr nodes = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *best = NULL;
    int multiple = 1;
    int length = 0;
    gboolean result = FALSE;

    process_utilization(rsc, &prefer, data_set);

    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to ? TRUE : FALSE;
    }

    // Sort allowed nodes by weight
    if (rsc->allowed_nodes) {
        length = g_hash_table_size(rsc->allowed_nodes);
    }
    if (length > 0) {
        nodes = g_hash_table_get_values(rsc->allowed_nodes);
        nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);

        // First node in sorted list has the best score
        best = g_list_nth_data(nodes, 0);
    }

    if (prefer && nodes) {
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         prefer->details->uname, rsc->id);

        /* Favor the preferred node as long as its weight is at least as good as
         * the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else if (!can_run_resources(chosen)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         chosen->details->uname, rsc->id, length);
        }
    }

    if ((chosen == NULL) && nodes) {
        /* Either there is no preferred node, or the preferred node is not
         * available, but there are other nodes allowed to run the resource.
         */

        chosen = best;
        pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                     chosen ? chosen->details->uname : "<none>", rsc->id, length);

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
            /* If the resource is already running on a node, prefer that node if
             * it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't want
             * remaining unallocated instances to prefer a node that's already
             * running another instance.
             */
            pe_node_t *running = pe__current_node(rsc);

            if (running && (can_run_resources(running) == FALSE)) {
                pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                             rsc->id, running->details->uname);
            } else if (running) {
                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    pe_node_t *tmp = (pe_node_t *) iter->data;

                    if (tmp->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (tmp->details == running->details) {
                        // Scores are equal, so prefer the current node
                        chosen = tmp;
                    }
                    multiple++;
                }
            }
        }
    }

    if (multiple > 1) {
        static char score[33];
        int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;

        score2char_stack(chosen->weight, score, sizeof(score));
        do_crm_log(log_level,
                   "Chose node %s for %s from %d nodes with score %s",
                   chosen->details->uname, rsc->id, multiple, score);
    }

    result = native_assign_node(rsc, nodes, chosen, FALSE);
    g_list_free(nodes);
    return result;
}

static int
best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
                              const char *value)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;
    int best_score = -INFINITY;
    const char *best_node = NULL;

    // Find best allowed node with matching attribute
    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {

        if ((node->weight > best_score) && can_run_resources(node)
            && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {

            best_score = node->weight;
            best_node = node->details->uname;
        }
    }

    if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
        if (best_node == NULL) {
            crm_info("No allowed node for %s matches node attribute %s=%s",
                     rsc->id, attr, value);
        } else {
            crm_info("Allowed node %s for %s had best score (%d) "
                     "of those matching node attribute %s=%s",
                     best_node, rsc->id, best_score, attr, value);
        }
    }
    return best_score;
}

static void
add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
                              const char *attr, float factor,
                              bool only_positive)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    // Iterate through each node
    g_hash_table_iter_init(&iter, nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        float weight_f = 0;
        int weight = 0;
        int score = 0;
        int new_score = 0;

        score = best_node_score_matching_attr(rsc, attr,
                                              pe_node_attribute_raw(node, attr));

        if ((factor < 0) && (score < 0)) {
            /* Negative preference for a node with a negative score
             * should not become a positive preference.
             *
             * @TODO Consider filtering only if weight is -INFINITY
             */
            crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        if (node->weight == INFINITY_HACK) {
            crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
                      node->details->uname, node->weight, factor, score);
            continue;
        }

        weight_f = factor * score;

        // Round the number; see http://c-faq.com/fp/round.html
        weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));

        /* Small factors can obliterate the small scores that are often actually
         * used in configurations. If the score and factor are nonzero, ensure
         * that the result is nonzero as well.
         */
        if ((weight == 0) && (score != 0)) {
            if (factor > 0.0) {
                weight = 1;
            } else if (factor < 0.0) {
                weight = -1;
            }
        }

        new_score = pe__add_scores(weight, node->weight);

        if (only_positive && (new_score < 0) && (node->weight > 0)) {
            crm_trace("%s: Filtering %d + %f * %d = %d "
                      "(negative disallowed, marking node unusable)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            node->weight = INFINITY_HACK;
            continue;
        }

        if (only_positive && (new_score < 0) && (node->weight == 0)) {
            crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
                      node->details->uname, node->weight, factor, score,
                      new_score);
            continue;
        }

        crm_trace("%s: %d + %f * %d = %d", node->details->uname,
                  node->weight, factor, score, new_score);
        node->weight = new_score;
    }
}
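
/* Editor's note: a worked example (hypothetical numbers, not in the original
 * file) of the weighting arithmetic in add_node_scores_matching_attr(). With
 * factor = 0.3, a matched score of 2 gives 0.6, which rounds to 1; a score of
 * 1 gives 0.3, which rounds to 0 and is then bumped back to 1 by the nonzero
 * guarantee, so a small positive colocation still registers:
 */
#if 0
int scores[] = { 2, 1, -4 };
float factor = 0.3;

for (int i = 0; i < 3; i++) {
    float weight_f = factor * scores[i];

    // Same rounding rule as above: away from zero at the halfway point
    int weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));

    if ((weight == 0) && (scores[i] != 0)) {
        weight = (factor > 0.0)? 1 : -1;
    }
    // Yields weights {1, 1, -1} for scores {2, 1, -4}
}
#endif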

static inline bool
is_nonempty_group(pe_resource_t *rsc)
{
    return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
}

GHashTable *
pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs,
                           GHashTable *nodes, const char *attr, float factor,
                           uint32_t flags)
{
    GHashTable *work = NULL;

    // Avoid infinite recursion
    if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
        pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
        return nodes;
    }
    pe__set_resource_flags(rsc, pe_rsc_merging);

    if (pcmk_is_set(flags, pe_weights_init)) {
        if (is_nonempty_group(rsc)) {
            GList *last = g_list_last(rsc->children);
            pe_resource_t *last_rsc = last->data;

            pe_rsc_trace(rsc, "%s: Merging scores from group %s "
                         "using last member %s (at %.6f)",
                         rhs, rsc->id, last_rsc->id, factor);
            work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
                                              flags);
        } else {
            work = pcmk__copy_node_table(rsc->allowed_nodes);
        }
        clear_node_weights_flags(flags, rsc, pe_weights_init);

    } else if (is_nonempty_group(rsc)) {
        /* The first member of the group will recursively incorporate any
         * constraints involving other members (including the group internal
         * colocation).
         *
         * @TODO The indirect colocations from the dependent group's other
         *       members will be incorporated at full strength rather than by
         *       factor, so the group's combined stickiness will be treated as
         *       (factor + (#members - 1)) * stickiness. It is questionable what
         *       the right approach should be.
         */
        pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
                     "(at %.6f)", rhs, rsc->id, factor);
        work = pcmk__copy_node_table(nodes);
        work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
                                          factor, flags);

    } else {
        pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
                     rhs, rsc->id, factor);
        work = pcmk__copy_node_table(nodes);
        add_node_scores_matching_attr(work, rsc, attr, factor,
                                      pcmk_is_set(flags, pe_weights_positive));
    }

    if (can_run_any(work)) {
        GListPtr gIter = NULL;
        int multiplier = (factor < 0)? -1 : 1;

        if (pcmk_is_set(flags, pe_weights_forward)) {
            gIter = rsc->rsc_cons;
            pe_rsc_trace(rsc,
                         "Checking additional %d optional '%s with' constraints",
                         g_list_length(gIter), rsc->id);

        } else if (is_nonempty_group(rsc)) {
            pe_resource_t *last_rsc = g_list_last(rsc->children)->data;

            gIter = last_rsc->rsc_cons_lhs;
            pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
                         "constraints using last member %s",
                         g_list_length(gIter), rsc->id, last_rsc->id);

        } else {
            gIter = rsc->rsc_cons_lhs;
            pe_rsc_trace(rsc,
                         "Checking additional %d optional 'with %s' constraints",
                         g_list_length(gIter), rsc->id);
        }

        for (; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *other = NULL;
            rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

            if (constraint->score == 0) {
                continue;
            }

            if (pcmk_is_set(flags, pe_weights_forward)) {
                other = constraint->rsc_rh;
            } else {
                other = constraint->rsc_lh;
            }

            pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
                         constraint->id, constraint->rsc_lh->id,
                         constraint->rsc_rh->id);
            work = pcmk__native_merge_weights(other, rhs, work,
                                              constraint->node_attribute,
                                              multiplier * constraint->score / (float) INFINITY,
                                              flags|pe_weights_rollback);
            pe__show_node_weights(true, NULL, rhs, work);
        }

    } else if (pcmk_is_set(flags, pe_weights_rollback)) {
        pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
                    rhs, rsc->id);
        g_hash_table_destroy(work);
        pe__clear_resource_flags(rsc, pe_rsc_merging);
        return nodes;
    }


    if (pcmk_is_set(flags, pe_weights_positive)) {
        pe_node_t *node = NULL;
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, work);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if (node->weight == INFINITY_HACK) {
                node->weight = 1;
            }
        }
    }

    if (nodes) {
        g_hash_table_destroy(nodes);
    }

    pe__clear_resource_flags(rsc, pe_rsc_merging);
    return work;
}

static inline bool
node_has_been_unfenced(pe_node_t *node)
{
    const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);

    return !pcmk__str_eq(unfenced, "0", pcmk__str_null_matches);
}

static inline bool
is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    return pcmk_is_set(rsc->flags, pe_rsc_fence_device)
           && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing);
}

pe_node_t *
pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                      pe_working_set_t *data_set)
{
    GListPtr gIter = NULL;

    if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
        /* never allocate children on their own */
        pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                     rsc->parent->id);
        rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
        return NULL;
    }

    pe__set_resource_flags(rsc, pe_rsc_allocating);
    pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes);

    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        GHashTable *archive = NULL;
        pe_resource_t *rsc_rh = constraint->rsc_rh;

        if (constraint->score == 0) {
            continue;
        }

        if (constraint->role_lh >= RSC_ROLE_MASTER
            || (constraint->score < 0 && constraint->score > -INFINITY)) {
            archive = pcmk__copy_node_table(rsc->allowed_nodes);
        }

        pe_rsc_trace(rsc,
                     "%s: Allocating %s first (constraint=%s score=%d role=%s)",
                     rsc->id, rsc_rh->id, constraint->id,
                     constraint->score, role2text(constraint->role_lh));
        rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
        rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
        if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
            pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
            g_hash_table_destroy(rsc->allowed_nodes);
            rsc->allowed_nodes = archive;
            archive = NULL;
        }
        if (archive) {
            g_hash_table_destroy(archive);
        }
    }

    pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes);

    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        if (constraint->score == 0) {
            continue;
        }
        pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
                     constraint->id, constraint->rsc_lh->id,
                     constraint->rsc_rh->id);
        rsc->allowed_nodes =
            constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    (float)constraint->score / INFINITY,
                                                    pe_weights_rollback);
    }

    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
        /* make sure it doesn't come up again */
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);

    } else if(rsc->next_role > rsc->role
              && !pcmk_is_set(data_set->flags, pe_flag_have_quorum)
              && data_set->no_quorum_policy == no_quorum_freeze) {
        crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                   rsc->id, role2text(rsc->role), role2text(rsc->next_role));
        rsc->next_role = rsc->role;
    }

    pe__show_node_weights(!show_scores, rsc, __func__, rsc->allowed_nodes);
    if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
        && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
        pe__clear_resource_flags(rsc, pe_rsc_managed);
    }

    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        const char *reason = NULL;
        pe_node_t *assign_to = NULL;

        rsc->next_role = rsc->role;
        assign_to = pe__current_node(rsc);
        if (assign_to == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_MASTER) {
            reason = "master";
        } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                    (assign_to? assign_to->details->uname : "no node"), reason);
        native_assign_node(rsc, NULL, assign_to, TRUE);

    } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
        pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
        native_assign_node(rsc, NULL, NULL, TRUE);

    } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
               && native_choose_node(rsc, prefer, data_set)) {
        pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);

    } else if (rsc->allocated_to == NULL) {
        if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);
    }

    pe__clear_resource_flags(rsc, pe_rsc_allocating);

    if (rsc->is_remote_node) {
        pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);

        CRM_ASSERT(remote_node != NULL);
        if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
            crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                      remote_node->details->id);
            remote_node->details->online = TRUE;
            /* We shouldn't consider an unseen remote-node unclean if we are going
             * to try and connect to it. Otherwise we get an unnecessary fence */
            if (remote_node->details->unseen == TRUE) {
                remote_node->details->unclean = FALSE;
            }

        } else {
            crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                      remote_node->details->id, role2text(rsc->next_role),
                      (rsc->allocated_to? "" : "un"));
            remote_node->details->shutdown = TRUE;
        }
    }

    return rsc->allocated_to;
}

static gboolean
is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
{
    gboolean dup = FALSE;
    const char *id = NULL;
    const char *value = NULL;
    xmlNode *operation = NULL;
    guint interval2_ms = 0;

    CRM_ASSERT(rsc);
    for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
         operation = pcmk__xe_next(operation)) {

        if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
            value = crm_element_value(operation, "name");
            if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
                continue;
            }

            value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            interval2_ms = crm_parse_interval_spec(value);
            if (interval_ms != interval2_ms) {
                continue;
            }

            if (id == NULL) {
                id = ID(operation);

            } else {
                pcmk__config_err("Operation %s is duplicate of %s (do not use "
                                 "same name and interval combination more "
                                 "than once per resource)", ID(operation), id);
                dup = TRUE;
            }
        }
    }

    return dup;
}
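
/* Editor's note: a hypothetical CIB fragment that would trigger the
 * duplicate-operation error in is_op_dup() above -- two <op> entries with the
 * same name and the same 30-second interval expressed in different units:
 *
 *   <operations>
 *     <op id="dummy-monitor-30s" name="monitor" interval="30s"/>
 *     <op id="dummy-monitor-30000ms" name="monitor" interval="30000ms"/>
 *   </operations>
 *
 * crm_parse_interval_spec() normalizes both intervals to 30000ms, so the
 * second entry is reported as a duplicate of the first.
 */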

static bool
op_cannot_recur(const char *name)
{
    return pcmk__str_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
}

static void
RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
            xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    pe_action_t *mon = NULL;
    gboolean is_optional = TRUE;
    GListPtr possible_matches = NULL;

    CRM_ASSERT(rsc);

    /* Only process for the operations without role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role && text2role(role) == RSC_ROLE_STOPPED) {
        return;
    }

    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %dms %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                         ID(operation), name);
        return;
    }

    key = pcmk__op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                 ID(operation), rsc->id, role2text(rsc->next_role), node_uname);

    if (start != NULL) {
        pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
                     pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
                     start->uuid);
        is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
    } else {
        pe_rsc_trace(rsc, "Marking %s optional", key);
        is_optional = TRUE;
    }

    /* start a monitor for an already active resource */
    possible_matches = find_actions_exact(rsc->actions, key, node);
    if (possible_matches == NULL) {
        is_optional = FALSE;
        pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);

    } else {
        GListPtr gIter = NULL;

        for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
            pe_action_t *op = (pe_action_t *) gIter->data;

            if (!pcmk_is_set(op->flags, pe_action_optional)) {
                is_optional = FALSE;
                break;
            }
        }
        g_list_free(possible_matches);
    }

    if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
        || (role != NULL && text2role(role) != rsc->next_role)) {
        int log_level = LOG_TRACE;
        const char *result = "Ignoring";

        if (is_optional) {
            char *after_key = NULL;
            pe_action_t *cancel_op = NULL;

            // It's running, so cancel it
            log_level = LOG_INFO;
            result = "Cancelling";
            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            switch (rsc->role) {
                case RSC_ROLE_SLAVE:
                case RSC_ROLE_STARTED:
                    if (rsc->next_role == RSC_ROLE_MASTER) {
                        after_key = promote_key(rsc);

                    } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                        after_key = stop_key(rsc);
                    }

                    break;
                case RSC_ROLE_MASTER:
                    after_key = demote_key(rsc);
                    break;
                default:
                    break;
            }

            if (after_key) {
                custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
            }
        }

        do_crm_log(log_level, "%s action %s (%s vs. %s)",
                   result, key, role ? role : role2text(RSC_ROLE_SLAVE),
                   role2text(rsc->next_role));

        free(key);
        return;
    }

    mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
    key = mon->uuid;
    if (is_optional) {
        pe_rsc_trace(rsc, "%s\t   %s (optional)", node_uname, mon->uuid);
    }

    if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
        pe_rsc_debug(rsc, "%s\t   %s (cancelled : start un-runnable)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear,
                            __func__, __LINE__);

    } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
        pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear,
                            __func__, __LINE__);

    } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
        pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                    mon->task, interval_ms / 1000, rsc->id, node_uname);
    }

    if (rsc->next_role == RSC_ROLE_MASTER) {
        char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);

        add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
        free(running_master);
    }

    if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        custom_action_order(rsc, start_key(rsc), NULL,
                            NULL, strdup(key), mon,
                            pe_order_implies_then | pe_order_runnable_left,
                            data_set);

        custom_action_order(rsc, reload_key(rsc), NULL,
                            NULL, strdup(key), mon,
                            pe_order_implies_then | pe_order_runnable_left,
                            data_set);

        if (rsc->next_role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, promote_key(rsc), NULL,
                                rsc, NULL, mon,
                                pe_order_optional | pe_order_runnable_left,
                                data_set);

        } else if (rsc->role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, demote_key(rsc), NULL,
                                rsc, NULL, mon,
                                pe_order_optional | pe_order_runnable_left,
                                data_set);
        }
    }
}

static void
Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
        (node == NULL || node->details->maintenance == FALSE)) {
        xmlNode *operation = NULL;

        for (operation = pcmk__xe_first_child(rsc->ops_xml);
             operation != NULL;
             operation = pcmk__xe_next(operation)) {

            if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                RecurringOp(rsc, start, node, operation, data_set);
            }
        }
    }
}

static void
RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
                    xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    GListPtr possible_matches = NULL;
    GListPtr gIter = NULL;

    /* Only process for the operations with role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
        return;
    }

    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %dms %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                         ID(operation), name);
        return;
    }

    key = pcmk__op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    // @TODO add support
    if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
        crm_notice("Ignoring %s (recurring monitors for Stopped role are "
                   "not supported for anonymous clones)",
                   ID(operation));
        return;
    }

    pe_rsc_trace(rsc,
                 "Creating recurring action %s for %s in role %s on nodes where it should not be running",
                 ID(operation), rsc->id, role2text(rsc->next_role));

    /* if the monitor exists on the node where the resource will be running, cancel it */
    if (node != NULL) {
        possible_matches = find_actions_exact(rsc->actions, key, node);
        if (possible_matches) {
            pe_action_t *cancel_op = NULL;

            g_list_free(possible_matches);

            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
                /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
                /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
                custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
                                    pe_order_runnable_left, data_set);
            }

            pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
                        key, role, role2text(rsc->next_role), node_uname);
        }
    }

    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        pe_node_t *stop_node = (pe_node_t *) gIter->data;
        const char *stop_node_uname = stop_node->details->uname;
        gboolean is_optional = TRUE;
        gboolean probe_is_optional = TRUE;
        gboolean stop_is_optional = TRUE;
        pe_action_t *stopped_mon = NULL;
        char *rc_inactive = NULL;
        GListPtr probe_complete_ops = NULL;
        GListPtr stop_ops = NULL;
        GListPtr local_gIter = NULL;

        if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
            continue;
        }

        pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
                     ID(operation), rsc->id, crm_str(stop_node_uname));

        /* start a monitor for an already stopped resource */
        possible_matches = find_actions_exact(rsc->actions, key, stop_node);
        if (possible_matches == NULL) {
            pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
                         crm_str(stop_node_uname));
            is_optional = FALSE;
        } else {
            pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
                         crm_str(stop_node_uname));
            is_optional = TRUE;
            g_list_free(possible_matches);
        }

        stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);

        rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
        add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
        free(rc_inactive);

        if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
                                                 FALSE);
            GListPtr pIter = NULL;

            for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                pe_action_t *probe = (pe_action_t *) pIter->data;

                order_actions(probe, stopped_mon, pe_order_runnable_left);
                crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
            }

            g_list_free(probes);
        }

        if (probe_complete_ops) {
            g_list_free(probe_complete_ops);
        }

        stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);

        for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
            pe_action_t *stop = (pe_action_t *) local_gIter->data;

            if (!pcmk_is_set(stop->flags, pe_action_optional)) {
                stop_is_optional = FALSE;
            }

            if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
                crm_debug("%s\t   %s (cancelled : stop un-runnable)",
                          crm_str(stop_node_uname), stopped_mon->uuid);
                update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
                                    __func__, __LINE__);
            }

            if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                custom_action_order(rsc, stop_key(rsc), stop,
                                    NULL, strdup(key), stopped_mon,
                                    pe_order_implies_then | pe_order_runnable_left,
                                    data_set);
            }

        }

        if (stop_ops) {
            g_list_free(stop_ops);
        }

        if (is_optional == FALSE && probe_is_optional && stop_is_optional
            && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
            pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
                         key, crm_str(stop_node_uname));
            update_action_flags(stopped_mon, pe_action_optional, __func__,
                                __LINE__);
        }

        if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
            pe_rsc_trace(rsc, "%s\t   %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
        }

        if (stop_node->details->online == FALSE || stop_node->details->unclean) {
            pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                         crm_str(stop_node_uname), stopped_mon->uuid);
            update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear,
                                __func__, __LINE__);
        }

        if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
            && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
            crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
                       interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
        }
    }

    free(key);
}

static void
Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
{
    if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
        (node == NULL || node->details->maintenance == FALSE)) {
        xmlNode *operation = NULL;

        for (operation = pcmk__xe_first_child(rsc->ops_xml);
             operation != NULL;
             operation = pcmk__xe_next(operation)) {

            if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                RecurringOp_Stopped(rsc, start, node, operation, data_set);
            }
        }
    }
}
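
/* Editor's note: a hypothetical operation set that Recurring() and
 * Recurring_Stopped() above would act on. The first monitor runs wherever the
 * resource is active; the role="Stopped" monitor is scheduled by
 * RecurringOp_Stopped() on every other node (unique resources only, per the
 * check in that function):
 *
 *   <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
 *     <operations>
 *       <op id="dummy-monitor-10s" name="monitor" interval="10s"/>
 *       <op id="dummy-monitor-stopped" name="monitor" interval="11s"
 *           role="Stopped"/>
 *     </operations>
 *   </primitive>
 *
 * The Stopped monitor expects PCMK_OCF_NOT_RUNNING as its result, which is
 * why rc_inactive is added to the action's meta-attributes above.
 */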

static void
handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
{
    pe_action_t *migrate_to = NULL;
    pe_action_t *migrate_from = NULL;
    pe_action_t *start = NULL;
    pe_action_t *stop = NULL;
    gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;

    pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s (partial migration = %s)",
                 rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
    start = start_action(rsc, chosen, TRUE);
    stop = stop_action(rsc, current, TRUE);

    if (partial == FALSE) {
        migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                   RSC_MIGRATE, current, TRUE, TRUE, data_set);
    }

    migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                 RSC_MIGRATED, chosen, TRUE, TRUE, data_set);

    if ((migrate_to && migrate_from) || (migrate_from && partial)) {

        pe__set_action_flags(start, pe_action_migrate_runnable);
        pe__set_action_flags(stop, pe_action_migrate_runnable);

        // This is easier than trying to delete it from the graph
        update_action_flags(start, pe_action_pseudo, __func__, __LINE__);

        /* order probes before migrations */
        if (partial) {
            pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
            migrate_from->needs = start->needs;

            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL, pe_order_optional, data_set);

        } else {
            pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
            pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
            migrate_to->needs = start->needs;

            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, pe_order_optional, data_set);
            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, rsc,
                                pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                                pe_order_optional | pe_order_implies_first_migratable,
                                data_set);
        }

        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_optional | pe_order_implies_first_migratable,
                            data_set);
        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            pe_order_optional | pe_order_implies_first_migratable |
                                pe_order_pseudo_left,
                            data_set);

    }

    if (migrate_to) {
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);

        /* Pacemaker Remote connections don't require pending to be recorded in
         * the CIB. We can reduce CIB writes by not setting PENDING for them.
         */
        if (rsc->is_remote_node == FALSE) {
            /* migrate_to takes place on the source node, but can
             * have an effect on the target node depending on how
             * the agent is written. Because of this, we have to maintain
             * a record that the migrate_to occurred, in case the source node
             * loses membership while the migrate_to action is still in-flight.
             */
            add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
        }
    }

    if (migrate_from) {
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
    }
}
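
/* Editor's note: handle_migration_actions() is only reached when the
 * resource's pe_rsc_allow_migrate flag is set, which users enable through the
 * allow-migrate meta-attribute (hypothetical resource shown):
 *
 *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
 *     <meta_attributes id="vm1-meta">
 *       <nvpair id="vm1-allow-migrate" name="allow-migrate" value="true"/>
 *     </meta_attributes>
 *   </primitive>
 *
 * The orderings created above then run: probe, migrate_to on the source node,
 * migrate_from on the target node, stop on the source, and a pseudo start
 * that completes the transition without actually restarting the resource.
 */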

void
native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    pe_action_t *start = NULL;
    pe_node_t *chosen = NULL;
    pe_node_t *current = NULL;
    gboolean need_stop = FALSE;
    bool need_promote = FALSE;
    gboolean is_moving = FALSE;
    gboolean allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;

    GListPtr gIter = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    bool multiply_active = FALSE;
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    CRM_ASSERT(rsc);
    chosen = rsc->allocated_to;
    next_role = rsc->next_role;
    if (next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED;
    }
    pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
                 rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                 ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
                 ((chosen == NULL)? "no node" : chosen->details->uname));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        pe_node_t *dangling_source = (pe_node_t *) gIter->data;

        pe_action_t *stop = NULL;

        pe_rsc_trace(rsc, "Creating stop action %s cleanup for %s on %s due to dangling migration",
                     (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)? "and" : "without"),
                     rsc->id, dangling_source->details->uname);
        stop = stop_action(rsc, dangling_source, FALSE);
        pe__set_action_flags(stop, pe_action_dangle);
        if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, dangling_source, FALSE, data_set);
        }
    }

    if ((num_all_active == 2) && (num_clean_active == 2) && chosen
        && rsc->partial_migration_source && rsc->partial_migration_target
        && (current->details == rsc->partial_migration_source->details)
        && (chosen->details == rsc->partial_migration_target->details)) {

        /* The chosen node is still the migration target from a partial
         * migration. Attempt to continue the migration instead of recovering
         * by stopping the resource everywhere and starting it on a single node.
         */
        pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
                     "to target %s from %s",
                     rsc->partial_migration_target->details->id,
                     rsc->partial_migration_source->details->id);

    } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
         */
        multiply_active = (num_clean_active > 1);
    } else {
        multiply_active = (num_all_active > 1);
    }

    if (multiply_active) {
        if (rsc->partial_migration_target && rsc->partial_migration_source) {
            // Migration was in progress, but we've chosen a different target
            crm_notice("Resource %s can no longer migrate from %s to %s "
                       "(will stop on both nodes)",
                       rsc->id, rsc->partial_migration_source->details->uname,
                       rsc->partial_migration_target->details->uname);

        } else {
            // Resource was incorrectly multiply active
            pe_proc_err("Resource %s is active on %u nodes (%s)",
                        rsc->id, num_all_active,
                        recovery2text(rsc->recovery_type));
            crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
        }

        if (rsc->recovery_type == recovery_stop_start) {
            need_stop = TRUE;
        }

        /* If by chance a partial migration is in process, but the migration
         * target is not chosen still, clear all partial migration data.
         */
        rsc->partial_migration_source = rsc->partial_migration_target = NULL;
        allow_migrate = FALSE;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
        pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        pe__set_action_flags(start, pe_action_print_always);
    }

    if (current && chosen && current->details != chosen->details) {
        pe_rsc_trace(rsc, "Moving %s from %s to %s",
                     rsc->id, crm_str(current->details->uname),
                     crm_str(chosen->details->uname));
        is_moving = TRUE;
        need_stop = TRUE;

    } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
        if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
            need_stop = TRUE;
            pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        } else {
            pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
            if (rsc->next_role == RSC_ROLE_MASTER) {
                need_promote = TRUE;
            }
        }

    } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
        need_stop = TRUE;

    } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
        pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        if (!pcmk_is_set(start->flags, pe_action_optional)) {
            // Recovery of a promoted resource
            pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
            need_stop = TRUE;
        }
    }

    /* Create any additional actions required when bringing resource down and
     * back up to same level.
     */
    role = rsc->role;
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                     (need_stop? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }


    while ((rsc->role <= rsc->next_role) && (role != rsc->role)
           && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
        bool required = need_stop;

        next_role = rsc_state_matrix[role][rsc->role];
        if ((next_role == RSC_ROLE_MASTER) && need_promote) {
            required = true;
        }
        pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                     (required? "required" : "optional"), rsc->id,
                     role2text(role), role2text(next_role));
        if (rsc_action_matrix[role][next_role](rsc, chosen, !required,
                                               data_set) == FALSE) {
            break;
        }
        role = next_role;
    }
    role = rsc->role;

    /* Required steps from this role to the next */
    while (role != rsc->next_role) {
        next_role = rsc_state_matrix[role][rsc->next_role];
        pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
                     rsc->id, role2text(role), role2text(next_role),
                     role2text(rsc->next_role));
        if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
                     rsc->id);

    } else if ((rsc->next_role != RSC_ROLE_STOPPED)
               || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
                     ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
                     rsc->id);
        start = start_action(rsc, chosen, TRUE);
        Recurring(rsc, start, chosen, data_set);
        Recurring_Stopped(rsc, start, chosen, data_set);

    } else {
        pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
                     rsc->id);
        Recurring_Stopped(rsc, NULL, NULL, data_set);
    }

    /* If we are stuck in a partial migration and the target of that migration
     * no longer matches the chosen target, a full stop/start is required.
     */
    if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
        pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
                     rsc->id);
        allow_migrate = FALSE;

    } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
               || pcmk_any_flags_set(rsc->flags,
                                     pe_rsc_failed|pe_rsc_start_pending)
               || (current && current->details->unclean)
               || rsc->next_role < RSC_ROLE_STARTED) {

        allow_migrate = FALSE;
    }

    if (allow_migrate) {
        handle_migration_actions(rsc, current, chosen, data_set);
    }
}

static void
rsc_avoids_remote_nodes(pe_resource_t *rsc)
{
    GHashTableIter iter;
    pe_node_t *node = NULL;

    g_hash_table_iter_init(&iter, rsc->allowed_nodes);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        if (node->details->remote_rsc) {
            node->weight = -INFINITY;
        }
    }
}

static GList *
allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    GList *allowed_nodes = NULL;

    if (rsc->allowed_nodes) {
        allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
    }

    if (pcmk_is_set(data_set->flags, pe_flag_stdout)) {
        allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
    }
    return allowed_nodes;
}

void
native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
{
    /* This function is on the critical path and worth optimizing as much as possible */

    pe_resource_t *top = NULL;
    GList *allowed_nodes = NULL;
    bool check_unfencing = FALSE;
    bool check_utilization = FALSE;

    if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc,
                     "Skipping native constraints for unmanaged resource: %s",
                     rsc->id);
        return;
    }

    top = uber_parent(rsc);

    // Whether resource requires unfencing
    check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
                      && pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)
                      && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);

    // Whether a non-default placement strategy is used
    check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                        && !pcmk__str_eq(data_set->placement_strategy,
                                         "default", pcmk__str_casei);

    // Order stops before starts (i.e. restart)
    custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                        pe_order_optional | pe_order_implies_then |
                            pe_order_restart,
                        data_set);

    // Promotable ordering: demote before stop, start before promote
    if (pcmk_is_set(top->flags, pe_rsc_promotable)
        || (rsc->role > RSC_ROLE_SLAVE)) {

        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_implies_first_master, data_set);

        custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, data_set);
    }

    // Don't clear resource history if probing on same node
    custom_action_order(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
                        NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                        NULL, pe_order_same_node | pe_order_then_cancels_first,
                        data_set);

    // Certain checks need allowed nodes
    if (check_unfencing || check_utilization || rsc->container) {
        allowed_nodes = allowed_nodes_as_list(rsc, data_set);
    }

    if (check_unfencing) {
        /* Check if the node needs to be unfenced first */

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *node = item->data;
            pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);

            crm_debug("Ordering any stops of %s before %s, and any starts after",
                      rsc->id, unfence->uuid);

            /*
             * It would be more efficient to order clone resources once,
             * rather than order each instance, but ordering the instance
             * allows us to avoid unnecessary dependencies that might conflict
             * with user constraints.
             *
             * @TODO: This constraint can still produce a transition loop if the
             * resource has a stop scheduled on the node being unfenced, and
             * there is a user ordering constraint to start some other resource
             * (which will be ordered after the unfence) before stopping this
             * resource. An example is "start some slow-starting cloned service
             * before stopping an associated virtual IP that may be moving to
             * it":
             *       stop this -> unfencing -> start that -> stop this
             */
            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
                                pe_order_optional | pe_order_same_node,
                                data_set);

            custom_action_order(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                pe_order_implies_then_on_node | pe_order_same_node,
                                data_set);
        }
    }

    if (check_utilization) {
        GListPtr gIter = NULL;

        pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
                     rsc->id, data_set->placement_strategy);

        for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
            pe_node_t *current = (pe_node_t *) gIter->data;

            char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
                                                        current->details->uname);
            pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = pe__copy_node(current);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
                                    __func__, __LINE__);
            }

            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
        }

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *next = item->data;
            char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
                                                        next->details->uname);
            pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = pe__copy_node(next);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear,
                                    __func__, __LINE__);
            }

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, start_key(rsc), NULL, pe_order_load, data_set);

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, pe_order_load, data_set);

            free(load_stopped_task);
        }
    }

    if (rsc->container) {
        pe_resource_t *remote_rsc = NULL;

        if (rsc->is_remote_node) {
            // rsc is the implicit remote connection for a guest or bundle node

            /* Do not allow a guest resource to live on a Pacemaker Remote node,
             * to avoid nesting remotes. However, allow bundles to run on remote
             * nodes.
             */
            if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                rsc_avoids_remote_nodes(rsc->container);
            }

            /* If someone cleans up a guest or bundle node's container, we will
             * likely schedule a (re-)probe of the container and recovery of the
             * connection. Order the connection stop after the container probe,
             * so that if we detect the container running, we will trigger a new
             * transition and avoid the unnecessary recovery.
             */
            custom_action_order(rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_STATUS, 0),
                                NULL, rsc, stop_key(rsc), NULL,
                                pe_order_optional, data_set);

        /* A user can specify that a resource must start on a Pacemaker Remote
         * node by explicitly configuring it with the container=NODENAME
         * meta-attribute. This is of questionable merit, since location
         * constraints can accomplish the same thing. But we support it, so here
         * we check whether a resource (that is not itself a remote connection)
         * has container set to a remote node or guest node resource.
         */
        } else if (rsc->container->is_remote_node) {
            remote_rsc = rsc->container;
        } else {
            remote_rsc = pe__resource_contains_guest_node(data_set,
                                                          rsc->container);
        }

        if (remote_rsc) {
            /* Force the resource on the Pacemaker Remote node instead of
             * colocating the resource with the container resource.
             */
            for (GList *item = allowed_nodes; item; item = item->next) {
                pe_node_t *node = item->data;

                if (node->details->remote_rsc != remote_rsc) {
                    node->weight = -INFINITY;
                }
            }

        } else {
            /* This resource is either a filler for a container that does NOT
             * represent a Pacemaker Remote node, or a Pacemaker Remote
             * connection resource for a guest node or bundle.
             */
            int score;

            crm_trace("Order and colocate %s relative to its container %s",
                      rsc->id, rsc->container->id);

            custom_action_order(rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_START, 0),
                                NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                                NULL,
                                pe_order_implies_then | pe_order_runnable_left,
                                data_set);

            custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                                NULL, pe_order_implies_first, data_set);

            if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                score = 10000;    /* Highly preferred but not essential */
            } else {
                score = INFINITY; /* Force them to run on the same host */
            }
            rsc_colocation_new("resource-with-container", NULL, score, rsc,
                               rsc->container, NULL, NULL, data_set);
        }
    }

    if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* don't allow remote nodes to run stonith devices
         * or remote connection resources.*/
        rsc_avoids_remote_nodes(rsc);
    }
    g_list_free(allowed_nodes);
}

void
native_rsc_colocation_lh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
                         rsc_colocation_t *constraint,
                         pe_working_set_t *data_set)
{
    if (rsc_lh == NULL) {
        pe_err("rsc_lh was NULL for %s", constraint->id);
        return;

    } else if (constraint->rsc_rh == NULL) {
        pe_err("rsc_rh was NULL for %s", constraint->id);
        return;
    }

    if (constraint->score == 0) {
        return;
    }
    pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s",
                 rsc_lh->id, rsc_rh->id);

    rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
}

enum filter_colocation_res
filter_colocation_constraint(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh,
                             rsc_colocation_t * constraint, gboolean preview)
{
    if (constraint->score == 0) {
        return influence_nothing;
    }

    /* rh side must be allocated before we can process constraint */
    if (!preview && pcmk_is_set(rsc_rh->flags, pe_rsc_provisional)) {
        return influence_nothing;
    }

    if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
        rsc_lh->parent && pcmk_is_set(rsc_lh->parent->flags, pe_rsc_promotable)
        && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {

        /* LH and RH resources have already been allocated, place the correct
         * priority on LH rsc for the given promotable clone resource role */
        return influence_rsc_priority;
    }

    if (!preview && !pcmk_is_set(rsc_lh->flags, pe_rsc_provisional)) {
        // Log an error if we violated a mandatory colocation constraint
        const pe_node_t *rh_node = rsc_rh->allocated_to;

        if (rsc_lh->allocated_to == NULL) {
            // Dependent resource isn't allocated, so constraint doesn't matter
            return influence_nothing;
        }

        if (constraint->score >= INFINITY) {
            // Dependent resource must colocate with rh_node

            if ((rh_node == NULL)
                || (rh_node->details != rsc_lh->allocated_to->details)) {
                crm_err("%s must be colocated with %s but is not (%s vs. %s)",
                        rsc_lh->id, rsc_rh->id,
                        rsc_lh->allocated_to->details->uname,
                        (rh_node? rh_node->details->uname : "unallocated"));
            }

        } else if (constraint->score <= -INFINITY) {
            // Dependent resource must anti-colocate with rh_node

            if ((rh_node != NULL)
                && (rsc_lh->allocated_to->details == rh_node->details)) {
                crm_err("%s and %s must be anti-colocated but are allocated "
                        "to the same node (%s)",
                        rsc_lh->id, rsc_rh->id, rh_node->details->uname);
            }
        }
        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
        crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
                  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
        crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
        return influence_nothing;
    }

    if (constraint->score < 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
        crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
                  role2text(constraint->role_lh));
        return influence_nothing;
    }

    if (constraint->score < 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
        crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
                  role2text(constraint->role_rh));
        return influence_nothing;
    }

    return influence_rsc_location;
}
1818 
1819 static void
1820 influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1821 {
1822  const char *rh_value = NULL;
1823  const char *lh_value = NULL;
1824  const char *attribute = CRM_ATTR_ID;
1825  int score_multiplier = 1;
1826 
1827  if (constraint->score == 0) {
1828  return;
1829  }
1830  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1831  return;
1832  }
1833 
1834  if (constraint->node_attribute != NULL) {
1835  attribute = constraint->node_attribute;
1836  }
1837 
1838  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1839  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1840 
1841  if (!pcmk__str_eq(lh_value, rh_value, pcmk__str_casei)) {
1842  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1843  rsc_lh->priority = -INFINITY;
1844  }
1845  return;
1846  }
1847 
1848  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1849  return;
1850  }
1851 
1852  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1853  score_multiplier = -1;
1854  }
1855 
1856  rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
1857  rsc_lh->priority);
1858 }
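
/* Editor's note -- a minimal sketch, not in the original file, of the score
 * arithmetic used above. It assumes pe__add_scores() saturates instead of
 * overflowing: once an operand is +/-INFINITY the result stays pinned, and
 * -INFINITY beats +INFINITY. The starting value is hypothetical.
 */
static void
example_score_saturation(void)
{
    int priority = 500;

    priority = pe__add_scores(priority, 250);       /* 750 */
    priority = pe__add_scores(priority, INFINITY);  /* pinned at INFINITY */
    priority = pe__add_scores(priority, -INFINITY); /* -INFINITY wins */
    (void) priority;
}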
1859 
1860 static void
1861 colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1862 {
1863  const char *attribute = CRM_ATTR_ID;
1864  const char *value = NULL;
1865  GHashTable *work = NULL;
1866  GHashTableIter iter;
1867  pe_node_t *node = NULL;
1868 
1869  if (constraint->score == 0) {
1870  return;
1871  }
1872  if (constraint->node_attribute != NULL) {
1873  attribute = constraint->node_attribute;
1874  }
1875 
1876  if (rsc_rh->allocated_to) {
1877  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1878 
1879  } else if (constraint->score < 0) {
1880  // Nothing to do (anti-colocation with something that is not running)
1881  return;
1882  }
1883 
1884  work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
1885 
1886  g_hash_table_iter_init(&iter, work);
1887  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1888  if (rsc_rh->allocated_to == NULL) {
1889  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
1890  constraint->id, rsc_lh->id, node->details->uname,
1891  constraint->score, rsc_rh->id);
1892  node->weight = pe__add_scores(-constraint->score, node->weight);
1893 
1894  } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value, pcmk__str_casei)) {
1895  if (constraint->score < CRM_SCORE_INFINITY) {
1896  pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
1897  constraint->id, rsc_lh->id,
1898  node->details->uname, constraint->score);
1899  node->weight = pe__add_scores(constraint->score, node->weight);
1900  }
1901 
1902  } else if (constraint->score >= CRM_SCORE_INFINITY) {
1903  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
1904  constraint->id, rsc_lh->id, node->details->uname,
1905  constraint->score, attribute);
1906  node->weight = pe__add_scores(-constraint->score, node->weight);
1907  }
1908  }
1909 
1910  if (can_run_any(work)
1911  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1912  g_hash_table_destroy(rsc_lh->allowed_nodes);
1913  rsc_lh->allowed_nodes = work;
1914  work = NULL;
1915 
1916  } else {
1917  pe_rsc_info(rsc_lh,
1918  "%s: Rolling back scores from %s (no available nodes)",
1919  rsc_lh->id, rsc_rh->id);
1920  }
1921 
1922  if (work) {
1923  g_hash_table_destroy(work);
1924  }
1925 }
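
/* Editor's note -- illustrative example (hypothetical nodes and attribute
 * values) of the matching logic above. With node_attribute="datacenter" and
 *
 *     node1: datacenter=east   node2: datacenter=east   node3: datacenter=west
 *
 * if rsc_rh is allocated to node1, the dependent's weight rises on node1 and
 * node2 (both match "east"); a mandatory (INFINITY) constraint additionally
 * drives node3 toward -INFINITY because its attribute value differs.
 */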
1926 
1927 void
1928 native_rsc_colocation_rh(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh,
1929  rsc_colocation_t *constraint,
1930  pe_working_set_t *data_set)
1931 {
1932  enum filter_colocation_res filter_results;
1933 
1934  CRM_ASSERT(rsc_lh);
1935  CRM_ASSERT(rsc_rh);
1936  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1937  pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
1938  ((constraint->score >= 0)? "Colocating" : "Anti-colocating"),
1939  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1940 
1941  switch (filter_results) {
1942  case influence_rsc_priority:
1943  influence_priority(rsc_lh, rsc_rh, constraint);
1944  break;
1945  case influence_rsc_location:
1946  colocation_match(rsc_lh, rsc_rh, constraint);
1947  break;
1948  case influence_nothing:
1949  default:
1950  return;
1951  }
1952 }
1953 
1954 static gboolean
1955 filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1956 {
1957  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1958  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1959  role2text(rsc_ticket->role_lh));
1960  return FALSE;
1961  }
1962 
1963  return TRUE;
1964 }
1965 
1966 void
1967 rsc_ticket_constraint(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working_set_t * data_set)
1968 {
1969  if (rsc_ticket == NULL) {
1970  pe_err("rsc_ticket was NULL");
1971  return;
1972  }
1973 
1974  if (rsc_lh == NULL) {
1975  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1976  return;
1977  }
1978 
1979  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1980  return;
1981  }
1982 
1983  if (rsc_lh->children) {
1984  GListPtr gIter = rsc_lh->children;
1985 
1986  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1987 
1988  for (; gIter != NULL; gIter = gIter->next) {
1989  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1990 
1991  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1992  }
1993  return;
1994  }
1995 
1996  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1997  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1998  role2text(rsc_ticket->role_lh));
1999 
2000  if ((rsc_ticket->ticket->granted == FALSE)
2001  && (rsc_lh->running_on != NULL)) {
2002 
2003  GListPtr gIter = NULL;
2004 
2005  switch (rsc_ticket->loss_policy) {
2006  case loss_ticket_stop:
2007  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2008  break;
2009 
2010  case loss_ticket_demote:
2011  // Promotion score will be set to -INFINITY in promotion_order()
2012  if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
2013  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2014  }
2015  break;
2016 
2017  case loss_ticket_fence:
2018  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2019  return;
2020  }
2021 
2022  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
2023 
2024  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
2025  pe_node_t *node = (pe_node_t *) gIter->data;
2026 
2027  pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
2028  }
2029  break;
2030 
2031  case loss_ticket_freeze:
2032  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
2033  return;
2034  }
2035  if (rsc_lh->running_on != NULL) {
2036  pe__clear_resource_flags(rsc_lh, pe_rsc_managed);
2037  pe__set_resource_flags(rsc_lh, pe_rsc_block);
2038  }
2039  break;
2040  }
2041 
2042  } else if (rsc_ticket->ticket->granted == FALSE) {
2043 
2044  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
2045  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
2046  }
2047 
2048  } else if (rsc_ticket->ticket->standby) {
2049 
2050  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
2051  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
2052  }
2053  }
2054 }
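
/* Editor's note -- illustrative CIB snippet (hypothetical ids), not from the
 * original file. The loss-policy attribute selects among the loss_ticket_*
 * cases handled above:
 *
 *   <rsc_ticket id="db-with-ticketA" rsc="db" ticket="ticketA"
 *               loss-policy="fence"/>
 *
 * "stop" and "demote" map to the resource_location() calls, "fence" also
 * fences every node currently running the resource, and "freeze" leaves the
 * resource in place but blocks further management of it.
 */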
2055 
2056 enum pe_action_flags
2057 native_action_flags(pe_action_t * action, pe_node_t * node)
2058 {
2059  return action->flags;
2060 }
2061 
2062 static inline bool
2063 is_primitive_action(pe_action_t *action)
2064 {
2065  return action && action->rsc && (action->rsc->variant == pe_native);
2066 }
2067 
2079 static void
2080 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
2081  enum pe_action_flags filter)
2082 {
2083  const char *reason = NULL;
2084 
2085  CRM_ASSERT(is_primitive_action(first));
2086  CRM_ASSERT(is_primitive_action(then));
2087 
2088  // We need to update the action in two cases:
2089 
2090  // ... if 'then' is required
2091  if (pcmk_is_set(filter, pe_action_optional)
2092  && !pcmk_is_set(then->flags, pe_action_optional)) {
2093  reason = "restart";
2094  }
2095 
2096  /* ... if 'then' is unrunnable start of managed resource (if a resource
2097  * should restart but can't start, we still want to stop)
2098  */
2099  if (pcmk_is_set(filter, pe_action_runnable)
2100  && !pcmk_is_set(then->flags, pe_action_runnable)
2101  && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
2102  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
2103  reason = "stop";
2104  }
2105 
2106  if (reason == NULL) {
2107  return;
2108  }
2109 
2110  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
2111  first->uuid, then->uuid, reason);
2112 
2113  // Make 'first' required if it is runnable
2114  if (pcmk_is_set(first->flags, pe_action_runnable)) {
2115  pe_action_implies(first, then, pe_action_optional);
2116  }
2117 
2118  // Make 'first' required if 'then' is required
2119  if (!pcmk_is_set(then->flags, pe_action_optional)) {
2120  pe_action_implies(first, then, pe_action_optional);
2121  }
2122 
2123  // Make 'first' unmigratable if 'then' is unmigratable
2124  if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
2125  pe_action_implies(first, then, pe_action_migrate_runnable);
2126  }
2127 
2128  // Make 'then' unrunnable if 'first' is required but unrunnable
2129  if (!pcmk_is_set(first->flags, pe_action_optional)
2130  && !pcmk_is_set(first->flags, pe_action_runnable)) {
2131  pe_action_implies(then, first, pe_action_runnable);
2132  }
2133 }
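
/* Editor's note -- illustrative walk-through (hypothetical resource "db") of
 * the restart ordering above, with first = db_stop and then = db_start:
 *  - if db_start is required, reason = "restart": the stop becomes required
 *    too, so the pair executes as a full restart;
 *  - if db_start is unrunnable but db is managed, reason = "stop": the stop
 *    is still forced, because a resource that should restart but cannot
 *    start must at least be stopped;
 *  - finally, a required but unrunnable stop makes the start unrunnable too.
 */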
2134 
2135 enum pe_graph_flags
2136 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
2137  enum pe_action_flags flags, enum pe_action_flags filter,
2138  enum pe_ordering type, pe_working_set_t *data_set)
2139 {
2140  /* flags == get_action_flags(first, then_node) called from update_action() */
2141  enum pe_graph_flags changed = pe_graph_none;
2142  enum pe_action_flags then_flags = then->flags;
2143  enum pe_action_flags first_flags = first->flags;
2144 
2145  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2146  first->uuid, first->node ? first->node->details->uname : "[none]",
2147  first->flags, then->uuid, then->flags);
2148 
2149  if (type & pe_order_asymmetrical) {
2150  pe_resource_t *then_rsc = then->rsc;
2151  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2152 
2153  if (!then_rsc) {
2154  /* ignore */
2155  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_casei)) {
2156  /* ignore... if 'then' is supposed to be stopped after 'first', but
2157  * then is already stopped, there is nothing to be done when non-symmetrical. */
2158  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2159  && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)
2160  && pcmk_is_set(then->flags, pe_action_optional)
2161  && then->node
2162  && pcmk__list_of_1(then_rsc->running_on)
2163  && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
2164  /* Ignore. If 'then' is supposed to be started after 'first', but
2165  * 'then' is already started, there is nothing to be done when
2166  * asymmetrical -- unless the start is mandatory, which indicates
2167  * the resource is restarting, and the ordering is still needed.
2168  */
2169  } else if (!(first->flags & pe_action_runnable)) {
2170  /* prevent 'then' action from happening if 'first' is not runnable and
2171  * 'then' has not yet occurred. */
2172  pe_action_implies(then, first, pe_action_optional);
2173  pe_action_implies(then, first, pe_action_runnable);
2174 
2175  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2176  } else {
2177  /* ignore... then is allowed to start/stop if it wants to. */
2178  }
2179  }
2180 
2181  if (type & pe_order_implies_first) {
2182  if (pcmk_is_set(filter, pe_action_optional)
2183  && !pcmk_is_set(flags /* Should be then_flags? */, pe_action_optional)) {
2184  // Needs pcmk_is_set(first_flags, pe_action_optional) too?
2185  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2186  pe_action_implies(first, then, pe_action_optional);
2187  }
2188 
2189  if (pcmk_is_set(flags, pe_action_migrate_runnable) &&
2190  !pcmk_is_set(then->flags, pe_action_migrate_runnable) &&
2191  !pcmk_is_set(then->flags, pe_action_optional)) {
2192 
2193  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2194  first->uuid, then->uuid);
2195  pe_action_implies(first, then, pe_action_migrate_runnable);
2196  }
2197  }
2198 
2199  if (type & pe_order_implies_first_master) {
2200  if ((filter & pe_action_optional) &&
2201  ((then->flags & pe_action_optional) == FALSE) &&
2202  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2203  pe_action_implies(first, then, pe_action_optional);
2204 
2205  if (pcmk_is_set(first->flags, pe_action_migrate_runnable) &&
2206  !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
2207 
2208  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
2209  pe_action_implies(first, then, pe_action_migrate_runnable);
2210  }
2211  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2212  }
2213  }
2214 
2215  if ((type & pe_order_implies_first_migratable)
2216  && pcmk_is_set(filter, pe_action_optional)) {
2217 
2218  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2219  ((then->flags & pe_action_runnable) == FALSE)) {
2220 
2221  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable nor migratable", first->uuid, then->uuid);
2222  pe_action_implies(first, then, pe_action_runnable);
2223  }
2224 
2225  if ((then->flags & pe_action_optional) == 0) {
2226  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2227  pe_action_implies(first, then, pe_action_optional);
2228  }
2229  }
2230 
2231  if ((type & pe_order_pseudo_left)
2232  && pcmk_is_set(filter, pe_action_optional)) {
2233 
2234  if ((first->flags & pe_action_runnable) == FALSE) {
2235  pe_action_implies(then, first, pe_action_runnable);
2236  pe_action_implies(then, first, pe_action_pseudo);
2237  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2238  }
2239 
2240  }
2241 
2242  if ((type & pe_order_runnable_left)
2243  && pcmk_is_set(filter, pe_action_runnable)
2244  && pcmk_is_set(then->flags, pe_action_runnable)
2245  && !pcmk_is_set(flags, pe_action_runnable)) {
2246  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2247  pe_action_implies(then, first, pe_action_runnable);
2248  pe_action_implies(then, first, pe_action_migrate_runnable);
2249  }
2250 
2251  if ((type & pe_order_implies_then)
2252  && pcmk_is_set(filter, pe_action_optional)
2253  && pcmk_is_set(then->flags, pe_action_optional)
2254  && !pcmk_is_set(flags, pe_action_optional)) {
2255 
2256  /* in this case, treat migrate_runnable as if first is optional */
2257  if (!pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
2258  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2259  pe_action_implies(then, first, pe_action_optional);
2260  }
2261  }
2262 
2263  if (pcmk_is_set(type, pe_order_restart)) {
2264  handle_restart_ordering(first, then, filter);
2265  }
2266 
2267  if (then_flags != then->flags) {
2268  pe__set_graph_flags(changed, first, pe_graph_updated_then);
2269  pe_rsc_trace(then->rsc,
2270  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2271  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2272  then_flags, first->uuid, first->flags);
2273 
2274  if(then->rsc && then->rsc->parent) {
2275  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2276  update_action(then, data_set);
2277  }
2278  }
2279 
2280  if (first_flags != first->flags) {
2281  pe__set_graph_flags(changed, first, pe_graph_updated_first);
2282  pe_rsc_trace(first->rsc,
2283  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2284  first->uuid, first->node ? first->node->details->uname : "[none]",
2285  first->flags, first_flags, then->uuid, then->flags);
2286  }
2287 
2288  return changed;
2289 }
2290 
2291 void
2292 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
2293 {
2294  GListPtr gIter = NULL;
2295  bool need_role = false;
2296 
2297  CRM_CHECK((constraint != NULL) && (rsc != NULL), return);
2298 
2299  // If a role was specified, ensure constraint is applicable
2300  need_role = (constraint->role_filter > RSC_ROLE_UNKNOWN);
2301  if (need_role && (constraint->role_filter != rsc->next_role)) {
2302  pe_rsc_trace(rsc,
2303  "Not applying %s to %s because role will be %s not %s",
2304  constraint->id, rsc->id, role2text(rsc->next_role),
2305  role2text(constraint->role_filter));
2306  return;
2307  }
2308 
2309  if (constraint->node_list_rh == NULL) {
2310  pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
2311  constraint->id, rsc->id);
2312  return;
2313  }
2314 
2315  pe_rsc_trace(rsc, "Applying %s%s%s to %s", constraint->id,
2316  (need_role? " for role " : ""),
2317  (need_role? role2text(constraint->role_filter) : ""), rsc->id);
2318 
2319  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2320  pe_node_t *node = (pe_node_t *) gIter->data;
2321  pe_node_t *other_node = NULL;
2322 
2323  other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2324 
2325  if (other_node != NULL) {
2326  pe_rsc_trace(rsc, "* + %d on %s",
2327  node->weight, node->details->uname);
2328  other_node->weight = pe__add_scores(other_node->weight,
2329  node->weight);
2330 
2331  } else {
2332  pe_rsc_trace(rsc, "* = %d on %s",
2333  node->weight, node->details->uname);
2334  other_node = pe__copy_node(node);
2335  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2336  }
2337 
2338  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2339  if (constraint->discover_mode == pe_discover_exclusive) {
2340  rsc->exclusive_discover = TRUE;
2341  }
2342  /* exclusive > never > always... always is default */
2343  other_node->rsc_discover_mode = constraint->discover_mode;
2344  }
2345  }
2346 }
2347 
2348 void
2349 native_expand(pe_resource_t * rsc, pe_working_set_t * data_set)
2350 {
2351  GListPtr gIter = NULL;
2352 
2353  CRM_ASSERT(rsc);
2354  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2355 
2356  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2357  pe_action_t *action = (pe_action_t *) gIter->data;
2358 
2359  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2360  graph_element_from_action(action, data_set);
2361  }
2362 
2363  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2364  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2365 
2366  child_rsc->cmds->expand(child_rsc, data_set);
2367  }
2368 }
2369 
2370 #define log_change(a, fmt, args...) do { \
2371  if(a && a->reason && terminal) { \
2372  printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
2373  } else if(a && a->reason) { \
2374  crm_notice(fmt" \tdue to %s", ##args, a->reason); \
2375  } else if(terminal) { \
2376  printf(" * "fmt"\n", ##args); \
2377  } else { \
2378  crm_notice(fmt, ##args); \
2379  } \
2380  } while(0)
2381 
2382 #define STOP_SANITY_ASSERT(lineno) do { \
2383  if(current && current->details->unclean) { \
2384  /* It will be a pseudo op */ \
2385  } else if(stop == NULL) { \
2386  crm_err("%s:%d: No stop action exists for %s", \
2387  __func__, lineno, rsc->id); \
2388  CRM_ASSERT(stop != NULL); \
2389  } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
2390  crm_err("%s:%d: Action %s is still optional", \
2391  __func__, lineno, stop->uuid); \
2392  CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
2393  } \
2394  } while(0)
2395 
2396 static void
2397 LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
2398 {
2399  int len = 0;
2400  char *reason = NULL;
2401  char *details = NULL;
2402  bool same_host = FALSE;
2403  bool same_role = FALSE;
2404  bool need_role = FALSE;
2405 
2406  static int rsc_width = 5;
2407  static int detail_width = 5;
2408 
2409  CRM_ASSERT(action);
2410  CRM_ASSERT(destination != NULL || origin != NULL);
2411 
2412  if(source == NULL) {
2413  source = action;
2414  }
2415 
2416  len = strlen(rsc->id);
2417  if(len > rsc_width) {
2418  rsc_width = len + 2;
2419  }
2420 
2421  if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
2422  need_role = TRUE;
2423  }
2424 
2425  if(origin != NULL && destination != NULL && origin->details == destination->details) {
2426  same_host = TRUE;
2427  }
2428 
2429  if(rsc->role == rsc->next_role) {
2430  same_role = TRUE;
2431  }
2432 
2433  if (need_role && (origin == NULL)) {
2434  /* Starting and promoting a promotable clone instance */
2435  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
2436 
2437  } else if (origin == NULL) {
2438  /* Starting a resource */
2439  details = crm_strdup_printf("%s", destination->details->uname);
2440 
2441  } else if (need_role && (destination == NULL)) {
2442  /* Stopping a promotable clone instance */
2443  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2444 
2445  } else if (destination == NULL) {
2446  /* Stopping a resource */
2447  details = crm_strdup_printf("%s", origin->details->uname);
2448 
2449  } else if (need_role && same_role && same_host) {
2450  /* Recovering, restarting or re-promoting a promotable clone instance */
2451  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2452 
2453  } else if (same_role && same_host) {
2454  /* Recovering or Restarting a normal resource */
2455  details = crm_strdup_printf("%s", origin->details->uname);
2456 
2457  } else if (need_role && same_role) {
2458  /* Moving a promotable clone instance */
2459  details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
2460 
2461  } else if (same_role) {
2462  /* Moving a normal resource */
2463  details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
2464 
2465  } else if (same_host) {
2466  /* Promoting or demoting a promotable clone instance */
2467  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
2468 
2469  } else {
2470  /* Moving and promoting/demoting */
2471  details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
2472  }
2473 
2474  len = strlen(details);
2475  if(len > detail_width) {
2476  detail_width = len;
2477  }
2478 
2479  if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
2480  reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
2481 
2482  } else if(source->reason) {
2483  reason = crm_strdup_printf(" due to %s", source->reason);
2484 
2485  } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
2486  reason = strdup(" blocked");
2487 
2488  } else {
2489  reason = strdup("");
2490  }
2491 
2492  if(terminal) {
2493  printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
2494  } else {
2495  crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
2496  }
2497 
2498  free(details);
2499  free(reason);
2500 }
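
/* Editor's note -- illustrative terminal output from the printf format above
 * (resource names, nodes and reasons are hypothetical):
 *
 *  * Move       vip       (    node1 -> node2 )
 *  * Recover    database  (             node3 )  due to monitor failure
 *
 * rsc_width and detail_width are static so that the columns stay aligned
 * across successive calls within a single scheduler run.
 */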
2501 
2502 
2503 void
2504 LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
2505 {
2506  pe_node_t *next = NULL;
2507  pe_node_t *current = NULL;
2508  pe_node_t *start_node = NULL;
2509 
2510  pe_action_t *stop = NULL;
2511  pe_action_t *start = NULL;
2512  pe_action_t *demote = NULL;
2513  pe_action_t *promote = NULL;
2514 
2515  char *key = NULL;
2516  gboolean moving = FALSE;
2517  GListPtr possible_matches = NULL;
2518 
2519  if(rsc->variant == pe_container) {
2520  pcmk__bundle_log_actions(rsc, data_set, terminal);
2521  return;
2522  }
2523 
2524  if (rsc->children) {
2525  GListPtr gIter = NULL;
2526 
2527  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2528  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2529 
2530  LogActions(child_rsc, data_set, terminal);
2531  }
2532  return;
2533  }
2534 
2535  next = rsc->allocated_to;
2536  if (rsc->running_on) {
2537  current = pe__current_node(rsc);
2538  if (rsc->role == RSC_ROLE_STOPPED) {
2539  /*
2540  * This can occur when resources are being recovered
2541  * We fiddle with the current role in native_create_actions()
2542  */
2543  rsc->role = RSC_ROLE_STARTED;
2544  }
2545  }
2546 
2547  if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2548  /* Don't log stopped orphans */
2549  return;
2550  }
2551 
2552  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
2553  || (current == NULL && next == NULL)) {
2554  pe_rsc_info(rsc, "Leave %s\t(%s%s)",
2555  rsc->id, role2text(rsc->role),
2556  !pcmk_is_set(rsc->flags, pe_rsc_managed)? " unmanaged" : "");
2557  return;
2558  }
2559 
2560  if (current != NULL && next != NULL && !pcmk__str_eq(current->details->id, next->details->id, pcmk__str_casei)) {
2561  moving = TRUE;
2562  }
2563 
2564  possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
2565  if (possible_matches) {
2566  start = possible_matches->data;
2567  g_list_free(possible_matches);
2568  }
2569 
2570  if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
2571  start_node = NULL;
2572  } else {
2573  start_node = current;
2574  }
2575  possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
2576  if (possible_matches) {
2577  stop = possible_matches->data;
2578  g_list_free(possible_matches);
2579  }
2580 
2581  possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
2582  if (possible_matches) {
2583  promote = possible_matches->data;
2584  g_list_free(possible_matches);
2585  }
2586 
2587  possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
2588  if (possible_matches) {
2589  demote = possible_matches->data;
2590  g_list_free(possible_matches);
2591  }
2592 
2593  if (rsc->role == rsc->next_role) {
2594  pe_action_t *migrate_op = NULL;
2595 
2596  possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
2597  if (possible_matches) {
2598  migrate_op = possible_matches->data;
2599  }
2600 
2601  CRM_CHECK(next != NULL,);
2602  if (next == NULL) {
2603  } else if ((migrate_op != NULL) && (current != NULL)
2604  && pcmk_is_set(migrate_op->flags, pe_action_runnable)) {
2605  LogAction("Migrate", rsc, current, next, start, NULL, terminal);
2606 
2607  } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
2608  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2609 
2610 
2611  } else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) {
2612  if ((demote != NULL) && (promote != NULL)
2613  && !pcmk_is_set(demote->flags, pe_action_optional)
2614  && !pcmk_is_set(promote->flags, pe_action_optional)) {
2615  LogAction("Re-promote", rsc, current, next, promote, demote,
2616  terminal);
2617  } else {
2618  pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id,
2619  role2text(rsc->role), next->details->uname);
2620  }
2621 
2622  } else if (!pcmk_is_set(start->flags, pe_action_runnable)) {
2623  LogAction("Stop", rsc, current, NULL, stop,
2624  (stop && stop->reason)? stop : start, terminal);
2625  STOP_SANITY_ASSERT(__LINE__);
2626 
2627  } else if (moving && current) {
2628  LogAction(pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move",
2629  rsc, current, next, stop, NULL, terminal);
2630 
2631  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2632  LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
2633  STOP_SANITY_ASSERT(__LINE__);
2634 
2635  } else {
2636  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2637  /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
2638  }
2639 
2640  g_list_free(possible_matches);
2641  return;
2642  }
2643 
2644  if(stop
2645  && (rsc->next_role == RSC_ROLE_STOPPED
2646  || (start && !pcmk_is_set(start->flags, pe_action_runnable)))) {
2647 
2648  GListPtr gIter = NULL;
2649 
2650  key = stop_key(rsc);
2651  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2652  pe_node_t *node = (pe_node_t *) gIter->data;
2653  pe_action_t *stop_op = NULL;
2654 
2655  possible_matches = find_actions(rsc->actions, key, node);
2656  if (possible_matches) {
2657  stop_op = possible_matches->data;
2658  g_list_free(possible_matches);
2659  }
2660 
2661  if (stop_op && (stop_op->flags & pe_action_runnable)) {
2662  STOP_SANITY_ASSERT(__LINE__);
2663  }
2664 
2665  LogAction("Stop", rsc, node, NULL, stop_op,
2666  (stop_op && stop_op->reason)? stop_op : start, terminal);
2667  }
2668 
2669  free(key);
2670 
2671  } else if ((stop != NULL)
2672  && pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) {
2673  /* 'stop' may be NULL if the failure was ignored */
2674  LogAction("Recover", rsc, current, next, stop, start, terminal);
2675  STOP_SANITY_ASSERT(__LINE__);
2676 
2677  } else if (moving) {
2678  LogAction("Move", rsc, current, next, stop, NULL, terminal);
2679  STOP_SANITY_ASSERT(__LINE__);
2680 
2681  } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
2682  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2683 
2684  } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) {
2685  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2686  STOP_SANITY_ASSERT(__LINE__);
2687 
2688  } else if (rsc->role == RSC_ROLE_MASTER) {
2689  CRM_LOG_ASSERT(current != NULL);
2690  LogAction("Demote", rsc, current, next, demote, NULL, terminal);
2691 
2692  } else if(rsc->next_role == RSC_ROLE_MASTER) {
2693  CRM_LOG_ASSERT(next);
2694  LogAction("Promote", rsc, current, next, promote, NULL, terminal);
2695 
2696  } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
2697  LogAction("Start", rsc, current, next, start, NULL, terminal);
2698  }
2699 }
2700 
2701 gboolean
2702 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2703 {
2704  GListPtr gIter = NULL;
2705 
2706  CRM_ASSERT(rsc);
2707  pe_rsc_trace(rsc, "%s", rsc->id);
2708 
2709  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2710  pe_node_t *current = (pe_node_t *) gIter->data;
2711  pe_action_t *stop;
2712 
2713  if (rsc->partial_migration_target) {
2714  if (rsc->partial_migration_target->details == current->details) {
2715  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2716  next->details->uname, rsc->id);
2717  continue;
2718  } else {
2719  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2720  optional = FALSE;
2721  }
2722  }
2723 
2724  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2725  stop = stop_action(rsc, current, optional);
2726 
2727  if(rsc->allocated_to == NULL) {
2728  pe_action_set_reason(stop, "node availability", TRUE);
2729  }
2730 
2731  if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
2732  update_action_flags(stop, pe_action_runnable | pe_action_clear,
2733  __func__, __LINE__);
2734  }
2735 
2736  if (pcmk_is_set(data_set->flags, pe_flag_remove_after_stop)) {
2737  DeleteRsc(rsc, current, optional, data_set);
2738  }
2739 
2740  if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
2741  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
2742 
2743  order_actions(stop, unfence, pe_order_implies_first);
2744  if (!node_has_been_unfenced(current)) {
2745  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2746  }
2747  }
2748  }
2749 
2750  return TRUE;
2751 }
2752 
2753 static void
2754 order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
2755  enum pe_ordering order, pe_working_set_t *data_set)
2756 {
2757  /* When unfencing is in use, we order unfence actions before any probe or
2758  * start of resources that require unfencing, and also of fence devices.
2759  *
2760  * This might seem to violate the principle that fence devices require
2761  * only quorum. However, fence agents that unfence often don't have enough
2762  * information to even probe or start unless the node is first unfenced.
2763  */
2764  if (is_unfence_device(rsc, data_set)
2765  || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2766 
2767  /* Start with an optional ordering. Requiring unfencing would result in
2768  * the node being unfenced, and all its resources being stopped,
2769  * whenever a new resource is added -- which would be highly suboptimal.
2770  */
2771  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
2772 
2773  order_actions(unfence, action, order);
2774 
2775  if (!node_has_been_unfenced(node)) {
2776  // But unfencing is required if it has never been done
2777  char *reason = crm_strdup_printf("required by %s %s",
2778  rsc->id, action->task);
2779 
2780  trigger_unfencing(NULL, node, reason, NULL, data_set);
2781  free(reason);
2782  }
2783  }
2784 }
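
/* Editor's note -- illustrative context, not from the original file: typical
 * users of unfencing are storage fence agents such as fence_scsi or
 * fence_mpath, where a node's registration keys must be restored (the "on"
 * action ordered above) before probes or starts on that node can succeed.
 */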
2785 
2786 gboolean
2787 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2788 {
2789  pe_action_t *start = NULL;
2790 
2791  CRM_ASSERT(rsc);
2792  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2793  start = start_action(rsc, next, TRUE);
2794 
2795  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2796 
2797  if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
2798  update_action_flags(start, pe_action_optional | pe_action_clear,
2799  __func__, __LINE__);
2800  }
2801 
2802 
2803  return TRUE;
2804 }
2805 
2806 gboolean
2807 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2808 {
2809  GListPtr gIter = NULL;
2810  gboolean runnable = TRUE;
2811  GListPtr action_list = NULL;
2812 
2813  CRM_ASSERT(rsc);
2814  CRM_CHECK(next != NULL, return FALSE);
2815  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2816 
2817  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2818 
2819  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2820  pe_action_t *start = (pe_action_t *) gIter->data;
2821 
2822  if (!pcmk_is_set(start->flags, pe_action_runnable)) {
2823  runnable = FALSE;
2824  }
2825  }
2826  g_list_free(action_list);
2827 
2828  if (runnable) {
2829  promote_action(rsc, next, optional);
2830  return TRUE;
2831  }
2832 
2833  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2834 
2835  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2836 
2837  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2838  pe_action_t *promote = (pe_action_t *) gIter->data;
2839 
2840  update_action_flags(promote, pe_action_runnable | pe_action_clear,
2841  __func__, __LINE__);
2842  }
2843 
2844  g_list_free(action_list);
2845  return TRUE;
2846 }
2847 
2848 gboolean
2849 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2850 {
2851  GListPtr gIter = NULL;
2852 
2853  CRM_ASSERT(rsc);
2854  pe_rsc_trace(rsc, "%s", rsc->id);
2855 
2856 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2857  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2858  pe_node_t *current = (pe_node_t *) gIter->data;
2859 
2860  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2861  demote_action(rsc, current, optional);
2862  }
2863  return TRUE;
2864 }
2865 
2866 gboolean
2867 RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2868 {
2869  CRM_ASSERT(rsc);
2870  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2871  CRM_CHECK(FALSE, return FALSE);
2872  return FALSE;
2873 }
2874 
2875 gboolean
2876 NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2877 {
2878  CRM_ASSERT(rsc);
2879  pe_rsc_trace(rsc, "%s", rsc->id);
2880  return FALSE;
2881 }
2882 
2883 gboolean
2884 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2885 {
2886  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
2887  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2888  return FALSE;
2889 
2890  } else if (node == NULL) {
2891  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2892  return FALSE;
2893 
2894  } else if (node->details->unclean || node->details->online == FALSE) {
2895  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2896  node->details->uname);
2897  return FALSE;
2898  }
2899 
2900  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2901 
2902  delete_action(rsc, node, optional);
2903 
2904  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2905  optional ? pe_order_implies_then : pe_order_optional, data_set);
2906 
2907  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2908  optional ? pe_order_implies_then : pe_order_optional, data_set);
2909 
2910  return TRUE;
2911 }
2912 
2913 gboolean
2914 native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
2915  gboolean force, pe_working_set_t * data_set)
2916 {
2917  enum pe_ordering flags = pe_order_optional;
2918  char *key = NULL;
2919  pe_action_t *probe = NULL;
2920  pe_node_t *running = NULL;
2921  pe_node_t *allowed = NULL;
2922  pe_resource_t *top = uber_parent(rsc);
2923 
2924  static const char *rc_master = NULL;
2925  static const char *rc_inactive = NULL;
2926 
2927  if (rc_inactive == NULL) {
2928  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
2929  rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
2930  }
2931 
2932  CRM_CHECK(node != NULL, return FALSE);
2933  if (!force && !pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
2934  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2935  return FALSE;
2936  }
2937 
2938  if (pe__is_guest_or_remote_node(node)) {
2939  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2940 
2941  if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
2942  pe_rsc_trace(rsc,
2943  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2944  rsc->id, node->details->id);
2945  return FALSE;
2946  } else if (pe__is_guest_node(node)
2947  && pe__resource_contains_guest_node(data_set, rsc)) {
2948  pe_rsc_trace(rsc,
2949  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2950  rsc->id, node->details->id);
2951  return FALSE;
2952  } else if (rsc->is_remote_node) {
2953  pe_rsc_trace(rsc,
2954  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2955  rsc->id, node->details->id);
2956  return FALSE;
2957  }
2958  }
2959 
2960  if (rsc->children) {
2961  GListPtr gIter = NULL;
2962  gboolean any_created = FALSE;
2963 
2964  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2965  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2966 
2967  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2968  || any_created;
2969  }
2970 
2971  return any_created;
2972 
2973  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2974  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2975  return FALSE;
2976  }
2977 
2978  if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
2979  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2980  return FALSE;
2981  }
2982 
2983  // Check whether resource is already known on node
2984  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2985  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2986  return FALSE;
2987  }
2988 
2989  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2990 
2991  if (rsc->exclusive_discover || top->exclusive_discover) {
2992  if (allowed == NULL) {
2993  /* exclusive discover is enabled and this node is not in the allowed list. */
2994  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2995  return FALSE;
2996  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2997  /* exclusive discover is enabled and this node is not marked
2998  * as a node this resource should be discovered on */
2999  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
3000  return FALSE;
3001  }
3002  }
3003 
3004  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
3005  /* If this node was allowed to host this resource it would
3006  * have been explicitly added to the 'allowed_nodes' list.
3007  * However it wasn't and the node has discovery disabled, so
3008  * no need to probe for this resource.
3009  */
3010  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
3011  return FALSE;
3012  }
3013 
3014  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
3015  /* this resource is marked as not needing to be discovered on this node */
3016  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
3017  return FALSE;
3018  }
3019 
3020  if (pe__is_guest_node(node)) {
3021  pe_resource_t *remote = node->details->remote_rsc->container;
3022 
3023  if(remote->role == RSC_ROLE_STOPPED) {
3024  /* If the container is stopped, then we know anything that
3025  * might have been inside it is also stopped and there is
3026  * no need to probe.
3027  *
3028  * If we don't know the container's state on the target
3029  * either:
3030  *
3031  * - the container is running, the transition will abort
3032  * and we'll end up in a different case next time, or
3033  *
3034  * - the container is stopped
3035  *
3036  * Either way there is no need to probe.
3037  *
3038  */
3039  if(remote->allocated_to
3040  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
3041  /* For safety, we order the 'rsc' start after 'remote'
3042  * has been probed.
3043  *
3044  * Using 'top' helps for groups, but we may need to
3045  * follow the start's ordering chain backwards.
3046  */
3047  custom_action_order(remote,
3048  pcmk__op_key(remote->id, RSC_STATUS, 0),
3049  NULL, top,
3050  pcmk__op_key(top->id, RSC_START, 0), NULL,
3051  pe_order_optional, data_set);
3052  }
3053  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
3054  rsc->id, node->details->id, remote->id);
3055  return FALSE;
3056 
3057  /* Here we really want to check if remote->stop is required,
3058  * but that information doesn't exist yet
3059  */
3060  } else if(node->details->remote_requires_reset
3061  || node->details->unclean
3062  || pcmk_is_set(remote->flags, pe_rsc_failed)
3063  || remote->next_role == RSC_ROLE_STOPPED
3064  || (remote->allocated_to
3065  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
3066  ) {
3067  /* The container is stopping or restarting, don't start
3068  * 'rsc' until 'remote' stops as this also implies that
3069  * 'rsc' is stopped - avoiding the need to probe
3070  */
3071  custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
3072  NULL, top, pcmk__op_key(top->id, RSC_START, 0),
3073  NULL, pe_order_optional, data_set);
3074  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
3075  rsc->id, node->details->id, remote->id);
3076  return FALSE;
3077 /* } else {
3078  * The container is running so there is no problem probing it
3079  */
3080  }
3081  }
3082 
3083  key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
3084  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
3085  update_action_flags(probe, pe_action_optional | pe_action_clear, __func__,
3086  __LINE__);
3087 
3088  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
3089 
3090  /*
3091  * We need to know if it's running_on (not just known_on) this node
3092  * to correctly determine the target rc.
3093  */
3094  running = pe_find_node_id(rsc->running_on, node->details->id);
3095  if (running == NULL) {
3096  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
3097 
3098  } else if (rsc->role == RSC_ROLE_MASTER) {
3099  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
3100  }
3101 
3102  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
3103  pcmk_is_set(probe->flags, pe_action_runnable), rsc->running_on);
3104 
3105  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
3106  top = rsc;
3107  } else {
3108  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
3109  }
3110 
3111  if (!pcmk_is_set(probe->flags, pe_action_runnable)
3112  && (rsc->running_on == NULL)) {
3113  /* Prevent the start from occurring if rsc isn't active, but
3114  * don't cause it to stop if it was active already
3115  */
3116  pe__set_order_flags(flags, pe_order_runnable_left);
3117  }
3118 
3119  custom_action_order(rsc, NULL, probe,
3120  top, pcmk__op_key(top->id, RSC_START, 0), NULL,
3121  flags, data_set);
3122 
3123  /* Before any reloads, if they exist */
3124  custom_action_order(rsc, NULL, probe,
3125  top, reload_key(rsc), NULL,
3126  pe_order_optional, data_set);
3127 
3128 #if 0
3129  // complete is always null currently
3130  if (!is_unfence_device(rsc, data_set)) {
3131  /* Normally rsc.start depends on probe complete which depends
3132  * on rsc.probe. But this can't be the case for fence devices
3133  * with unfencing, as it would create graph loops.
3134  *
3135  * So instead we explicitly order 'rsc.probe then rsc.start'
3136  */
3137  order_actions(probe, complete, pe_order_implies_then);
3138  }
3139 #endif
3140  return TRUE;
3141 }
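
/* Editor's note -- illustrative summary of the probe target-rc logic above,
 * using the OCF codes referenced in this file:
 *  - resource not running on the probed node: expected rc is
 *    PCMK_OCF_NOT_RUNNING (7); any other result aborts the transition
 *  - resource expected to be a master there: PCMK_OCF_RUNNING_MASTER (8)
 *  - otherwise a plain success (0) is expected.
 */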
3142 
3152 static bool
3153 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
3154 {
3155  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
3156  return TRUE;
3157 
3158  } else if ((rsc->variant == pe_native)
3159  && pe_rsc_is_anon_clone(rsc->parent)
3160  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3161  /* We check only the parent, not the uber-parent, because we cannot
3162  * assume that the resource is known if it is in an anonymously cloned
3163  * group (which may be only partially known).
3164  */
3165  return TRUE;
3166  }
3167  return FALSE;
3168 }
3169 
3178 static void
3179 native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3180 {
3181  pe_node_t *target;
3182  GListPtr gIter = NULL;
3183 
3184  CRM_CHECK(stonith_op && stonith_op->node, return);
3185  target = stonith_op->node;
3186 
3187  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
3188  pe_action_t *action = (pe_action_t *) gIter->data;
3189 
3190  switch (action->needs) {
3191  case rsc_req_nothing:
3192  // Anything other than start or promote requires nothing
3193  break;
3194 
3195  case rsc_req_stonith:
3196  order_actions(stonith_op, action, pe_order_optional);
3197  break;
3198 
3199  case rsc_req_quorum:
3200  if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
3201  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
3202  && !rsc_is_known_on(rsc, target)) {
3203 
3204  /* If we don't know the status of the resource on the node
3205  * we're about to shoot, we have to assume it may be active
3206  * there. Order the resource start after the fencing. This
3207  * is analogous to waiting for all the probes for a resource
3208  * to complete before starting it.
3209  *
3210  * The most likely explanation is that the DC died and took
3211  * its status with it.
3212  */
3213  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
3214  target->details->uname);
3215  order_actions(stonith_op, action,
3216  pe_order_optional | pe_order_runnable_left);
3217  }
3218  break;
3219  }
3220  }
3221 }
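
/* Editor's note -- illustrative mapping, paraphrasing the switch above onto
 * the CIB "requires" meta-attribute: requires="nothing" starts regardless of
 * fencing; requires="fencing" orders the start after the stonith operation;
 * requires="quorum" orders the start after fencing only when the resource's
 * status on the fenced node is unknown and it might already be active there.
 */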
3222 
3223 static void
3224 native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3225 {
3226  GListPtr gIter = NULL;
3227  GListPtr action_list = NULL;
3228  bool order_implicit = false;
3229 
3230  pe_resource_t *top = uber_parent(rsc);
3231  pe_action_t *parent_stop = NULL;
3232  pe_node_t *target;
3233 
3234  CRM_CHECK(stonith_op && stonith_op->node, return);
3235  target = stonith_op->node;
3236 
3237  /* Get a list of stop actions potentially implied by the fencing */
3238  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
3239 
3240  /* If resource requires fencing, implicit actions must occur after fencing.
3241  *
3242  * Implied stops and demotes of resources running on guest nodes are always
3243  * ordered after fencing, even if the resource does not require fencing,
3244  * because guest node "fencing" is actually just a resource stop.
3245  */
3246  if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
3247  || pe__is_guest_node(target)) {
3248 
3249  order_implicit = true;
3250  }
3251 
3252  if (action_list && order_implicit) {
3253  parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
3254  }
3255 
3256  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3257  pe_action_t *action = (pe_action_t *) gIter->data;
3258 
3259  // The stop would never complete, so convert it into a pseudo-action.
3260  update_action_flags(action, pe_action_pseudo|pe_action_runnable,
3261  __func__, __LINE__);
3262 
3263  if (order_implicit) {
3264  update_action_flags(action, pe_action_implied_by_stonith,
3265  __func__, __LINE__);
3266 
3267  /* Order the stonith before the parent stop (if any).
3268  *
3269  * Also order the stonith before the resource stop, unless the
3270  * resource is inside a bundle -- that would cause a graph loop.
3271  * We can rely on the parent stop's ordering instead.
3272  *
3273  * User constraints must not order a resource in a guest node
3274  * relative to the guest node container resource. The
3275  * pe_order_preserve flag marks constraints as generated by the
3276  * cluster and thus immune to that check (and is irrelevant if
3277  * target is not a guest).
3278  */
3279  if (!pe_rsc_is_bundled(rsc)) {
3280  order_actions(stonith_op, action, pe_order_preserve);
3281  }
3282  order_actions(stonith_op, parent_stop, pe_order_preserve);
3283  }
3284 
3285  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3286  crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
3287  rsc->id, (order_implicit? "after" : "because"),
3288  target->details->uname);
3289  } else {
3290  crm_info("%s is implicit %s %s is fenced",
3291  action->uuid, (order_implicit? "after" : "because"),
3292  target->details->uname);
3293  }
3294 
3295  if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
3296  /* Create a second notification that will be delivered
3297  * immediately after the node is fenced
3298  *
3299  * Basic problem:
3300  * - C is a clone active on the node to be shot and stopping on another
3301  * - R is a resource that depends on C
3302  *
3303  * + C.stop depends on R.stop
3304  * + C.stopped depends on STONITH
3305  * + C.notify depends on C.stopped
3306  * + C.healthy depends on C.notify
3307  * + R.stop depends on C.healthy
3308  *
3309  * The extra notification here changes
3310  * + C.healthy depends on C.notify
3311  * into:
3312  * + C.healthy depends on C.notify'
3313  * + C.notify' depends on STONITH'
3314  * thus breaking the loop
3315  */
3316  create_secondary_notification(action, rsc, stonith_op, data_set);
3317  }
3318 
3319 /* From Bug #1601, successful fencing must be an input to a failed resource's stop action.
3320 
3321  However, given group(rA, rB) running on nodeX where B.stop has failed,
3322  A := stop healthy resource (rA.stop)
3323  B := stop failed resource (pseudo operation B.stop)
3324  C := stonith nodeX
3325  A requires B, B requires C, C requires A
3326  This loop would prevent the cluster from making progress.
3327 
3328  This block creates the "C requires A" dependency and therefore must (at least
3329  for now) be disabled.
3330 
3331  Instead, run the block above and treat all resources on nodeX as B would be
3332  (marked as a pseudo op depending on the STONITH).
3333 
3334  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3335 
3336  } else if(is_stonith == FALSE) {
3337  crm_info("Moving healthy resource %s"
3338  " off %s before fencing",
3339  rsc->id, node->details->uname);
3340 
3341  * stop healthy resources before the
3342  * stonith op
3343  *
3344  custom_action_order(
3345  rsc, stop_key(rsc), NULL,
3346  NULL,strdup(CRM_OP_FENCE),stonith_op,
3347  pe_order_optional, data_set);
3348 */
3349  }
3350 
3351  g_list_free(action_list);
3352 
3353  /* Get a list of demote actions potentially implied by the fencing */
3354  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3355 
3356  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3357  pe_action_t *action = (pe_action_t *) gIter->data;
3358 
3359  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3360  || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3361 
3362  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3363  pe_rsc_info(rsc,
3364  "Demote of failed resource %s is implicit after %s is fenced",
3365  rsc->id, target->details->uname);
3366  } else {
3367  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3368  action->uuid, target->details->uname);
3369  }
3370 
3371  /* The demote would never complete and is now implied by the
3372  * fencing, so convert it into a pseudo-action.
3373  */
3374  update_action_flags(action, pe_action_pseudo|pe_action_runnable,
3375  __func__, __LINE__);
3376 
3377  if (pe_rsc_is_bundled(rsc)) {
3378  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3379 
3380  } else if (order_implicit) {
3381  order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
3382  }
3383  }
3384  }
3385 
3386  g_list_free(action_list);
3387 }
3388 
3389 void
3390 rsc_stonith_ordering(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
3391 {
3392  if (rsc->children) {
3393  GListPtr gIter = NULL;
3394 
3395  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3396  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3397 
3398  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3399  }
3400 
3401  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3402  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3403 
3404  } else {
3405  native_start_constraints(rsc, stonith_op, data_set);
3406  native_stop_constraints(rsc, stonith_op, data_set);
3407  }
3408 }
3409 
3410 void
3411 ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set)
3412 {
3413  GListPtr gIter = NULL;
3414  pe_action_t *reload = NULL;
3415 
3416  if (rsc->children) {
3417  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3418  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
3419 
3420  ReloadRsc(child_rsc, node, data_set);
3421  }
3422  return;
3423 
3424  } else if (rsc->variant > pe_native) {
3425  /* Complex resource with no children */
3426  return;
3427 
3428  } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
3429  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3430  return;
3431 
3432  } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
3433  /* We don't need to specify any particular actions here, normal failure
3434  * recovery will apply.
3435  */
3436  pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id);
3437  return;
3438 
3439  } else if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
3440  /* If a resource's configuration changed while a start was pending,
3441  * force a full restart.
3442  */
3443  pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id);
3444  stop_action(rsc, node, FALSE);
3445  return;
3446 
3447  } else if (node == NULL) {
3448  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3449  return;
3450  }
3451 
3452  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3453  pe__set_resource_flags(rsc, pe_rsc_reload);
3454 
3455  reload = custom_action(
3456  rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
3457  pe_action_set_reason(reload, "resource definition change", FALSE);
3458 
3459  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3461  data_set);
3462  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3463  pe_order_optional|pe_order_then_cancels_first,
3464  data_set);
3465 }
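/* Note on the two orderings above, assuming the usual meaning of
 * pe_order_then_cancels_first: the reload is ordered before any stop or
 * demote of the same resource, and if the stop/demote turns out to be
 * required after all (for example, the resource must move), the pending
 * reload is cancelled rather than run first to no effect. A
 * configuration-only change therefore reloads in place, while any change
 * requiring a full restart takes precedence.
 */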
3466 
3467 void
3468 native_append_meta(pe_resource_t * rsc, xmlNode * xml)
3469 {
3470  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3471  pe_resource_t *parent;
3472 
3473  if (value) {
3474  char *name = NULL;
3475 
3476  name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
3477  crm_xml_add(xml, name, value);
3478  free(name);
3479  }
3480 
3481  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3482  if (value) {
3483  char *name = NULL;
3484 
3485  name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
3486  crm_xml_add(xml, name, value);
3487  free(name);
3488  }
3489 
3490  for (parent = rsc; parent != NULL; parent = parent->parent) {
3491  if (parent->container) {
3492  crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER, parent->container->id);
3493  }
3494  }
3495 }
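/* Illustration (assumed values, since crm_meta_name() prefixes "CRM_meta_"
 * and maps '-' to '_'): for the third instance of a clone running inside a
 * bundle, native_append_meta() would add attributes along these lines to
 * the action XML:
 *
 *   CRM_meta_clone="2"
 *   CRM_meta_container="httpd-bundle-docker-0"   (hypothetical resource id)
 *
 * and, for a resource that defines a guest node:
 *
 *   CRM_meta_remote_node="guest1"                (hypothetical node name)
 */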