pacemaker  2.0.4-2deceaa
Scalable High-Availability cluster resource manager
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2020 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <crm/pengine/rules.h>
13 #include <crm/msg_xml.h>
14 #include <pacemaker-internal.h>
15 #include <crm/services.h>
16 
17 // The controller removes the resource from the CIB, making this redundant
18 // #define DELETE_THEN_REFRESH 1
19 
20 #define INFINITY_HACK (INFINITY * -100)
21 
22 #define VARIANT_NATIVE 1
23 #include <lib/pengine/variant.h>
24 
25 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
26  pe_working_set_t *data_set);
27 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
28  xmlNode *operation, pe_working_set_t *data_set);
29 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
30  pe_working_set_t *data_set);
31 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
32  xmlNode *operation, pe_working_set_t *data_set);
33 
34 void ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set);
35 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
36 gboolean StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
37 gboolean StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
38 gboolean DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
39 gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional,
40  pe_working_set_t * data_set);
41 gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
42 gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set);
43 
44 /* *INDENT-OFF* */
46 /* Current State */
47 /* Next State: Unknown Stopped Started Slave Master */
53 };
54 
56 /* Current State */
57 /* Next State: Unknown Stopped Started Slave Master */
58  /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
59  /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
60  /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
61  /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
62  /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
63 };
64 /* *INDENT-ON* */
65 
66 static gboolean
67 native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set)
68 {
69  GListPtr nodes = NULL;
70  pe_node_t *chosen = NULL;
71  pe_node_t *best = NULL;
72  int multiple = 1;
73  int length = 0;
74  gboolean result = FALSE;
75 
76  process_utilization(rsc, &prefer, data_set);
77 
78  if (is_not_set(rsc->flags, pe_rsc_provisional)) {
79  return rsc->allocated_to ? TRUE : FALSE;
80  }
81 
82  // Sort allowed nodes by weight
83  if (rsc->allowed_nodes) {
84  length = g_hash_table_size(rsc->allowed_nodes);
85  }
86  if (length > 0) {
87  nodes = g_hash_table_get_values(rsc->allowed_nodes);
88  nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);
89 
90  // First node in sorted list has the best score
91  best = g_list_nth_data(nodes, 0);
92  }
93 
94  if (prefer && nodes) {
95  chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
96 
97  if (chosen == NULL) {
98  pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
99  prefer->details->uname, rsc->id);
100 
101  /* Favor the preferred node as long as its weight is at least as good as
102  * the best allowed node's.
103  *
104  * An alternative would be to favor the preferred node even if the best
105  * node is better, when the best node's weight is less than INFINITY.
106  */
107  } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
108  pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
109  chosen->details->uname, rsc->id);
110  chosen = NULL;
111 
112  } else if (!can_run_resources(chosen)) {
113  pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
114  chosen->details->uname, rsc->id);
115  chosen = NULL;
116 
117  } else {
118  pe_rsc_trace(rsc,
119  "Chose preferred node %s for %s (ignoring %d candidates)",
120  chosen->details->uname, rsc->id, length);
121  }
122  }
123 
124  if ((chosen == NULL) && nodes) {
125  /* Either there is no preferred node, or the preferred node is not
126  * available, but there are other nodes allowed to run the resource.
127  */
128 
129  chosen = best;
130  pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
131  chosen ? chosen->details->uname : "<none>", rsc->id, length);
132 
133  if (!pe_rsc_is_unique_clone(rsc->parent)
134  && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
135  /* If the resource is already running on a node, prefer that node if
136  * it is just as good as the chosen node.
137  *
138  * We don't do this for unique clone instances, because
139  * distribute_children() has already assigned instances to their
140  * running nodes when appropriate, and if we get here, we don't want
141  * remaining unallocated instances to prefer a node that's already
142  * running another instance.
143  */
144  pe_node_t *running = pe__current_node(rsc);
145 
146  if (running && (can_run_resources(running) == FALSE)) {
147  pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
148  rsc->id, running->details->uname);
149  } else if (running) {
150  for (GList *iter = nodes->next; iter; iter = iter->next) {
151  pe_node_t *tmp = (pe_node_t *) iter->data;
152 
153  if (tmp->weight != chosen->weight) {
154  // The nodes are sorted by weight, so no more are equal
155  break;
156  }
157  if (tmp->details == running->details) {
158  // Scores are equal, so prefer the current node
159  chosen = tmp;
160  }
161  multiple++;
162  }
163  }
164  }
165  }
166 
167  if (multiple > 1) {
168  static char score[33];
169  int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;
170 
171  score2char_stack(chosen->weight, score, sizeof(score));
172  do_crm_log(log_level,
173  "Chose node %s for %s from %d nodes with score %s",
174  chosen->details->uname, rsc->id, multiple, score);
175  }
176 
177  result = native_assign_node(rsc, nodes, chosen, FALSE);
178  g_list_free(nodes);
179  return result;
180 }
181 
190 static int
191 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
192  const char *value)
193 {
194  GHashTableIter iter;
195  pe_node_t *node = NULL;
196  int best_score = -INFINITY;
197  const char *best_node = NULL;
198 
199  // Find best allowed node with matching attribute
200  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
201  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
202 
203  if ((node->weight > best_score) && can_run_resources(node)
204  && safe_str_eq(value, pe_node_attribute_raw(node, attr))) {
205 
206  best_score = node->weight;
207  best_node = node->details->uname;
208  }
209  }
210 
211  if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
212  if (best_node == NULL) {
213  crm_info("No allowed node for %s matches node attribute %s=%s",
214  rsc->id, attr, value);
215  } else {
216  crm_info("Allowed node %s for %s had best score (%d) "
217  "of those matching node attribute %s=%s",
218  best_node, rsc->id, best_score, attr, value);
219  }
220  }
221  return best_score;
222 }
223 
238 static void
239 add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
240  const char *attr, float factor,
241  bool only_positive)
242 {
243  GHashTableIter iter;
244  pe_node_t *node = NULL;
245 
246  if (attr == NULL) {
247  attr = CRM_ATTR_UNAME;
248  }
249 
250  // Iterate through each node
251  g_hash_table_iter_init(&iter, nodes);
252  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
253  float weight_f = 0;
254  int weight = 0;
255  int score = 0;
256  int new_score = 0;
257 
258  score = best_node_score_matching_attr(rsc, attr,
259  pe_node_attribute_raw(node, attr));
260 
261  if ((factor < 0) && (score < 0)) {
262  /* Negative preference for a node with a negative score
263  * should not become a positive preference.
264  *
265  * @TODO Consider filtering only if weight is -INFINITY
266  */
267  crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
268  node->details->uname, node->weight, factor, score);
269  continue;
270  }
271 
272  if (node->weight == INFINITY_HACK) {
273  crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
274  node->details->uname, node->weight, factor, score);
275  continue;
276  }
277 
278  weight_f = factor * score;
279 
280  // Round the number; see http://c-faq.com/fp/round.html
281  weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
282 
283  /* Small factors can obliterate the small scores that are often actually
284  * used in configurations. If the score and factor are nonzero, ensure
285  * that the result is nonzero as well.
286  */
287  if ((weight == 0) && (score != 0)) {
288  if (factor > 0.0) {
289  weight = 1;
290  } else if (factor < 0.0) {
291  weight = -1;
292  }
293  }
294 
295  new_score = pe__add_scores(weight, node->weight);
296 
297  if (only_positive && (new_score < 0) && (node->weight > 0)) {
298  crm_trace("%s: Filtering %d + %f * %d = %d "
299  "(negative disallowed, marking node unusable)",
300  node->details->uname, node->weight, factor, score,
301  new_score);
302  node->weight = INFINITY_HACK;
303  continue;
304  }
305 
306  if (only_positive && (new_score < 0) && (node->weight == 0)) {
307  crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
308  node->details->uname, node->weight, factor, score,
309  new_score);
310  continue;
311  }
312 
313  crm_trace("%s: %d + %f * %d = %d", node->details->uname,
314  node->weight, factor, score, new_score);
315  node->weight = new_score;
316  }
317 }
318 
319 static inline bool
320 is_nonempty_group(pe_resource_t *rsc)
321 {
322  return rsc && (rsc->variant == pe_group) && (rsc->children != NULL);
323 }
324 
340 GHashTable *
342  GHashTable *nodes, const char *attr, float factor,
343  uint32_t flags)
344 {
345  GHashTable *work = NULL;
346 
347  // Avoid infinite recursion
348  if (is_set(rsc->flags, pe_rsc_merging)) {
349  pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
350  return nodes;
351  }
353 
354  if (is_set(flags, pe_weights_init)) {
355  if (is_nonempty_group(rsc)) {
356  GList *last = g_list_last(rsc->children);
357  pe_resource_t *last_rsc = last->data;
358 
359  pe_rsc_trace(rsc, "%s: Merging scores from group %s "
360  "using last member %s (at %.6f)",
361  rhs, rsc->id, last_rsc->id, factor);
362  work = pcmk__native_merge_weights(last_rsc, rhs, NULL, attr, factor,
363  flags);
364  } else {
366  }
367  clear_bit(flags, pe_weights_init);
368 
369  } else if (is_nonempty_group(rsc)) {
370  /* The first member of the group will recursively incorporate any
371  * constraints involving other members (including the group internal
372  * colocation).
373  *
374  * @TODO The indirect colocations from the dependent group's other
375  * members will be incorporated at full strength rather than by
376  * factor, so the group's combined stickiness will be treated as
377  * (factor + (#members - 1)) * stickiness. It is questionable what
378  * the right approach should be.
379  */
380  pe_rsc_trace(rsc, "%s: Merging scores from first member of group %s "
381  "(at %.6f)", rhs, rsc->id, factor);
382  work = pcmk__copy_node_table(nodes);
383  work = pcmk__native_merge_weights(rsc->children->data, rhs, work, attr,
384  factor, flags);
385 
386  } else {
387  pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
388  rhs, rsc->id, factor);
389  work = pcmk__copy_node_table(nodes);
390  add_node_scores_matching_attr(work, rsc, attr, factor,
391  is_set(flags, pe_weights_positive));
392  }
393 
394  if (can_run_any(work)) {
395  GListPtr gIter = NULL;
396  int multiplier = (factor < 0)? -1 : 1;
397 
398  if (is_set(flags, pe_weights_forward)) {
399  gIter = rsc->rsc_cons;
400  pe_rsc_trace(rsc,
401  "Checking additional %d optional '%s with' constraints",
402  g_list_length(gIter), rsc->id);
403 
404  } else if (is_nonempty_group(rsc)) {
405  pe_resource_t *last_rsc = g_list_last(rsc->children)->data;
406 
407  gIter = last_rsc->rsc_cons_lhs;
408  pe_rsc_trace(rsc, "Checking additional %d optional 'with group %s' "
409  "constraints using last member %s",
410  g_list_length(gIter), rsc->id, last_rsc->id);
411 
412  } else {
413  gIter = rsc->rsc_cons_lhs;
414  pe_rsc_trace(rsc,
415  "Checking additional %d optional 'with %s' constraints",
416  g_list_length(gIter), rsc->id);
417  }
418 
419  for (; gIter != NULL; gIter = gIter->next) {
420  pe_resource_t *other = NULL;
421  rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
422 
423  if (constraint->score == 0) {
424  continue;
425  }
426 
427  if (is_set(flags, pe_weights_forward)) {
428  other = constraint->rsc_rh;
429  } else {
430  other = constraint->rsc_lh;
431  }
432 
433  pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
434  constraint->id, constraint->rsc_lh->id,
435  constraint->rsc_rh->id);
436  work = pcmk__native_merge_weights(other, rhs, work,
437  constraint->node_attribute,
438  multiplier * constraint->score / (float) INFINITY,
439  flags|pe_weights_rollback);
440  pe__show_node_weights(true, NULL, rhs, work);
441  }
442 
443  } else if (is_set(flags, pe_weights_rollback)) {
444  pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
445  rhs, rsc->id);
446  g_hash_table_destroy(work);
448  return nodes;
449  }
450 
451 
452  if (is_set(flags, pe_weights_positive)) {
453  pe_node_t *node = NULL;
454  GHashTableIter iter;
455 
456  g_hash_table_iter_init(&iter, work);
457  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
458  if (node->weight == INFINITY_HACK) {
459  node->weight = 1;
460  }
461  }
462  }
463 
464  if (nodes) {
465  g_hash_table_destroy(nodes);
466  }
467 
469  return work;
470 }
471 
472 static inline bool
473 node_has_been_unfenced(pe_node_t *node)
474 {
475  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
476 
477  return unfenced && strcmp("0", unfenced);
478 }
479 
480 static inline bool
481 is_unfence_device(pe_resource_t *rsc, pe_working_set_t *data_set)
482 {
483  return is_set(rsc->flags, pe_rsc_fence_device)
484  && is_set(data_set->flags, pe_flag_enable_unfencing);
485 }
486 
/*!
 * \internal
 * \brief Allocate a primitive (native) resource to a node
 *
 * Escalates to the parent when needed, applies colocation constraints in
 * both directions, enforces role/quorum rules, then picks a node (or none)
 * via native_choose_node(). For Pacemaker Remote connection resources, also
 * updates the remote node's online/shutdown status to match the decision.
 *
 * \return Node the resource ended up allocated to, or NULL
 */
pe_node_t *
pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer,
                      pe_working_set_t *data_set)
{
    GListPtr gIter = NULL;

    if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
        /* never allocate children on their own */
        pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                     rsc->parent->id);
        rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
    }

    // Nothing more to do if the resource has already been allocated
    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to;
    }

    if (is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
        return NULL;
    }

    /* NOTE(review): a line appears to be missing from this extract here
     * (original line 509) -- presumably it sets the pe_rsc_allocating flag
     * that the loop check above and remote-node code below rely on; confirm
     * against the upstream file. */
    pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes);

    // Allocate "this with" colocation partners first, then apply their effect
    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        GHashTable *archive = NULL;
        pe_resource_t *rsc_rh = constraint->rsc_rh;

        if (constraint->score == 0) {
            continue;
        }

        /* Keep a copy of the allowed-node table so scores can be rolled back
         * if applying this constraint leaves the resource nowhere to run */
        if (constraint->role_lh >= RSC_ROLE_MASTER
            || (constraint->score < 0 && constraint->score > -INFINITY)) {
            archive = pcmk__copy_node_table(rsc->allowed_nodes);
        }

        pe_rsc_trace(rsc,
                     "%s: Allocating %s first (constraint=%s score=%d role=%s)",
                     rsc->id, rsc_rh->id, constraint->id,
                     constraint->score, role2text(constraint->role_lh));
        rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
        rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
        if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
            pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
            g_hash_table_destroy(rsc->allowed_nodes);
            rsc->allowed_nodes = archive;
            archive = NULL;
        }
        if (archive) {
            g_hash_table_destroy(archive);
        }
    }

    pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes);

    // Merge in scores from resources colocated with this one ("with this")
    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        if (constraint->score == 0) {
            continue;
        }
        pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
                     constraint->id, constraint->rsc_lh->id,
                     constraint->rsc_rh->id);
        rsc->allowed_nodes =
            constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    (float)constraint->score / INFINITY,
        /* NOTE(review): the final argument(s) and closing of this call are
         * missing from this extract (original line 559) -- confirm against
         * the upstream file. */
    }

    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
        /* make sure it doesn't come up again */
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);

    } else if(rsc->next_role > rsc->role
              && is_set(data_set->flags, pe_flag_have_quorum) == FALSE
              && data_set->no_quorum_policy == no_quorum_freeze) {
        // Role promotion requires quorum under no-quorum-policy=freeze
        crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                   rsc->id, role2text(rsc->role), role2text(rsc->next_role));
        rsc->next_role = rsc->role;
    }

    pe__show_node_weights(!show_scores, rsc, __FUNCTION__, rsc->allowed_nodes);
    if (is_set(data_set->flags, pe_flag_stonith_enabled)
        && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        /* NOTE(review): the body of this check is missing from this extract
         * (original line 578) -- confirm against the upstream file. */
    }

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        // Unmanaged resources are "allocated" to wherever they already are
        const char *reason = NULL;
        pe_node_t *assign_to = NULL;

        rsc->next_role = rsc->role;
        assign_to = pe__current_node(rsc);
        if (assign_to == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_MASTER) {
            reason = "master";
        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                    (assign_to? assign_to->details->uname : "no node"), reason);
        native_assign_node(rsc, NULL, assign_to, TRUE);

    } else if (is_set(data_set->flags, pe_flag_stop_everything)) {
        pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
        native_assign_node(rsc, NULL, NULL, TRUE);

    } else if (is_set(rsc->flags, pe_rsc_provisional)
               && native_choose_node(rsc, prefer, data_set)) {
        pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);

    } else if (rsc->allocated_to == NULL) {
        if (is_not_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);
    }

    /* NOTE(review): a line is missing from this extract here (original line
     * 621) -- presumably it clears the pe_rsc_allocating flag; confirm
     * against the upstream file. */

    if (rsc->is_remote_node) {
        // Keep the Pacemaker Remote node's status in sync with the decision
        pe_node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);

        CRM_ASSERT(remote_node != NULL);
        if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
            crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                      remote_node->details->id);
            remote_node->details->online = TRUE;
            /* We shouldn't consider an unseen remote-node unclean if we are going
             * to try and connect to it. Otherwise we get an unnecessary fence */
            if (remote_node->details->unseen == TRUE) {
                remote_node->details->unclean = FALSE;
            }

        } else {
            crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                      remote_node->details->id, role2text(rsc->next_role),
                      (rsc->allocated_to? "" : "un"));
            remote_node->details->shutdown = TRUE;
        }
    }

    return rsc->allocated_to;
}
647 
648 static gboolean
649 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
650 {
651  gboolean dup = FALSE;
652  const char *id = NULL;
653  const char *value = NULL;
654  xmlNode *operation = NULL;
655  guint interval2_ms = 0;
656 
657  CRM_ASSERT(rsc);
658  for (operation = __xml_first_child_element(rsc->ops_xml); operation != NULL;
659  operation = __xml_next_element(operation)) {
660 
661  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
662  value = crm_element_value(operation, "name");
663  if (safe_str_neq(value, name)) {
664  continue;
665  }
666 
667  value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
668  interval2_ms = crm_parse_interval_spec(value);
669  if (interval_ms != interval2_ms) {
670  continue;
671  }
672 
673  if (id == NULL) {
674  id = ID(operation);
675 
676  } else {
677  pcmk__config_err("Operation %s is duplicate of %s (do not use "
678  "same name and interval combination more "
679  "than once per resource)", ID(operation), id);
680  dup = TRUE;
681  }
682  }
683  }
684 
685  return dup;
686 }
687 
688 static bool
689 op_cannot_recur(const char *name)
690 {
691  return safe_str_eq(name, RSC_STOP)
692  || safe_str_eq(name, RSC_START)
693  || safe_str_eq(name, RSC_DEMOTE)
694  || safe_str_eq(name, RSC_PROMOTE);
695 }
696 
/*!
 * \internal
 * \brief Create a recurring action for a resource from one <op> definition,
 *        for any configured role other than Stopped
 *
 * \param[in] rsc       Resource the recurring action is for
 * \param[in] start     The resource's start action (or NULL); its optionality
 *                      and runnability shape the recurring action's
 * \param[in] node      Node the resource will be active on (or NULL)
 * \param[in] operation <op> XML from the resource's configuration
 * \param[in] data_set  Cluster working set
 */
static void
RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
            xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    pe_action_t *mon = NULL;
    gboolean is_optional = TRUE;
    GListPtr possible_matches = NULL;

    CRM_ASSERT(rsc);

    /* Only process for the operations without role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role && text2role(role) == RSC_ROLE_STOPPED) {
        return;
    }

    // A zero interval means the operation is not recurring
    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %dms %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                         ID(operation), name);
        return;
    }

    key = pcmk__op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        // Disabled or otherwise filtered out of the operation list
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                 ID(operation), rsc->id, role2text(rsc->next_role), node_uname);

    // The monitor is optional only if the start (if any) is optional
    if (start != NULL) {
        pe_rsc_trace(rsc, "Marking %s %s due to %s",
                     key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
                     start->uuid);
        is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
    } else {
        pe_rsc_trace(rsc, "Marking %s optional", key);
        is_optional = TRUE;
    }

    /* start a monitor for an already active resource */
    possible_matches = find_actions_exact(rsc->actions, key, node);
    if (possible_matches == NULL) {
        is_optional = FALSE;
        pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);

    } else {
        GListPtr gIter = NULL;

        // An existing monitor flagged for rescheduling makes this mandatory
        for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
            pe_action_t *op = (pe_action_t *) gIter->data;

            if (is_set(op->flags, pe_action_reschedule)) {
                is_optional = FALSE;
                break;
            }
        }
        g_list_free(possible_matches);
    }

    /* If the monitor's configured role doesn't match the resource's next
     * role, cancel it if it's currently running, otherwise just ignore it */
    if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
        || (role != NULL && text2role(role) != rsc->next_role)) {
        int log_level = LOG_TRACE;
        const char *result = "Ignoring";

        if (is_optional) {
            char *after_key = NULL;
            pe_action_t *cancel_op = NULL;

            // It's running, so cancel it
            log_level = LOG_INFO;
            result = "Cancelling";
            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            // Order the cancellation before the conflicting role transition
            switch (rsc->role) {
                case RSC_ROLE_SLAVE:
                case RSC_ROLE_STARTED:
                    if (rsc->next_role == RSC_ROLE_MASTER) {
                        after_key = promote_key(rsc);

                    } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                        after_key = stop_key(rsc);
                    }

                    break;
                case RSC_ROLE_MASTER:
                    after_key = demote_key(rsc);
                    break;
                default:
                    break;
            }

            if (after_key) {
                custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
            }
        }

        do_crm_log(log_level, "%s action %s (%s vs. %s)",
                   result, key, role ? role : role2text(RSC_ROLE_SLAVE),
                   role2text(rsc->next_role));

        free(key);
        return;
    }

    mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
    // key's ownership passed to mon; reuse mon's uuid from here on
    key = mon->uuid;
    if (is_optional) {
        pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
    }

    // A monitor can't run if its start can't, or if there's no usable node
    if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (is_set(mon->flags, pe_action_optional) == FALSE) {
        pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                    mon->task, interval_ms / 1000, rsc->id, node_uname);
    }

    // Master-role monitors expect "running master" rather than plain success
    if (rsc->next_role == RSC_ROLE_MASTER) {
        char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);

        add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
        free(running_master);
    }

    // Order the monitor after start/reload/promote (and demote before it)
    if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
        custom_action_order(rsc, start_key(rsc), NULL,
                            NULL, strdup(key), mon,
        /* NOTE(review): the ordering-flag and data_set arguments of these
         * four custom_action_order() calls are missing from this extract
         * (original lines 857, 861, 866, 871) -- confirm against the
         * upstream file before building. */

        custom_action_order(rsc, reload_key(rsc), NULL,
                            NULL, strdup(key), mon,

        if (rsc->next_role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, promote_key(rsc), NULL,
                                rsc, NULL, mon,

        } else if (rsc->role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, demote_key(rsc), NULL,
                                rsc, NULL, mon,
        }
    }
}
875 
876 static void
877 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
878 {
879  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
880  (node == NULL || node->details->maintenance == FALSE)) {
881  xmlNode *operation = NULL;
882 
883  for (operation = __xml_first_child_element(rsc->ops_xml);
884  operation != NULL;
885  operation = __xml_next_element(operation)) {
886 
887  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
888  RecurringOp(rsc, start, node, operation, data_set);
889  }
890  }
891  }
892 }
893 
894 static void
895 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
896  xmlNode * operation, pe_working_set_t * data_set)
897 {
898  char *key = NULL;
899  const char *name = NULL;
900  const char *role = NULL;
901  const char *interval_spec = NULL;
902  const char *node_uname = node? node->details->uname : "n/a";
903 
904  guint interval_ms = 0;
905  GListPtr possible_matches = NULL;
906  GListPtr gIter = NULL;
907 
908  /* Only process for the operations with role="Stopped" */
909  role = crm_element_value(operation, "role");
910  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
911  return;
912  }
913 
914  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
915  interval_ms = crm_parse_interval_spec(interval_spec);
916  if (interval_ms == 0) {
917  return;
918  }
919 
920  name = crm_element_value(operation, "name");
921  if (is_op_dup(rsc, name, interval_ms)) {
922  crm_trace("Not creating duplicate recurring action %s for %dms %s",
923  ID(operation), interval_ms, name);
924  return;
925  }
926 
927  if (op_cannot_recur(name)) {
928  pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
929  ID(operation), name);
930  return;
931  }
932 
933  key = pcmk__op_key(rsc->id, name, interval_ms);
934  if (find_rsc_op_entry(rsc, key) == NULL) {
935  crm_trace("Not creating recurring action %s for disabled resource %s",
936  ID(operation), rsc->id);
937  free(key);
938  return;
939  }
940 
941  // @TODO add support
942  if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
943  crm_notice("Ignoring %s (recurring monitors for Stopped role are "
944  "not supported for anonymous clones)",
945  ID(operation));
946  return;
947  }
948 
949  pe_rsc_trace(rsc,
950  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
951  ID(operation), rsc->id, role2text(rsc->next_role));
952 
953  /* if the monitor exists on the node where the resource will be running, cancel it */
954  if (node != NULL) {
955  possible_matches = find_actions_exact(rsc->actions, key, node);
956  if (possible_matches) {
957  pe_action_t *cancel_op = NULL;
958 
959  g_list_free(possible_matches);
960 
961  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
962 
963  if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
964  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
965  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
966  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
967  pe_order_runnable_left, data_set);
968  }
969 
970  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
971  key, role, role2text(rsc->next_role), node_uname);
972  }
973  }
974 
975  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
976  pe_node_t *stop_node = (pe_node_t *) gIter->data;
977  const char *stop_node_uname = stop_node->details->uname;
978  gboolean is_optional = TRUE;
979  gboolean probe_is_optional = TRUE;
980  gboolean stop_is_optional = TRUE;
981  pe_action_t *stopped_mon = NULL;
982  char *rc_inactive = NULL;
983  GListPtr probe_complete_ops = NULL;
984  GListPtr stop_ops = NULL;
985  GListPtr local_gIter = NULL;
986 
987  if (node && safe_str_eq(stop_node_uname, node_uname)) {
988  continue;
989  }
990 
991  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
992  ID(operation), rsc->id, crm_str(stop_node_uname));
993 
994  /* start a monitor for an already stopped resource */
995  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
996  if (possible_matches == NULL) {
997  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
998  crm_str(stop_node_uname));
999  is_optional = FALSE;
1000  } else {
1001  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
1002  crm_str(stop_node_uname));
1003  is_optional = TRUE;
1004  g_list_free(possible_matches);
1005  }
1006 
1007  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
1008 
1009  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
1010  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
1011  free(rc_inactive);
1012 
1013  if (is_set(rsc->flags, pe_rsc_managed)) {
1014  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
1015  FALSE);
1016  GListPtr pIter = NULL;
1017 
1018  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
1019  pe_action_t *probe = (pe_action_t *) pIter->data;
1020 
1021  order_actions(probe, stopped_mon, pe_order_runnable_left);
1022  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
1023  }
1024 
1025  g_list_free(probes);
1026  }
1027 
1028  if (probe_complete_ops) {
1029  g_list_free(probe_complete_ops);
1030  }
1031 
1032  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
1033 
1034  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
1035  pe_action_t *stop = (pe_action_t *) local_gIter->data;
1036 
1037  if (is_set(stop->flags, pe_action_optional) == FALSE) {
1038  stop_is_optional = FALSE;
1039  }
1040 
1041  if (is_set(stop->flags, pe_action_runnable) == FALSE) {
1042  crm_debug("%s\t %s (cancelled : stop un-runnable)",
1043  crm_str(stop_node_uname), stopped_mon->uuid);
1044  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1045  }
1046 
1047  if (is_set(rsc->flags, pe_rsc_managed)) {
1048  custom_action_order(rsc, stop_key(rsc), stop,
1049  NULL, strdup(key), stopped_mon,
1051  }
1052 
1053  }
1054 
1055  if (stop_ops) {
1056  g_list_free(stop_ops);
1057  }
1058 
1059  if (is_optional == FALSE && probe_is_optional && stop_is_optional
1060  && is_set(rsc->flags, pe_rsc_managed) == FALSE) {
1061  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
1062  key, crm_str(stop_node_uname));
1063  update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
1064  }
1065 
1066  if (is_set(stopped_mon->flags, pe_action_optional)) {
1067  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1068  }
1069 
1070  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1071  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1072  crm_str(stop_node_uname), stopped_mon->uuid);
1073  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1074  }
1075 
1076  if (is_set(stopped_mon->flags, pe_action_runnable)
1077  && is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
1078  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1079  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1080  }
1081  }
1082 
1083  free(key);
1084 }
1085 
1086 static void
1087 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
1088 {
1089  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
1090  (node == NULL || node->details->maintenance == FALSE)) {
1091  xmlNode *operation = NULL;
1092 
1093  for (operation = __xml_first_child_element(rsc->ops_xml);
1094  operation != NULL;
1095  operation = __xml_next_element(operation)) {
1096 
1097  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
1098  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1099  }
1100  }
1101  }
1102 }
1103 
1104 static void
1105 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
1106 {
1107  pe_action_t *migrate_to = NULL;
1108  pe_action_t *migrate_from = NULL;
1109  pe_action_t *start = NULL;
1110  pe_action_t *stop = NULL;
1111  gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
1112 
1113  pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
1114  rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
1115  start = start_action(rsc, chosen, TRUE);
1116  stop = stop_action(rsc, current, TRUE);
1117 
1118  if (partial == FALSE) {
1119  migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1120  RSC_MIGRATE, current, TRUE, TRUE, data_set);
1121  }
1122 
1123  migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1124  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
1125 
1126  if ((migrate_to && migrate_from) || (migrate_from && partial)) {
1127 
1130 
1131  update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */
1132 
1133  /* order probes before migrations */
1134  if (partial) {
1135  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1136  migrate_from->needs = start->needs;
1137 
1138  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1139  rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
1140  NULL, pe_order_optional, data_set);
1141 
1142  } else {
1143  set_bit(migrate_from->flags, pe_action_migrate_runnable);
1145  migrate_to->needs = start->needs;
1146 
1147  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
1148  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1149  NULL, pe_order_optional, data_set);
1151  NULL, rsc,
1152  pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1154  data_set);
1155  }
1156 
1157  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1158  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1160  data_set);
1161  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
1162  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1164  data_set);
1165 
1166  }
1167 
1168  if (migrate_to) {
1169  add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1171 
1172  /* Pacemaker Remote connections don't require pending to be recorded in
1173  * the CIB. We can reduce CIB writes by not setting PENDING for them.
1174  */
1175  if (rsc->is_remote_node == FALSE) {
1176  /* migrate_to takes place on the source node, but can
1177  * have an effect on the target node depending on how
1178  * the agent is written. Because of this, we have to maintain
1179  * a record that the migrate_to occurred, in case the source node
1180  * loses membership while the migrate_to action is still in-flight.
1181  */
1182  add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
1183  }
1184  }
1185 
1186  if (migrate_from) {
1187  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
1188  add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
1189  }
1190 }
1191 
1192 void
1194 {
1195  pe_action_t *start = NULL;
1196  pe_node_t *chosen = NULL;
1197  pe_node_t *current = NULL;
1198  gboolean need_stop = FALSE;
1199  gboolean is_moving = FALSE;
1200  gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;
1201 
1202  GListPtr gIter = NULL;
1203  unsigned int num_all_active = 0;
1204  unsigned int num_clean_active = 0;
1205  bool multiply_active = FALSE;
1206  enum rsc_role_e role = RSC_ROLE_UNKNOWN;
1207  enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
1208 
1209  CRM_ASSERT(rsc);
1210  chosen = rsc->allocated_to;
1211  if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
1212  rsc->next_role = RSC_ROLE_STARTED;
1213  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1214 
1215  } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
1216  rsc->next_role = RSC_ROLE_STOPPED;
1217  pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
1218  }
1219 
1220  pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
1221  role2text(rsc->role), role2text(rsc->next_role));
1222 
1223  current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
1224 
1225  for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
1226  pe_node_t *dangling_source = (pe_node_t *) gIter->data;
1227 
1228  pe_action_t *stop = stop_action(rsc, dangling_source, FALSE);
1229 
1230  set_bit(stop->flags, pe_action_dangle);
1231  pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
1232  rsc->id, dangling_source->details->uname);
1233 
1234  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
1235  DeleteRsc(rsc, dangling_source, FALSE, data_set);
1236  }
1237  }
1238 
1239  if ((num_all_active == 2) && (num_clean_active == 2) && chosen
1241  && (current->details == rsc->partial_migration_source->details)
1242  && (chosen->details == rsc->partial_migration_target->details)) {
1243 
1244  /* The chosen node is still the migration target from a partial
1245  * migration. Attempt to continue the migration instead of recovering
1246  * by stopping the resource everywhere and starting it on a single node.
1247  */
1248  pe_rsc_trace(rsc,
1249  "Will attempt to continue with a partial migration to target %s from %s",
1252 
1253  } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
1254  /* If a resource has "requires" set to nothing or quorum, don't consider
1255  * it active on unclean nodes (similar to how all resources behave when
1256  * stonith-enabled is false). We can start such resources elsewhere
1257  * before fencing completes, and if we considered the resource active on
1258  * the failed node, we would attempt recovery for being active on
1259  * multiple nodes.
1260  */
1261  multiply_active = (num_clean_active > 1);
1262  } else {
1263  multiply_active = (num_all_active > 1);
1264  }
1265 
1266  if (multiply_active) {
1268  // Migration was in progress, but we've chosen a different target
1269  crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
1272 
1273  } else {
1274  // Resource was incorrectly multiply active
1275  pe_proc_err("Resource %s is active on %u nodes (%s)",
1276  rsc->id, num_all_active,
1277  recovery2text(rsc->recovery_type));
1278  crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
1279  }
1280 
1281  if (rsc->recovery_type == recovery_stop_start) {
1282  need_stop = TRUE;
1283  }
1284 
1285  /* If by chance a partial migration is in process, but the migration
1286  * target is not chosen still, clear all partial migration data.
1287  */
1289  allow_migrate = FALSE;
1290  }
1291 
1292  if (is_set(rsc->flags, pe_rsc_start_pending)) {
1293  start = start_action(rsc, chosen, TRUE);
1295  }
1296 
1297  if (current && chosen && current->details != chosen->details) {
1298  pe_rsc_trace(rsc, "Moving %s", rsc->id);
1299  is_moving = TRUE;
1300  need_stop = TRUE;
1301 
1302  } else if (is_set(rsc->flags, pe_rsc_failed)) {
1303  pe_rsc_trace(rsc, "Recovering %s", rsc->id);
1304  need_stop = TRUE;
1305 
1306  } else if (is_set(rsc->flags, pe_rsc_block)) {
1307  pe_rsc_trace(rsc, "Block %s", rsc->id);
1308  need_stop = TRUE;
1309 
1310  } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
1311  /* Recovery of a promoted resource */
1312  start = start_action(rsc, chosen, TRUE);
1313  if (is_set(start->flags, pe_action_optional) == FALSE) {
1314  pe_rsc_trace(rsc, "Forced start %s", rsc->id);
1315  need_stop = TRUE;
1316  }
1317  }
1318 
1319  pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
1320  role2text(rsc->role), role2text(rsc->next_role));
1321 
1322  /* Create any additional actions required when bringing resource down and
1323  * back up to same level.
1324  */
1325  role = rsc->role;
1326  while (role != RSC_ROLE_STOPPED) {
1327  next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
1328  pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1329  rsc->id, need_stop ? " required" : "");
1330  if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
1331  break;
1332  }
1333  role = next_role;
1334  }
1335 
1336 
1337  while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
1338  next_role = rsc_state_matrix[role][rsc->role];
1339  pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
1340  rsc->id, need_stop ? " required" : "");
1341  if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
1342  break;
1343  }
1344  role = next_role;
1345  }
1346  role = rsc->role;
1347 
1348  /* Required steps from this role to the next */
1349  while (role != rsc->next_role) {
1350  next_role = rsc_state_matrix[role][rsc->next_role];
1351  pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
1352  if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
1353  break;
1354  }
1355  role = next_role;
1356  }
1357 
1358  if(is_set(rsc->flags, pe_rsc_block)) {
1359  pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");
1360 
1361  } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
1362  pe_rsc_trace(rsc, "Monitor ops for active resource");
1363  start = start_action(rsc, chosen, TRUE);
1364  Recurring(rsc, start, chosen, data_set);
1365  Recurring_Stopped(rsc, start, chosen, data_set);
1366  } else {
1367  pe_rsc_trace(rsc, "Monitor ops for inactive resource");
1368  Recurring_Stopped(rsc, NULL, NULL, data_set);
1369  }
1370 
1371  /* if we are stuck in a partial migration, where the target
1372  * of the partial migration no longer matches the chosen target.
1373  * A full stop/start is required */
1374  if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
1375  pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
1376  allow_migrate = FALSE;
1377 
1378  } else if (is_moving == FALSE ||
1379  is_not_set(rsc->flags, pe_rsc_managed) ||
1380  is_set(rsc->flags, pe_rsc_failed) ||
1381  is_set(rsc->flags, pe_rsc_start_pending) ||
1382  (current && current->details->unclean) ||
1383  rsc->next_role < RSC_ROLE_STARTED) {
1384 
1385  allow_migrate = FALSE;
1386  }
1387 
1388  if (allow_migrate) {
1389  handle_migration_actions(rsc, current, chosen, data_set);
1390  }
1391 }
1392 
1393 static void
1394 rsc_avoids_remote_nodes(pe_resource_t *rsc)
1395 {
1396  GHashTableIter iter;
1397  pe_node_t *node = NULL;
1398  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1399  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1400  if (node->details->remote_rsc) {
1401  node->weight = -INFINITY;
1402  }
1403  }
1404 }
1405 
1420 static GList *
1421 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1422 {
1423  GList *allowed_nodes = NULL;
1424 
1425  if (rsc->allowed_nodes) {
1426  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1427  }
1428 
1429  if (is_set(data_set->flags, pe_flag_stdout)) {
1430  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1431  }
1432  return allowed_nodes;
1433 }
1434 
1435 void
1437 {
1438  /* This function is on the critical path and worth optimizing as much as possible */
1439 
1440  pe_resource_t *top = NULL;
1441  GList *allowed_nodes = NULL;
1442  bool check_unfencing = FALSE;
1443  bool check_utilization = FALSE;
1444 
1445  if (is_not_set(rsc->flags, pe_rsc_managed)) {
1446  pe_rsc_trace(rsc,
1447  "Skipping native constraints for unmanaged resource: %s",
1448  rsc->id);
1449  return;
1450  }
1451 
1452  top = uber_parent(rsc);
1453 
1454  // Whether resource requires unfencing
1455  check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
1456  && is_set(data_set->flags, pe_flag_enable_unfencing)
1457  && is_set(rsc->flags, pe_rsc_needs_unfencing);
1458 
1459  // Whether a non-default placement strategy is used
1460  check_utilization = (g_hash_table_size(rsc->utilization) > 0)
1461  && safe_str_neq(data_set->placement_strategy, "default");
1462 
1463  // Order stops before starts (i.e. restart)
1464  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1465  rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1467  data_set);
1468 
1469  // Promotable ordering: demote before stop, start before promote
1470  if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
1471  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
1472  rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1473  pe_order_implies_first_master, data_set);
1474 
1475  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
1476  rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
1477  pe_order_runnable_left, data_set);
1478  }
1479 
1480  // Don't clear resource history if probing on same node
1482  NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
1484  data_set);
1485 
1486  // Certain checks need allowed nodes
1487  if (check_unfencing || check_utilization || rsc->container) {
1488  allowed_nodes = allowed_nodes_as_list(rsc, data_set);
1489  }
1490 
1491  if (check_unfencing) {
1492  /* Check if the node needs to be unfenced first */
1493 
1494  for (GList *item = allowed_nodes; item; item = item->next) {
1495  pe_node_t *node = item->data;
1496  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
1497 
1498  crm_debug("Ordering any stops of %s before %s, and any starts after",
1499  rsc->id, unfence->uuid);
1500 
1501  /*
1502  * It would be more efficient to order clone resources once,
1503  * rather than order each instance, but ordering the instance
1504  * allows us to avoid unnecessary dependencies that might conflict
1505  * with user constraints.
1506  *
1507  * @TODO: This constraint can still produce a transition loop if the
1508  * resource has a stop scheduled on the node being unfenced, and
1509  * there is a user ordering constraint to start some other resource
1510  * (which will be ordered after the unfence) before stopping this
1511  * resource. An example is "start some slow-starting cloned service
1512  * before stopping an associated virtual IP that may be moving to
1513  * it":
1514  * stop this -> unfencing -> start that -> stop this
1515  */
1516  custom_action_order(rsc, stop_key(rsc), NULL,
1517  NULL, strdup(unfence->uuid), unfence,
1519 
1520  custom_action_order(NULL, strdup(unfence->uuid), unfence,
1521  rsc, start_key(rsc), NULL,
1523  data_set);
1524  }
1525  }
1526 
1527  if (check_utilization) {
1528  GListPtr gIter = NULL;
1529 
1530  pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
1531  rsc->id, data_set->placement_strategy);
1532 
1533  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
1534  pe_node_t *current = (pe_node_t *) gIter->data;
1535 
1536  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1537  current->details->uname);
1538  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1539 
1540  if (load_stopped->node == NULL) {
1541  load_stopped->node = pe__copy_node(current);
1542  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1543  }
1544 
1545  custom_action_order(rsc, stop_key(rsc), NULL,
1546  NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
1547  }
1548 
1549  for (GList *item = allowed_nodes; item; item = item->next) {
1550  pe_node_t *next = item->data;
1551  char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
1552  next->details->uname);
1553  pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
1554 
1555  if (load_stopped->node == NULL) {
1556  load_stopped->node = pe__copy_node(next);
1557  update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
1558  }
1559 
1560  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1561  rsc, start_key(rsc), NULL, pe_order_load, data_set);
1562 
1563  custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
1564  rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
1565  NULL, pe_order_load, data_set);
1566 
1567  free(load_stopped_task);
1568  }
1569  }
1570 
1571  if (rsc->container) {
1572  pe_resource_t *remote_rsc = NULL;
1573 
1574  if (rsc->is_remote_node) {
1575  // rsc is the implicit remote connection for a guest or bundle node
1576 
1577  /* Do not allow a guest resource to live on a Pacemaker Remote node,
1578  * to avoid nesting remotes. However, allow bundles to run on remote
1579  * nodes.
1580  */
1581  if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1582  rsc_avoids_remote_nodes(rsc->container);
1583  }
1584 
1585  /* If someone cleans up a guest or bundle node's container, we will
1586  * likely schedule a (re-)probe of the container and recovery of the
1587  * connection. Order the connection stop after the container probe,
1588  * so that if we detect the container running, we will trigger a new
1589  * transition and avoid the unnecessary recovery.
1590  */
1592  pe_order_optional, data_set);
1593 
1594  /* A user can specify that a resource must start on a Pacemaker Remote
1595  * node by explicitly configuring it with the container=NODENAME
1596  * meta-attribute. This is of questionable merit, since location
1597  * constraints can accomplish the same thing. But we support it, so here
1598  * we check whether a resource (that is not itself a remote connection)
1599  * has container set to a remote node or guest node resource.
1600  */
1601  } else if (rsc->container->is_remote_node) {
1602  remote_rsc = rsc->container;
1603  } else {
1604  remote_rsc = pe__resource_contains_guest_node(data_set,
1605  rsc->container);
1606  }
1607 
1608  if (remote_rsc) {
1609  /* Force the resource on the Pacemaker Remote node instead of
1610  * colocating the resource with the container resource.
1611  */
1612  for (GList *item = allowed_nodes; item; item = item->next) {
1613  pe_node_t *node = item->data;
1614 
1615  if (node->details->remote_rsc != remote_rsc) {
1616  node->weight = -INFINITY;
1617  }
1618  }
1619 
1620  } else {
1621  /* This resource is either a filler for a container that does NOT
1622  * represent a Pacemaker Remote node, or a Pacemaker Remote
1623  * connection resource for a guest node or bundle.
1624  */
1625  int score;
1626 
1627  crm_trace("Order and colocate %s relative to its container %s",
1628  rsc->id, rsc->container->id);
1629 
1631  pcmk__op_key(rsc->container->id, RSC_START, 0),
1632  NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
1633  NULL,
1635  data_set);
1636 
1637  custom_action_order(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
1638  rsc->container,
1639  pcmk__op_key(rsc->container->id, RSC_STOP, 0),
1640  NULL, pe_order_implies_first, data_set);
1641 
1642  if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
1643  score = 10000; /* Highly preferred but not essential */
1644  } else {
1645  score = INFINITY; /* Force them to run on the same host */
1646  }
1647  rsc_colocation_new("resource-with-container", NULL, score, rsc,
1648  rsc->container, NULL, NULL, data_set);
1649  }
1650  }
1651 
1652  if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
1653  /* don't allow remote nodes to run stonith devices
1654  * or remote connection resources.*/
1655  rsc_avoids_remote_nodes(rsc);
1656  }
1657  g_list_free(allowed_nodes);
1658 }
1659 
1660 void
1662  rsc_colocation_t *constraint,
1663  pe_working_set_t *data_set)
1664 {
1665  if (rsc_lh == NULL) {
1666  pe_err("rsc_lh was NULL for %s", constraint->id);
1667  return;
1668 
1669  } else if (constraint->rsc_rh == NULL) {
1670  pe_err("rsc_rh was NULL for %s", constraint->id);
1671  return;
1672  }
1673 
1674  if (constraint->score == 0) {
1675  return;
1676  }
1677  pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
1678  rsc_rh->id);
1679 
1680  rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
1681 }
1682 
1685  rsc_colocation_t * constraint, gboolean preview)
1686 {
1687  if (constraint->score == 0) {
1688  return influence_nothing;
1689  }
1690 
1691  /* rh side must be allocated before we can process constraint */
1692  if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
1693  return influence_nothing;
1694  }
1695 
1696  if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
1697  rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
1698  && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1699 
1700  /* LH and RH resources have already been allocated, place the correct
1701  * priority on LH rsc for the given promotable clone resource role */
1702  return influence_rsc_priority;
1703  }
1704 
1705  if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
1706  // Log an error if we violated a mandatory colocation constraint
1707  const pe_node_t *rh_node = rsc_rh->allocated_to;
1708 
1709  if (rsc_lh->allocated_to == NULL) {
1710  // Dependent resource isn't allocated, so constraint doesn't matter
1711  return influence_nothing;
1712  }
1713 
1714  if (constraint->score >= INFINITY) {
1715  // Dependent resource must colocate with rh_node
1716 
1717  if ((rh_node == NULL)
1718  || (rh_node->details != rsc_lh->allocated_to->details)) {
1719  crm_err("%s must be colocated with %s but is not (%s vs. %s)",
1720  rsc_lh->id, rsc_rh->id,
1721  rsc_lh->allocated_to->details->uname,
1722  (rh_node? rh_node->details->uname : "unallocated"));
1723  }
1724 
1725  } else if (constraint->score <= -INFINITY) {
1726  // Dependent resource must anti-colocate with rh_node
1727 
1728  if ((rh_node != NULL)
1729  && (rsc_lh->allocated_to->details == rh_node->details)) {
1730  crm_err("%s and %s must be anti-colocated but are allocated "
1731  "to the same node (%s)",
1732  rsc_lh->id, rsc_rh->id, rh_node->details->uname);
1733  }
1734  }
1735  return influence_nothing;
1736  }
1737 
1738  if (constraint->score > 0
1739  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
1740  crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
1741  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
1742  return influence_nothing;
1743  }
1744 
1745  if (constraint->score > 0
1746  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
1747  crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
1748  return influence_nothing;
1749  }
1750 
1751  if (constraint->score < 0
1752  && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
1753  crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
1754  role2text(constraint->role_lh));
1755  return influence_nothing;
1756  }
1757 
1758  if (constraint->score < 0
1759  && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
1760  crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
1761  role2text(constraint->role_rh));
1762  return influence_nothing;
1763  }
1764 
1765  return influence_rsc_location;
1766 }
1767 
1768 static void
1769 influence_priority(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1770 {
1771  const char *rh_value = NULL;
1772  const char *lh_value = NULL;
1773  const char *attribute = CRM_ATTR_ID;
1774  int score_multiplier = 1;
1775 
1776  if (constraint->score == 0) {
1777  return;
1778  }
1779  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1780  return;
1781  }
1782 
1783  if (constraint->node_attribute != NULL) {
1784  attribute = constraint->node_attribute;
1785  }
1786 
1787  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1788  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1789 
1790  if (!safe_str_eq(lh_value, rh_value)) {
1791  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1792  rsc_lh->priority = -INFINITY;
1793  }
1794  return;
1795  }
1796 
1797  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1798  return;
1799  }
1800 
1801  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1802  score_multiplier = -1;
1803  }
1804 
1805  rsc_lh->priority = pe__add_scores(score_multiplier * constraint->score,
1806  rsc_lh->priority);
1807 }
1808 
1809 static void
1810 colocation_match(pe_resource_t * rsc_lh, pe_resource_t * rsc_rh, rsc_colocation_t * constraint)
1811 {
1812  const char *attribute = CRM_ATTR_ID;
1813  const char *value = NULL;
1814  GHashTable *work = NULL;
1815  GHashTableIter iter;
1816  pe_node_t *node = NULL;
1817 
1818  if (constraint->score == 0) {
1819  return;
1820  }
1821  if (constraint->node_attribute != NULL) {
1822  attribute = constraint->node_attribute;
1823  }
1824 
1825  if (rsc_rh->allocated_to) {
1826  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1827 
1828  } else if (constraint->score < 0) {
1829  // Nothing to do (anti-colocation with something that is not running)
1830  return;
1831  }
1832 
1833  work = pcmk__copy_node_table(rsc_lh->allowed_nodes);
1834 
1835  g_hash_table_iter_init(&iter, work);
1836  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1837  if (rsc_rh->allocated_to == NULL) {
1838  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s inactive)",
1839  constraint->id, rsc_lh->id, node->details->uname,
1840  constraint->score, rsc_rh->id);
1841  node->weight = pe__add_scores(-constraint->score, node->weight);
1842 
1843  } else if (safe_str_eq(pe_node_attribute_raw(node, attribute), value)) {
1844  if (constraint->score < CRM_SCORE_INFINITY) {
1845  pe_rsc_trace(rsc_lh, "%s: %s@%s += %d",
1846  constraint->id, rsc_lh->id,
1847  node->details->uname, constraint->score);
1848  node->weight = pe__add_scores(constraint->score, node->weight);
1849  }
1850 
1851  } else if (constraint->score >= CRM_SCORE_INFINITY) {
1852  pe_rsc_trace(rsc_lh, "%s: %s@%s -= %d (%s mismatch)",
1853  constraint->id, rsc_lh->id, node->details->uname,
1854  constraint->score, attribute);
1855  node->weight = pe__add_scores(-constraint->score, node->weight);
1856  }
1857  }
1858 
1859  if (can_run_any(work)
1860  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1861  g_hash_table_destroy(rsc_lh->allowed_nodes);
1862  rsc_lh->allowed_nodes = work;
1863  work = NULL;
1864 
1865  } else {
1866  pe_rsc_info(rsc_lh,
1867  "%s: Rolling back scores from %s (no available nodes)",
1868  rsc_lh->id, rsc_rh->id);
1869  }
1870 
1871  if (work) {
1872  g_hash_table_destroy(work);
1873  }
1874 }
1875 
1876 void
1878  rsc_colocation_t *constraint,
1879  pe_working_set_t *data_set)
1880 {
1881  enum filter_colocation_res filter_results;
1882 
1883  CRM_ASSERT(rsc_lh);
1884  CRM_ASSERT(rsc_rh);
1885  filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
1886  pe_rsc_trace(rsc_lh, "%s %s with %s (%s, score=%d, filter=%d)",
1887  ((constraint->score >= 0)? "Colocating" : "Anti-colocating"),
1888  rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);
1889 
1890  switch (filter_results) {
1892  influence_priority(rsc_lh, rsc_rh, constraint);
1893  break;
1895  colocation_match(rsc_lh, rsc_rh, constraint);
1896  break;
1897  case influence_nothing:
1898  default:
1899  return;
1900  }
1901 }
1902 
1903 static gboolean
1904 filter_rsc_ticket(pe_resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1905 {
1906  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1907  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1908  role2text(rsc_ticket->role_lh));
1909  return FALSE;
1910  }
1911 
1912  return TRUE;
1913 }
1914 
1915 void
1917 {
1918  if (rsc_ticket == NULL) {
1919  pe_err("rsc_ticket was NULL");
1920  return;
1921  }
1922 
1923  if (rsc_lh == NULL) {
1924  pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
1925  return;
1926  }
1927 
1928  if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
1929  return;
1930  }
1931 
1932  if (rsc_lh->children) {
1933  GListPtr gIter = rsc_lh->children;
1934 
1935  pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);
1936 
1937  for (; gIter != NULL; gIter = gIter->next) {
1938  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
1939 
1940  rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
1941  }
1942  return;
1943  }
1944 
1945  pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
1946  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
1947  role2text(rsc_ticket->role_lh));
1948 
1949  if ((rsc_ticket->ticket->granted == FALSE)
1950  && (rsc_lh->running_on != NULL)) {
1951 
1952  GListPtr gIter = NULL;
1953 
1954  switch (rsc_ticket->loss_policy) {
1955  case loss_ticket_stop:
1956  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1957  break;
1958 
1959  case loss_ticket_demote:
1960  // Promotion score will be set to -INFINITY in promotion_order()
1961  if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
1962  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1963  }
1964  break;
1965 
1966  case loss_ticket_fence:
1967  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1968  return;
1969  }
1970 
1971  resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
1972 
1973  for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
1974  pe_node_t *node = (pe_node_t *) gIter->data;
1975 
1976  pe_fence_node(data_set, node, "deadman ticket was lost", FALSE);
1977  }
1978  break;
1979 
1980  case loss_ticket_freeze:
1981  if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
1982  return;
1983  }
1984  if (rsc_lh->running_on != NULL) {
1985  clear_bit(rsc_lh->flags, pe_rsc_managed);
1986  set_bit(rsc_lh->flags, pe_rsc_block);
1987  }
1988  break;
1989  }
1990 
1991  } else if (rsc_ticket->ticket->granted == FALSE) {
1992 
1993  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
1994  resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
1995  }
1996 
1997  } else if (rsc_ticket->ticket->standby) {
1998 
1999  if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
2000  resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
2001  }
2002  }
2003 }
2004 
2005 enum pe_action_flags
2007 {
2008  return action->flags;
2009 }
2010 
2011 static inline bool
2012 is_primitive_action(pe_action_t *action)
2013 {
2014  return action && action->rsc && (action->rsc->variant == pe_native);
2015 }
2016 
2028 static void
2029 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
2030  enum pe_action_flags filter)
2031 {
2032  const char *reason = NULL;
2033 
2034  CRM_ASSERT(is_primitive_action(first));
2035  CRM_ASSERT(is_primitive_action(then));
2036 
2037  // We need to update the action in two cases:
2038 
2039  // ... if 'then' is required
2040  if (is_set(filter, pe_action_optional)
2041  && is_not_set(then->flags, pe_action_optional)) {
2042  reason = "restart";
2043  }
2044 
2045  /* ... if 'then' is unrunnable start of managed resource (if a resource
2046  * should restart but can't start, we still want to stop)
2047  */
2048  if (is_set(filter, pe_action_runnable)
2049  && is_not_set(then->flags, pe_action_runnable)
2050  && is_set(then->rsc->flags, pe_rsc_managed)
2051  && safe_str_eq(then->task, RSC_START)) {
2052  reason = "stop";
2053  }
2054 
2055  if (reason == NULL) {
2056  return;
2057  }
2058 
2059  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
2060  first->uuid, then->uuid, reason);
2061 
2062  // Make 'first' required if it is runnable
2063  if (is_set(first->flags, pe_action_runnable)) {
2064  pe_action_implies(first, then, pe_action_optional);
2065  }
2066 
2067  // Make 'first' required if 'then' is required
2068  if (is_not_set(then->flags, pe_action_optional)) {
2069  pe_action_implies(first, then, pe_action_optional);
2070  }
2071 
2072  // Make 'first' unmigratable if 'then' is unmigratable
2073  if (is_not_set(then->flags, pe_action_migrate_runnable)) {
2075  }
2076 
2077  // Make 'then' unrunnable if 'first' is required but unrunnable
2078  if (is_not_set(first->flags, pe_action_optional)
2079  && is_not_set(first->flags, pe_action_runnable)) {
2080  pe_action_implies(then, first, pe_action_runnable);
2081  }
2082 }
2083 
2084 enum pe_graph_flags
2085 native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
2086  enum pe_action_flags flags, enum pe_action_flags filter,
2087  enum pe_ordering type, pe_working_set_t *data_set)
2088 {
2089  /* flags == get_action_flags(first, then_node) called from update_action() */
2090  enum pe_graph_flags changed = pe_graph_none;
2091  enum pe_action_flags then_flags = then->flags;
2092  enum pe_action_flags first_flags = first->flags;
2093 
2094  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2095  first->uuid, first->node ? first->node->details->uname : "[none]",
2096  first->flags, then->uuid, then->flags);
2097 
2098  if (type & pe_order_asymmetrical) {
2099  pe_resource_t *then_rsc = then->rsc;
2100  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2101 
2102  if (!then_rsc) {
2103  /* ignore */
2104  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
2105  /* ignore... if 'then' is supposed to be stopped after 'first', but
2106  * then is already stopped, there is nothing to be done when non-symmetrical. */
2107  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2108  && safe_str_eq(then->task, RSC_START)
2109  && is_set(then->flags, pe_action_optional)
2110  && then->node
2111  && pcmk__list_of_1(then_rsc->running_on)
2112  && then->node->details == ((pe_node_t *) then_rsc->running_on->data)->details) {
2113  /* Ignore. If 'then' is supposed to be started after 'first', but
2114  * 'then' is already started, there is nothing to be done when
2115  * asymmetrical -- unless the start is mandatory, which indicates
2116  * the resource is restarting, and the ordering is still needed.
2117  */
2118  } else if (!(first->flags & pe_action_runnable)) {
2119  /* prevent 'then' action from happening if 'first' is not runnable and
2120  * 'then' has not yet occurred. */
2121  pe_action_implies(then, first, pe_action_optional);
2122  pe_action_implies(then, first, pe_action_runnable);
2123 
2124  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2125  } else {
2126  /* ignore... then is allowed to start/stop if it wants to. */
2127  }
2128  }
2129 
2130  if (type & pe_order_implies_first) {
2131  if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
2132  // Needs is_set(first_flags, pe_action_optional) too?
2133  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2134  pe_action_implies(first, then, pe_action_optional);
2135  }
2136 
2137  if (is_set(flags, pe_action_migrate_runnable) &&
2138  is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
2139  is_set(then->flags, pe_action_optional) == FALSE) {
2140 
2141  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2142  first->uuid, then->uuid);
2144  }
2145  }
2146 
2147  if (type & pe_order_implies_first_master) {
2148  if ((filter & pe_action_optional) &&
2149  ((then->flags & pe_action_optional) == FALSE) &&
2150  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2151  pe_action_implies(first, then, pe_action_optional);
2152 
2153  if (is_set(first->flags, pe_action_migrate_runnable) &&
2154  is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
2155 
2156  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
2158  }
2159  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2160  }
2161  }
2162 
2164  && is_set(filter, pe_action_optional)) {
2165 
2166  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2167  ((then->flags & pe_action_runnable) == FALSE)) {
2168 
2169  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
2170  pe_action_implies(first, then, pe_action_runnable);
2171  }
2172 
2173  if ((then->flags & pe_action_optional) == 0) {
2174  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2175  pe_action_implies(first, then, pe_action_optional);
2176  }
2177  }
2178 
2179  if ((type & pe_order_pseudo_left)
2180  && is_set(filter, pe_action_optional)) {
2181 
2182  if ((first->flags & pe_action_runnable) == FALSE) {
2185  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2186  }
2187 
2188  }
2189 
2190  if (is_set(type, pe_order_runnable_left)
2191  && is_set(filter, pe_action_runnable)
2192  && is_set(then->flags, pe_action_runnable)
2193  && is_set(flags, pe_action_runnable) == FALSE) {
2194  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2195  pe_action_implies(then, first, pe_action_runnable);
2197  }
2198 
2199  if (is_set(type, pe_order_implies_then)
2200  && is_set(filter, pe_action_optional)
2201  && is_set(then->flags, pe_action_optional)
2202  && is_set(flags, pe_action_optional) == FALSE) {
2203 
2204  /* in this case, treat migrate_runnable as if first is optional */
2205  if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
2206  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2207  pe_action_implies(then, first, pe_action_optional);
2208  }
2209  }
2210 
2211  if (is_set(type, pe_order_restart)) {
2212  handle_restart_ordering(first, then, filter);
2213  }
2214 
2215  if (then_flags != then->flags) {
2216  changed |= pe_graph_updated_then;
2217  pe_rsc_trace(then->rsc,
2218  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2219  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2220  then_flags, first->uuid, first->flags);
2221 
2222  if(then->rsc && then->rsc->parent) {
2223  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2224  update_action(then, data_set);
2225  }
2226  }
2227 
2228  if (first_flags != first->flags) {
2229  changed |= pe_graph_updated_first;
2230  pe_rsc_trace(first->rsc,
2231  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2232  first->uuid, first->node ? first->node->details->uname : "[none]",
2233  first->flags, first_flags, then->uuid, then->flags);
2234  }
2235 
2236  return changed;
2237 }
2238 
2239 void
2241 {
2242  GListPtr gIter = NULL;
2243  GHashTableIter iter;
2244  pe_node_t *node = NULL;
2245 
2246  if (constraint == NULL) {
2247  pe_err("Constraint is NULL");
2248  return;
2249 
2250  } else if (rsc == NULL) {
2251  pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
2252  return;
2253  }
2254 
2255  pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
2256  role2text(constraint->role_filter), rsc->id);
2257 
2258  /* take "lifetime" into account */
2259  if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
2260  pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
2261  constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
2262  return;
2263  }
2264 
2265  if (constraint->node_list_rh == NULL) {
2266  pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
2267  return;
2268  }
2269 
2270  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2271  pe_node_t *node = (pe_node_t *) gIter->data;
2272  pe_node_t *other_node = NULL;
2273 
2274  other_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2275 
2276  if (other_node != NULL) {
2277  pe_rsc_trace(rsc, "%s + %s: %d + %d",
2278  node->details->uname,
2279  other_node->details->uname, node->weight, other_node->weight);
2280  other_node->weight = pe__add_scores(other_node->weight,
2281  node->weight);
2282 
2283  } else {
2284  other_node = pe__copy_node(node);
2285 
2286  pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
2287  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2288  }
2289 
2290  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2291  if (constraint->discover_mode == pe_discover_exclusive) {
2292  rsc->exclusive_discover = TRUE;
2293  }
2294  /* exclusive > never > always... always is default */
2295  other_node->rsc_discover_mode = constraint->discover_mode;
2296  }
2297  }
2298 
2299  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
2300  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
2301  pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
2302  }
2303 }
2304 
2305 void
2307 {
2308  GListPtr gIter = NULL;
2309 
2310  CRM_ASSERT(rsc);
2311  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2312 
2313  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2314  pe_action_t *action = (pe_action_t *) gIter->data;
2315 
2316  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2317  graph_element_from_action(action, data_set);
2318  }
2319 
2320  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2321  pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
2322 
2323  child_rsc->cmds->expand(child_rsc, data_set);
2324  }
2325 }
2326 
/* Log one planned change for an action. Relies on a gboolean named
 * 'terminal' being in scope at the expansion site: when true, the message is
 * printed to stdout; otherwise it goes through crm_notice(). If the action
 * has a reason set, a "due to <reason>" suffix is appended either way.
 */
#define log_change(a, fmt, args...) do { \
        if(a && a->reason && terminal) { \
            printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
        } else if(a && a->reason) { \
            crm_notice(fmt" \tdue to %s", ##args, a->reason); \
        } else if(terminal) { \
            printf(" * "fmt"\n", ##args); \
        } else { \
            crm_notice(fmt, ##args); \
        } \
    } while(0)

/* Sanity check used while logging planned stops. Relies on 'current' (node),
 * 'stop' (action), and 'rsc' being in scope at the expansion site: unless the
 * node is unclean (the stop becomes a pseudo-op), a stop action must exist
 * and must already have been made mandatory.
 */
#define STOP_SANITY_ASSERT(lineno) do { \
        if(current && current->details->unclean) { \
            /* It will be a pseudo op */ \
        } else if(stop == NULL) { \
            crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
            CRM_ASSERT(stop != NULL); \
        } else if(is_set(stop->flags, pe_action_optional)) { \
            crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
            CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
        } \
    } while(0)
2350 
/* Column widths for the " * <change> <rsc> ( <details> )" output; these grow
 * monotonically across calls so successive lines stay aligned.
 */
static int rsc_width = 5;
static int detail_width = 5;
/*!
 * \internal
 * \brief Log one planned resource action in human-readable form
 *
 * Formats a line like " * Move rsc ( node1 -> node2 ) due to ..." and sends
 * it to stdout (if \p terminal) or the cluster log otherwise.
 *
 * \param[in] change       Verb describing the change ("Start", "Move", ...)
 * \param[in] rsc          Resource being acted on
 * \param[in] origin       Node the resource is currently on (may be NULL)
 * \param[in] destination  Node the resource is headed to (may be NULL)
 * \param[in] action       Action whose runnable flag decides "(blocked)"
 * \param[in] source       Action whose reason is reported (defaults to action)
 * \param[in] terminal     If TRUE, print to stdout instead of the log
 */
static void
LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
{
    int len = 0;
    char *reason = NULL;
    char *details = NULL;
    bool same_host = FALSE;
    bool same_role = FALSE;
    bool need_role = FALSE;

    CRM_ASSERT(action);
    CRM_ASSERT(destination != NULL || origin != NULL);

    if(source == NULL) {
        source = action;
    }

    // Widen the resource column if this id wouldn't fit
    len = strlen(rsc->id);
    if(len > rsc_width) {
        rsc_width = len + 2;
    }

    // Roles matter only for promotable resources (beyond simple started/slave)
    if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
        need_role = TRUE;
    }

    if(origin != NULL && destination != NULL && origin->details == destination->details) {
        same_host = TRUE;
    }

    if(rsc->role == rsc->next_role) {
        same_role = TRUE;
    }

    // Build the parenthesized details from role/host transitions
    if(need_role && origin == NULL) {
        /* Promoting from Stopped */
        details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);

    } else if(need_role && destination == NULL) {
        /* Demoting a Master or Stopping a Slave */
        details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);

    } else if(origin == NULL || destination == NULL) {
        /* Starting or stopping a resource */
        details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname);

    } else if(need_role && same_role && same_host) {
        /* Recovering or restarting a promotable clone resource */
        details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);

    } else if(same_role && same_host) {
        /* Recovering or Restarting a normal resource */
        details = crm_strdup_printf("%s", origin->details->uname);

    } else if(same_role && need_role) {
        /* Moving a promotable clone resource */
        details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));

    } else if(same_role) {
        /* Moving a normal resource */
        details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);

    } else if(same_host) {
        /* Promoting or demoting a promotable clone resource */
        details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);

    } else {
        /* Moving and promoting/demoting */
        details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
    }

    // Widen the details column if needed
    len = strlen(details);
    if(len > detail_width) {
        detail_width = len;
    }

    // Build the trailing reason text, noting blocked actions
    if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
        reason = crm_strdup_printf(" due to %s (blocked)", source->reason);

    } else if(source->reason) {
        reason = crm_strdup_printf(" due to %s", source->reason);

    } else if(is_not_set(action->flags, pe_action_runnable)) {
        reason = strdup(" blocked");

    } else {
        reason = strdup("");
    }

    if(terminal) {
        printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
    } else {
        crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
    }

    free(details);
    free(reason);
}
2451 
2452 
/*!
 * \internal
 * \brief Log all planned actions for a resource (recursing into children)
 *
 * Determines what kind of transition (start, stop, move, migrate, recover,
 * restart, promote, demote, reload, or leave) is planned for \p rsc and logs
 * it via LogAction()/pe_rsc_info().
 *
 * \param[in] rsc       Resource whose planned actions should be logged
 * \param[in] data_set  Cluster working set
 * \param[in] terminal  If TRUE, print to stdout instead of the log
 */
void
LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
{
    pe_node_t *next = NULL;
    pe_node_t *current = NULL;
    pe_node_t *start_node = NULL;

    pe_action_t *stop = NULL;
    pe_action_t *start = NULL;
    pe_action_t *demote = NULL;
    pe_action_t *promote = NULL;

    char *key = NULL;
    gboolean moving = FALSE;
    GListPtr possible_matches = NULL;

    // Bundles have their own logging
    if(rsc->variant == pe_container) {
        pcmk__bundle_log_actions(rsc, data_set, terminal);
        return;
    }

    // For collective resources, log each child instead
    if (rsc->children) {
        GListPtr gIter = NULL;

        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            LogActions(child_rsc, data_set, terminal);
        }
        return;
    }

    next = rsc->allocated_to;
    if (rsc->running_on) {
        current = pe__current_node(rsc);
        if (rsc->role == RSC_ROLE_STOPPED) {
            /*
             * This can occur when resources are being recovered
             * We fiddle with the current role in native_create_actions()
             */
            rsc->role = RSC_ROLE_STARTED;
        }
    }

    if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
        /* Don't log stopped orphans */
        return;
    }

    // Unmanaged, or neither running nor allocated anywhere: leave it alone
    if (is_not_set(rsc->flags, pe_rsc_managed)
        || (current == NULL && next == NULL)) {
        pe_rsc_info(rsc, "Leave %s\t(%s%s)",
                    rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
                                                              pe_rsc_managed) ? " unmanaged" : "");
        return;
    }

    if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
        moving = TRUE;
    }

    // Find the relevant start/stop/promote/demote actions, if any
    possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
    if (possible_matches) {
        start = possible_matches->data;
        g_list_free(possible_matches);
    }

    if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
        start_node = NULL;
    } else {
        start_node = current;
    }
    possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
    if (possible_matches) {
        stop = possible_matches->data;
        g_list_free(possible_matches);
    }

    possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
    if (possible_matches) {
        promote = possible_matches->data;
        g_list_free(possible_matches);
    }

    possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
    if (possible_matches) {
        demote = possible_matches->data;
        g_list_free(possible_matches);
    }

    // Role is unchanged: migrate, reload, restart, recover, stop, or leave
    if (rsc->role == rsc->next_role) {
        pe_action_t *migrate_op = NULL;

        possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
        if (possible_matches) {
            migrate_op = possible_matches->data;
        }

        CRM_CHECK(next != NULL,);
        if (next == NULL) {
        } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
            LogAction("Migrate", rsc, current, next, start, NULL, terminal);

        } else if (is_set(rsc->flags, pe_rsc_reload)) {
            LogAction("Reload", rsc, current, next, start, NULL, terminal);

        } else if (start == NULL || is_set(start->flags, pe_action_optional)) {
            pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role),
                        next->details->uname);

        } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
            LogAction("Stop", rsc, current, NULL, stop,
                      (stop && stop->reason)? stop : start, terminal);
            STOP_SANITY_ASSERT(__LINE__);

        } else if (moving && current) {
            LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
                      rsc, current, next, stop, NULL, terminal);

        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
            STOP_SANITY_ASSERT(__LINE__);

        } else {
            LogAction("Restart", rsc, current, next, start, NULL, terminal);
            /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
        }

        g_list_free(possible_matches);
        return;
    }

    // Stopping entirely (or start is unrunnable): one stop per active node
    if(stop
       && (rsc->next_role == RSC_ROLE_STOPPED
           || (start && is_not_set(start->flags, pe_action_runnable)))) {

        GListPtr gIter = NULL;

        key = stop_key(rsc);
        for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
            pe_node_t *node = (pe_node_t *) gIter->data;
            pe_action_t *stop_op = NULL;

            possible_matches = find_actions(rsc->actions, key, node);
            if (possible_matches) {
                stop_op = possible_matches->data;
                g_list_free(possible_matches);
            }

            if (stop_op && (stop_op->flags & pe_action_runnable)) {
                STOP_SANITY_ASSERT(__LINE__);
            }

            LogAction("Stop", rsc, node, NULL, stop_op,
                      (stop_op && stop_op->reason)? stop_op : start, terminal);
        }

        free(key);

    } else if (stop && is_set(rsc->flags, pe_rsc_failed)) {
        /* 'stop' may be NULL if the failure was ignored */
        LogAction("Recover", rsc, current, next, stop, start, terminal);
        STOP_SANITY_ASSERT(__LINE__);

    } else if (moving) {
        LogAction("Move", rsc, current, next, stop, NULL, terminal);
        STOP_SANITY_ASSERT(__LINE__);

    } else if (is_set(rsc->flags, pe_rsc_reload)) {
        LogAction("Reload", rsc, current, next, start, NULL, terminal);

    } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
        LogAction("Restart", rsc, current, next, start, NULL, terminal);
        STOP_SANITY_ASSERT(__LINE__);

    } else if (rsc->role == RSC_ROLE_MASTER) {
        CRM_LOG_ASSERT(current != NULL);
        LogAction("Demote", rsc, current, next, demote, NULL, terminal);

    } else if(rsc->next_role == RSC_ROLE_MASTER) {
        CRM_LOG_ASSERT(next);
        LogAction("Promote", rsc, current, next, promote, NULL, terminal);

    } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
        LogAction("Start", rsc, current, next, start, NULL, terminal);
    }
}
2640 
2641 gboolean
2642 StopRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2643 {
2644  GListPtr gIter = NULL;
2645 
2646  CRM_ASSERT(rsc);
2647  pe_rsc_trace(rsc, "%s", rsc->id);
2648 
2649  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2650  pe_node_t *current = (pe_node_t *) gIter->data;
2651  pe_action_t *stop;
2652 
2653  if (rsc->partial_migration_target) {
2654  if (rsc->partial_migration_target->details == current->details) {
2655  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2656  next->details->uname, rsc->id);
2657  continue;
2658  } else {
2659  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2660  optional = FALSE;
2661  }
2662  }
2663 
2664  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2665  stop = stop_action(rsc, current, optional);
2666 
2667  if(rsc->allocated_to == NULL) {
2668  pe_action_set_reason(stop, "node availability", TRUE);
2669  }
2670 
2671  if (is_not_set(rsc->flags, pe_rsc_managed)) {
2672  update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2673  }
2674 
2675  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
2676  DeleteRsc(rsc, current, optional, data_set);
2677  }
2678 
2679  if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2680  pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE, data_set);
2681 
2682  order_actions(stop, unfence, pe_order_implies_first);
2683  if (!node_has_been_unfenced(current)) {
2684  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2685  }
2686  }
2687  }
2688 
2689  return TRUE;
2690 }
2691 
2692 static void
2693 order_after_unfencing(pe_resource_t *rsc, pe_node_t *node, pe_action_t *action,
2694  enum pe_ordering order, pe_working_set_t *data_set)
2695 {
2696  /* When unfencing is in use, we order unfence actions before any probe or
2697  * start of resources that require unfencing, and also of fence devices.
2698  *
2699  * This might seem to violate the principle that fence devices require
2700  * only quorum. However, fence agents that unfence often don't have enough
2701  * information to even probe or start unless the node is first unfenced.
2702  */
2703  if (is_unfence_device(rsc, data_set)
2704  || is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2705 
2706  /* Start with an optional ordering. Requiring unfencing would result in
2707  * the node being unfenced, and all its resources being stopped,
2708  * whenever a new resource is added -- which would be highly suboptimal.
2709  */
2710  pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE, data_set);
2711 
2712  order_actions(unfence, action, order);
2713 
2714  if (!node_has_been_unfenced(node)) {
2715  // But unfencing is required if it has never been done
2716  char *reason = crm_strdup_printf("required by %s %s",
2717  rsc->id, action->task);
2718 
2719  trigger_unfencing(NULL, node, reason, NULL, data_set);
2720  free(reason);
2721  }
2722  }
2723 }
2724 
2725 gboolean
2726 StartRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2727 {
2728  pe_action_t *start = NULL;
2729 
2730  CRM_ASSERT(rsc);
2731  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2732  start = start_action(rsc, next, TRUE);
2733 
2734  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2735 
2736  if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
2737  update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2738  }
2739 
2740 
2741  return TRUE;
2742 }
2743 
2744 gboolean
2745 PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2746 {
2747  GListPtr gIter = NULL;
2748  gboolean runnable = TRUE;
2749  GListPtr action_list = NULL;
2750 
2751  CRM_ASSERT(rsc);
2752  CRM_CHECK(next != NULL, return FALSE);
2753  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2754 
2755  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2756 
2757  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2758  pe_action_t *start = (pe_action_t *) gIter->data;
2759 
2760  if (is_set(start->flags, pe_action_runnable) == FALSE) {
2761  runnable = FALSE;
2762  }
2763  }
2764  g_list_free(action_list);
2765 
2766  if (runnable) {
2767  promote_action(rsc, next, optional);
2768  return TRUE;
2769  }
2770 
2771  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2772 
2773  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2774 
2775  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2776  pe_action_t *promote = (pe_action_t *) gIter->data;
2777 
2778  update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2779  }
2780 
2781  g_list_free(action_list);
2782  return TRUE;
2783 }
2784 
2785 gboolean
2786 DemoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
2787 {
2788  GListPtr gIter = NULL;
2789 
2790  CRM_ASSERT(rsc);
2791  pe_rsc_trace(rsc, "%s", rsc->id);
2792 
2793 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2794  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2795  pe_node_t *current = (pe_node_t *) gIter->data;
2796 
2797  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2798  demote_action(rsc, current, optional);
2799  }
2800  return TRUE;
2801 }
2802 
/*!
 * \internal
 * \brief Handler for invalid role transitions; logs an error and asserts
 *
 * \return FALSE (always)
 */
gboolean
RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
    CRM_ASSERT(rsc);
    crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
    CRM_CHECK(FALSE, return FALSE);
    return FALSE;
}
2811 
/*!
 * \internal
 * \brief Handler for role transitions that require no action
 *
 * \return FALSE (always; nothing was scheduled)
 */
gboolean
NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set)
{
    CRM_ASSERT(rsc);
    pe_rsc_trace(rsc, "%s", rsc->id);
    return FALSE;
}
2819 
2820 gboolean
2821 DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set)
2822 {
2823  if (is_set(rsc->flags, pe_rsc_failed)) {
2824  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2825  return FALSE;
2826 
2827  } else if (node == NULL) {
2828  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2829  return FALSE;
2830 
2831  } else if (node->details->unclean || node->details->online == FALSE) {
2832  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2833  node->details->uname);
2834  return FALSE;
2835  }
2836 
2837  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2838 
2839  delete_action(rsc, node, optional);
2840 
2841  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2842  optional ? pe_order_implies_then : pe_order_optional, data_set);
2843 
2844  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2845  optional ? pe_order_implies_then : pe_order_optional, data_set);
2846 
2847  return TRUE;
2848 }
2849 
/* Create a probe (one-shot status/monitor operation) for \p rsc on \p node,
 * unless a probe is unnecessary or impossible there, and order the probe
 * before the resource's start (and any reload).
 *
 * \param rsc       Resource to probe (recurses into children for collectives)
 * \param complete  Currently unused (see the #if 0 block at the bottom)
 * \param force     If TRUE, probe even if startup probes are disabled or the
 *                  resource is already known on the node
 *
 * \return TRUE if any probe action was created, FALSE otherwise
 *
 * NOTE(review): this extract is missing at least two source lines: the
 * declaration of the local `flags` variable used near the bottom (presumably
 * an enum pe_ordering initialized to pe_order_optional), and an `if`
 * condition in the remote-node branch below — confirm against upstream.
 */
gboolean
native_create_probe(pe_resource_t * rsc, pe_node_t * node, pe_action_t * complete,
                    gboolean force, pe_working_set_t * data_set)
{
    char *key = NULL;
    pe_action_t *probe = NULL;
    pe_node_t *running = NULL;
    pe_node_t *allowed = NULL;
    pe_resource_t *top = uber_parent(rsc);

    /* Stringified OCF return codes used as the probe's expected result,
     * computed once and cached across all calls
     */
    static const char *rc_master = NULL;
    static const char *rc_inactive = NULL;

    if (rc_inactive == NULL) {
        rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
        rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
    }

    CRM_CHECK(node != NULL, return FALSE);

    // Unless forced, respect the cluster-wide startup-probes option
    if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
        pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
        return FALSE;
    }

    // Certain resources can never run on Pacemaker Remote / guest nodes
    if (pe__is_guest_or_remote_node(node)) {
        const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);

        /* NOTE(review): a condition line is missing from this extract here
         * (presumably a check that `class` is the stonith resource class) —
         * confirm against upstream before relying on this branch.
         */
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
                         rsc->id, node->details->id);
            return FALSE;
        } else if (pe__is_guest_node(node)
                   && pe__resource_contains_guest_node(data_set, rsc)) {
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
                         rsc->id, node->details->id);
            return FALSE;
        } else if (rsc->is_remote_node) {
            pe_rsc_trace(rsc,
                         "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
                         rsc->id, node->details->id);
            return FALSE;
        }
    }

    // Collective resources: probe each child, reporting whether any probed
    if (rsc->children) {
        GListPtr gIter = NULL;
        gboolean any_created = FALSE;

        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
                || any_created;
        }

        return any_created;

    } else if ((rsc->container) && (!rsc->is_remote_node)) {
        // Probing the container implicitly covers what is inside it
        pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
        return FALSE;
    }

    if (is_set(rsc->flags, pe_rsc_orphan)) {
        pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
        return FALSE;
    }

    // Check whether resource is already known on node
    if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
        pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
        return FALSE;
    }

    allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);

    // Honor exclusive discovery: probe only nodes explicitly marked for it
    if (rsc->exclusive_discover || top->exclusive_discover) {
        if (allowed == NULL) {
            /* exclusive discover is enabled and this node is not in the allowed list. */
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
            return FALSE;
        } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
            /* exclusive discover is enabled and this node is not marked
             * as a node this resource should be discovered on */
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
            return FALSE;
        }
    }

    if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
        /* If this node was allowed to host this resource it would
         * have been explicitly added to the 'allowed_nodes' list.
         * However it wasn't and the node has discovery disabled, so
         * no need to probe for this resource.
         */
        pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
        return FALSE;
    }

    if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
        /* this resource is marked as not needing to be discovered on this node */
        pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
        return FALSE;
    }

    // Guest nodes: the guest's container state can make a probe unnecessary
    if (pe__is_guest_node(node)) {
        pe_resource_t *remote = node->details->remote_rsc->container;

        if(remote->role == RSC_ROLE_STOPPED) {
            /* If the container is stopped, then we know anything that
             * might have been inside it is also stopped and there is
             * no need to probe.
             *
             * If we don't know the container's state on the target
             * either:
             *
             * - the container is running, the transition will abort
             * and we'll end up in a different case next time, or
             *
             * - the container is stopped
             *
             * Either way there is no need to probe.
             *
             */
            if(remote->allocated_to
               && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
                /* For safety, we order the 'rsc' start after 'remote'
                 * has been probed.
                 *
                 * Using 'top' helps for groups, but we may need to
                 * follow the start's ordering chain backwards.
                 */
                custom_action_order(remote,
                                    pcmk__op_key(remote->id, RSC_STATUS, 0),
                                    NULL, top,
                                    pcmk__op_key(top->id, RSC_START, 0), NULL,
                                    pe_order_optional, data_set);
            }
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
                         rsc->id, node->details->id, remote->id);
            return FALSE;

            /* Here we really we want to check if remote->stop is required,
             * but that information doesn't exist yet
             */
        } else if(node->details->remote_requires_reset
                  || node->details->unclean
                  || is_set(remote->flags, pe_rsc_failed)
                  || remote->next_role == RSC_ROLE_STOPPED
                  || (remote->allocated_to
                      && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
            ) {
            /* The container is stopping or restarting, don't start
             * 'rsc' until 'remote' stops as this also implies that
             * 'rsc' is stopped - avoiding the need to probe
             */
            custom_action_order(remote, pcmk__op_key(remote->id, RSC_STOP, 0),
                                NULL, top, pcmk__op_key(top->id, RSC_START, 0),
                                NULL, pe_order_optional, data_set);
            pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
                         rsc->id, node->details->id, remote->id);
            return FALSE;
/* } else {
 * The container is running so there is no problem probing it
 */
        }
    }

    // All skip conditions passed: create the probe action
    key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
    probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
    update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);

    order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);

    /*
     * We need to know if it's running_on (not just known_on) this node
     * to correctly determine the target rc.
     */
    running = pe_find_node_id(rsc->running_on, node->details->id);
    if (running == NULL) {
        add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);

    } else if (rsc->role == RSC_ROLE_MASTER) {
        add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
    }

    crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
              is_set(probe->flags, pe_action_runnable), rsc->running_on);

    // Order clone instances' probes against the clone's start; others own
    if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
        top = rsc;
    } else {
        crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
    }

    if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
        /* Prevent the start from occurring if rsc isn't active, but
         * don't cause it to stop if it was active already
         */
        flags |= pe_order_runnable_left;
    }

    // Probe must complete before the resource (or its collective) starts
    custom_action_order(rsc, NULL, probe,
                        top, pcmk__op_key(top->id, RSC_START, 0), NULL,
                        flags, data_set);

    /* Before any reloads, if they exist */
    custom_action_order(rsc, NULL, probe,
                        top, reload_key(rsc), NULL,
                        pe_order_optional, data_set);

#if 0
    // complete is always null currently
    if (!is_unfence_device(rsc, data_set)) {
        /* Normally rsc.start depends on probe complete which depends
         * on rsc.probe. But this can't be the case for fence devices
         * with unfencing, as it would create graph loops.
         *
         * So instead we explicitly order 'rsc.probe then rsc.start'
         */
        order_actions(probe, complete, pe_order_implies_then);
    }
#endif
    return TRUE;
}
3077 
3087 static bool
3088 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
3089 {
3090  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
3091  return TRUE;
3092 
3093  } else if ((rsc->variant == pe_native)
3094  && pe_rsc_is_anon_clone(rsc->parent)
3095  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3096  /* We check only the parent, not the uber-parent, because we cannot
3097  * assume that the resource is known if it is in an anonymously cloned
3098  * group (which may be only partially known).
3099  */
3100  return TRUE;
3101  }
3102  return FALSE;
3103 }
3104 
/* Order a resource's start/promote actions relative to fencing of the node
 * being shot, based on each action's quorum/fencing requirements.
 *
 * \param rsc         Resource whose actions should be ordered
 * \param stonith_op  Fencing action (must have a target node)
 */
static void
native_start_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
    pe_node_t *target;
    GListPtr gIter = NULL;

    CRM_CHECK(stonith_op && stonith_op->node, return);
    target = stonith_op->node;

    for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        switch (action->needs) {
            case rsc_req_nothing:
                // Anything other than start or promote requires nothing
                break;

            case rsc_req_stonith:
                // Action requires fencing, so order it after the fencing
                order_actions(stonith_op, action, pe_order_optional);
                break;

            case rsc_req_quorum:
                if (safe_str_eq(action->task, RSC_START)
                    && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
                    && !rsc_is_known_on(rsc, target)) {

                    /* If we don't know the status of the resource on the node
                     * we're about to shoot, we have to assume it may be active
                     * there. Order the resource start after the fencing. This
                     * is analogous to waiting for all the probes for a resource
                     * to complete before starting it.
                     *
                     * The most likely explanation is that the DC died and took
                     * its status with it.
                     */
                    pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
                                 target->details->uname);
                    /* NOTE(review): the ordering-flags argument and closing
                     * parenthesis of this order_actions() call are missing
                     * from this extract — confirm against upstream.
                     */
                    order_actions(stonith_op, action,
                }
                break;
        }
    }
}
3157 
/* Convert a resource's stop and demote actions on a to-be-fenced node into
 * pseudo-actions (they can never really run there) and order them relative
 * to the fencing operation.
 *
 * \param rsc         Resource whose stops/demotes should be handled
 * \param stonith_op  Fencing action (must have a target node)
 *
 * NOTE(review): three update_action_flags() call lines are missing from this
 * extract (only their trailing "__FUNCTION__, __LINE__);" arguments remain) —
 * confirm the flag arguments against upstream.
 */
static void
native_stop_constraints(pe_resource_t * rsc, pe_action_t * stonith_op, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    GListPtr action_list = NULL;
    bool order_implicit = false;

    pe_resource_t *top = uber_parent(rsc);
    pe_action_t *parent_stop = NULL;
    pe_node_t *target;

    CRM_CHECK(stonith_op && stonith_op->node, return);
    target = stonith_op->node;

    /* Get a list of stop actions potentially implied by the fencing */
    action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);

    /* If resource requires fencing, implicit actions must occur after fencing.
     *
     * Implied stops and demotes of resources running on guest nodes are always
     * ordered after fencing, even if the resource does not require fencing,
     * because guest node "fencing" is actually just a resource stop.
     */
    if (is_set(rsc->flags, pe_rsc_needs_fencing) || pe__is_guest_node(target)) {
        order_implicit = true;
    }

    if (action_list && order_implicit) {
        parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
    }

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        // The stop would never complete, so convert it into a pseudo-action.
        /* NOTE(review): update_action_flags() call line missing here */
                           __FUNCTION__, __LINE__);

        if (order_implicit) {
            /* NOTE(review): update_action_flags() call line missing here */
                               __FUNCTION__, __LINE__);

            /* Order the stonith before the parent stop (if any).
             *
             * Also order the stonith before the resource stop, unless the
             * resource is inside a bundle -- that would cause a graph loop.
             * We can rely on the parent stop's ordering instead.
             *
             * User constraints must not order a resource in a guest node
             * relative to the guest node container resource. The
             * pe_order_preserve flag marks constraints as generated by the
             * cluster and thus immune to that check (and is irrelevant if
             * target is not a guest).
             */
            if (!pe_rsc_is_bundled(rsc)) {
                order_actions(stonith_op, action, pe_order_preserve);
            }
            order_actions(stonith_op, parent_stop, pe_order_preserve);
        }

        if (is_set(rsc->flags, pe_rsc_failed)) {
            crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
                       rsc->id, (order_implicit? "after" : "because"),
                       target->details->uname);
        } else {
            crm_info("%s is implicit %s %s is fenced",
                     action->uuid, (order_implicit? "after" : "because"),
                     target->details->uname);
        }

        if (is_set(rsc->flags, pe_rsc_notify)) {
            /* Create a second notification that will be delivered
             * immediately after the node is fenced
             *
             * Basic problem:
             * - C is a clone active on the node to be shot and stopping on another
             * - R is a resource that depends on C
             *
             * + C.stop depends on R.stop
             * + C.stopped depends on STONITH
             * + C.notify depends on C.stopped
             * + C.healthy depends on C.notify
             * + R.stop depends on C.healthy
             *
             * The extra notification here changes
             * + C.healthy depends on C.notify
             * into:
             * + C.healthy depends on C.notify'
             * + C.notify' depends on STONITH'
             * thus breaking the loop
             */
            create_secondary_notification(action, rsc, stonith_op, data_set);
        }

/* From Bug #1601, successful fencing must be an input to a failed resources stop action.

   However given group(rA, rB) running on nodeX and B.stop has failed,
   A := stop healthy resource (rA.stop)
   B := stop failed resource (pseudo operation B.stop)
   C := stonith nodeX
   A requires B, B requires C, C requires A
   This loop would prevent the cluster from making progress.

   This block creates the "C requires A" dependency and therefore must (at least
   for now) be disabled.

   Instead, run the block above and treat all resources on nodeX as B would be
   (marked as a pseudo op depending on the STONITH).

   TODO: Break the "A requires B" dependency in update_action() and re-enable this block

   } else if(is_stonith == FALSE) {
   crm_info("Moving healthy resource %s"
   " off %s before fencing",
   rsc->id, node->details->uname);

   * stop healthy resources before the
   * stonith op
   *
   custom_action_order(
   rsc, stop_key(rsc), NULL,
   NULL,strdup(CRM_OP_FENCE),stonith_op,
   pe_order_optional, data_set);
*/
    }

    g_list_free(action_list);

    /* Get a list of demote actions potentially implied by the fencing */
    action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);

    for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
        pe_action_t *action = (pe_action_t *) gIter->data;

        if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
            || is_set(rsc->flags, pe_rsc_failed)) {

            if (is_set(rsc->flags, pe_rsc_failed)) {
                pe_rsc_info(rsc,
                            "Demote of failed resource %s is implicit after %s is fenced",
                            rsc->id, target->details->uname);
            } else {
                pe_rsc_info(rsc, "%s is implicit after %s is fenced",
                            action->uuid, target->details->uname);
            }

            /* The demote would never complete and is now implied by the
             * fencing, so convert it into a pseudo-action.
             */
            /* NOTE(review): update_action_flags() call line missing here */
                               __FUNCTION__, __LINE__);

            if (pe_rsc_is_bundled(rsc)) {
                /* Do nothing, let the recovery be ordered after the parent's implied stop */

            } else if (order_implicit) {
                order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
            }
        }
    }

    g_list_free(action_list);
}
3321 
/* Order a resource's actions relative to a fencing operation, recursing into
 * children for collective resources; unmanaged resources are skipped.
 */
void
/* NOTE(review): the signature line is missing from this extract; the forward
 * reference elsewhere in the file shows it as
 *   rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op,
 *                        pe_working_set_t *data_set)
 * — confirm against upstream.
 */
{
    if (rsc->children) {
        GListPtr gIter = NULL;

        // Recurse: order each child resource relative to the fencing
        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            rsc_stonith_ordering(child_rsc, stonith_op, data_set);
        }

    } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);

    } else {
        // Primitive: order its starts and stops around the fencing
        native_start_constraints(rsc, stonith_op, data_set);
        native_stop_constraints(rsc, stonith_op, data_set);
    }
}
3342 
/* Schedule a reload of a resource on a node if possible; fall back to a full
 * stop (restart) if the resource is failed or has a start pending. Recurses
 * into children for collective resources.
 *
 * \param rsc       Resource to reload
 * \param node      Node where the reload should occur (NULL if inactive)
 * \param data_set  Cluster working set
 *
 * NOTE(review): the ordering-type argument lines of both custom_action_order()
 * calls at the bottom are missing from this extract — confirm against
 * upstream.
 */
void
ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    pe_action_t *reload = NULL;

    if (rsc->children) {
        // Collective resource: reload each child instead
        for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;

            ReloadRsc(child_rsc, node, data_set);
        }
        return;

    } else if (rsc->variant > pe_native) {
        /* Complex resource with no children */
        return;

    } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
        return;

    } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) {
        pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags);
        stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */
        return;

    } else if (node == NULL) {
        // Resource is not active anywhere, nothing to reload
        pe_rsc_trace(rsc, "%s: not active", rsc->id);
        return;
    }

    pe_rsc_trace(rsc, "Processing %s", rsc->id);
    set_bit(rsc->flags, pe_rsc_reload);

    reload = custom_action(
        rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
    pe_action_set_reason(reload, "resource definition change", FALSE);

    // Reload must happen before any subsequent stop or demote
    /* NOTE(review): ordering-type argument line missing from this call */
    custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
                        data_set);
    /* NOTE(review): ordering-type argument line missing from this call */
    custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
                        data_set);
}
3389 
/* Append a native resource's relevant meta-attributes (clone instance number,
 * remote-node name, and any ancestor container) to an XML node as CRM_META-
 * prefixed attributes.
 *
 * \param rsc  Resource whose meta-attributes should be appended
 * \param xml  XML element to add the attributes to
 *
 * NOTE(review): three source lines are missing from this extract — the two
 * crm_meta_name() assignments to `name` and the crm_xml_add() call inside the
 * container loop — confirm against upstream.
 */
void
native_append_meta(pe_resource_t * rsc, xmlNode * xml)
{
    char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
    pe_resource_t *parent;

    // Clone instance number, if this is a clone instance
    if (value) {
        char *name = NULL;

        /* NOTE(review): crm_meta_name() assignment line missing here */
        crm_xml_add(xml, name, value);
        free(name);
    }

    // Remote node name, if this resource creates one
    value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
    if (value) {
        char *name = NULL;

        /* NOTE(review): crm_meta_name() assignment line missing here */
        crm_xml_add(xml, name, value);
        free(name);
    }

    // Walk up the ancestry looking for a containing container resource
    for (parent = rsc; parent != NULL; parent = parent->parent) {
        if (parent->container) {
            /* NOTE(review): crm_xml_add() call line missing here */
        }
    }
}
Services API.
pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set)
#define LOG_TRACE
Definition: logging.h:36
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:233
GListPtr nodes
Definition: pe_types.h:146
gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t *data_set)
enum rsc_role_e role_filter
Definition: internal.h:57
enum rsc_start_requirement needs
Definition: pe_types.h:398
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:138
#define RSC_STOP
Definition: crm.h:199
#define crm_notice(fmt, args...)
Definition: logging.h:365
GHashTable * known_on
Definition: pe_types.h:351
xmlNode * ops_xml
Definition: pe_types.h:309
gboolean(* create_probe)(pe_resource_t *, pe_node_t *, pe_action_t *, gboolean, pe_working_set_t *)
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:18
gboolean unseen
Definition: pe_types.h:205
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:263
#define INFINITY
Definition: crm.h:95
#define LOAD_STOPPED
GListPtr dangling_migrations
Definition: pe_types.h:362
#define promote_action(rsc, node, optional)
Definition: internal.h:313
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:52
GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
#define stop_action(rsc, node, optional)
Definition: internal.h:297
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:858
pe_resource_t * container
Definition: pe_types.h:364
pe_node_t * partial_migration_source
Definition: pe_types.h:349
enum rsc_role_e role
Definition: pe_types.h:354
gboolean PromoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
resource_alloc_functions_t * cmds
Definition: pe_types.h:317
gboolean standby
Definition: pe_types.h:438
#define pe_action_implies(action, reason, flag)
Definition: internal.h:416
void rsc_ticket_constraint(pe_resource_t *lh_rsc, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set)
#define pcmk__config_err(fmt...)
Definition: internal.h:95
#define delete_action(rsc, node, optional)
Definition: internal.h:287
#define pe_flag_remove_after_stop
Definition: pe_types.h:105
enum pe_action_flags(* action_flags)(pe_action_t *, pe_node_t *)
pe_resource_t * rsc
Definition: pe_types.h:388
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:186
enum rsc_role_e next_role
Definition: pe_types.h:355
#define pe__show_node_weights(level, rsc, text, nodes)
Definition: internal.h:273
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:336
#define reload_key(rsc)
Definition: internal.h:301
gboolean StartRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_resource_t * remote_rsc
Definition: pe_types.h:217
GHashTable * meta
Definition: pe_types.h:357
#define pe_rsc_unique
Definition: pe_types.h:241
#define pe_rsc_notify
Definition: pe_types.h:240
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:94
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
resource_object_functions_t * fns
Definition: pe_types.h:316
gboolean pe__is_guest_or_remote_node(pe_node_t *node)
Definition: remote.c:58
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:254
#define RSC_DELETE
Definition: crm.h:190
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:316
pe_node_t * pe__copy_node(const pe_node_t *this_node)
Definition: utils.c:139
void ReloadRsc(pe_resource_t *rsc, pe_node_t *node, pe_working_set_t *data_set)
GHashTable *(* merge_weights)(pe_resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:219
pe_node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:427
void resource_location(pe_resource_t *rsc, pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1609
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
void native_expand(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_ticket_t * ticket
#define demote_key(rsc)
Definition: internal.h:322
#define clear_bit(word, bit)
Definition: crm_internal.h:69
guint crm_parse_interval_spec(const char *input)
Parse milliseconds from a Pacemaker interval specification.
Definition: utils.c:309
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
GListPtr rsc_cons
Definition: pe_types.h:341
pe_node_t * partial_migration_target
Definition: pe_types.h:348
gboolean show_scores
#define RSC_START
Definition: crm.h:196
gboolean RoleError(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_node_t * allocated_to
Definition: pe_types.h:347
GHashTable * pcmk__native_merge_weights(pe_resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, uint32_t flags)
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:250
gboolean can_run_resources(const pe_node_t *node)
#define pe_flag_have_quorum
Definition: pe_types.h:91
#define CRM_SCORE_INFINITY
Definition: crm.h:81
#define pe_proc_err(fmt...)
Definition: internal.h:23
gboolean remote_requires_reset
Definition: pe_types.h:211
char * reason
Definition: pe_types.h:395
gboolean native_assign_node(pe_resource_t *rsc, GListPtr candidates, pe_node_t *chosen, gboolean force)
pe_node_t * pe_find_node_id(GListPtr node_list, const char *id)
Definition: status.c:411
#define RSC_MIGRATE
Definition: crm.h:193
char * crm_meta_name(const char *field)
Definition: utils.c:454
const char * action
Definition: pcmk_fence.c:29
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason, pe_action_t *dependency, pe_working_set_t *data_set)
Definition: utils.c:2504
#define pe_flag_stop_everything
Definition: pe_types.h:102
#define demote_action(rsc, node, optional)
Definition: internal.h:323
void native_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, rsc_colocation_t *constraint, pe_working_set_t *data_set)
#define pe_rsc_provisional
Definition: pe_types.h:245
const char * role2text(enum rsc_role_e role)
Definition: common.c:463
void native_internal_constraints(pe_resource_t *rsc, pe_working_set_t *data_set)
#define CRM_ATTR_UNFENCED
Definition: crm.h:117
int weight
Definition: pe_types.h:228
#define pe_rsc_merging
Definition: pe_types.h:247
enum pe_discover_e discover_mode
Definition: internal.h:58
void LogActions(pe_resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
void native_create_actions(pe_resource_t *rsc, pe_working_set_t *data_set)
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2632
#define set_bit(word, bit)
Definition: crm_internal.h:68
gboolean DemoteRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
pe_action_flags
Definition: pe_types.h:276
#define pe_rsc_allow_migrate
Definition: pe_types.h:258
#define pe_rsc_failed
Definition: pe_types.h:252
pe_action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1875
#define crm_debug(fmt, args...)
Definition: logging.h:368
enum filter_colocation_res filter_colocation_constraint(pe_resource_t *rsc_lh, pe_resource_t *rsc_rh, rsc_colocation_t *constraint, gboolean preview)
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:762
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:83
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:205
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:522
const char * node_attribute
#define stop_key(rsc)
Definition: internal.h:296
#define pe_rsc_start_pending
Definition: pe_types.h:254
char * task
Definition: pe_types.h:392
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
#define CRM_ATTR_UNAME
Definition: crm.h:110
int custom_action_order(pe_resource_t *lh_rsc, char *lh_task, pe_action_t *lh_action, pe_resource_t *rh_rsc, char *rh_task, pe_action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
#define crm_trace(fmt, args...)
Definition: logging.h:369
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:150
void pe_fence_node(pe_working_set_t *data_set, pe_node_t *node, const char *reason, bool priority_delay)
Schedule a fence action for a node.
Definition: unpack.c:85
#define promote_key(rsc)
Definition: internal.h:312
void process_utilization(pe_resource_t *rsc, pe_node_t **prefer, pe_working_set_t *data_set)
GHashTable * meta
Definition: pe_types.h:402
GListPtr find_actions(GListPtr input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1472
void rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
gboolean(* rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(pe_resource_t *, pe_node_t *, gboolean, pe_working_set_t *)
struct pe_node_shared_s * details
Definition: pe_types.h:231
GListPtr running_on
Definition: pe_types.h:350
enum rsc_recovery_type recovery_type
Definition: pe_types.h:319
pe_node_t * node
Definition: pe_types.h:389
filter_colocation_res
enum loss_ticket_policy_e loss_policy
#define pe_rsc_needs_fencing
Definition: pe_types.h:265
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1823
unsigned long long flags
Definition: pe_types.h:332
const char * uname
Definition: pe_types.h:196
#define pe_rsc_promotable
Definition: pe_types.h:243
xmlNode * find_rsc_op_entry(pe_resource_t *rsc, const char *key)
Definition: utils.c:1304
void(* expand)(pe_resource_t *, pe_working_set_t *)
#define pe_flag_stonith_enabled
Definition: pe_types.h:95
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:629
pe_graph_flags
Definition: pe_types.h:268
GHashTable * utilization
Definition: pe_types.h:359
gboolean native_create_probe(pe_resource_t *rsc, pe_node_t *node, pe_action_t *complete, gboolean force, pe_working_set_t *data_set)
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:196
GListPtr rsc_cons_lhs
Definition: pe_types.h:340
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:286
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:208
char * uuid
Definition: pe_types.h:393
gboolean update_action_flags(pe_action_t *action, enum pe_action_flags flags, const char *source, int line)
pe_resource_t * rsc_lh
#define pe_rsc_allocating
Definition: pe_types.h:246
enum rsc_role_e text2role(const char *role)
Definition: common.c:484
enum pe_obj_types variant
Definition: pe_types.h:314
gboolean granted
Definition: pe_types.h:436
gboolean crm_str_eq(const char *a, const char *b, gboolean use_case)
Definition: strings.c:326
int new_rsc_order(pe_resource_t *lh_rsc, const char *lh_task, pe_resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set)
const char * placement_strategy
Definition: pe_types.h:133
int rsc_discover_mode
Definition: pe_types.h:232
gboolean can_run_any(GHashTable *nodes)
GListPtr actions
Definition: pe_types.h:343
const char * id
Definition: pe_types.h:195
char * pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key (RESOURCE_ACTION_INTERVAL)
Definition: operations.c:40
char * id
Definition: pe_types.h:435
#define CRMD_ACTION_RELOAD
Definition: crm.h:167
#define pe_rsc_fence_device
Definition: pe_types.h:242
GHashTable * pcmk__copy_node_table(GHashTable *nodes)
const char * target
Definition: pcmk_fence.c:28
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:49
gboolean pe__is_guest_node(pe_node_t *node)
Definition: remote.c:47
#define STOP_SANITY_ASSERT(lineno)
gboolean is_remote_node
Definition: pe_types.h:335
GListPtr children
Definition: pe_types.h:361
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:573
#define start_action(rsc, node, optional)
Definition: internal.h:303
#define CRM_META
Definition: crm.h:71
int pe__add_scores(int score1, int score2)
Definition: common.c:510
#define crm_err(fmt, args...)
Definition: logging.h:363
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:210
#define pe_rsc_reload
Definition: pe_types.h:249
#define RSC_PROMOTE
Definition: crm.h:202
#define pe_clear_action_bit(action, bit)
Definition: internal.h:26
gboolean StopRsc(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
#define pe_rsc_needs_unfencing
Definition: pe_types.h:266
gboolean shutdown
Definition: pe_types.h:206
#define crm_str(x)
Definition: logging.h:389
rsc_role_e
Definition: common.h:76
void native_append_meta(pe_resource_t *rsc, xmlNode *xml)
#define pe_rsc_block
Definition: pe_types.h:237
#define pe_flag_stdout
Definition: pe_types.h:115
enum pe_action_flags flags
Definition: pe_types.h:397
gboolean maintenance
Definition: pe_types.h:209
#define pe_rsc_maintenance
Definition: pe_types.h:261
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:221
const char * id
const char * id
#define pe_flag_have_stonith_resource
Definition: pe_types.h:96
#define RSC_ROLE_MAX
Definition: common.h:84
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX]
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1512
#define pe_flag_enable_unfencing
Definition: pe_types.h:97
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:19
#define start_key(rsc)
Definition: internal.h:302
pe_action_t * find_first_action(GListPtr input, const char *uuid, const char *task, pe_node_t *on_node)
Definition: utils.c:1442
#define ID(x)
Definition: msg_xml.h:418
unsigned long long flags
Definition: pe_types.h:135
#define pe_err(fmt...)
Definition: internal.h:21
#define safe_str_eq(a, b)
Definition: util.h:65
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1561
char * name
Definition: pcmk_fence.c:30
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:285
#define CRM_OP_LRM_DELETE
Definition: crm.h:147
#define CRM_ATTR_ID
Definition: crm.h:111
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:215
gboolean unclean
Definition: pe_types.h:204
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
GList * GListPtr
Definition: crm.h:214
gboolean DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional, pe_working_set_t *data_set)
void graph_element_from_action(pe_action_t *action, pe_working_set_t *data_set)
#define crm_info(fmt, args...)
Definition: logging.h:366
enum pe_action_flags native_action_flags(pe_action_t *action, pe_node_t *node)
#define pe_rsc_managed
Definition: pe_types.h:236
#define pe_rsc_orphan
Definition: pe_types.h:235
pe_ordering
Definition: pe_types.h:461
gboolean online
Definition: pe_types.h:200
uint64_t flags
Definition: remote.c:149
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:364
pe_resource_t * parent
Definition: pe_types.h:312
pe_action_t * pe_fence_op(pe_node_t *node, const char *op, bool optional, const char *reason, bool priority_delay, pe_working_set_t *data_set)
Definition: utils.c:2398
enum crm_ais_msg_types type
Definition: internal.h:83
#define RSC_DEMOTE
Definition: crm.h:204
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:17
pe_resource_t * rsc_rh
gboolean NullOp(pe_resource_t *rsc, pe_node_t *next, gboolean optional, pe_working_set_t *data_set)
void create_secondary_notification(pe_action_t *action, pe_resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
void native_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, rsc_colocation_t *constraint, pe_working_set_t *data_set)
pe_node_t * pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *preferred, pe_working_set_t *data_set)
char * id
Definition: pe_types.h:305
pe_action_t * custom_action(pe_resource_t *rsc, char *key, const char *task, pe_node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Definition: utils.c:485
GHashTable * allowed_nodes
Definition: pe_types.h:352
#define RSC_MIGRATED
Definition: crm.h:194
#define pe_flag_startup_probes
Definition: pe_types.h:109
pe_node_t *(* allocate)(pe_resource_t *, pe_node_t *, pe_working_set_t *)