pacemaker  2.0.2-debe490
Scalable High-Availability cluster resource manager
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
pcmk_sched_native.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2019 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <crm/pengine/rules.h>
13 #include <crm/msg_xml.h>
14 #include <pacemaker-internal.h>
15 #include <crm/services.h>
16 
// The controller removes the resource from the CIB, making this redundant
// #define DELETE_THEN_REFRESH 1

/* Sentinel node weight used while merging scores: node_hash_update() parks
 * filtered nodes at this value, and rsc_merge_weights() later restores such
 * nodes to a small positive weight (see the pe_weights_positive handling) */
#define INFINITY_HACK (INFINITY * -100)

#define VARIANT_NATIVE 1
#include <lib/pengine/variant.h>

/* Colocation appliers (defined later in this file) */
void native_rsc_colocation_rh_must(resource_t * rsc_lh, gboolean update_lh,
                                   resource_t * rsc_rh, gboolean update_rh);

void native_rsc_colocation_rh_mustnot(resource_t * rsc_lh, gboolean update_lh,
                                      resource_t * rsc_rh, gboolean update_rh);

/* Creators of recurring monitor actions: the plain variants handle operations
 * without role="Stopped"; the _Stopped variants handle role="Stopped" */
static void Recurring(resource_t *rsc, action_t *start, node_t *node,
                      pe_working_set_t *data_set);
static void RecurringOp(resource_t *rsc, action_t *start, node_t *node,
                        xmlNode *operation, pe_working_set_t *data_set);
static void Recurring_Stopped(resource_t *rsc, action_t *start, node_t *node,
                              pe_working_set_t *data_set);
static void RecurringOp_Stopped(resource_t *rsc, action_t *start, node_t *node,
                                xmlNode *operation, pe_working_set_t *data_set);

/* Role-transition action creators, referenced by the transition matrix below */
void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
gboolean StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean PromoteRsc(resource_t * rsc, node_t * next, gboolean optional,
                    pe_working_set_t * data_set);
gboolean RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
gboolean NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set);
49 
50 /* *INDENT-OFF* */
52 /* Current State */
53 /* Next State: Unknown Stopped Started Slave Master */
59 };
60 
62 /* Current State */
63 /* Next State: Unknown Stopped Started Slave Master */
64  /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, },
65  /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, },
66  /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, },
67  /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, },
68  /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, },
69 };
70 /* *INDENT-ON* */
71 
/*!
 * \internal
 * \brief Choose a node for a primitive resource from its allowed nodes
 *
 * Applies utilization processing, honors a preferred node when its score is
 * at least as good as the best allowed node's, and otherwise picks the
 * highest-weight node (breaking score ties in favor of the node the resource
 * is already running on, except for unique clone instances).
 *
 * \param[in,out] rsc       Resource to assign to a node
 * \param[in]     prefer    Preferred node, or NULL for none
 * \param[in]     data_set  Cluster working set
 *
 * \return TRUE if the resource ended up assigned to a node, else FALSE
 */
static gboolean
native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr nodes = NULL;
    node_t *chosen = NULL;
    node_t *best = NULL;
    int multiple = 1;
    int length = 0;
    gboolean result = FALSE;

    // May adjust node weights and/or the preferred node
    process_utilization(rsc, &prefer, data_set);

    // Not provisional means already allocated; just report the outcome
    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        return rsc->allocated_to ? TRUE : FALSE;
    }

    // Sort allowed nodes by weight
    if (rsc->allowed_nodes) {
        length = g_hash_table_size(rsc->allowed_nodes);
    }
    if (length > 0) {
        nodes = g_hash_table_get_values(rsc->allowed_nodes);
        nodes = sort_nodes_by_weight(nodes, pe__current_node(rsc), data_set);

        // First node in sorted list has the best score
        best = g_list_nth_data(nodes, 0);
    }

    if (prefer && nodes) {
        // Accept the preferred node only if it is allowed, suitable, and available
        chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);

        if (chosen == NULL) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                         prefer->details->uname, rsc->id);

        /* Favor the preferred node as long as its weight is at least as good as
         * the best allowed node's.
         *
         * An alternative would be to favor the preferred node even if the best
         * node is better, when the best node's weight is less than INFINITY.
         */
        } else if ((chosen->weight < 0) || (chosen->weight < best->weight)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else if (!can_run_resources(chosen)) {
            pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                         chosen->details->uname, rsc->id);
            chosen = NULL;

        } else {
            pe_rsc_trace(rsc,
                         "Chose preferred node %s for %s (ignoring %d candidates)",
                         chosen->details->uname, rsc->id, length);
        }
    }

    if ((chosen == NULL) && nodes) {
        /* Either there is no preferred node, or the preferred node is not
         * available, but there are other nodes allowed to run the resource.
         */

        chosen = best;
        pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                     chosen ? chosen->details->uname : "<none>", rsc->id, length);

        if (!pe_rsc_is_unique_clone(rsc->parent)
            && chosen && (chosen->weight > 0) && can_run_resources(chosen)) {
            /* If the resource is already running on a node, prefer that node if
             * it is just as good as the chosen node.
             *
             * We don't do this for unique clone instances, because
             * distribute_children() has already assigned instances to their
             * running nodes when appropriate, and if we get here, we don't want
             * remaining unallocated instances to prefer a node that's already
             * running another instance.
             */
            node_t *running = pe__current_node(rsc);

            if (running && (can_run_resources(running) == FALSE)) {
                pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                             rsc->id, running->details->uname);
            } else if (running) {
                // Walk the sorted list while scores remain tied with the best
                for (GList *iter = nodes->next; iter; iter = iter->next) {
                    node_t *tmp = (node_t *) iter->data;

                    if (tmp->weight != chosen->weight) {
                        // The nodes are sorted by weight, so no more are equal
                        break;
                    }
                    if (tmp->details == running->details) {
                        // Scores are equal, so prefer the current node
                        chosen = tmp;
                    }
                    multiple++;
                }
            }
        }
    }

    if (multiple > 1) {
        // Log score ties; escalate to a warning for INFINITY-score ties
        static char score[33];
        int log_level = (chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO;

        score2char_stack(chosen->weight, score, sizeof(score));
        do_crm_log(log_level,
                   "Chose node %s for %s from %d nodes with score %s",
                   chosen->details->uname, rsc->id, multiple, score);
    }

    result = native_assign_node(rsc, nodes, chosen, FALSE);
    g_list_free(nodes);
    return result;
}
187 
188 static int
189 node_list_attr_score(GHashTable * list, const char *attr, const char *value)
190 {
191  GHashTableIter iter;
192  node_t *node = NULL;
193  int best_score = -INFINITY;
194  const char *best_node = NULL;
195 
196  if (attr == NULL) {
197  attr = CRM_ATTR_UNAME;
198  }
199 
200  g_hash_table_iter_init(&iter, list);
201  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
202  int weight = node->weight;
203 
204  if (can_run_resources(node) == FALSE) {
205  weight = -INFINITY;
206  }
207  if (weight > best_score || best_node == NULL) {
208  const char *tmp = pe_node_attribute_raw(node, attr);
209 
210  if (safe_str_eq(value, tmp)) {
211  best_score = weight;
212  best_node = node->details->uname;
213  }
214  }
215  }
216 
217  if (safe_str_neq(attr, CRM_ATTR_UNAME)) {
218  crm_info("Best score for %s=%s was %s with %d",
219  attr, value, best_node ? best_node : "<none>", best_score);
220  }
221 
222  return best_score;
223 }
224 
/*!
 * \internal
 * \brief Merge attribute-matched scores from one node table into another
 *
 * For each node in \p list1, find the best score among \p list2 nodes sharing
 * the same value of \p attr, scale it by \p factor (rounded to the nearest
 * integer), and merge it into the node's weight -- subject to filtering that
 * keeps a negative preference from becoming positive and, when
 * \p only_positive is set, keeps a merge from driving a positive weight
 * negative (such nodes are parked at INFINITY_HACK instead).
 *
 * \param[in,out] list1          Nodes whose weights are updated
 * \param[in]     list2          Nodes providing the scores to merge in
 * \param[in]     attr           Node attribute to match on (NULL = node name)
 * \param[in]     factor         Multiplier applied to matched scores
 * \param[in]     only_positive  If TRUE, protect positive weights as above
 */
static void
node_hash_update(GHashTable * list1, GHashTable * list2, const char *attr, float factor,
                 gboolean only_positive)
{
    int score = 0;
    int new_score = 0;
    GHashTableIter iter;
    node_t *node = NULL;

    if (attr == NULL) {
        attr = CRM_ATTR_UNAME;
    }

    g_hash_table_iter_init(&iter, list1);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
        float weight_f = 0;
        int weight = 0;

        CRM_LOG_ASSERT(node != NULL);
        if(node == NULL) { continue; };

        // Best score in list2 for nodes sharing this node's attribute value
        score = node_list_attr_score(list2, attr, pe_node_attribute_raw(node, attr));

        weight_f = factor * score;
        /* Round the number */
        /* http://c-faq.com/fp/round.html */
        weight = (int)(weight_f < 0 ? weight_f - 0.5 : weight_f + 0.5);

        new_score = merge_weights(weight, node->weight);

        if (factor < 0 && score < 0) {
            /* Negative preference for a node with a negative score
             * should not become a positive preference
             *
             * TODO - Decide if we want to filter only if weight == -INFINITY
             *
             */
            crm_trace("%s: Filtering %d + %f*%d (factor * score)",
                      node->details->uname, node->weight, factor, score);

        } else if (node->weight == INFINITY_HACK) {
            // Node was already parked by a previous merge; leave it filtered
            crm_trace("%s: Filtering %d + %f*%d (node < 0)",
                      node->details->uname, node->weight, factor, score);

        } else if (only_positive && new_score < 0 && node->weight > 0) {
            node->weight = INFINITY_HACK;
            /* NOTE(review): the trace below prints the weight after it was set
             * to INFINITY_HACK, not the pre-merge weight -- confirm intended */
            crm_trace("%s: Filtering %d + %f*%d (score > 0)",
                      node->details->uname, node->weight, factor, score);

        } else if (only_positive && new_score < 0 && node->weight == 0) {
            crm_trace("%s: Filtering %d + %f*%d (score == 0)",
                      node->details->uname, node->weight, factor, score);

        } else {
            crm_trace("%s: %d + %f*%d", node->details->uname, node->weight, factor, score);
            node->weight = new_score;
        }
    }
}
284 
285 GHashTable *
286 node_hash_dup(GHashTable * hash)
287 {
288  /* Hack! */
289  GListPtr list = g_hash_table_get_values(hash);
290  GHashTable *result = node_hash_from_list(list);
291 
292  g_list_free(list);
293  return result;
294 }
295 
296 GHashTable *
297 native_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
298  float factor, enum pe_weights flags)
299 {
300  return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
301 }
302 
/*!
 * \brief Merge a resource's colocation-derived node scores into a node table
 *
 * Recursively combines scores from \p rsc (and, transitively, resources
 * colocated with it) into a working copy of \p nodes, scaled by \p factor.
 * The input table \p nodes is destroyed before returning the merged table,
 * except when a dependency loop is detected or a rollback occurs, in which
 * case \p nodes is returned unchanged.
 *
 * \param[in] rsc     Resource whose scores should be merged
 * \param[in] rhs     ID used in log messages to identify the merge target
 * \param[in] nodes   Node table to merge into (consumed unless returned)
 * \param[in] attr    Node attribute for score matching (NULL = node name)
 * \param[in] factor  Multiplier applied to merged scores
 * \param[in] flags   Group of enum pe_weights flags controlling the merge
 *
 * \return Merged node table (caller becomes the owner)
 */
GHashTable *
rsc_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                  float factor, enum pe_weights flags)
{
    GHashTable *work = NULL;
    int multiplier = 1;

    if (factor < 0) {
        // Negative factor inverts scores of transitive colocations below
        multiplier = -1;
    }

    if (is_set(rsc->flags, pe_rsc_merging)) {
        pe_rsc_info(rsc, "%s: Breaking dependency loop at %s", rhs, rsc->id);
        return nodes;
    }

    /* NOTE(review): this listing appears to be missing a statement here that
     * marks rsc with pe_rsc_merging (matching the loop guard above and the
     * apparent clears before each return) -- confirm against version control */

    if (is_set(flags, pe_weights_init)) {
        if (rsc->variant == pe_group && rsc->children) {
            // For a group, initialize the working table from the last member
            GListPtr last = rsc->children;

            while (last->next != NULL) {
                last = last->next;
            }

            pe_rsc_trace(rsc, "Merging %s as a group %p %p", rsc->id, rsc->children, last);
            work = rsc_merge_weights(last->data, rhs, NULL, attr, factor, flags);

        } else {
            work = node_hash_dup(rsc->allowed_nodes);
        }
        clear_bit(flags, pe_weights_init);

    } else if (rsc->variant == pe_group && rsc->children) {
        // Combine scores from group members (all but the last, per loop bound)
        GListPtr iter = rsc->children;

        pe_rsc_trace(rsc, "%s: Combining scores from %d children of %s", rhs, g_list_length(iter), rsc->id);
        work = node_hash_dup(nodes);
        for(iter = rsc->children; iter->next != NULL; iter = iter->next) {
            work = rsc_merge_weights(iter->data, rhs, work, attr, factor, flags);
        }

    } else {
        pe_rsc_trace(rsc, "%s: Combining scores from %s", rhs, rsc->id);
        work = node_hash_dup(nodes);
        node_hash_update(work, rsc->allowed_nodes, attr, factor,
                         is_set(flags, pe_weights_positive));
    }

    if (is_set(flags, pe_weights_rollback) && can_run_any(work) == FALSE) {
        // Merge left nothing runnable: discard it and keep the input table
        pe_rsc_info(rsc, "%s: Rolling back scores from %s", rhs, rsc->id);
        g_hash_table_destroy(work);
        /* NOTE(review): a line is missing here in this listing (likely
         * clearing pe_rsc_merging before returning) */
        return nodes;
    }

    if (can_run_any(work)) {
        GListPtr gIter = NULL;

        if (is_set(flags, pe_weights_forward)) {
            gIter = rsc->rsc_cons;
            crm_trace("Checking %d additional colocation constraints", g_list_length(gIter));

        } else if(rsc->variant == pe_group && rsc->children) {
            // For groups, follow the last member's reverse colocations
            GListPtr last = rsc->children;

            while (last->next != NULL) {
                last = last->next;
            }

            gIter = ((resource_t*)last->data)->rsc_cons_lhs;
            crm_trace("Checking %d additional optional group colocation constraints from %s",
                      g_list_length(gIter), ((resource_t*)last->data)->id);

        } else {
            gIter = rsc->rsc_cons_lhs;
            crm_trace("Checking %d additional optional colocation constraints %s", g_list_length(gIter), rsc->id);
        }

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *other = NULL;
            rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

            if (is_set(flags, pe_weights_forward)) {
                other = constraint->rsc_rh;
            } else {
                other = constraint->rsc_lh;
            }

            pe_rsc_trace(rsc, "Applying %s (%s)", constraint->id, other->id);
            // Recurse with rollback enabled so an unsatisfiable merge is undone
            work = rsc_merge_weights(other, rhs, work, constraint->node_attribute,
                                     multiplier * (float)constraint->score / INFINITY, flags|pe_weights_rollback);
            dump_node_scores(LOG_TRACE, NULL, rhs, work);
        }

    }

    if (is_set(flags, pe_weights_positive)) {
        // Restore nodes parked at INFINITY_HACK to a small positive weight
        node_t *node = NULL;
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, work);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
            if (node->weight == INFINITY_HACK) {
                node->weight = 1;
            }
        }
    }

    if (nodes) {
        // Input table is consumed; callers use only the returned table
        g_hash_table_destroy(nodes);
    }

    /* NOTE(review): a line is missing here in this listing (likely clearing
     * pe_rsc_merging) */
    return work;
}
420 
421 static inline bool
422 node_has_been_unfenced(node_t *node)
423 {
424  const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
425 
426  return unfenced && strcmp("0", unfenced);
427 }
428 
429 static inline bool
430 is_unfence_device(resource_t *rsc, pe_working_set_t *data_set)
431 {
432  return is_set(rsc->flags, pe_rsc_fence_device)
433  && is_set(data_set->flags, pe_flag_enable_unfencing);
434 }
435 
/*!
 * \brief Allocate a primitive (native) resource to a node
 *
 * Applies colocation constraints in both directions, honors stopped/unmanaged
 * special cases, chooses a node via native_choose_node(), and keeps any
 * corresponding Pacemaker Remote node's status in sync with the result.
 *
 * \param[in,out] rsc       Resource to allocate
 * \param[in]     prefer    Preferred node, or NULL
 * \param[in]     data_set  Cluster working set
 *
 * \return Node the resource is allocated to, or NULL if none
 */
node_t *
native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    int alloc_details = scores_log_level + 1;

    if (rsc->parent && is_not_set(rsc->parent->flags, pe_rsc_allocating)) {
        /* never allocate children on their own */
        pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                     rsc->parent->id);
        rsc->parent->cmds->allocate(rsc->parent, prefer, data_set);
    }

    if (is_not_set(rsc->flags, pe_rsc_provisional)) {
        // Already allocated (possibly by the parent escalation above)
        return rsc->allocated_to;
    }

    if (is_set(rsc->flags, pe_rsc_allocating)) {
        pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
        return NULL;
    }

    /* NOTE(review): this listing appears to be missing a statement here
     * (likely marking rsc with pe_rsc_allocating, to match the loop check
     * above) -- confirm against version control */
    print_resource(alloc_details, "Allocating: ", rsc, FALSE);
    dump_node_scores(alloc_details, rsc, "Pre-alloc", rsc->allowed_nodes);

    // Apply colocations of this resource with others (forward direction)
    for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        GHashTable *archive = NULL;
        resource_t *rsc_rh = constraint->rsc_rh;

        pe_rsc_trace(rsc, "%s: Pre-Processing %s (%s, %d, %s)",
                     rsc->id, constraint->id, rsc_rh->id,
                     constraint->score, role2text(constraint->role_lh));
        // Archive scores so an unsatisfiable constraint can be rolled back
        if (constraint->role_lh >= RSC_ROLE_MASTER
            || (constraint->score < 0 && constraint->score > -INFINITY)) {
            archive = node_hash_dup(rsc->allowed_nodes);
        }
        rsc_rh->cmds->allocate(rsc_rh, NULL, data_set);
        rsc->cmds->rsc_colocation_lh(rsc, rsc_rh, constraint, data_set);
        if (archive && can_run_any(rsc->allowed_nodes) == FALSE) {
            pe_rsc_info(rsc, "%s: Rolling back scores from %s", rsc->id, rsc_rh->id);
            g_hash_table_destroy(rsc->allowed_nodes);
            rsc->allowed_nodes = archive;
            archive = NULL;
        }
        if (archive) {
            g_hash_table_destroy(archive);
        }
    }

    dump_node_scores(alloc_details, rsc, "Post-coloc", rsc->allowed_nodes);

    // Merge in scores from resources colocated with this one (reverse direction)
    for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
        rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;

        rsc->allowed_nodes =
            constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id, rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    (float)constraint->score / INFINITY,
        /* NOTE(review): the merge_weights() call above is truncated in this
         * listing -- the final flags argument and closing parenthesis are
         * missing; confirm against version control */
    }

    print_resource(LOG_TRACE, "Allocating: ", rsc, FALSE);
    if (rsc->next_role == RSC_ROLE_STOPPED) {
        pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
        /* make sure it doesn't come up again */
        resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE, data_set);

    } else if(rsc->next_role > rsc->role
              && is_set(data_set->flags, pe_flag_have_quorum) == FALSE
              && data_set->no_quorum_policy == no_quorum_freeze) {
        crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                   rsc->id, role2text(rsc->role), role2text(rsc->next_role));
        rsc->next_role = rsc->role;
    }

    dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __FUNCTION__,
                     rsc->allowed_nodes);
    if (is_set(data_set->flags, pe_flag_stonith_enabled)
        && is_set(data_set->flags, pe_flag_have_stonith_resource) == FALSE) {
        /* NOTE(review): the body of this block is missing in this listing --
         * confirm against version control */
    }

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        // Unmanaged: pin the resource wherever it currently is (or nowhere)
        const char *reason = NULL;
        node_t *assign_to = NULL;

        rsc->next_role = rsc->role;
        assign_to = pe__current_node(rsc);
        if (assign_to == NULL) {
            reason = "inactive";
        } else if (rsc->role == RSC_ROLE_MASTER) {
            reason = "master";
        } else if (is_set(rsc->flags, pe_rsc_failed)) {
            reason = "failed";
        } else {
            reason = "active";
        }
        pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                    (assign_to? assign_to->details->uname : "no node"), reason);
        native_assign_node(rsc, NULL, assign_to, TRUE);

    } else if (is_set(data_set->flags, pe_flag_stop_everything)) {
        pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
        native_assign_node(rsc, NULL, NULL, TRUE);

    } else if (is_set(rsc->flags, pe_rsc_provisional)
               && native_choose_node(rsc, prefer, data_set)) {
        pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);

    } else if (rsc->allocated_to == NULL) {
        if (is_not_set(rsc->flags, pe_rsc_orphan)) {
            pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
        } else if (rsc->running_on != NULL) {
            pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
        }

    } else {
        pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                     rsc->allocated_to->details->uname);
    }

    /* NOTE(review): a statement appears to be missing here in this listing
     * (likely clearing pe_rsc_allocating) */
    print_resource(LOG_TRACE, "Allocated ", rsc, TRUE);

    if (rsc->is_remote_node) {
        // Keep the corresponding Pacemaker Remote node's state in sync
        node_t *remote_node = pe_find_node(data_set->nodes, rsc->id);

        CRM_ASSERT(remote_node != NULL);
        if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
            crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                      remote_node->details->id);
            remote_node->details->online = TRUE;
            /* We shouldn't consider an unseen remote-node unclean if we are going
             * to try and connect to it. Otherwise we get an unnecessary fence */
            if (remote_node->details->unseen == TRUE) {
                remote_node->details->unclean = FALSE;
            }

        } else {
            crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                      remote_node->details->id, role2text(rsc->next_role),
                      (rsc->allocated_to? "" : "un"));
            remote_node->details->shutdown = TRUE;
        }
    }

    return rsc->allocated_to;
}
588 
/*!
 * \internal
 * \brief Check whether a resource configures multiple ops with one (name, interval)
 *
 * Scans the resource's operation XML for more than one <op> entry with the
 * given action name and interval, logging a configuration error for each
 * duplicate found.
 *
 * \param[in] rsc          Resource whose operations should be checked
 * \param[in] name         Action name to match
 * \param[in] interval_ms  Action interval (in milliseconds) to match
 *
 * \return TRUE if a duplicate (name, interval) entry exists, else FALSE
 */
static gboolean
is_op_dup(resource_t *rsc, const char *name, guint interval_ms)
{
    gboolean dup = FALSE;
    const char *id = NULL;
    const char *value = NULL;
    xmlNode *operation = NULL;
    guint interval2_ms = 0;

    CRM_ASSERT(rsc);
    for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
         operation = __xml_next_element(operation)) {
        if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
            value = crm_element_value(operation, "name");
            if (safe_str_neq(value, name)) {
                continue;
            }

            value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
            interval2_ms = crm_parse_interval_spec(value);
            if (interval_ms != interval2_ms) {
                continue;
            }

            if (id == NULL) {
                // First match: remember it to report against later duplicates
                id = ID(operation);

            } else {
                crm_config_err("Operation %s is a duplicate of %s", ID(operation), id);
                /* NOTE(review): a line is missing here in this listing --
                 * apparently a second logging call whose argument list follows;
                 * confirm against version control */
                    ("Do not use the same (name, interval) combination more than once per resource");
                dup = TRUE;
            }
        }
    }

    return dup;
}
627 
628 static bool
629 op_cannot_recur(const char *name)
630 {
631  return safe_str_eq(name, RSC_STOP)
632  || safe_str_eq(name, RSC_START)
633  || safe_str_eq(name, RSC_DEMOTE)
634  || safe_str_eq(name, RSC_PROMOTE);
635 }
636 
/*!
 * \internal
 * \brief Create a recurring action for a resource, from one <op> configuration
 *
 * Handles only operations without role="Stopped" (those are handled by
 * RecurringOp_Stopped()).  Skips zero-interval, duplicate, disabled, and
 * non-recurrable operations; cancels an active monitor whose configured role
 * no longer matches the resource's next role; otherwise creates the monitor
 * action and orders it after start/reload/promote/demote as appropriate.
 *
 * \param[in,out] rsc       Resource the recurring action is for
 * \param[in]     start     The resource's start action, or NULL
 * \param[in]     node      Node the resource will be active on, or NULL
 * \param[in]     operation <op> XML element configuring the action
 * \param[in]     data_set  Cluster working set
 */
static void
RecurringOp(resource_t * rsc, action_t * start, node_t * node,
            xmlNode * operation, pe_working_set_t * data_set)
{
    char *key = NULL;
    const char *name = NULL;
    const char *role = NULL;
    const char *interval_spec = NULL;
    const char *node_uname = node? node->details->uname : "n/a";

    guint interval_ms = 0;
    action_t *mon = NULL;
    gboolean is_optional = TRUE;
    GListPtr possible_matches = NULL;

    CRM_ASSERT(rsc);

    /* Only process for the operations without role="Stopped" */
    role = crm_element_value(operation, "role");
    if (role && text2role(role) == RSC_ROLE_STOPPED) {
        return;
    }

    interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
    interval_ms = crm_parse_interval_spec(interval_spec);
    if (interval_ms == 0) {
        // Zero interval means not recurring
        return;
    }

    name = crm_element_value(operation, "name");
    if (is_op_dup(rsc, name, interval_ms)) {
        crm_trace("Not creating duplicate recurring action %s for %dms %s",
                  ID(operation), interval_ms, name);
        return;
    }

    if (op_cannot_recur(name)) {
        crm_config_err("Ignoring %s because action '%s' cannot be recurring",
                       ID(operation), name);
        return;
    }

    key = generate_op_key(rsc->id, name, interval_ms);
    if (find_rsc_op_entry(rsc, key) == NULL) {
        crm_trace("Not creating recurring action %s for disabled resource %s",
                  ID(operation), rsc->id);
        free(key);
        return;
    }

    pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                 ID(operation), rsc->id, role2text(rsc->next_role), node_uname);

    if (start != NULL) {
        // Monitor optionality initially follows the start action's
        pe_rsc_trace(rsc, "Marking %s %s due to %s",
                     key, is_set(start->flags, pe_action_optional) ? "optional" : "mandatory",
                     start->uuid);
        is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
    } else {
        pe_rsc_trace(rsc, "Marking %s optional", key);
        is_optional = TRUE;
    }

    /* start a monitor for an already active resource */
    possible_matches = find_actions_exact(rsc->actions, key, node);
    if (possible_matches == NULL) {
        is_optional = FALSE;
        pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);

    } else {
        GListPtr gIter = NULL;

        // A reschedule request on any matching action makes the monitor mandatory
        for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
            action_t *op = (action_t *) gIter->data;

            if (is_set(op->flags, pe_action_reschedule)) {
                is_optional = FALSE;
                break;
            }
        }
        g_list_free(possible_matches);
    }

    // Configured role doesn't match the next role: cancel if active, else ignore
    if ((rsc->next_role == RSC_ROLE_MASTER && role == NULL)
        || (role != NULL && text2role(role) != rsc->next_role)) {
        int log_level = LOG_TRACE;
        const char *result = "Ignoring";

        if (is_optional) {
            char *after_key = NULL;
            action_t *cancel_op = NULL;

            // It's running, so cancel it
            log_level = LOG_INFO;
            result = "Cancelling";
            cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);

            // Pick the role-change action the cancellation must precede
            switch (rsc->role) {
                case RSC_ROLE_SLAVE:
                case RSC_ROLE_STARTED:
                    if (rsc->next_role == RSC_ROLE_MASTER) {
                        after_key = promote_key(rsc);

                    } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                        after_key = stop_key(rsc);
                    }

                    break;
                case RSC_ROLE_MASTER:
                    after_key = demote_key(rsc);
                    break;
                default:
                    break;
            }

            if (after_key) {
                custom_action_order(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
            }
        }

        do_crm_log(log_level, "%s action %s (%s vs. %s)",
                   result, key, role ? role : role2text(RSC_ROLE_SLAVE),
                   role2text(rsc->next_role));

        free(key);
        return;
    }

    mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
    key = mon->uuid;  // key's memory is now owned by the action
    if (is_optional) {
        pe_rsc_trace(rsc, "%s\t %s (optional)", node_uname, mon->uuid);
    }

    if (start == NULL || is_set(start->flags, pe_action_runnable) == FALSE) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : start un-runnable)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
        pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
                     node_uname, mon->uuid);
        update_action_flags(mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);

    } else if (is_set(mon->flags, pe_action_optional) == FALSE) {
        pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                    mon->task, interval_ms / 1000, rsc->id, node_uname);
    }

    if (rsc->next_role == RSC_ROLE_MASTER) {
        // Masters' monitors should expect the OCF "running master" result
        char *running_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);

        add_hash_param(mon->meta, XML_ATTR_TE_TARGET_RC, running_master);
        free(running_master);
    }

    if (node == NULL || is_set(rsc->flags, pe_rsc_managed)) {
        /* NOTE(review): each custom_action_order() call below is truncated in
         * this listing -- the final argument line(s) (ordering flags and
         * data_set) are missing; confirm against version control */
        custom_action_order(rsc, start_key(rsc), NULL,
                            NULL, strdup(key), mon,

        custom_action_order(rsc, reload_key(rsc), NULL,
                            NULL, strdup(key), mon,

        if (rsc->next_role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, promote_key(rsc), NULL,
                                rsc, NULL, mon,

        } else if (rsc->role == RSC_ROLE_MASTER) {
            custom_action_order(rsc, demote_key(rsc), NULL,
                                rsc, NULL, mon,
        }
    }
}
815 
816 static void
817 Recurring(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
818 {
819  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
820  (node == NULL || node->details->maintenance == FALSE)) {
821  xmlNode *operation = NULL;
822 
823  for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
824  operation = __xml_next_element(operation)) {
825  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
826  RecurringOp(rsc, start, node, operation, data_set);
827  }
828  }
829  }
830 }
831 
832 static void
833 RecurringOp_Stopped(resource_t * rsc, action_t * start, node_t * node,
834  xmlNode * operation, pe_working_set_t * data_set)
835 {
836  char *key = NULL;
837  const char *name = NULL;
838  const char *role = NULL;
839  const char *interval_spec = NULL;
840  const char *node_uname = node? node->details->uname : "n/a";
841 
842  guint interval_ms = 0;
843  GListPtr possible_matches = NULL;
844  GListPtr gIter = NULL;
845 
846  /* Only process for the operations with role="Stopped" */
847  role = crm_element_value(operation, "role");
848  if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
849  return;
850  }
851 
852  interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
853  interval_ms = crm_parse_interval_spec(interval_spec);
854  if (interval_ms == 0) {
855  return;
856  }
857 
858  name = crm_element_value(operation, "name");
859  if (is_op_dup(rsc, name, interval_ms)) {
860  crm_trace("Not creating duplicate recurring action %s for %dms %s",
861  ID(operation), interval_ms, name);
862  return;
863  }
864 
865  if (op_cannot_recur(name)) {
866  crm_config_err("Invalid recurring action %s wth name: '%s'", ID(operation), name);
867  return;
868  }
869 
870  key = generate_op_key(rsc->id, name, interval_ms);
871  if (find_rsc_op_entry(rsc, key) == NULL) {
872  crm_trace("Not creating recurring action %s for disabled resource %s",
873  ID(operation), rsc->id);
874  free(key);
875  return;
876  }
877 
878  // @TODO add support
879  if (is_set(rsc->flags, pe_rsc_unique) == FALSE) {
880  crm_notice("Ignoring %s (recurring monitors for role=Stopped are not supported for anonyous clones)",
881  ID(operation));
882  return;
883  }
884 
885  pe_rsc_trace(rsc,
886  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
887  ID(operation), rsc->id, role2text(rsc->next_role));
888 
889  /* if the monitor exists on the node where the resource will be running, cancel it */
890  if (node != NULL) {
891  possible_matches = find_actions_exact(rsc->actions, key, node);
892  if (possible_matches) {
893  action_t *cancel_op = NULL;
894 
895  g_list_free(possible_matches);
896 
897  cancel_op = pe_cancel_op(rsc, name, interval_ms, node, data_set);
898 
899  if (rsc->next_role == RSC_ROLE_STARTED || rsc->next_role == RSC_ROLE_SLAVE) {
900  /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
901  /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
902  custom_action_order(rsc, NULL, cancel_op, rsc, start_key(rsc), NULL,
903  pe_order_runnable_left, data_set);
904  }
905 
906  pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
907  key, role, role2text(rsc->next_role), node_uname);
908  }
909  }
910 
911  for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
912  node_t *stop_node = (node_t *) gIter->data;
913  const char *stop_node_uname = stop_node->details->uname;
914  gboolean is_optional = TRUE;
915  gboolean probe_is_optional = TRUE;
916  gboolean stop_is_optional = TRUE;
917  action_t *stopped_mon = NULL;
918  char *rc_inactive = NULL;
919  GListPtr probe_complete_ops = NULL;
920  GListPtr stop_ops = NULL;
921  GListPtr local_gIter = NULL;
922 
923  if (node && safe_str_eq(stop_node_uname, node_uname)) {
924  continue;
925  }
926 
927  pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
928  ID(operation), rsc->id, crm_str(stop_node_uname));
929 
930  /* start a monitor for an already stopped resource */
931  possible_matches = find_actions_exact(rsc->actions, key, stop_node);
932  if (possible_matches == NULL) {
933  pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
934  crm_str(stop_node_uname));
935  is_optional = FALSE;
936  } else {
937  pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
938  crm_str(stop_node_uname));
939  is_optional = TRUE;
940  g_list_free(possible_matches);
941  }
942 
943  stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
944 
945  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
946  add_hash_param(stopped_mon->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
947  free(rc_inactive);
948 
949  if (is_set(rsc->flags, pe_rsc_managed)) {
950  GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
951  FALSE);
952  GListPtr pIter = NULL;
953 
954  for (pIter = probes; pIter != NULL; pIter = pIter->next) {
955  action_t *probe = (action_t *) pIter->data;
956 
957  order_actions(probe, stopped_mon, pe_order_runnable_left);
958  crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
959  }
960 
961  g_list_free(probes);
962  }
963 
964  if (probe_complete_ops) {
965  g_list_free(probe_complete_ops);
966  }
967 
968  stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
969 
970  for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
971  action_t *stop = (action_t *) local_gIter->data;
972 
973  if (is_set(stop->flags, pe_action_optional) == FALSE) {
974  stop_is_optional = FALSE;
975  }
976 
977  if (is_set(stop->flags, pe_action_runnable) == FALSE) {
978  crm_debug("%s\t %s (cancelled : stop un-runnable)",
979  crm_str(stop_node_uname), stopped_mon->uuid);
980  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
981  }
982 
983  if (is_set(rsc->flags, pe_rsc_managed)) {
984  custom_action_order(rsc, stop_key(rsc), stop,
985  NULL, strdup(key), stopped_mon,
987  }
988 
989  }
990 
991  if (stop_ops) {
992  g_list_free(stop_ops);
993  }
994 
995  if (is_optional == FALSE && probe_is_optional && stop_is_optional
996  && is_set(rsc->flags, pe_rsc_managed) == FALSE) {
997  pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
998  key, crm_str(stop_node_uname));
999  update_action_flags(stopped_mon, pe_action_optional, __FUNCTION__, __LINE__);
1000  }
1001 
1002  if (is_set(stopped_mon->flags, pe_action_optional)) {
1003  pe_rsc_trace(rsc, "%s\t %s (optional)", crm_str(stop_node_uname), stopped_mon->uuid);
1004  }
1005 
1006  if (stop_node->details->online == FALSE || stop_node->details->unclean) {
1007  pe_rsc_debug(rsc, "%s\t %s (cancelled : no node available)",
1008  crm_str(stop_node_uname), stopped_mon->uuid);
1009  update_action_flags(stopped_mon, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
1010  }
1011 
1012  if (is_set(stopped_mon->flags, pe_action_runnable)
1013  && is_set(stopped_mon->flags, pe_action_optional) == FALSE) {
1014  crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
1015  interval_ms / 1000, rsc->id, crm_str(stop_node_uname));
1016  }
1017  }
1018 
1019  free(key);
1020 }
1021 
1022 static void
1023 Recurring_Stopped(resource_t * rsc, action_t * start, node_t * node, pe_working_set_t * data_set)
1024 {
1025  if (is_not_set(rsc->flags, pe_rsc_maintenance) &&
1026  (node == NULL || node->details->maintenance == FALSE)) {
1027  xmlNode *operation = NULL;
1028 
1029  for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
1030  operation = __xml_next_element(operation)) {
1031  if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
1032  RecurringOp_Stopped(rsc, start, node, operation, data_set);
1033  }
1034  }
1035  }
1036 }
1037 
/* Create and order the actions needed to live-migrate rsc from 'current' to
 * 'chosen' instead of performing a plain stop+start.
 *
 * "partial" means a previous migrate_to already completed (the resource has a
 * partial_migration_target), so only migrate_from remains to be done.
 *
 * NOTE(review): several continuation lines of this function were lost in
 * extraction (marked below); compare against the full source before editing.
 */
static void
handle_migration_actions(resource_t * rsc, node_t *current, node_t *chosen, pe_working_set_t * data_set)
{
    action_t *migrate_to = NULL;
    action_t *migrate_from = NULL;
    action_t *start = NULL;
    action_t *stop = NULL;
    gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;

    pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
                 rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");

    /* The ordinary start/stop actions are still created; they are adjusted below */
    start = start_action(rsc, chosen, TRUE);
    stop = stop_action(rsc, current, TRUE);

    /* A completed partial migration no longer needs a migrate_to action */
    if (partial == FALSE) {
        migrate_to = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), RSC_MIGRATE, current, TRUE, TRUE, data_set);
    }

    migrate_from = custom_action(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), RSC_MIGRATED, chosen, TRUE, TRUE, data_set);

    if ((migrate_to && migrate_from) || (migrate_from && partial)) {

        /* NOTE(review): line(s) lost in extraction here -- confirm against
         * the full source */

        update_action_flags(start, pe_action_pseudo, __FUNCTION__, __LINE__); /* easier than trying to delete it from the graph */

        /* order probes before migrations */
        if (partial) {
            set_bit(migrate_from->flags, pe_action_migrate_runnable);
            /* migrate_from takes over the start action's dependency requirements */
            migrate_from->needs = start->needs;

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL, pe_order_optional, data_set);

        } else {
            set_bit(migrate_from->flags, pe_action_migrate_runnable);
            /* NOTE(review): line lost in extraction here -- confirm against
             * the full source */
            migrate_to->needs = start->needs;

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL, pe_order_optional, data_set);
            custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
            /* NOTE(review): continuation line(s) lost in extraction */
        }

        custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
        /* NOTE(review): continuation line(s) lost in extraction */
        custom_action_order(rsc, generate_op_key(rsc->id, RSC_MIGRATED, 0), NULL,
        /* NOTE(review): continuation line(s) lost in extraction */

    }

    if (migrate_to) {
        /* Record the migration source on the action for the executor/agent */
        add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        /* NOTE(review): line lost in extraction here (presumably the matching
         * MIGRATE_TARGET parameter) -- confirm against the full source */

        /* Pacemaker Remote connections don't require pending to be recorded in
         * the CIB. We can reduce CIB writes by not setting PENDING for them.
         */
        if (rsc->is_remote_node == FALSE) {
            /* migrate_to takes place on the source node, but can
             * have an effect on the target node depending on how
             * the agent is written. Because of this, we have to maintain
             * a record that the migrate_to occurred, in case the source node
             * loses membership while the migrate_to action is still in-flight.
             */
            add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
        }
    }

    if (migrate_from) {
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
        add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
    }
}
1114 
/* Create all actions (stop/start/demote/promote/migrate plus recurring
 * monitors) needed to take this primitive from its current role to its
 * allocated next role.
 *
 * NOTE(review): the signature line and several continuation lines of this
 * function were lost in extraction (marked below); in Pacemaker this appears
 * to be native_create_actions(resource_t *rsc, pe_working_set_t *data_set) --
 * confirm against the full source before editing.
 */
void
{
    action_t *start = NULL;
    node_t *chosen = NULL;      /* node the resource is allocated to */
    node_t *current = NULL;     /* node the resource is currently active on */
    gboolean need_stop = FALSE;
    gboolean is_moving = FALSE;
    gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? TRUE : FALSE;

    GListPtr gIter = NULL;
    unsigned int num_all_active = 0;
    unsigned int num_clean_active = 0;
    bool multiply_active = FALSE;
    enum rsc_role_e role = RSC_ROLE_UNKNOWN;
    enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

    CRM_ASSERT(rsc);
    chosen = rsc->allocated_to;

    /* Normalize an unknown next role: started if allocated, stopped otherwise */
    if (chosen != NULL && rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STARTED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));

    } else if (rsc->next_role == RSC_ROLE_UNKNOWN) {
        rsc->next_role = RSC_ROLE_STOPPED;
        pe_rsc_trace(rsc, "Fixed next_role: unknown -> %s", role2text(rsc->next_role));
    }

    pe_rsc_trace(rsc, "Processing state transition for %s %p: %s->%s", rsc->id, rsc,
                 role2text(rsc->role), role2text(rsc->next_role));

    current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);

    /* Clean up after migrations whose source node still thinks it hosts us */
    for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
        node_t *dangling_source = (node_t *) gIter->data;

        action_t *stop = stop_action(rsc, dangling_source, FALSE);

        set_bit(stop->flags, pe_action_dangle);
        pe_rsc_trace(rsc, "Forcing a cleanup of %s on %s",
                     rsc->id, dangling_source->details->uname);

        if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
            DeleteRsc(rsc, dangling_source, FALSE, data_set);
        }
    }

    if ((num_all_active == 2) && (num_clean_active == 2) && chosen
        /* NOTE(review): a condition line was lost in extraction here */
        && (current->details == rsc->partial_migration_source->details)
        && (chosen->details == rsc->partial_migration_target->details)) {

        /* The chosen node is still the migration target from a partial
         * migration. Attempt to continue the migration instead of recovering
         * by stopping the resource everywhere and starting it on a single node.
         */
        pe_rsc_trace(rsc,
                     "Will attempt to continue with a partial migration to target %s from %s",
        /* NOTE(review): argument/closing line(s) lost in extraction */

    } else if (is_not_set(rsc->flags, pe_rsc_needs_fencing)) {
        /* If a resource has "requires" set to nothing or quorum, don't consider
         * it active on unclean nodes (similar to how all resources behave when
         * stonith-enabled is false). We can start such resources elsewhere
         * before fencing completes, and if we considered the resource active on
         * the failed node, we would attempt recovery for being active on
         * multiple nodes.
         */
        multiply_active = (num_clean_active > 1);
    } else {
        multiply_active = (num_all_active > 1);
    }

    if (multiply_active) {
        /* NOTE(review): an inner condition line was lost in extraction here */
        // Migration was in progress, but we've chosen a different target
        crm_notice("Resource %s can no longer migrate to %s. Stopping on %s too",
        /* NOTE(review): argument line(s) lost in extraction */

    } else {
        // Resource was incorrectly multiply active
        pe_proc_err("Resource %s is active on %u nodes (%s)",
                    rsc->id, num_all_active,
                    recovery2text(rsc->recovery_type));
        crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
    }

    if (rsc->recovery_type == recovery_stop_start) {
        need_stop = TRUE;
    }

    /* If by chance a partial migration is in process, but the migration
     * target is not chosen still, clear all partial migration data.
     */
    /* NOTE(review): the guarding line(s) of this block were lost in extraction */
        allow_migrate = FALSE;
    }

    if (is_set(rsc->flags, pe_rsc_start_pending)) {
        start = start_action(rsc, chosen, TRUE);
        /* NOTE(review): line lost in extraction here */
    }

    /* Decide whether a full stop is required before (re)starting */
    if (current && chosen && current->details != chosen->details) {
        pe_rsc_trace(rsc, "Moving %s", rsc->id);
        is_moving = TRUE;
        need_stop = TRUE;

    } else if (is_set(rsc->flags, pe_rsc_failed)) {
        pe_rsc_trace(rsc, "Recovering %s", rsc->id);
        need_stop = TRUE;

    } else if (is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "Block %s", rsc->id);
        need_stop = TRUE;

    } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
        /* Recovery of a promoted resource */
        start = start_action(rsc, chosen, TRUE);
        if (is_set(start->flags, pe_action_optional) == FALSE) {
            pe_rsc_trace(rsc, "Forced start %s", rsc->id);
            need_stop = TRUE;
        }
    }

    pe_rsc_trace(rsc, "Creating actions for %s: %s->%s", rsc->id,
                 role2text(rsc->role), role2text(rsc->next_role));

    /* Create any additional actions required when bringing resource down and
     * back up to same level.
     */
    role = rsc->role;
    /* Walk the role state matrix down to stopped */
    while (role != RSC_ROLE_STOPPED) {
        next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
        pe_rsc_trace(rsc, "Down: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
                     rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, current, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }


    /* Walk back up as far as the current role (the bounce-down-and-up case) */
    while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) {
        next_role = rsc_state_matrix[role][rsc->role];
        pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role),
                     rsc->id, need_stop ? " required" : "");
        if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }
    role = rsc->role;

    /* Required steps from this role to the next */
    while (role != rsc->next_role) {
        next_role = rsc_state_matrix[role][rsc->next_role];
        pe_rsc_trace(rsc, "Role: Executing: %s->%s = (%s on %s)", role2text(role), role2text(next_role), rsc->id, chosen?chosen->details->uname:"NA");
        if (rsc_action_matrix[role][next_role] (rsc, chosen, FALSE, data_set) == FALSE) {
            break;
        }
        role = next_role;
    }

    if(is_set(rsc->flags, pe_rsc_block)) {
        pe_rsc_trace(rsc, "No monitor additional ops for blocked resource");

    } else if (rsc->next_role != RSC_ROLE_STOPPED || is_set(rsc->flags, pe_rsc_managed) == FALSE) {
        pe_rsc_trace(rsc, "Monitor ops for active resource");
        start = start_action(rsc, chosen, TRUE);
        Recurring(rsc, start, chosen, data_set);
        Recurring_Stopped(rsc, start, chosen, data_set);
    } else {
        pe_rsc_trace(rsc, "Monitor ops for inactive resource");
        Recurring_Stopped(rsc, NULL, NULL, data_set);
    }

    /* if we are stuck in a partial migration, where the target
     * of the partial migration no longer matches the chosen target.
     * A full stop/start is required */
    if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
        pe_rsc_trace(rsc, "Not allowing partial migration to continue. %s", rsc->id);
        allow_migrate = FALSE;

    } else if (is_moving == FALSE ||
               is_not_set(rsc->flags, pe_rsc_managed) ||
               is_set(rsc->flags, pe_rsc_failed) ||
               is_set(rsc->flags, pe_rsc_start_pending) ||
               (current && current->details->unclean) ||
               rsc->next_role < RSC_ROLE_STARTED) {

        allow_migrate = FALSE;
    }

    if (allow_migrate) {
        handle_migration_actions(rsc, current, chosen, data_set);
    }
}
1315 
1316 static void
1317 rsc_avoids_remote_nodes(resource_t *rsc)
1318 {
1319  GHashTableIter iter;
1320  node_t *node = NULL;
1321  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
1322  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1323  if (node->details->remote_rsc) {
1324  node->weight = -INFINITY;
1325  }
1326  }
1327 }
1328 
1343 static GList *
1344 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
1345 {
1346  GList *allowed_nodes = NULL;
1347 
1348  if (rsc->allowed_nodes) {
1349  allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
1350  }
1351 
1352  if (is_set(data_set->flags, pe_flag_stdout)) {
1353  allowed_nodes = g_list_sort(allowed_nodes, sort_node_uname);
1354  }
1355  return allowed_nodes;
1356 }
1357 
/* Create the implicit ordering/colocation constraints a primitive always
 * needs: stop-before-start, promotable orderings, unfencing orderings,
 * utilization (placement strategy) orderings, and container/guest-node
 * relationships.
 *
 * NOTE(review): the signature line and several flag-argument lines of this
 * function were lost in extraction (marked below); confirm against the full
 * source before editing.
 */
void
{
    /* This function is on the critical path and worth optimizing as much as possible */

    pe_resource_t *top = NULL;
    GList *allowed_nodes = NULL;
    bool check_unfencing = FALSE;
    bool check_utilization = FALSE;

    if (is_not_set(rsc->flags, pe_rsc_managed)) {
        pe_rsc_trace(rsc,
                     "Skipping native constraints for unmanaged resource: %s",
                     rsc->id);
        return;
    }

    top = uber_parent(rsc);

    // Whether resource requires unfencing
    check_unfencing = is_not_set(rsc->flags, pe_rsc_fence_device)
                      && is_set(data_set->flags, pe_flag_enable_unfencing)
                      && is_set(rsc->flags, pe_rsc_needs_unfencing);

    // Whether a non-default placement strategy is used
    check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                        && safe_str_neq(data_set->placement_strategy, "default");

    // Order stops before starts (i.e. restart)
    custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                        /* NOTE(review): flags argument line lost in extraction */
                        data_set);

    // Promotable ordering: demote before stop, start before promote
    if (is_set(top->flags, pe_rsc_promotable) || (rsc->role > RSC_ROLE_SLAVE)) {
        custom_action_order(rsc, generate_op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_implies_first_master, data_set);

        custom_action_order(rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, generate_op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, data_set);
    }

    // Certain checks need allowed nodes
    if (check_unfencing || check_utilization || rsc->container) {
        allowed_nodes = allowed_nodes_as_list(rsc, data_set);
    }

    if (check_unfencing) {
        /* Check if the node needs to be unfenced first */

        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *node = item->data;
            pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);

            crm_debug("Ordering any stops of %s before %s, and any starts after",
                      rsc->id, unfence->uuid);

            /*
             * It would be more efficient to order clone resources once,
             * rather than order each instance, but ordering the instance
             * allows us to avoid unnecessary dependencies that might conflict
             * with user constraints.
             *
             * @TODO: This constraint can still produce a transition loop if the
             * resource has a stop scheduled on the node being unfenced, and
             * there is a user ordering constraint to start some other resource
             * (which will be ordered after the unfence) before stopping this
             * resource. An example is "start some slow-starting cloned service
             * before stopping an associated virtual IP that may be moving to
             * it":
             * stop this -> unfencing -> start that -> stop this
             */
            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
            /* NOTE(review): flags/data_set argument line lost in extraction */

            custom_action_order(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                /* NOTE(review): flags argument line lost in extraction */
                                data_set);
        }
    }

    if (check_utilization) {
        GListPtr gIter = NULL;

        pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
                     rsc->id, data_set->placement_strategy);

        /* Order this resource's stop before the per-node "load stopped"
         * pseudo-op on each node it is currently running on */
        for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
            node_t *current = (node_t *) gIter->data;

            char *load_stopped_task = crm_concat(LOAD_STOPPED, current->details->uname, '_');
            action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = node_copy(current);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
            }

            custom_action_order(rsc, stop_key(rsc), NULL,
                                NULL, load_stopped_task, load_stopped, pe_order_load, data_set);
        }

        /* And order each node's "load stopped" pseudo-op before this
         * resource's start/migrate_to there */
        for (GList *item = allowed_nodes; item; item = item->next) {
            pe_node_t *next = item->data;
            char *load_stopped_task = crm_concat(LOAD_STOPPED, next->details->uname, '_');
            action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);

            if (load_stopped->node == NULL) {
                load_stopped->node = node_copy(next);
                update_action_flags(load_stopped, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
            }

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, start_key(rsc), NULL, pe_order_load, data_set);

            custom_action_order(NULL, strdup(load_stopped_task), load_stopped,
                                rsc, generate_op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                pe_order_load, data_set);

            free(load_stopped_task);
        }
    }

    if (rsc->container) {
        resource_t *remote_rsc = NULL;

        if (rsc->is_remote_node) {
            // rsc is the implicit remote connection for a guest or bundle node

            /* Do not allow a guest resource to live on a Pacemaker Remote node,
             * to avoid nesting remotes. However, allow bundles to run on remote
             * nodes.
             */
            if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                rsc_avoids_remote_nodes(rsc->container);
            }

            /* If someone cleans up a guest or bundle node's container, we will
             * likely schedule a (re-)probe of the container and recovery of the
             * connection. Order the connection stop after the container probe,
             * so that if we detect the container running, we will trigger a new
             * transition and avoid the unnecessary recovery.
             */
            /* NOTE(review): the ordering-call line(s) were lost in extraction */
                                         pe_order_optional, data_set);

        /* A user can specify that a resource must start on a Pacemaker Remote
         * node by explicitly configuring it with the container=NODENAME
         * meta-attribute. This is of questionable merit, since location
         * constraints can accomplish the same thing. But we support it, so here
         * we check whether a resource (that is not itself a remote connection)
         * has container set to a remote node or guest node resource.
         */
        } else if (rsc->container->is_remote_node) {
            remote_rsc = rsc->container;
        } else {
            remote_rsc = pe__resource_contains_guest_node(data_set,
                                                          rsc->container);
        }

        if (remote_rsc) {
            /* Force the resource on the Pacemaker Remote node instead of
             * colocating the resource with the container resource.
             */
            for (GList *item = allowed_nodes; item; item = item->next) {
                pe_node_t *node = item->data;

                if (node->details->remote_rsc != remote_rsc) {
                    node->weight = -INFINITY;
                }
            }

        } else {
            /* This resource is either a filler for a container that does NOT
             * represent a Pacemaker Remote node, or a Pacemaker Remote
             * connection resource for a guest node or bundle.
             */
            int score;

            crm_trace("Order and colocate %s relative to its container %s",
                      rsc->id, rsc->container->id);

            /* NOTE(review): the first line of this ordering call was lost in
             * extraction */
                                rsc, generate_op_key(rsc->id, RSC_START, 0), NULL,
            /* NOTE(review): flags/data_set argument line lost in extraction */

            custom_action_order(rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container, generate_op_key(rsc->container->id, RSC_STOP, 0), NULL,
                                pe_order_implies_first, data_set);

            if (is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                score = 10000;    /* Highly preferred but not essential */
            } else {
                score = INFINITY; /* Force them to run on the same host */
            }
            rsc_colocation_new("resource-with-container", NULL, score, rsc,
                               rsc->container, NULL, NULL, data_set);
        }
    }

    if (rsc->is_remote_node || is_set(rsc->flags, pe_rsc_fence_device)) {
        /* don't allow remote nodes to run stonith devices
         * or remote connection resources.*/
        rsc_avoids_remote_nodes(rsc);
    }
    g_list_free(allowed_nodes);
}
1570 
/* Apply a colocation constraint from this primitive (the dependent, LH side)
 * by delegating to the RH resource's variant-specific rsc_colocation_rh
 * method after validating both sides exist.
 *
 * NOTE(review): the first signature line was lost in extraction; it appears
 * to declare the LH and RH resource parameters (rsc_lh, rsc_rh) -- confirm
 * against the full source.
 */
void
 rsc_colocation_t *constraint,
 pe_working_set_t *data_set)
{
    if (rsc_lh == NULL) {
        pe_err("rsc_lh was NULL for %s", constraint->id);
        return;

    } else if (constraint->rsc_rh == NULL) {
        pe_err("rsc_rh was NULL for %s", constraint->id);
        return;
    }

    pe_rsc_trace(rsc_lh, "Processing colocation constraint between %s and %s", rsc_lh->id,
                 rsc_rh->id);

    /* The RH resource's method merges the constraint into the LH resource's
     * allowed nodes or priority */
    rsc_rh->cmds->rsc_colocation_rh(rsc_lh, rsc_rh, constraint, data_set);
}
1590 
/* Decide how a colocation constraint should influence the LH (dependent)
 * resource: change its node scores (influence_rsc_location), change its
 * promotion priority (influence_rsc_priority), or nothing.
 *
 * NOTE(review): the return-type/signature lines were lost in extraction; the
 * visible trailing fragment suggests
 * filter_colocation_constraint(rsc_lh, rsc_rh, constraint, preview) returning
 * enum filter_colocation_res -- confirm against the full source.
 */
 rsc_colocation_t * constraint, gboolean preview)
{
    /* A zero score has no effect by definition */
    if (constraint->score == 0) {
        return influence_nothing;
    }

    /* rh side must be allocated before we can process constraint */
    if (preview == FALSE && is_set(rsc_rh->flags, pe_rsc_provisional)) {
        return influence_nothing;
    }

    if ((constraint->role_lh >= RSC_ROLE_SLAVE) &&
        rsc_lh->parent && is_set(rsc_lh->parent->flags, pe_rsc_promotable)
        && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {

        /* LH and RH resources have already been allocated, place the correct
         * priority on LH rsc for the given promotable clone resource role */
        return influence_rsc_priority;
    }

    if (preview == FALSE && is_not_set(rsc_lh->flags, pe_rsc_provisional)) {
        // Log an error if we violated a mandatory colocation constraint
        const pe_node_t *rh_node = rsc_rh->allocated_to;

        if (rsc_lh->allocated_to == NULL) {
            // Dependent resource isn't allocated, so constraint doesn't matter
            return influence_nothing;
        }

        if (constraint->score >= INFINITY) {
            // Dependent resource must colocate with rh_node

            if ((rh_node == NULL)
                || (rh_node->details != rsc_lh->allocated_to->details)) {
                crm_err("%s must be colocated with %s but is not (%s vs. %s)",
                        rsc_lh->id, rsc_rh->id,
                        rsc_lh->allocated_to->details->uname,
                        (rh_node? rh_node->details->uname : "unallocated"));
            }

        } else if (constraint->score <= -INFINITY) {
            // Dependent resource must anti-colocate with rh_node

            if ((rh_node != NULL)
                && (rsc_lh->allocated_to->details == rh_node->details)) {
                crm_err("%s and %s must be anti-colocated but are allocated "
                        "to the same node (%s)",
                        rsc_lh->id, rsc_rh->id, rh_node->details->uname);
            }
        }
        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh != rsc_lh->next_role) {
        crm_trace("LH: Skipping constraint: \"%s\" state filter nextrole is %s",
                  role2text(constraint->role_lh), role2text(rsc_lh->next_role));
        return influence_nothing;
    }

    if (constraint->score > 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh != rsc_rh->next_role) {
        crm_trace("RH: Skipping constraint: \"%s\" state filter", role2text(constraint->role_rh));
        /* NOTE(review): this returns FALSE where the sibling branches return
         * influence_nothing; the values coincide (both 0) but using the enum
         * constant would be consistent -- consider fixing upstream */
        return FALSE;
    }

    if (constraint->score < 0
        && constraint->role_lh != RSC_ROLE_UNKNOWN && constraint->role_lh == rsc_lh->next_role) {
        crm_trace("LH: Skipping negative constraint: \"%s\" state filter",
                  role2text(constraint->role_lh));
        return influence_nothing;
    }

    if (constraint->score < 0
        && constraint->role_rh != RSC_ROLE_UNKNOWN && constraint->role_rh == rsc_rh->next_role) {
        crm_trace("RH: Skipping negative constraint: \"%s\" state filter",
                  role2text(constraint->role_rh));
        return influence_nothing;
    }

    return influence_rsc_location;
}
1675 
1676 static void
1677 influence_priority(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1678 {
1679  const char *rh_value = NULL;
1680  const char *lh_value = NULL;
1681  const char *attribute = CRM_ATTR_ID;
1682  int score_multiplier = 1;
1683 
1684  if (constraint->node_attribute != NULL) {
1685  attribute = constraint->node_attribute;
1686  }
1687 
1688  if (!rsc_rh->allocated_to || !rsc_lh->allocated_to) {
1689  return;
1690  }
1691 
1692  lh_value = pe_node_attribute_raw(rsc_lh->allocated_to, attribute);
1693  rh_value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1694 
1695  if (!safe_str_eq(lh_value, rh_value)) {
1696  if(constraint->score == INFINITY && constraint->role_lh == RSC_ROLE_MASTER) {
1697  rsc_lh->priority = -INFINITY;
1698  }
1699  return;
1700  }
1701 
1702  if (constraint->role_rh && (constraint->role_rh != rsc_rh->next_role)) {
1703  return;
1704  }
1705 
1706  if (constraint->role_lh == RSC_ROLE_SLAVE) {
1707  score_multiplier = -1;
1708  }
1709 
1710  rsc_lh->priority = merge_weights(score_multiplier * constraint->score, rsc_lh->priority);
1711 }
1712 
1713 static void
1714 colocation_match(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
1715 {
1716  const char *tmp = NULL;
1717  const char *value = NULL;
1718  const char *attribute = CRM_ATTR_ID;
1719 
1720  GHashTable *work = NULL;
1721  gboolean do_check = FALSE;
1722 
1723  GHashTableIter iter;
1724  node_t *node = NULL;
1725 
1726  if (constraint->node_attribute != NULL) {
1727  attribute = constraint->node_attribute;
1728  }
1729 
1730  if (rsc_rh->allocated_to) {
1731  value = pe_node_attribute_raw(rsc_rh->allocated_to, attribute);
1732  do_check = TRUE;
1733 
1734  } else if (constraint->score < 0) {
1735  /* nothing to do:
1736  * anti-colocation with something that is not running
1737  */
1738  return;
1739  }
1740 
1741  work = node_hash_dup(rsc_lh->allowed_nodes);
1742 
1743  g_hash_table_iter_init(&iter, work);
1744  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
1745  tmp = pe_node_attribute_raw(node, attribute);
1746  if (do_check && safe_str_eq(tmp, value)) {
1747  if (constraint->score < INFINITY) {
1748  pe_rsc_trace(rsc_lh, "%s: %s.%s += %d", constraint->id, rsc_lh->id,
1749  node->details->uname, constraint->score);
1750  node->weight = merge_weights(constraint->score, node->weight);
1751  }
1752 
1753  } else if (do_check == FALSE || constraint->score >= INFINITY) {
1754  pe_rsc_trace(rsc_lh, "%s: %s.%s -= %d (%s)", constraint->id, rsc_lh->id,
1755  node->details->uname, constraint->score,
1756  do_check ? "failed" : "unallocated");
1757  node->weight = merge_weights(-constraint->score, node->weight);
1758  }
1759  }
1760 
1761  if (can_run_any(work)
1762  || constraint->score <= -INFINITY || constraint->score >= INFINITY) {
1763  g_hash_table_destroy(rsc_lh->allowed_nodes);
1764  rsc_lh->allowed_nodes = work;
1765  work = NULL;
1766 
1767  } else {
1768  static char score[33];
1769 
1770  score2char_stack(constraint->score, score, sizeof(score));
1771 
1772  pe_rsc_info(rsc_lh, "%s: Rolling back scores from %s (%d, %s)",
1773  rsc_lh->id, rsc_rh->id, do_check, score);
1774  }
1775 
1776  if (work) {
1777  g_hash_table_destroy(work);
1778  }
1779 }
1780 
/* Apply a colocation constraint to the LH (dependent) resource, with this
 * primitive as the RH side: first classify the constraint via
 * filter_colocation_constraint(), then either adjust the LH resource's
 * priority or merge scores into its allowed nodes.
 *
 * NOTE(review): the first signature line and the switch case labels were
 * lost in extraction (marked below); confirm against the full source.
 */
void
 rsc_colocation_t *constraint,
 pe_working_set_t *data_set)
{
    enum filter_colocation_res filter_results;

    CRM_ASSERT(rsc_lh);
    CRM_ASSERT(rsc_rh);
    filter_results = filter_colocation_constraint(rsc_lh, rsc_rh, constraint, FALSE);
    pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d, filter=%d)",
                 constraint->score >= 0 ? "" : "Anti-",
                 rsc_lh->id, rsc_rh->id, constraint->id, constraint->score, filter_results);

    switch (filter_results) {
        /* NOTE(review): case label lost in extraction (presumably
         * influence_rsc_priority, given the call below) */
            influence_priority(rsc_lh, rsc_rh, constraint);
            break;
        /* NOTE(review): case label lost in extraction (presumably
         * influence_rsc_location, given the call below) */
            pe_rsc_trace(rsc_lh, "%sColocating %s with %s (%s, weight=%d)",
                         constraint->score >= 0 ? "" : "Anti-",
                         rsc_lh->id, rsc_rh->id, constraint->id, constraint->score);
            colocation_match(rsc_lh, rsc_rh, constraint);
            break;
        case influence_nothing:
        default:
            return;
    }
}
1810 
1811 static gboolean
1812 filter_rsc_ticket(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket)
1813 {
1814  if (rsc_ticket->role_lh != RSC_ROLE_UNKNOWN && rsc_ticket->role_lh != rsc_lh->role) {
1815  pe_rsc_trace(rsc_lh, "LH: Skipping constraint: \"%s\" state filter",
1816  role2text(rsc_ticket->role_lh));
1817  return FALSE;
1818  }
1819 
1820  return TRUE;
1821 }
1822 
/* Apply a ticket constraint to a resource: when the ticket is not granted
 * (or is in standby), restrict where the resource may run according to the
 * constraint's loss policy (stop, demote, fence, or freeze).
 *
 * NOTE(review): the signature line was lost in extraction; the body uses
 * rsc_lh, rsc_ticket and data_set parameters -- confirm against the full
 * source.
 */
void
{
    if (rsc_ticket == NULL) {
        pe_err("rsc_ticket was NULL");
        return;
    }

    if (rsc_lh == NULL) {
        pe_err("rsc_lh was NULL for %s", rsc_ticket->id);
        return;
    }

    /* Granted and not on standby: nothing to restrict */
    if (rsc_ticket->ticket->granted && rsc_ticket->ticket->standby == FALSE) {
        return;
    }

    /* For collective resources, recurse into the children */
    if (rsc_lh->children) {
        GListPtr gIter = rsc_lh->children;

        pe_rsc_trace(rsc_lh, "Processing ticket dependencies from %s", rsc_lh->id);

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child_rsc = (resource_t *) gIter->data;

            rsc_ticket_constraint(child_rsc, rsc_ticket, data_set);
        }
        return;
    }

    pe_rsc_trace(rsc_lh, "%s: Processing ticket dependency on %s (%s, %s)",
                 rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
                 role2text(rsc_ticket->role_lh));

    /* Ticket lost while the resource is active: apply the loss policy */
    if ((rsc_ticket->ticket->granted == FALSE)
        && (rsc_lh->running_on != NULL)) {

        GListPtr gIter = NULL;

        switch (rsc_ticket->loss_policy) {
            case loss_ticket_stop:
                resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
                break;

            case loss_ticket_demote:
                // Promotion score will be set to -INFINITY in promotion_order()
                if (rsc_ticket->role_lh != RSC_ROLE_MASTER) {
                    resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);
                }
                break;

            case loss_ticket_fence:
                if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
                    return;
                }

                resource_location(rsc_lh, NULL, -INFINITY, "__loss_of_ticket__", data_set);

                /* Deadman policy: fence every node the resource is active on */
                for (gIter = rsc_lh->running_on; gIter != NULL; gIter = gIter->next) {
                    node_t *node = (node_t *) gIter->data;

                    pe_fence_node(data_set, node, "deadman ticket was lost");
                }
                break;

            case loss_ticket_freeze:
                if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
                    return;
                }
                /* Freeze: stop managing and block further actions */
                if (rsc_lh->running_on != NULL) {
                    clear_bit(rsc_lh->flags, pe_rsc_managed);
                    set_bit(rsc_lh->flags, pe_rsc_block);
                }
                break;
        }

    } else if (rsc_ticket->ticket->granted == FALSE) {

        /* Not granted and not active: keep it from starting (except for
         * master-role constraints without a stop policy) */
        if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
            resource_location(rsc_lh, NULL, -INFINITY, "__no_ticket__", data_set);
        }

    } else if (rsc_ticket->ticket->standby) {

        if (rsc_ticket->role_lh != RSC_ROLE_MASTER || rsc_ticket->loss_policy == loss_ticket_stop) {
            resource_location(rsc_lh, NULL, -INFINITY, "__ticket_standby__", data_set);
        }
    }
}
1912 
1913 enum pe_action_flags
1914 native_action_flags(action_t * action, node_t * node)
1915 {
1916  return action->flags;
1917 }
1918 
1919 static inline bool
1920 is_primitive_action(pe_action_t *action)
1921 {
1922  return action && action->rsc && (action->rsc->variant == pe_native);
1923 }
1924 
/*!
 * \internal
 * \brief Enforce the implications of a restart ("stop then start") ordering
 *        between two primitive actions
 *
 * Makes 'first' (the stop) mandatory when 'then' (the start) is required,
 * or when the start of a managed resource is unrunnable (a resource that
 * should restart but can't start must still stop).
 *
 * NOTE(review): the doxygen comment block (lines 1925-1935) is missing from
 * this extraction.
 */
1936 static void
1937 handle_restart_ordering(pe_action_t *first, pe_action_t *then,
1938  enum pe_action_flags filter)
1939 {
1940  const char *reason = NULL;
1941 
1942  CRM_ASSERT(is_primitive_action(first));
1943  CRM_ASSERT(is_primitive_action(then));
1944 
1945  // We need to update the action in two cases:
1946 
1947  // ... if 'then' is required
1948  if (is_set(filter, pe_action_optional)
1949  && is_not_set(then->flags, pe_action_optional)) {
1950  reason = "restart";
1951  }
1952 
1953  /* ... if 'then' is unrunnable start of managed resource (if a resource
1954  * should restart but can't start, we still want to stop)
1955  */
1956  if (is_set(filter, pe_action_runnable)
1957  && is_not_set(then->flags, pe_action_runnable)
1958  && is_set(then->rsc->flags, pe_rsc_managed)
1959  && safe_str_eq(then->task, RSC_START)) {
1960  reason = "stop";
1961  }
1962 
 /* No implication applies: leave both actions untouched */
1963  if (reason == NULL) {
1964  return;
1965  }
1966 
1967  pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
1968  first->uuid, then->uuid, reason);
1969 
1970  // Make 'first' required if it is runnable
1971  if (is_set(first->flags, pe_action_runnable)) {
1972  pe_action_implies(first, then, pe_action_optional);
1973  }
1974 
1975  // Make 'first' required if 'then' is required
1976  if (is_not_set(then->flags, pe_action_optional)) {
1977  pe_action_implies(first, then, pe_action_optional);
1978  }
1979 
1980  // Make 'first' unmigratable if 'then' is unmigratable
1981  if (is_not_set(then->flags, pe_action_migrate_runnable)) {
 /* NOTE(review): doxygen line 1982 is missing from this extraction;
  * presumably pe_action_implies(first, then, pe_action_migrate_runnable)
  * -- confirm against upstream source */
1983  }
1984 
1985  // Make 'then' unrunnable if 'first' is required but unrunnable
1986  if (is_not_set(first->flags, pe_action_optional)
1987  && is_not_set(first->flags, pe_action_runnable)) {
1988  pe_action_implies(then, first, pe_action_runnable);
1989  }
1990 }
1991 
/*!
 * \internal
 * \brief Update a 'first'/'then' action pair according to their ordering
 *        constraint type, and report which of the two changed
 *
 * NOTE(review): the signature line (doxygen line 1993) is missing from this
 * extraction; presumably
 * native_update_actions(first, then, node, flags, filter, type, data_set)
 * -- confirm against upstream source.
 */
1992 enum pe_graph_flags
1994  enum pe_action_flags flags, enum pe_action_flags filter,
1995  enum pe_ordering type, pe_working_set_t *data_set)
1996 {
1997  /* flags == get_action_flags(first, then_node) called from update_action() */
1998  enum pe_graph_flags changed = pe_graph_none;
1999  enum pe_action_flags then_flags = then->flags;
2000  enum pe_action_flags first_flags = first->flags;
2001 
2002  crm_trace( "Testing %s on %s (0x%.6x) with %s 0x%.6x",
2003  first->uuid, first->node ? first->node->details->uname : "[none]",
2004  first->flags, then->uuid, then->flags);
2005 
 /* Asymmetrical ordering: only constrains 'then' when 'first' is blocked */
2006  if (type & pe_order_asymmetrical) {
2007  resource_t *then_rsc = then->rsc;
2008  enum rsc_role_e then_rsc_role = then_rsc ? then_rsc->fns->state(then_rsc, TRUE) : 0;
2009 
2010  if (!then_rsc) {
2011  /* ignore */
2012  } else if ((then_rsc_role == RSC_ROLE_STOPPED) && safe_str_eq(then->task, RSC_STOP)) {
2013  /* ignore... if 'then' is supposed to be stopped after 'first', but
2014  * then is already stopped, there is nothing to be done when non-symmetrical. */
2015  } else if ((then_rsc_role >= RSC_ROLE_STARTED)
2016  && safe_str_eq(then->task, RSC_START)
2017  && is_set(then->flags, pe_action_optional)
2018  && then->node
2019  && g_list_length(then_rsc->running_on) == 1
2020  && then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
2021  /* Ignore. If 'then' is supposed to be started after 'first', but
2022  * 'then' is already started, there is nothing to be done when
2023  * asymmetrical -- unless the start is mandatory, which indicates
2024  * the resource is restarting, and the ordering is still needed.
2025  */
2026  } else if (!(first->flags & pe_action_runnable)) {
2027  /* prevent 'then' action from happening if 'first' is not runnable and
2028  * 'then' has not yet occurred. */
2029  pe_action_implies(then, first, pe_action_optional);
2030  pe_action_implies(then, first, pe_action_runnable);
2031 
2032  pe_rsc_trace(then->rsc, "Unset optional and runnable on %s", then->uuid);
2033  } else {
2034  /* ignore... then is allowed to start/stop if it wants to. */
2035  }
2036  }
2037 
 /* A mandatory 'then' makes 'first' mandatory too */
2038  if (type & pe_order_implies_first) {
2039  if (is_set(filter, pe_action_optional) && is_not_set(flags /* Should be then_flags? */, pe_action_optional)) {
2040  // Needs is_set(first_flags, pe_action_optional) too?
2041  pe_rsc_trace(first->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2042  pe_action_implies(first, then, pe_action_optional);
2043  }
2044 
2045  if (is_set(flags, pe_action_migrate_runnable) &&
2046  is_set(then->flags, pe_action_migrate_runnable) == FALSE &&
2047  is_set(then->flags, pe_action_optional) == FALSE) {
2048 
2049  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s",
2050  first->uuid, then->uuid);
 /* NOTE(review): doxygen line 2051 is missing from this extraction;
  * presumably pe_action_implies(first, then, pe_action_migrate_runnable)
  * -- confirm against upstream source */
2052  }
2053  }
2054 
 /* Same implication, but only when 'then' involves a promoted resource */
2055  if (type & pe_order_implies_first_master) {
2056  if ((filter & pe_action_optional) &&
2057  ((then->flags & pe_action_optional) == FALSE) &&
2058  then->rsc && (then->rsc->role == RSC_ROLE_MASTER)) {
2059  pe_action_implies(first, then, pe_action_optional);
2060 
2061  if (is_set(first->flags, pe_action_migrate_runnable) &&
2062  is_set(then->flags, pe_action_migrate_runnable) == FALSE) {
2063 
2064  pe_rsc_trace(first->rsc, "Unset migrate runnable on %s because of %s", first->uuid, then->uuid);
 /* NOTE(review): doxygen line 2065 is missing from this extraction;
  * presumably pe_action_implies(first, then, pe_action_migrate_runnable)
  * -- confirm against upstream source */
2066  }
2067  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", first->uuid, then->uuid);
2068  }
2069  }
2070 
 /* NOTE(review): doxygen line 2071 -- the opening line of this condition,
  * likely "if (is_set(type, pe_order_implies_first_migratable)" -- is
  * missing from this extraction; confirm against upstream source */
2072  && is_set(filter, pe_action_optional)) {
2073 
2074  if (((then->flags & pe_action_migrate_runnable) == FALSE) ||
2075  ((then->flags & pe_action_runnable) == FALSE)) {
2076 
2077  pe_rsc_trace(then->rsc, "Unset runnable on %s because %s is neither runnable or migratable", first->uuid, then->uuid);
2078  pe_action_implies(first, then, pe_action_runnable);
2079  }
2080 
2081  if ((then->flags & pe_action_optional) == 0) {
2082  pe_rsc_trace(then->rsc, "Unset optional on %s because %s is not optional", first->uuid, then->uuid);
2083  pe_action_implies(first, then, pe_action_optional);
2084  }
2085  }
2086 
2087  if ((type & pe_order_pseudo_left)
2088  && is_set(filter, pe_action_optional)) {
2089 
2090  if ((first->flags & pe_action_runnable) == FALSE) {
 /* NOTE(review): doxygen lines 2091-2092 are missing from this
  * extraction (likely pe_action_implies() calls) -- confirm against
  * upstream source */
2093  pe_rsc_trace(then->rsc, "Unset pseudo on %s because %s is not runnable", then->uuid, first->uuid);
2094  }
2095 
2096  }
2097 
 /* 'then' can only run if 'first' is runnable */
2098  if (is_set(type, pe_order_runnable_left)
2099  && is_set(filter, pe_action_runnable)
2100  && is_set(then->flags, pe_action_runnable)
2101  && is_set(flags, pe_action_runnable) == FALSE) {
2102  pe_rsc_trace(then->rsc, "Unset runnable on %s because of %s", then->uuid, first->uuid);
2103  pe_action_implies(then, first, pe_action_runnable);
 /* NOTE(review): doxygen line 2104 is missing from this extraction;
  * presumably pe_action_implies(then, first, pe_action_migrate_runnable)
  * -- confirm against upstream source */
2105  }
2106 
 /* A mandatory 'first' makes 'then' mandatory */
2107  if (is_set(type, pe_order_implies_then)
2108  && is_set(filter, pe_action_optional)
2109  && is_set(then->flags, pe_action_optional)
2110  && is_set(flags, pe_action_optional) == FALSE) {
2111 
2112  /* in this case, treat migrate_runnable as if first is optional */
2113  if (is_set(first->flags, pe_action_migrate_runnable) == FALSE) {
2114  pe_rsc_trace(then->rsc, "Unset optional on %s because of %s", then->uuid, first->uuid);
2115  pe_action_implies(then, first, pe_action_optional);
2116  }
2117  }
2118 
2119  if (is_set(type, pe_order_restart)) {
2120  handle_restart_ordering(first, then, filter);
2121  }
2122 
 /* Report whether either action changed; for 'then', re-run
  * update_action() so children of clones/groups get re-processed */
2123  if (then_flags != then->flags) {
2124  changed |= pe_graph_updated_then;
2125  pe_rsc_trace(then->rsc,
2126  "Then: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2127  then->uuid, then->node ? then->node->details->uname : "[none]", then->flags,
2128  then_flags, first->uuid, first->flags);
2129 
2130  if(then->rsc && then->rsc->parent) {
2131  /* "X_stop then X_start" doesn't get handled for cloned groups unless we do this */
2132  update_action(then, data_set);
2133  }
2134  }
2135 
2136  if (first_flags != first->flags) {
2137  changed |= pe_graph_updated_first;
2138  pe_rsc_trace(first->rsc,
2139  "First: Flags for %s on %s are now 0x%.6x (was 0x%.6x) because of %s 0x%.6x",
2140  first->uuid, first->node ? first->node->details->uname : "[none]",
2141  first->flags, first_flags, then->uuid, then->flags);
2142  }
2143 
2144  return changed;
2145 }
2146 
/*!
 * \internal
 * \brief Apply a location constraint to a primitive's allowed-node table
 *
 * NOTE(review): the signature line (doxygen line 2148) is missing from this
 * extraction; presumably native_rsc_location(rsc, constraint) -- confirm
 * against upstream source.
 */
2147 void
2149 {
2150  GListPtr gIter = NULL;
2151  GHashTableIter iter;
2152  node_t *node = NULL;
2153 
 /* Guard against malformed constraint input */
2154  if (constraint == NULL) {
2155  pe_err("Constraint is NULL");
2156  return;
2157 
2158  } else if (rsc == NULL) {
2159  pe_err("LHS of rsc_to_node (%s) is NULL", constraint->id);
2160  return;
2161  }
2162 
2163  pe_rsc_trace(rsc, "Applying %s (%s) to %s", constraint->id,
2164  role2text(constraint->role_filter), rsc->id);
2165 
2166  /* take "lifetime" into account */
2167  if (constraint->role_filter > RSC_ROLE_UNKNOWN && constraint->role_filter != rsc->next_role) {
2168  pe_rsc_debug(rsc, "Constraint (%s) is not active (role : %s vs. %s)",
2169  constraint->id, role2text(constraint->role_filter), role2text(rsc->next_role));
2170  return;
2171 
2172  } else if (is_active(constraint) == FALSE) {
2173  pe_rsc_trace(rsc, "Constraint (%s) is not active", constraint->id);
2174  return;
2175  }
2176 
2177  if (constraint->node_list_rh == NULL) {
2178  pe_rsc_trace(rsc, "RHS of constraint %s is NULL", constraint->id);
2179  return;
2180  }
2181 
 /* Merge each constrained node's score into the resource's allowed-node
  * table, inserting a copy when the node isn't there yet.  (This loop's
  * 'node' shadows the outer declaration above.) */
2182  for (gIter = constraint->node_list_rh; gIter != NULL; gIter = gIter->next) {
2183  node_t *node = (node_t *) gIter->data;
2184  node_t *other_node = NULL;
2185 
2186  other_node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2187 
2188  if (other_node != NULL) {
2189  pe_rsc_trace(rsc, "%s + %s: %d + %d",
2190  node->details->uname,
2191  other_node->details->uname, node->weight, other_node->weight);
2192  other_node->weight = merge_weights(other_node->weight, node->weight);
2193 
2194  } else {
2195  other_node = node_copy(node);
2196 
2197  pe_rsc_trace(rsc, "%s: %d (insert %d)", other_node->details->uname, other_node->weight, constraint->discover_mode);
2198  g_hash_table_insert(rsc->allowed_nodes, (gpointer) other_node->details->id, other_node);
2199  }
2200 
 /* Keep the strongest probe (discovery) mode seen for this node */
2201  if (other_node->rsc_discover_mode < constraint->discover_mode) {
2202  if (constraint->discover_mode == pe_discover_exclusive) {
2203  rsc->exclusive_discover = TRUE;
2204  }
2205  /* exclusive > never > always... always is default */
2206  other_node->rsc_discover_mode = constraint->discover_mode;
2207  }
2208  }
2209 
 /* Trace the resulting node weights for debugging */
2210  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
2211  while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
2212  pe_rsc_trace(rsc, "%s + %s : %d", rsc->id, node->details->uname, node->weight);
2213  }
2214 }
2215 
/*!
 * \internal
 * \brief Add a resource's scheduled actions to the transition graph
 *
 * NOTE(review): the signature line (doxygen line 2217) is missing from this
 * extraction; presumably native_expand(rsc, data_set), matching the
 * cmds->expand() recursion below -- confirm against upstream source.
 */
2216 void
2218 {
2219  GListPtr gIter = NULL;
2220 
2221  CRM_ASSERT(rsc);
2222  pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
2223 
 /* Emit a graph element for each of this resource's actions */
2224  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
2225  action_t *action = (action_t *) gIter->data;
2226 
2227  crm_trace("processing action %d for rsc=%s", action->id, rsc->id);
2228  graph_element_from_action(action, data_set);
2229  }
2230 
 /* Recurse into children (for collective resources) */
2231  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2232  resource_t *child_rsc = (resource_t *) gIter->data;
2233 
2234  child_rsc->cmds->expand(child_rsc, data_set);
2235  }
2236 }
2237 
/* Log one planned change for action 'a' with printf-style 'fmt'/'args':
 * to stdout when the local variable 'terminal' (captured from the expansion
 * site) is true, otherwise via crm_notice(); appends "due to <reason>" when
 * the action has a reason set.  Uses the GCC "args..." named-variadic
 * extension with ##args to tolerate an empty argument list.
 */
2238 #define log_change(a, fmt, args...) do { \
2239  if(a && a->reason && terminal) { \
2240  printf(" * "fmt" \tdue to %s\n", ##args, a->reason); \
2241  } else if(a && a->reason) { \
2242  crm_notice(fmt" \tdue to %s", ##args, a->reason); \
2243  } else if(terminal) { \
2244  printf(" * "fmt"\n", ##args); \
2245  } else { \
2246  crm_notice(fmt, ##args); \
2247  } \
2248  } while(0)
2249 
/* Sanity check used when a stop is being reported: assert that a mandatory
 * stop action actually exists -- unless the current node is unclean, in
 * which case the stop will be a pseudo-op and no real action is expected.
 * Relies on the locals 'current', 'stop' and 'rsc' at the expansion site;
 * 'lineno' identifies the call site in error messages.
 */
2250 #define STOP_SANITY_ASSERT(lineno) do { \
2251  if(current && current->details->unclean) { \
2252  /* It will be a pseudo op */ \
2253  } else if(stop == NULL) { \
2254  crm_err("%s:%d: No stop action exists for %s", __FUNCTION__, lineno, rsc->id); \
2255  CRM_ASSERT(stop != NULL); \
2256  } else if(is_set(stop->flags, pe_action_optional)) { \
2257  crm_err("%s:%d: Action %s is still optional", __FUNCTION__, lineno, stop->uuid); \
2258  CRM_ASSERT(is_not_set(stop->flags, pe_action_optional)); \
2259  } \
2260  } while(0)
2261 
/* Column widths for aligned action logging; file-scope so they grow
 * monotonically across calls and successive lines stay aligned */
2262 static int rsc_width = 5;
2263 static int detail_width = 5;
/* Log (to stdout if 'terminal', else the cluster log) one line describing a
 * planned 'change' (e.g. "Move", "Recover") for 'rsc' from node 'origin' to
 * node 'destination'.  'source' (defaulting to 'action') supplies the
 * human-readable reason; 'action' supplies the runnable/blocked state.
 * At least one of origin/destination must be non-NULL.
 */
2264 static void
2265 LogAction(const char *change, resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal)
2266 {
2267  int len = 0;
2268  char *reason = NULL;
2269  char *details = NULL;
2270  bool same_host = FALSE;
2271  bool same_role = FALSE;
2272  bool need_role = FALSE;
2273 
2274  CRM_ASSERT(action);
2275  CRM_ASSERT(destination != NULL || origin != NULL);
2276 
 /* Fall back to the action itself for the reason text */
2277  if(source == NULL) {
2278  source = action;
2279  }
2280 
 /* Widen the resource-id column if this id doesn't fit */
2281  len = strlen(rsc->id);
2282  if(len > rsc_width) {
2283  rsc_width = len + 2;
2284  }
2285 
 /* Roles are only worth mentioning beyond plain started/stopped */
2286  if(rsc->role > RSC_ROLE_STARTED || rsc->next_role > RSC_ROLE_SLAVE) {
2287  need_role = TRUE;
2288  }
2289 
2290  if(origin != NULL && destination != NULL && origin->details == destination->details) {
2291  same_host = TRUE;
2292  }
2293 
2294  if(rsc->role == rsc->next_role) {
2295  same_role = TRUE;
2296  }
2297 
 /* Build the details column from the role/host combination */
2298  if(need_role && origin == NULL) {
2299  /* Promoting from Stopped */
2300  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname);
2301 
2302  } else if(need_role && destination == NULL) {
2303  /* Demoting a Master or Stopping a Slave */
2304  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2305 
2306  } else if(origin == NULL || destination == NULL) {
2307  /* Starting or stopping a resource */
2308  details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname);
2309 
2310  } else if(need_role && same_role && same_host) {
2311  /* Recovering or restarting a promotable clone resource */
2312  details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname);
2313 
2314  } else if(same_role && same_host) {
2315  /* Recovering or Restarting a normal resource */
2316  details = crm_strdup_printf("%s", origin->details->uname);
2317 
2318  } else if(same_role && need_role) {
2319  /* Moving a promotable clone resource */
2320  details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role));
2321 
2322  } else if(same_role) {
2323  /* Moving a normal resource */
2324  details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname);
2325 
2326  } else if(same_host) {
2327  /* Promoting or demoting a promotable clone resource */
2328  details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname);
2329 
2330  } else {
2331  /* Moving and promoting/demoting */
2332  details = crm_strdup_printf("%s %s -> %s %s", role2text(rsc->role), origin->details->uname, role2text(rsc->next_role), destination->details->uname);
2333  }
2334 
 /* Widen the details column if needed */
2335  len = strlen(details);
2336  if(len > detail_width) {
2337  detail_width = len;
2338  }
2339 
 /* Build the trailing reason: prefer the source action's reason text,
  * and mark actions that are not currently runnable as blocked */
2340  if(source->reason && is_not_set(action->flags, pe_action_runnable)) {
2341  reason = crm_strdup_printf(" due to %s (blocked)", source->reason);
2342 
2343  } else if(source->reason) {
2344  reason = crm_strdup_printf(" due to %s", source->reason);
2345 
2346  } else if(is_not_set(action->flags, pe_action_runnable)) {
2347  reason = strdup(" blocked");
2348 
2349  } else {
2350  reason = strdup("");
2351  }
2352 
2353  if(terminal) {
2354  printf(" * %-8s %-*s ( %*s ) %s\n", change, rsc_width, rsc->id, detail_width, details, reason);
2355  } else {
2356  crm_notice(" * %-8s %-*s ( %*s ) %s", change, rsc_width, rsc->id, detail_width, details, reason);
2357  }
2358 
2359  free(details);
2360  free(reason);
2361 }
2362 
2363 
/*!
 * \internal
 * \brief Log all planned actions for a resource, recursing into children
 *
 * Inspects the resource's scheduled start/stop/promote/demote actions and
 * emits one human-readable line per planned change (to stdout when
 * \p terminal is true, otherwise to the cluster log).
 */
2364 void
2365 LogActions(resource_t * rsc, pe_working_set_t * data_set, gboolean terminal)
2366 {
2367  node_t *next = NULL;
2368  node_t *current = NULL;
2369  pe_node_t *start_node = NULL;
2370 
2371  action_t *stop = NULL;
2372  action_t *start = NULL;
2373  action_t *demote = NULL;
2374  action_t *promote = NULL;
2375 
2376  char *key = NULL;
2377  gboolean moving = FALSE;
2378  GListPtr possible_matches = NULL;
2379 
 /* Bundles have their own logging routine */
2380  if(rsc->variant == pe_container) {
2381  pcmk__bundle_log_actions(rsc, data_set, terminal);
2382  return;
2383  }
2384 
 /* Collective resources: log each child instead */
2385  if (rsc->children) {
2386  GListPtr gIter = NULL;
2387 
2388  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2389  resource_t *child_rsc = (resource_t *) gIter->data;
2390 
2391  LogActions(child_rsc, data_set, terminal);
2392  }
2393  return;
2394  }
2395 
2396  next = rsc->allocated_to;
2397  if (rsc->running_on) {
2398  current = pe__current_node(rsc);
2399  if (rsc->role == RSC_ROLE_STOPPED) {
2400  /*
2401  * This can occur when resources are being recovered
2402  * We fiddle with the current role in native_create_actions()
2403  */
2404  rsc->role = RSC_ROLE_STARTED;
2405  }
2406  }
2407 
2408  if (current == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
2409  /* Don't log stopped orphans */
2410  return;
2411  }
2412 
 /* Unmanaged, or not running and not allocated: nothing will change */
2413  if (is_not_set(rsc->flags, pe_rsc_managed)
2414  || (current == NULL && next == NULL)) {
2415  pe_rsc_info(rsc, "Leave %s\t(%s%s)",
2416  rsc->id, role2text(rsc->role), is_not_set(rsc->flags,
2417  pe_rsc_managed) ? " unmanaged" : "");
2418  return;
2419  }
2420 
2421  if (current != NULL && next != NULL && safe_str_neq(current->details->id, next->details->id)) {
2422  moving = TRUE;
2423  }
2424 
 /* Find the relevant start/stop/promote/demote actions, if any */
2425  possible_matches = pe__resource_actions(rsc, next, RSC_START, FALSE);
2426  if (possible_matches) {
2427  start = possible_matches->data;
2428  g_list_free(possible_matches);
2429  }
2430 
2431  if ((start == NULL) || is_not_set(start->flags, pe_action_runnable)) {
2432  start_node = NULL;
2433  } else {
2434  start_node = current;
2435  }
2436  possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, FALSE);
2437  if (possible_matches) {
2438  stop = possible_matches->data;
2439  g_list_free(possible_matches);
2440  }
2441 
2442  possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, FALSE);
2443  if (possible_matches) {
2444  promote = possible_matches->data;
2445  g_list_free(possible_matches);
2446  }
2447 
2448  possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, FALSE);
2449  if (possible_matches) {
2450  demote = possible_matches->data;
2451  g_list_free(possible_matches);
2452  }
2453 
 /* Role unchanged: migrate, reload, restart, stop, or leave alone */
2454  if (rsc->role == rsc->next_role) {
2455  action_t *migrate_op = NULL;
2456 
2457  possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, FALSE);
2458  if (possible_matches) {
2459  migrate_op = possible_matches->data;
2460  }
2461 
2462  CRM_CHECK(next != NULL,);
2463  if (next == NULL) {
2464  } else if (migrate_op && is_set(migrate_op->flags, pe_action_runnable) && current) {
2465  LogAction("Migrate", rsc, current, next, start, NULL, terminal);
2466 
2467  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2468  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2469 
2470  } else if (start == NULL || is_set(start->flags, pe_action_optional)) {
2471  pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role),
2472  next->details->uname);
2473 
2474  } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) {
2475  LogAction("Stop", rsc, current, NULL, stop,
2476  (stop && stop->reason)? stop : start, terminal);
2477  STOP_SANITY_ASSERT(__LINE__);
2478 
2479  } else if (moving && current) {
2480  LogAction(is_set(rsc->flags, pe_rsc_failed) ? "Recover" : "Move",
2481  rsc, current, next, stop, NULL, terminal);
2482 
2483  } else if (is_set(rsc->flags, pe_rsc_failed)) {
2484  LogAction("Recover", rsc, current, NULL, stop, NULL, terminal);
2485  STOP_SANITY_ASSERT(__LINE__);
2486 
2487  } else {
2488  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2489  /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
2490  }
2491 
2492  g_list_free(possible_matches);
2493  return;
2494  }
2495 
 /* Role is changing: stopping entirely (or a blocked restart) is logged
  * once per node the resource is currently running on */
2496  if(stop
2497  && (rsc->next_role == RSC_ROLE_STOPPED
2498  || (start && is_not_set(start->flags, pe_action_runnable)))) {
2499 
2500  GListPtr gIter = NULL;
2501 
2502  key = stop_key(rsc);
2503  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2504  node_t *node = (node_t *) gIter->data;
2505  action_t *stop_op = NULL;
2506 
2507  possible_matches = find_actions(rsc->actions, key, node);
2508  if (possible_matches) {
2509  stop_op = possible_matches->data;
2510  g_list_free(possible_matches);
2511  }
2512 
2513  if (stop_op && (stop_op->flags & pe_action_runnable)) {
2514  STOP_SANITY_ASSERT(__LINE__);
2515  }
2516 
2517  LogAction("Stop", rsc, node, NULL, stop_op,
2518  (stop_op && stop_op->reason)? stop_op : start, terminal);
2519  }
2520 
2521  free(key);
2522 
2523  } else if (stop && is_set(rsc->flags, pe_rsc_failed)) {
2524  /* 'stop' may be NULL if the failure was ignored */
2525  LogAction("Recover", rsc, current, next, stop, start, terminal);
2526  STOP_SANITY_ASSERT(__LINE__);
2527 
2528  } else if (moving) {
2529  LogAction("Move", rsc, current, next, stop, NULL, terminal);
2530  STOP_SANITY_ASSERT(__LINE__);
2531 
2532  } else if (is_set(rsc->flags, pe_rsc_reload)) {
2533  LogAction("Reload", rsc, current, next, start, NULL, terminal);
2534 
2535  } else if (stop != NULL && is_not_set(stop->flags, pe_action_optional)) {
2536  LogAction("Restart", rsc, current, next, start, NULL, terminal);
2537  STOP_SANITY_ASSERT(__LINE__);
2538 
2539  } else if (rsc->role == RSC_ROLE_MASTER) {
2540  CRM_LOG_ASSERT(current != NULL);
2541  LogAction("Demote", rsc, current, next, demote, NULL, terminal);
2542 
2543  } else if(rsc->next_role == RSC_ROLE_MASTER) {
2544  CRM_LOG_ASSERT(next);
2545  LogAction("Promote", rsc, current, next, promote, NULL, terminal);
2546 
2547  } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
2548  LogAction("Start", rsc, current, next, start, NULL, terminal);
2549  }
2550 }
2551 
2552 gboolean
2553 StopRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2554 {
2555  GListPtr gIter = NULL;
2556 
2557  CRM_ASSERT(rsc);
2558  pe_rsc_trace(rsc, "%s", rsc->id);
2559 
2560  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2561  node_t *current = (node_t *) gIter->data;
2562  action_t *stop;
2563 
2564  if (rsc->partial_migration_target) {
2565  if (rsc->partial_migration_target->details == current->details) {
2566  pe_rsc_trace(rsc, "Filtered %s -> %s %s", current->details->uname,
2567  next->details->uname, rsc->id);
2568  continue;
2569  } else {
2570  pe_rsc_trace(rsc, "Forced on %s %s", current->details->uname, rsc->id);
2571  optional = FALSE;
2572  }
2573  }
2574 
2575  pe_rsc_trace(rsc, "%s on %s", rsc->id, current->details->uname);
2576  stop = stop_action(rsc, current, optional);
2577 
2578  if(rsc->allocated_to == NULL) {
2579  pe_action_set_reason(stop, "node availability", TRUE);
2580  }
2581 
2582  if (is_not_set(rsc->flags, pe_rsc_managed)) {
2583  update_action_flags(stop, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2584  }
2585 
2586  if (is_set(data_set->flags, pe_flag_remove_after_stop)) {
2587  DeleteRsc(rsc, current, optional, data_set);
2588  }
2589 
2590  if(is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2591  action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, data_set);
2592 
2593  order_actions(stop, unfence, pe_order_implies_first);
2594  if (!node_has_been_unfenced(current)) {
2595  pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
2596  }
2597  }
2598  }
2599 
2600  return TRUE;
2601 }
2602 
2603 static void
2604 order_after_unfencing(resource_t *rsc, pe_node_t *node, action_t *action,
2605  enum pe_ordering order, pe_working_set_t *data_set)
2606 {
2607  /* When unfencing is in use, we order unfence actions before any probe or
2608  * start of resources that require unfencing, and also of fence devices.
2609  *
2610  * This might seem to violate the principle that fence devices require
2611  * only quorum. However, fence agents that unfence often don't have enough
2612  * information to even probe or start unless the node is first unfenced.
2613  */
2614  if (is_unfence_device(rsc, data_set)
2615  || is_set(rsc->flags, pe_rsc_needs_unfencing)) {
2616 
2617  /* Start with an optional ordering. Requiring unfencing would result in
2618  * the node being unfenced, and all its resources being stopped,
2619  * whenever a new resource is added -- which would be highly suboptimal.
2620  */
2621  action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, data_set);
2622 
2623  order_actions(unfence, action, order);
2624 
2625  if (!node_has_been_unfenced(node)) {
2626  // But unfencing is required if it has never been done
2627  char *reason = crm_strdup_printf("required by %s %s",
2628  rsc->id, action->task);
2629 
2630  trigger_unfencing(NULL, node, reason, NULL, data_set);
2631  free(reason);
2632  }
2633  }
2634 }
2635 
2636 gboolean
2637 StartRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2638 {
2639  action_t *start = NULL;
2640 
2641  CRM_ASSERT(rsc);
2642  pe_rsc_trace(rsc, "%s on %s %d %d", rsc->id, next ? next->details->uname : "N/A", optional, next ? next->weight : 0);
2643  start = start_action(rsc, next, TRUE);
2644 
2645  order_after_unfencing(rsc, next, start, pe_order_implies_then, data_set);
2646 
2647  if (is_set(start->flags, pe_action_runnable) && optional == FALSE) {
2648  update_action_flags(start, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2649  }
2650 
2651 
2652  return TRUE;
2653 }
2654 
2655 gboolean
2656 PromoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2657 {
2658  GListPtr gIter = NULL;
2659  gboolean runnable = TRUE;
2660  GListPtr action_list = NULL;
2661 
2662  CRM_ASSERT(rsc);
2663  CRM_CHECK(next != NULL, return FALSE);
2664  pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
2665 
2666  action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
2667 
2668  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2669  action_t *start = (action_t *) gIter->data;
2670 
2671  if (is_set(start->flags, pe_action_runnable) == FALSE) {
2672  runnable = FALSE;
2673  }
2674  }
2675  g_list_free(action_list);
2676 
2677  if (runnable) {
2678  promote_action(rsc, next, optional);
2679  return TRUE;
2680  }
2681 
2682  pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
2683 
2684  action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
2685 
2686  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
2687  action_t *promote = (action_t *) gIter->data;
2688 
2689  update_action_flags(promote, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
2690  }
2691 
2692  g_list_free(action_list);
2693  return TRUE;
2694 }
2695 
2696 gboolean
2697 DemoteRsc(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2698 {
2699  GListPtr gIter = NULL;
2700 
2701  CRM_ASSERT(rsc);
2702  pe_rsc_trace(rsc, "%s", rsc->id);
2703 
2704 /* CRM_CHECK(rsc->next_role == RSC_ROLE_SLAVE, return FALSE); */
2705  for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
2706  node_t *current = (node_t *) gIter->data;
2707 
2708  pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
2709  demote_action(rsc, current, optional);
2710  }
2711  return TRUE;
2712 }
2713 
2714 gboolean
2715 RoleError(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2716 {
2717  CRM_ASSERT(rsc);
2718  crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
2719  CRM_CHECK(FALSE, return FALSE);
2720  return FALSE;
2721 }
2722 
2723 gboolean
2724 NullOp(resource_t * rsc, node_t * next, gboolean optional, pe_working_set_t * data_set)
2725 {
2726  CRM_ASSERT(rsc);
2727  pe_rsc_trace(rsc, "%s", rsc->id);
2728  return FALSE;
2729 }
2730 
2731 gboolean
2732 DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set)
2733 {
2734  if (is_set(rsc->flags, pe_rsc_failed)) {
2735  pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
2736  return FALSE;
2737 
2738  } else if (node == NULL) {
2739  pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
2740  return FALSE;
2741 
2742  } else if (node->details->unclean || node->details->online == FALSE) {
2743  pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
2744  node->details->uname);
2745  return FALSE;
2746  }
2747 
2748  crm_notice("Removing %s from %s", rsc->id, node->details->uname);
2749 
2750  delete_action(rsc, node, optional);
2751 
2752  new_rsc_order(rsc, RSC_STOP, rsc, RSC_DELETE,
2753  optional ? pe_order_implies_then : pe_order_optional, data_set);
2754 
2755  new_rsc_order(rsc, RSC_DELETE, rsc, RSC_START,
2756  optional ? pe_order_implies_then : pe_order_optional, data_set);
2757 
2758  return TRUE;
2759 }
2760 
2761 gboolean
2762 native_create_probe(resource_t * rsc, node_t * node, action_t * complete,
2763  gboolean force, pe_working_set_t * data_set)
2764 {
 /* Schedule a one-shot monitor ("probe") of rsc on node so the cluster can
  * learn whether the resource is already active there before starting it.
  * Returns TRUE if a probe (or any child's probe) was created, FALSE if
  * probing was skipped for this resource/node combination.
  *
  * NOTE(review): this is a rendered listing; original source line 2765 was
  * lost in extraction. It presumably declared the 'flags' ordering variable
  * used below at lines 2960/2965 (likely 'int flags = pe_order_optional;')
  * -- confirm against the real pcmk_sched_native.c.
  */
2766  char *key = NULL;
2767  action_t *probe = NULL;
2768  node_t *running = NULL;
2769  node_t *allowed = NULL;
2770  resource_t *top = uber_parent(rsc);
2771 
 /* Cached string forms of the non-default expected probe return codes,
  * computed once per process (static) */
2772  static const char *rc_master = NULL;
2773  static const char *rc_inactive = NULL;
2774 
2775  if (rc_inactive == NULL) {
2776  rc_inactive = crm_itoa(PCMK_OCF_NOT_RUNNING);
2777  rc_master = crm_itoa(PCMK_OCF_RUNNING_MASTER);
2778  }
2779 
2780  CRM_CHECK(node != NULL, return FALSE);
 /* Cluster-wide startup probes may be disabled; 'force' overrides that */
2781  if (force == FALSE && is_not_set(data_set->flags, pe_flag_startup_probes)) {
2782  pe_rsc_trace(rsc, "Skipping active resource detection for %s", rsc->id);
2783  return FALSE;
2784  }
2785 
 /* Pacemaker Remote nodes restrict what can be probed on them */
2786  if (pe__is_guest_or_remote_node(node)) {
2787  const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
2788 
 /* NOTE(review): original source line 2789 is missing from this rendered
  * view -- judging by the trace message below it was the stonith-class
  * check (likely 'if (safe_str_eq(class, PCMK_RESOURCE_CLASS_STONITH)) {').
  * Confirm against the real source before editing.
  */
2790  pe_rsc_trace(rsc,
2791  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot run stonith agents",
2792  rsc->id, node->details->id);
2793  return FALSE;
2794  } else if (pe__is_guest_node(node)
2795  && pe__resource_contains_guest_node(data_set, rsc)) {
2796  pe_rsc_trace(rsc,
2797  "Skipping probe for %s on %s because guest nodes cannot run resources containing guest nodes",
2798  rsc->id, node->details->id);
2799  return FALSE;
2800  } else if (rsc->is_remote_node) {
2801  pe_rsc_trace(rsc,
2802  "Skipping probe for %s on %s because Pacemaker Remote nodes cannot host remote connections",
2803  rsc->id, node->details->id);
2804  return FALSE;
2805  }
2806  }
2807 
 /* Collective resources: recurse into children and report whether any
  * child created a probe */
2808  if (rsc->children) {
2809  GListPtr gIter = NULL;
2810  gboolean any_created = FALSE;
2811 
2812  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
2813  resource_t *child_rsc = (resource_t *) gIter->data;
2814 
2815  any_created = child_rsc->cmds->create_probe(child_rsc, node, complete, force, data_set)
2816  || any_created;
2817  }
2818 
2819  return any_created;
2820 
2821  } else if ((rsc->container) && (!rsc->is_remote_node)) {
2822  pe_rsc_trace(rsc, "Skipping %s: it is within container %s", rsc->id, rsc->container->id);
2823  return FALSE;
2824  }
2825 
2826  if (is_set(rsc->flags, pe_rsc_orphan)) {
2827  pe_rsc_trace(rsc, "Skipping orphan: %s", rsc->id);
2828  return FALSE;
2829  }
2830 
2831  // Check whether resource is already known on node
2832  if (!force && g_hash_table_lookup(rsc->known_on, node->details->id)) {
2833  pe_rsc_trace(rsc, "Skipping known: %s on %s", rsc->id, node->details->uname);
2834  return FALSE;
2835  }
2836 
2837  allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
2838 
 /* With exclusive discovery, only nodes explicitly marked for discovery
  * of this resource get probed */
2839  if (rsc->exclusive_discover || top->exclusive_discover) {
2840  if (allowed == NULL) {
2841  /* exclusive discover is enabled and this node is not in the allowed list. */
2842  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, A", rsc->id, node->details->id);
2843  return FALSE;
2844  } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
2845  /* exclusive discover is enabled and this node is not marked
2846  * as a node this resource should be discovered on */
2847  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, B", rsc->id, node->details->id);
2848  return FALSE;
2849  }
2850  }
2851 
2852  if(allowed == NULL && node->rsc_discover_mode == pe_discover_never) {
2853  /* If this node was allowed to host this resource it would
2854  * have been explicitly added to the 'allowed_nodes' list.
2855  * However it wasn't and the node has discovery disabled, so
2856  * no need to probe for this resource.
2857  */
2858  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, C", rsc->id, node->details->id);
2859  return FALSE;
2860  }
2861 
2862  if (allowed && allowed->rsc_discover_mode == pe_discover_never) {
2863  /* this resource is marked as not needing to be discovered on this node */
2864  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, discovery mode", rsc->id, node->details->id);
2865  return FALSE;
2866  }
2867 
 /* For resources inside a guest node, the state of the guest's container
  * resource can make the probe unnecessary */
2868  if (pe__is_guest_node(node)) {
2869  resource_t *remote = node->details->remote_rsc->container;
2870 
2871  if(remote->role == RSC_ROLE_STOPPED) {
2872  /* If the container is stopped, then we know anything that
2873  * might have been inside it is also stopped and there is
2874  * no need to probe.
2875  *
2876  * If we don't know the container's state on the target
2877  * either:
2878  *
2879  * - the container is running, the transition will abort
2880  * and we'll end up in a different case next time, or
2881  *
2882  * - the container is stopped
2883  *
2884  * Either way there is no need to probe.
2885  *
2886  */
2887  if(remote->allocated_to
2888  && g_hash_table_lookup(remote->known_on, remote->allocated_to->details->id) == NULL) {
2889  /* For safety, we order the 'rsc' start after 'remote'
2890  * has been probed.
2891  *
2892  * Using 'top' helps for groups, but we may need to
2893  * follow the start's ordering chain backwards.
2894  */
2895  custom_action_order(remote, generate_op_key(remote->id, RSC_STATUS, 0), NULL,
2896  top, generate_op_key(top->id, RSC_START, 0), NULL,
2897  pe_order_optional, data_set);
2898  }
2899  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopped",
2900  rsc->id, node->details->id, remote->id);
2901  return FALSE;
2902 
2903  /* Here we really we want to check if remote->stop is required,
2904  * but that information doesn't exist yet
2905  */
2906  } else if(node->details->remote_requires_reset
2907  || node->details->unclean
2908  || is_set(remote->flags, pe_rsc_failed)
2909  || remote->next_role == RSC_ROLE_STOPPED
2910  || (remote->allocated_to
2911  && pe_find_node(remote->running_on, remote->allocated_to->details->uname) == NULL)
2912  ) {
2913  /* The container is stopping or restarting, don't start
2914  * 'rsc' until 'remote' stops as this also implies that
2915  * 'rsc' is stopped - avoiding the need to probe
2916  */
2917  custom_action_order(remote, generate_op_key(remote->id, RSC_STOP, 0), NULL,
2918  top, generate_op_key(top->id, RSC_START, 0), NULL,
2919  pe_order_optional, data_set);
2920  pe_rsc_trace(rsc, "Skipping probe for %s on node %s, %s is stopping, restarting or moving",
2921  rsc->id, node->details->id, remote->id);
2922  return FALSE;
2923 /* } else {
2924  * The container is running so there is no problem probing it
2925  */
2926  }
2927  }
2928 
 /* All skip conditions passed: create the probe (a non-recurring monitor)
  * and clear its optional flag so it is actually executed */
2929  key = generate_op_key(rsc->id, RSC_STATUS, 0);
2930  probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE, data_set);
2931  update_action_flags(probe, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__);
2932 
2933  order_after_unfencing(rsc, node, probe, pe_order_optional, data_set);
2934 
2935  /*
2936  * We need to know if it's running_on (not just known_on) this node
2937  * to correctly determine the target rc.
2938  */
2939  running = pe_find_node_id(rsc->running_on, node->details->id);
2940  if (running == NULL) {
2941  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_inactive);
2942 
2943  } else if (rsc->role == RSC_ROLE_MASTER) {
2944  add_hash_param(probe->meta, XML_ATTR_TE_TARGET_RC, rc_master);
2945  }
2946 
2947  crm_debug("Probing %s on %s (%s) %d %p", rsc->id, node->details->uname, role2text(rsc->role),
2948  is_set(probe->flags, pe_action_runnable), rsc->running_on);
2949 
 /* Order the probe relative to the resource itself (unfence devices,
  * non-clones) or to the clone parent otherwise */
2950  if (is_unfence_device(rsc, data_set) || !pe_rsc_is_clone(top)) {
2951  top = rsc;
2952  } else {
2953  crm_trace("Probing %s on %s (%s) as %s", rsc->id, node->details->uname, role2text(rsc->role), top->id);
2954  }
2955 
2956  if(is_not_set(probe->flags, pe_action_runnable) && rsc->running_on == NULL) {
2957  /* Prevent the start from occurring if rsc isn't active, but
2958  * don't cause it to stop if it was active already
2959  */
2960  flags |= pe_order_runnable_left;
2961  }
2962 
 /* probe must complete before (top's) start ... */
2963  custom_action_order(rsc, NULL, probe,
2964  top, generate_op_key(top->id, RSC_START, 0), NULL,
2965  flags, data_set);
2966 
2967  /* Before any reloads, if they exist */
2968  custom_action_order(rsc, NULL, probe,
2969  top, reload_key(rsc), NULL,
2970  pe_order_optional, data_set);
2971 
2972 #if 0
2973  // complete is always null currently
2974  if (!is_unfence_device(rsc, data_set)) {
2975  /* Normally rsc.start depends on probe complete which depends
2976  * on rsc.probe. But this can't be the case for fence devices
2977  * with unfencing, as it would create graph loops.
2978  *
2979  * So instead we explicitly order 'rsc.probe then rsc.start'
2980  */
2981  order_actions(probe, complete, pe_order_implies_then);
2982  }
2983 #endif
2984  return TRUE;
2985 }
2986 
2996 static bool
2997 rsc_is_known_on(pe_resource_t *rsc, const pe_node_t *node)
2998 {
2999  if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
3000  return TRUE;
3001 
3002  } else if ((rsc->variant == pe_native)
3003  && pe_rsc_is_anon_clone(rsc->parent)
3004  && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
3005  /* We check only the parent, not the uber-parent, because we cannot
3006  * assume that the resource is known if it is in an anonymously cloned
3007  * group (which may be only partially known).
3008  */
3009  return TRUE;
3010  }
3011  return FALSE;
3012 }
3013 
3022 static void
3023 native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3024 {
 /* Order rsc's actions relative to a pending fencing operation, based on
  * each action's start requirement (action->needs):
  * - rsc_req_nothing: no ordering needed
  * - rsc_req_stonith: the action must follow the fencing
  * - rsc_req_quorum: a start must follow fencing when the resource's state
  *   on the fence target is unknown (it might be active there)
  */
3025  node_t *target;
3026  GListPtr gIter = NULL;
3027 
3028  CRM_CHECK(stonith_op && stonith_op->node, return);
3029  target = stonith_op->node;
3030 
3031  for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
3032  action_t *action = (action_t *) gIter->data;
3033 
3034  switch (action->needs) {
3035  case rsc_req_nothing:
3036  // Anything other than start or promote requires nothing
3037  break;
3038 
3039  case rsc_req_stonith:
3040  order_actions(stonith_op, action, pe_order_optional);
3041  break;
3042 
3043  case rsc_req_quorum:
3044  if (safe_str_eq(action->task, RSC_START)
3045  && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
3046  && !rsc_is_known_on(rsc, target)) {
3047 
3048  /* If we don't know the status of the resource on the node
3049  * we're about to shoot, we have to assume it may be active
3050  * there. Order the resource start after the fencing. This
3051  * is analogous to waiting for all the probes for a resource
3052  * to complete before starting it.
3053  *
3054  * The most likely explanation is that the DC died and took
3055  * its status with it.
3056  */
3057  pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
3058  target->details->uname);
3059  order_actions(stonith_op, action,
 /* NOTE(review): original source line 3060 -- the ordering-flags
  * argument that closes this order_actions() call -- was lost in
  * extraction (likely 'pe_order_optional | pe_order_runnable_left);').
  * Confirm against the real pcmk_sched_native.c.
  */
3061  }
3062  break;
3063  }
3064  }
3065 }
3066 
3067 static void
3068 native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set)
3069 {
 /* Handle stop and demote actions of rsc that are implied by fencing its
  * node: convert them to pseudo-actions (they can never actually run on a
  * fenced node) and, when required, order them after the fencing op.
  */
3070  GListPtr gIter = NULL;
3071  GListPtr action_list = NULL;
3072  bool order_implicit = FALSE;
3073 
3074  resource_t *top = uber_parent(rsc);
3075  node_t *target;
3076 
3077  CRM_CHECK(stonith_op && stonith_op->node, return);
3078  target = stonith_op->node;
3079 
3080  /* Get a list of stop actions potentially implied by the fencing */
3081  action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
3082 
3083  // If resource requires fencing, implicit actions must occur after fencing
3084  if (is_set(rsc->flags, pe_rsc_needs_fencing)) {
3085  order_implicit = TRUE;
3086  }
3087 
3088  /* Implied stops and demotes of resources running on guest nodes are always
3089  * ordered after fencing, even if the resource does not require fencing,
3090  * because guest node "fencing" is actually just a resource stop.
3091  */
3092  if (pe__is_guest_node(target)) {
3093  order_implicit = TRUE;
3094  }
3095 
3096  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3097  action_t *action = (action_t *) gIter->data;
3098 
3099  // The stop would never complete, so convert it into a pseudo-action.
3100  update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
3101  update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
3102 
3103  if (order_implicit) {
 /* NOTE(review): original source line 3104 was lost in extraction;
  * it presumably declared the 'flags' ordering variable used below
  * at lines 3123/3126/3128 -- confirm against the real source.
  */
3105  action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
3106 
3107  if (is_set(rsc->flags, pe_rsc_failed)) {
3108  crm_notice("Stop of failed resource %s is implicit after %s is fenced",
3109  rsc->id, target->details->uname);
3110  } else {
3111  crm_info("%s is implicit after %s is fenced",
3112  action->uuid, target->details->uname);
3113  }
 /* NOTE(review): original source line 3114 is missing from this view;
  * line 3115 below is the tail of that call (likely an
  * update_action_flags(action, ...) invocation) -- confirm against
  * the real source.
  */
3115  __FUNCTION__, __LINE__);
3116 
3117  if (target->details->remote_rsc) {
3118  /* User constraints must not order a resource in a guest node
3119  * relative to the guest node container resource. This flag
3120  * marks constraints as generated by the cluster and thus
3121  * immune to that check.
3122  */
3123  flags |= pe_order_preserve;
3124  }
3125  if (pe_rsc_is_bundled(rsc) == FALSE) {
3126  order_actions(stonith_op, action, flags);
3127  }
3128  order_actions(stonith_op, parent_stop, flags);
3129  } else {
3130  if (is_set(rsc->flags, pe_rsc_failed)) {
3131  crm_notice("Stop of failed resource %s is implicit because %s will be fenced",
3132  rsc->id, target->details->uname);
3133  } else {
3134  crm_info("%s is implicit because %s will be fenced",
3135  action->uuid, target->details->uname);
3136  }
3137  }
3138 
3139  if (is_set(rsc->flags, pe_rsc_notify)) {
3140  /* Create a second notification that will be delivered
3141  * immediately after the node is fenced
3142  *
3143  * Basic problem:
3144  * - C is a clone active on the node to be shot and stopping on another
3145  * - R is a resource that depends on C
3146  *
3147  * + C.stop depends on R.stop
3148  * + C.stopped depends on STONITH
3149  * + C.notify depends on C.stopped
3150  * + C.healthy depends on C.notify
3151  * + R.stop depends on C.healthy
3152  *
3153  * The extra notification here changes
3154  * + C.healthy depends on C.notify
3155  * into:
3156  * + C.healthy depends on C.notify'
3157  * + C.notify' depends on STONITH'
3158  * thus breaking the loop
3159  */
3160  create_secondary_notification(action, rsc, stonith_op, data_set);
3161  }
3162 
3163 /* From Bug #1601, successful fencing must be an input to a failed resources stop action.
3164 
3165  However given group(rA, rB) running on nodeX and B.stop has failed,
3166  A := stop healthy resource (rA.stop)
3167  B := stop failed resource (pseudo operation B.stop)
3168  C := stonith nodeX
3169  A requires B, B requires C, C requires A
3170  This loop would prevent the cluster from making progress.
3171 
3172  This block creates the "C requires A" dependency and therefore must (at least
3173  for now) be disabled.
3174 
3175  Instead, run the block above and treat all resources on nodeX as B would be
3176  (marked as a pseudo op depending on the STONITH).
3177 
3178  TODO: Break the "A requires B" dependency in update_action() and re-enable this block
3179 
3180  } else if(is_stonith == FALSE) {
3181  crm_info("Moving healthy resource %s"
3182  " off %s before fencing",
3183  rsc->id, node->details->uname);
3184 
3185  * stop healthy resources before the
3186  * stonith op
3187  *
3188  custom_action_order(
3189  rsc, stop_key(rsc), NULL,
3190  NULL,strdup(CRM_OP_FENCE),stonith_op,
3191  pe_order_optional, data_set);
3192 */
3193  }
3194 
3195  g_list_free(action_list);
3196 
3197  /* Get a list of demote actions potentially implied by the fencing */
3198  action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
3199 
3200  for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
3201  action_t *action = (action_t *) gIter->data;
3202 
3203  if (action->node->details->online == FALSE || action->node->details->unclean == TRUE
3204  || is_set(rsc->flags, pe_rsc_failed)) {
3205 
3206  if (is_set(rsc->flags, pe_rsc_failed)) {
3207  pe_rsc_info(rsc,
3208  "Demote of failed resource %s is implicit after %s is fenced",
3209  rsc->id, target->details->uname);
3210  } else {
3211  pe_rsc_info(rsc, "%s is implicit after %s is fenced",
3212  action->uuid, target->details->uname);
3213  }
3214 
3215  /* The demote would never complete and is now implied by the
3216  * fencing, so convert it into a pseudo-action.
3217  */
3218  update_action_flags(action, pe_action_pseudo, __FUNCTION__, __LINE__);
3219  update_action_flags(action, pe_action_runnable, __FUNCTION__, __LINE__);
3220 
3221  if (pe_rsc_is_bundled(rsc)) {
3222  /* Do nothing, let the recovery be ordered after the parent's implied stop */
3223 
3224  } else if (order_implicit) {
3225  order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
3226  }
3227  }
3228  }
3229 
3230  g_list_free(action_list);
3231 }
3232 
3233 void
 /* NOTE(review): original source line 3234 -- the function signature,
  * 'rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op,
  * pe_working_set_t * data_set)' per the forward declaration in the
  * cross-reference index -- was lost in this rendered view.
  *
  * Entry point for fencing-related ordering: recurses into collective
  * resources, skips unmanaged ones, and applies start/stop constraints
  * relative to the fencing operation for managed primitives.
  */
3235 {
3236  if (rsc->children) {
3237  GListPtr gIter = NULL;
3238 
 /* Collective resource: apply fencing ordering to each child */
3239  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3240  resource_t *child_rsc = (resource_t *) gIter->data;
3241 
3242  rsc_stonith_ordering(child_rsc, stonith_op, data_set);
3243  }
3244 
3245  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3246  pe_rsc_trace(rsc, "Skipping fencing constraints for unmanaged resource: %s", rsc->id);
3247 
3248  } else {
3249  native_start_constraints(rsc, stonith_op, data_set);
3250  native_stop_constraints(rsc, stonith_op, data_set);
3251  }
3252 }
3253 
3254 void
3255 ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set)
3256 {
 /* Schedule an agent "reload" of rsc on node in response to a resource
  * definition change, instead of a full restart, when it is safe to do so.
  * Recurses into collective resources; falls back to a stop (full restart)
  * for failed or start-pending resources.
  */
3257  GListPtr gIter = NULL;
3258  action_t *reload = NULL;
3259 
3260  if (rsc->children) {
3261  for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
3262  resource_t *child_rsc = (resource_t *) gIter->data;
3263 
3264  ReloadRsc(child_rsc, node, data_set);
3265  }
3266  return;
3267 
3268  } else if (rsc->variant > pe_native) {
3269  /* Complex resource with no children */
3270  return;
3271 
3272  } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
3273  pe_rsc_trace(rsc, "%s: unmanaged", rsc->id);
3274  return;
3275 
3276  } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) {
3277  pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags);
3278  stop_action(rsc, node, FALSE); /* Force a full restart, overkill? */
3279  return;
3280 
3281  } else if (node == NULL) {
3282  pe_rsc_trace(rsc, "%s: not active", rsc->id);
3283  return;
3284  }
3285 
3286  pe_rsc_trace(rsc, "Processing %s", rsc->id);
3287  set_bit(rsc->flags, pe_rsc_reload);
3288 
3289  reload = custom_action(
3290  rsc, reload_key(rsc), CRMD_ACTION_RELOAD, node, FALSE, TRUE, data_set);
3291  pe_action_set_reason(reload, "resource definition change", FALSE);
3292 
 /* Order the reload before any stop/demote of the resource.
  * NOTE(review): original source lines 3294 and 3297 -- the ordering-type
  * argument of each custom_action_order() call (presumably
  * 'pe_order_optional,' or similar) -- were lost in this rendered view;
  * confirm against the real pcmk_sched_native.c.
  */
3293  custom_action_order(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
3295  data_set);
3296  custom_action_order(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
3298  data_set);
3299 }
3300 
3301 void
3302 native_append_meta(resource_t * rsc, xmlNode * xml)
3303 {
 /* Copy selected resource meta-attributes (clone instance number, remote
  * node name, and -- judging by the loop below -- container membership)
  * into 'xml' as CRM_META-prefixed attributes.
  */
3304  char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
3305  resource_t *parent;
3306 
3307  if (value) {
3308  char *name = NULL;
3309 
 /* NOTE(review): original source line 3310 is missing from this rendered
  * view; it presumably assigned 'name = crm_meta_name(XML_RSC_ATTR_INCARNATION);'
  * before the add/free below -- confirm against the real source.
  */
3311  crm_xml_add(xml, name, value);
3312  free(name);
3313  }
3314 
3315  value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
3316  if (value) {
3317  char *name = NULL;
3318 
 /* NOTE(review): original source line 3319 is likewise missing; it
  * presumably assigned 'name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);'.
  */
3320  crm_xml_add(xml, name, value);
3321  free(name);
3322  }
3323 
 /* Walk up the parent chain looking for a containing resource.
  * NOTE(review): original source lines 3325-3326 (the body of this 'if',
  * presumably adding the container's id under XML_RSC_ATTR_CONTAINER)
  * were lost in this rendered view -- confirm against the real source.
  */
3324  for (parent = rsc; parent != NULL; parent = parent->parent) {
3325  if (parent->container) {
3327  }
3328  }
3329 }
Services API.
pe_action_t * pe_cancel_op(pe_resource_t *rsc, const char *name, guint interval_ms, pe_node_t *node, pe_working_set_t *data_set)
#define LOG_TRACE
Definition: logging.h:26
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:156
GListPtr nodes
Definition: pe_types.h:133
enum rsc_role_e role_filter
Definition: internal.h:30
enum rsc_start_requirement needs
Definition: pe_types.h:375
enum pe_quorum_policy no_quorum_policy
Definition: pe_types.h:125
#define RSC_STOP
Definition: crm.h:177
enum pe_action_flags(* action_flags)(action_t *, node_t *)
#define crm_notice(fmt, args...)
Definition: logging.h:242
GHashTable * known_on
Definition: pe_types.h:330
void native_expand(resource_t *rsc, pe_working_set_t *data_set)
xmlNode * ops_xml
Definition: pe_types.h:288
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:17
gboolean unseen
Definition: pe_types.h:188
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:157
#define INFINITY
Definition: crm.h:73
gboolean PromoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
void native_internal_constraints(resource_t *rsc, pe_working_set_t *data_set)
#define LOAD_STOPPED
gboolean NullOp(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
gboolean is_active(pe__location_t *cons)
GListPtr dangling_migrations
Definition: pe_types.h:341
#define promote_action(rsc, node, optional)
Definition: internal.h:226
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:51
GList * sort_nodes_by_weight(GList *nodes, pe_node_t *active_node, pe_working_set_t *data_set)
node_t * node_copy(const node_t *this_node)
Definition: utils.c:118
#define stop_action(rsc, node, optional)
Definition: internal.h:210
pe_node_t * pe__find_active_on(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: complex.c:842
pe_resource_t * container
Definition: pe_types.h:343
pe_node_t * partial_migration_source
Definition: pe_types.h:328
void(* expand)(resource_t *, pe_working_set_t *)
enum rsc_role_e role
Definition: pe_types.h:333
gboolean StopRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
resource_alloc_functions_t * cmds
Definition: pe_types.h:296
#define crm_config_err(fmt...)
Definition: crm_internal.h:179
gboolean standby
Definition: pe_types.h:415
#define pe_action_implies(action, reason, flag)
Definition: internal.h:329
#define delete_action(rsc, node, optional)
Definition: internal.h:200
#define pe_flag_remove_after_stop
Definition: pe_types.h:101
pe_resource_t * rsc
Definition: pe_types.h:365
#define XML_RSC_ATTR_INCARNATION
Definition: msg_xml.h:186
enum rsc_role_e next_role
Definition: pe_types.h:334
action_t * pe_fence_op(node_t *node, const char *op, bool optional, const char *reason, pe_working_set_t *data_set)
Definition: utils.c:2193
#define INFINITY_HACK
gboolean exclusive_discover
Definition: pe_types.h:315
#define reload_key(rsc)
Definition: internal.h:214
pe_resource_t * remote_rsc
Definition: pe_types.h:200
GHashTable * meta
Definition: pe_types.h:336
GListPtr find_actions(GListPtr input, const char *key, const node_t *on_node)
Definition: utils.c:1428
#define pe_rsc_unique
Definition: pe_types.h:223
#define pe_rsc_notify
Definition: pe_types.h:222
void pcmk__bundle_log_actions(pe_resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
resource_t * rsc_rh
char * score2char_stack(int score, char *buf, size_t len)
Definition: utils.c:237
void(* rsc_colocation_lh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
resource_object_functions_t * fns
Definition: pe_types.h:295
#define XML_LRM_ATTR_INTERVAL
Definition: msg_xml.h:254
#define RSC_DELETE
Definition: crm.h:168
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Create an XML attribute with specified name and value.
Definition: nvpair.c:275
gboolean pe__is_guest_or_remote_node(pe_node_t *node)
Definition: remote.c:58
void native_create_actions(resource_t *rsc, pe_working_set_t *data_set)
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:142
pe_node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:412
enum pe_graph_flags native_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node, enum pe_action_flags flags, enum pe_action_flags filter, enum pe_ordering type, pe_working_set_t *data_set)
#define demote_key(rsc)
Definition: internal.h:235
#define clear_bit(word, bit)
Definition: crm_internal.h:168
guint crm_parse_interval_spec(const char *input)
Definition: utils.c:542
void rsc_ticket_constraint(resource_t *lh_rsc, rsc_ticket_t *rsc_ticket, pe_working_set_t *data_set)
void native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
GListPtr rsc_cons
Definition: pe_types.h:320
pe_node_t * partial_migration_target
Definition: pe_types.h:327
gboolean show_scores
#define RSC_START
Definition: crm.h:174
action_t * get_pseudo_op(const char *name, pe_working_set_t *data_set)
Definition: utils.c:1828
pe_node_t * allocated_to
Definition: pe_types.h:326
#define pe_rsc_allow_remote_remotes
Definition: pe_types.h:232
#define pe_flag_have_quorum
Definition: pe_types.h:87
#define pe_proc_err(fmt...)
Definition: internal.h:22
gboolean remote_requires_reset
Definition: pe_types.h:194
char * reason
Definition: pe_types.h:372
gboolean native_assign_node(resource_t *rsc, GListPtr candidates, node_t *chosen, gboolean force)
pe_node_t * pe_find_node_id(GListPtr node_list, const char *id)
Definition: status.c:396
void create_secondary_notification(pe_action_t *action, resource_t *rsc, pe_action_t *stonith_op, pe_working_set_t *data_set)
#define RSC_MIGRATE
Definition: crm.h:171
char * crm_meta_name(const char *field)
Definition: utils.c:734
void(* rsc_colocation_rh)(pe_resource_t *, pe_resource_t *, rsc_colocation_t *, pe_working_set_t *)
#define pe_flag_stop_everything
Definition: pe_types.h:98
#define demote_action(rsc, node, optional)
Definition: internal.h:236
void native_rsc_colocation_lh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, rsc_colocation_t *constraint, pe_working_set_t *data_set)
void native_append_meta(resource_t *rsc, xmlNode *xml)
#define pe_rsc_provisional
Definition: pe_types.h:227
const char * role2text(enum rsc_role_e role)
Definition: common.c:329
#define CRM_ATTR_UNFENCED
Definition: crm.h:95
gboolean rsc_colocation_new(const char *id, const char *node_attr, int score, resource_t *rsc_lh, resource_t *rsc_rh, const char *state_lh, const char *state_rh, pe_working_set_t *data_set)
void native_rsc_colocation_rh_must(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh)
void process_utilization(resource_t *rsc, node_t **prefer, pe_working_set_t *data_set)
int weight
Definition: pe_types.h:210
#define pe_rsc_merging
Definition: pe_types.h:229
enum pe_discover_e discover_mode
Definition: internal.h:31
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
Definition: utils.c:2403
#define set_bit(word, bit)
Definition: crm_internal.h:167
pe_action_flags
Definition: pe_types.h:258
#define pe_rsc_allow_migrate
Definition: pe_types.h:240
#define pe_rsc_failed
Definition: pe_types.h:234
gboolean pe__is_guest_node(pe_node_t *node)
Definition: remote.c:47
#define crm_debug(fmt, args...)
Definition: logging.h:245
pe_resource_t * uber_parent(pe_resource_t *rsc)
Definition: complex.c:746
#define XML_RSC_ATTR_CONTAINER
Definition: msg_xml.h:205
const char * crm_element_value(const xmlNode *data, const char *name)
Retrieve the value of an XML attribute.
Definition: nvpair.c:423
const char * node_attribute
#define stop_key(rsc)
Definition: internal.h:209
#define pe_rsc_start_pending
Definition: pe_types.h:236
char * task
Definition: pe_types.h:369
gboolean update_action(pe_action_t *action, pe_working_set_t *data_set)
#define CRM_ATTR_UNAME
Definition: crm.h:88
#define crm_trace(fmt, args...)
Definition: logging.h:246
#define do_crm_log(level, fmt, args...)
Log a message.
Definition: logging.h:121
action_t * find_first_action(GListPtr input, const char *uuid, const char *task, node_t *on_node)
Definition: utils.c:1398
#define promote_key(rsc)
Definition: internal.h:225
GHashTable * meta
Definition: pe_types.h:379
pe_resource_t * pe__resource_contains_guest_node(const pe_working_set_t *data_set, const pe_resource_t *rsc)
Definition: remote.c:76
struct pe_node_shared_s * details
Definition: pe_types.h:213
GListPtr running_on
Definition: pe_types.h:329
enum rsc_recovery_type recovery_type
Definition: pe_types.h:298
pe_node_t * node
Definition: pe_types.h:366
GHashTable * rsc_merge_weights(resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, enum pe_weights flags)
filter_colocation_res
enum loss_ticket_policy_e loss_policy
#define pe_rsc_needs_fencing
Definition: pe_types.h:247
gboolean DemoteRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
unsigned long long flags
Definition: pe_types.h:311
const char * uname
Definition: pe_types.h:179
#define pe_rsc_promotable
Definition: pe_types.h:225
void LogActions(resource_t *rsc, pe_working_set_t *data_set, gboolean terminal)
void pe_fence_node(pe_working_set_t *data_set, node_t *node, const char *reason)
Schedule a fence action for a node.
Definition: unpack.c:70
gboolean StartRsc(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
gboolean RoleError(resource_t *rsc, node_t *next, gboolean optional, pe_working_set_t *data_set)
action_t * custom_action(resource_t *rsc, char *key, const char *task, node_t *on_node, gboolean optional, gboolean foo, pe_working_set_t *data_set)
Definition: utils.c:441
#define pe_flag_stonith_enabled
Definition: pe_types.h:91
const char * pe_node_attribute_raw(pe_node_t *node, const char *name)
Definition: common.c:462
pe_graph_flags
Definition: pe_types.h:250
enum filter_colocation_res filter_colocation_constraint(resource_t *rsc_lh, resource_t *rsc_rh, rsc_colocation_t *constraint, gboolean preview)
GHashTable * utilization
Definition: pe_types.h:338
xmlNode * find_rsc_op_entry(resource_t *rsc, const char *key)
Definition: utils.c:1247
#define XML_RSC_ATTR_TARGET_ROLE
Definition: msg_xml.h:196
GListPtr rsc_cons_lhs
Definition: pe_types.h:319
#define XML_LRM_ATTR_MIGRATE_TARGET
Definition: msg_xml.h:286
#define XML_RSC_ATTR_REMOTE_NODE
Definition: msg_xml.h:208
char * uuid
Definition: pe_types.h:370
GHashTable * node_hash_dup(GHashTable *hash)
gboolean native_create_probe(resource_t *rsc, node_t *node, action_t *complete, gboolean force, pe_working_set_t *data_set)
#define pe_rsc_allocating
Definition: pe_types.h:228
enum rsc_role_e text2role(const char *role)
Definition: common.c:350
enum pe_obj_types variant
Definition: pe_types.h:293
void ReloadRsc(resource_t *rsc, node_t *node, pe_working_set_t *data_set)
gboolean granted
Definition: pe_types.h:413
gboolean crm_str_eq(const char *a, const char *b, gboolean use_case)
Definition: strings.c:220
const char * placement_strategy
Definition: pe_types.h:120
gboolean can_run_resources(const node_t *node)
ticket_t * ticket
void graph_element_from_action(action_t *action, pe_working_set_t *data_set)
int rsc_discover_mode
Definition: pe_types.h:214
gboolean can_run_any(GHashTable *nodes)
GListPtr actions
Definition: pe_types.h:322
const char * id
Definition: pe_types.h:178
char * id
Definition: pe_types.h:412
#define CRMD_ACTION_RELOAD
Definition: crm.h:145
#define pe_rsc_fence_device
Definition: pe_types.h:224
int scores_log_level
#define PCMK_RESOURCE_CLASS_STONITH
Definition: services.h:49
GHashTable * node_hash_from_list(GListPtr list)
Definition: utils.c:174
node_t * native_color(resource_t *rsc, node_t *preferred, pe_working_set_t *data_set)
#define STOP_SANITY_ASSERT(lineno)
resource_t * rsc_lh
gboolean is_remote_node
Definition: pe_types.h:314
GListPtr children
Definition: pe_types.h:340
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:406
gboolean update_action_flags(action_t *action, enum pe_action_flags flags, const char *source, int line)
#define start_action(rsc, node, optional)
Definition: internal.h:216
#define CRM_META
Definition: crm.h:49
#define crm_err(fmt, args...)
Definition: logging.h:240
void resource_location(resource_t *rsc, node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:1565
#define CRM_ASSERT(expr)
Definition: results.h:42
#define RSC_STATUS
Definition: crm.h:188
#define pe_rsc_reload
Definition: pe_types.h:231
#define RSC_PROMOTE
Definition: crm.h:180
gboolean check_utilization(const char *value)
Definition: utils.c:177
#define pe_clear_action_bit(action, bit)
Definition: internal.h:25
gboolean DeleteRsc(resource_t *rsc, node_t *node, gboolean optional, pe_working_set_t *data_set)
pe_weights
GHashTable * native_merge_weights(resource_t *rsc, const char *rhs, GHashTable *nodes, const char *attr, float factor, enum pe_weights flags)
int merge_weights(int w1, int w2)
Definition: common.c:369
#define pe_rsc_needs_unfencing
Definition: pe_types.h:248
gboolean shutdown
Definition: pe_types.h:189
#define crm_str(x)
Definition: logging.h:266
gboolean(* rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(resource_t *, node_t *, gboolean, pe_working_set_t *)
gboolean(* create_probe)(resource_t *, node_t *, action_t *, gboolean, pe_working_set_t *)
rsc_role_e
Definition: common.h:86
#define pe_rsc_block
Definition: pe_types.h:219
#define pe_flag_stdout
Definition: pe_types.h:110
enum pe_action_flags flags
Definition: pe_types.h:374
gboolean maintenance
Definition: pe_types.h:192
#define pe_rsc_maintenance
Definition: pe_types.h:243
#define XML_OP_ATTR_PENDING
Definition: msg_xml.h:221
const char * id
int custom_action_order(resource_t *lh_rsc, char *lh_task, action_t *lh_action, resource_t *rh_rsc, char *rh_task, action_t *rh_action, enum pe_ordering type, pe_working_set_t *data_set)
const char * id
#define pe_flag_have_stonith_resource
Definition: pe_types.h:92
#define RSC_ROLE_MAX
Definition: common.h:94
enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX]
void rsc_stonith_ordering(resource_t *rsc, action_t *stonith_op, pe_working_set_t *data_set)
GList * find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
Definition: utils.c:1468
#define pe_flag_enable_unfencing
Definition: pe_types.h:93
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:18
#define start_key(rsc)
Definition: internal.h:215
#define ID(x)
Definition: msg_xml.h:414
unsigned long long flags
Definition: pe_types.h:122
#define pe_err(fmt...)
Definition: internal.h:20
#define dump_node_scores(level, rsc, text, nodes)
Definition: internal.h:186
void print_resource(int log_level, const char *pre_text, resource_t *rsc, gboolean details)
Definition: utils.c:1297
#define safe_str_eq(a, b)
Definition: util.h:59
node_t *(* allocate)(resource_t *, node_t *, pe_working_set_t *)
void native_rsc_colocation_rh_mustnot(resource_t *rsc_lh, gboolean update_lh, resource_t *rsc_rh, gboolean update_rh)
gboolean order_actions(action_t *lh_action, action_t *rh_action, enum pe_ordering order)
Definition: utils.c:1776
GList * pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node, const char *task, bool require_node)
Find all actions of given type for a resource.
Definition: utils.c:1517
#define XML_LRM_ATTR_MIGRATE_SOURCE
Definition: msg_xml.h:285
#define CRM_ATTR_ID
Definition: crm.h:89
gint sort_node_uname(gconstpointer a, gconstpointer b)
Definition: utils.c:217
gboolean unclean
Definition: pe_types.h:187
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
GList * GListPtr
Definition: crm.h:192
#define crm_info(fmt, args...)
Definition: logging.h:243
#define pe_rsc_managed
Definition: pe_types.h:218
#define pe_rsc_orphan
Definition: pe_types.h:217
char * generate_op_key(const char *rsc_id, const char *op_type, guint interval_ms)
Generate an operation key.
Definition: operations.c:39
GHashTable *(* merge_weights)(resource_t *, const char *, GHashTable *, const char *, float, enum pe_weights)
void trigger_unfencing(resource_t *rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t *data_set)
Definition: utils.c:2275
pe_ordering
Definition: pe_types.h:437
gboolean online
Definition: pe_types.h:183
uint64_t flags
Definition: remote.c:148
#define XML_ATTR_TE_TARGET_RC
Definition: msg_xml.h:360
pe_resource_t * parent
Definition: pe_types.h:291
enum pe_action_flags native_action_flags(action_t *action, node_t *node)
enum crm_ais_msg_types type
Definition: internal.h:85
#define RSC_DEMOTE
Definition: crm.h:182
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:16
void native_rsc_colocation_rh(pe_resource_t *lh_rsc, pe_resource_t *rh_rsc, rsc_colocation_t *constraint, pe_working_set_t *data_set)
char * id
Definition: pe_types.h:284
GHashTable * allowed_nodes
Definition: pe_types.h:331
#define RSC_MIGRATED
Definition: crm.h:172
#define pe_flag_startup_probes
Definition: pe_types.h:104
int new_rsc_order(resource_t *lh_rsc, const char *lh_task, resource_t *rh_rsc, const char *rh_task, enum pe_ordering type, pe_working_set_t *data_set)