pacemaker  2.1.6-802a72226b
Scalable High-Availability cluster resource manager
pcmk_sched_instances.c
Go to the documentation of this file.
1 /*
2  * Copyright 2004-2023 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU General Public License version 2
7  * or later (GPLv2+) WITHOUT ANY WARRANTY.
8  */
9 
10 /* This file is intended for code usable with both clone instances and bundle
11  * replica containers.
12  */
13 
14 #include <crm_internal.h>
15 #include <crm/msg_xml.h>
16 #include <pacemaker-internal.h>
17 #include "libpacemaker_private.h"
18 
28 static bool
29 can_run_everywhere(const pe_resource_t *collective)
30 {
31  GHashTableIter iter;
32  pe_node_t *node = NULL;
33  int available_nodes = 0;
34  int max_instances = 0;
35 
36  switch (collective->variant) {
37  case pe_clone:
38  max_instances = pe__clone_max(collective);
39  break;
40  case pe_container:
41  max_instances = pe__bundle_max(collective);
42  break;
43  default:
44  return false; // Not actually possible
45  }
46 
47  g_hash_table_iter_init(&iter, collective->allowed_nodes);
48  while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
49  if (pcmk__node_available(node, false, false)
50  && (max_instances < ++available_nodes)) {
51  return false;
52  }
53  }
54  return true;
55 }
56 
67 static bool
68 can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
69  int max_per_node)
70 {
71  pe_node_t *allowed_node = NULL;
72 
73  if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
74  pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
75  instance->id, pe__node_name(node));
76  return false;
77  }
78 
79  if (!pcmk__node_available(node, false, false)) {
80  pe_rsc_trace(instance,
81  "%s cannot run on %s: node cannot run resources",
82  instance->id, pe__node_name(node));
83  return false;
84  }
85 
86  allowed_node = pcmk__top_allowed_node(instance, node);
87  if (allowed_node == NULL) {
88  crm_warn("%s cannot run on %s: node not allowed",
89  instance->id, pe__node_name(node));
90  return false;
91  }
92 
93  if (allowed_node->weight < 0) {
94  pe_rsc_trace(instance, "%s cannot run on %s: parent score is %s there",
95  instance->id, pe__node_name(node),
96  pcmk_readable_score(allowed_node->weight));
97  return false;
98  }
99 
100  if (allowed_node->count >= max_per_node) {
101  pe_rsc_trace(instance,
102  "%s cannot run on %s: node already has %d instance%s",
103  instance->id, pe__node_name(node), max_per_node,
104  pcmk__plural_s(max_per_node));
105  return false;
106  }
107 
108  pe_rsc_trace(instance, "%s can run on %s (%d already running)",
109  instance->id, pe__node_name(node), allowed_node->count);
110  return true;
111 }
112 
120 static void
121 ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
122 {
123  if (instance->allowed_nodes != NULL) {
124  GHashTableIter iter;
125  pe_node_t *node = NULL;
126 
127  g_hash_table_iter_init(&iter, instance->allowed_nodes);
128  while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
129  if (!can_run_instance(instance, node, max_per_node)) {
130  pe_rsc_trace(instance, "Banning %s from unavailable node %s",
131  instance->id, pe__node_name(node));
132  node->weight = -INFINITY;
133  for (GList *child_iter = instance->children;
134  child_iter != NULL; child_iter = child_iter->next) {
135  pe_resource_t *child = (pe_resource_t *) child_iter->data;
136  pe_node_t *child_node = NULL;
137 
138  child_node = pe_hash_table_lookup(child->allowed_nodes,
139  node->details->id);
140  if (child_node != NULL) {
141  pe_rsc_trace(instance,
142  "Banning %s child %s "
143  "from unavailable node %s",
144  instance->id, child->id,
145  pe__node_name(node));
146  child_node->weight = -INFINITY;
147  }
148  }
149  }
150  }
151  }
152 }
153 
164 static GHashTable *
165 new_node_table(pe_node_t *node)
166 {
167  GHashTable *table = pcmk__strkey_table(NULL, free);
168 
169  node = pe__copy_node(node);
170  g_hash_table_insert(table, (gpointer) node->details->id, node);
171  return table;
172 }
173 
181 static void
182 apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
183 {
184  GList *iter = NULL;
185  pcmk__colocation_t *colocation = NULL;
186  pe_resource_t *other = NULL;
187  float factor = 0.0;
188 
189  /* Because the this_with_colocations() and with_this_colocations() methods
190  * boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
191  * we can use those here directly for efficiency.
192  */
193  for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
194  colocation = (pcmk__colocation_t *) iter->data;
195  other = colocation->primary;
196  factor = colocation->score / (float) INFINITY,
197  other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
198  colocation->node_attribute,
199  factor,
201  }
202  for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
203  colocation = (pcmk__colocation_t *) iter->data;
204  if (!pcmk__colocation_has_influence(colocation, rsc)) {
205  continue;
206  }
207  other = colocation->dependent;
208  factor = colocation->score / (float) INFINITY,
209  other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
210  colocation->node_attribute,
211  factor,
213  }
214 }
215 
231 static int
232 cmp_instance_by_colocation(const pe_resource_t *instance1,
233  const pe_resource_t *instance2)
234 {
235  int rc = 0;
236  pe_node_t *node1 = NULL;
237  pe_node_t *node2 = NULL;
238  pe_node_t *current_node1 = pe__current_node(instance1);
239  pe_node_t *current_node2 = pe__current_node(instance2);
240  GHashTable *colocated_scores1 = NULL;
241  GHashTable *colocated_scores2 = NULL;
242 
243  CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
244  && (instance2 != NULL) && (instance2->parent != NULL)
245  && (current_node1 != NULL) && (current_node2 != NULL));
246 
247  // Create node tables initialized with each node
248  colocated_scores1 = new_node_table(current_node1);
249  colocated_scores2 = new_node_table(current_node2);
250 
251  // Apply parental colocations
252  apply_parent_colocations(instance1, &colocated_scores1);
253  apply_parent_colocations(instance2, &colocated_scores2);
254 
255  // Find original nodes again, with scores updated for colocations
256  node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
257  node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
258 
259  // Compare nodes by updated scores
260  if (node1->weight < node2->weight) {
261  crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
262  instance1->id, node1->weight, pe__node_name(node1),
263  instance2->id, node2->weight, pe__node_name(node2));
264  rc = 1;
265 
266  } else if (node1->weight > node2->weight) {
267  crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
268  instance1->id, node1->weight, pe__node_name(node1),
269  instance2->id, node2->weight, pe__node_name(node2));
270  rc = -1;
271  }
272 
273  g_hash_table_destroy(colocated_scores1);
274  g_hash_table_destroy(colocated_scores2);
275  return rc;
276 }
277 
286 static bool
287 did_fail(const pe_resource_t *rsc)
288 {
289  if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
290  return true;
291  }
292  for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
293  if (did_fail((const pe_resource_t *) iter->data)) {
294  return true;
295  }
296  }
297  return false;
298 }
299 
309 static bool
310 node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
311 {
312  if (*node != NULL) {
313  pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
314  (*node)->details->id);
315  if ((allowed == NULL) || (allowed->weight < 0)) {
316  pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
317  rsc->id, pe__node_name(*node));
318  *node = NULL;
319  return false;
320  }
321  }
322  return true;
323 }
324 
336 gint
337 pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
338 {
339  const pe_resource_t *instance1 = (const pe_resource_t *) a;
340  const pe_resource_t *instance2 = (const pe_resource_t *) b;
341  char *div1 = NULL;
342  char *div2 = NULL;
343 
344  CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
345 
346  // Clone numbers are after a colon, bundle numbers after a dash
347  div1 = strrchr(instance1->id, ':');
348  if (div1 == NULL) {
349  div1 = strrchr(instance1->id, '-');
350  }
351  div2 = strrchr(instance2->id, ':');
352  if (div2 == NULL) {
353  div2 = strrchr(instance2->id, '-');
354  }
355  CRM_ASSERT((div1 != NULL) && (div2 != NULL));
356 
357  return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
358 }
359 
385 gint
386 pcmk__cmp_instance(gconstpointer a, gconstpointer b)
387 {
388  int rc = 0;
389  pe_node_t *node1 = NULL;
390  pe_node_t *node2 = NULL;
391  unsigned int nnodes1 = 0;
392  unsigned int nnodes2 = 0;
393 
394  bool can1 = true;
395  bool can2 = true;
396 
397  const pe_resource_t *instance1 = (const pe_resource_t *) a;
398  const pe_resource_t *instance2 = (const pe_resource_t *) b;
399 
400  CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
401 
402  node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
403  node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
404 
405  /* If both instances are running and at least one is multiply
406  * active, prefer instance that's running on fewer nodes.
407  */
408  if ((nnodes1 > 0) && (nnodes2 > 0)) {
409  if (nnodes1 < nnodes2) {
410  crm_trace("Assign %s (active on %d) before %s (active on %d): "
411  "less multiply active",
412  instance1->id, nnodes1, instance2->id, nnodes2);
413  return -1;
414 
415  } else if (nnodes1 > nnodes2) {
416  crm_trace("Assign %s (active on %d) after %s (active on %d): "
417  "more multiply active",
418  instance1->id, nnodes1, instance2->id, nnodes2);
419  return 1;
420  }
421  }
422 
423  /* An instance that is either inactive or active on an allowed node is
424  * preferred over an instance that is active on a no-longer-allowed node.
425  */
426  can1 = node_is_allowed(instance1, &node1);
427  can2 = node_is_allowed(instance2, &node2);
428  if (can1 && !can2) {
429  crm_trace("Assign %s before %s: not active on a disallowed node",
430  instance1->id, instance2->id);
431  return -1;
432 
433  } else if (!can1 && can2) {
434  crm_trace("Assign %s after %s: active on a disallowed node",
435  instance1->id, instance2->id);
436  return 1;
437  }
438 
439  // Prefer instance with higher configured priority
440  if (instance1->priority > instance2->priority) {
441  crm_trace("Assign %s before %s: priority (%d > %d)",
442  instance1->id, instance2->id,
443  instance1->priority, instance2->priority);
444  return -1;
445 
446  } else if (instance1->priority < instance2->priority) {
447  crm_trace("Assign %s after %s: priority (%d < %d)",
448  instance1->id, instance2->id,
449  instance1->priority, instance2->priority);
450  return 1;
451  }
452 
453  // Prefer active instance
454  if ((node1 == NULL) && (node2 == NULL)) {
455  crm_trace("No assignment preference for %s vs. %s: inactive",
456  instance1->id, instance2->id);
457  return 0;
458 
459  } else if (node1 == NULL) {
460  crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
461  return 1;
462 
463  } else if (node2 == NULL) {
464  crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
465  return -1;
466  }
467 
468  // Prefer instance whose current node can run resources
469  can1 = pcmk__node_available(node1, false, false);
470  can2 = pcmk__node_available(node2, false, false);
471  if (can1 && !can2) {
472  crm_trace("Assign %s before %s: current node can run resources",
473  instance1->id, instance2->id);
474  return -1;
475 
476  } else if (!can1 && can2) {
477  crm_trace("Assign %s after %s: current node can't run resources",
478  instance1->id, instance2->id);
479  return 1;
480  }
481 
482  // Prefer instance whose parent is allowed to run on instance's current node
483  node1 = pcmk__top_allowed_node(instance1, node1);
484  node2 = pcmk__top_allowed_node(instance2, node2);
485  if ((node1 == NULL) && (node2 == NULL)) {
486  crm_trace("No assignment preference for %s vs. %s: "
487  "parent not allowed on either instance's current node",
488  instance1->id, instance2->id);
489  return 0;
490 
491  } else if (node1 == NULL) {
492  crm_trace("Assign %s after %s: parent not allowed on current node",
493  instance1->id, instance2->id);
494  return 1;
495 
496  } else if (node2 == NULL) {
497  crm_trace("Assign %s before %s: parent allowed on current node",
498  instance1->id, instance2->id);
499  return -1;
500  }
501 
502  // Prefer instance whose current node is running fewer other instances
503  if (node1->count < node2->count) {
504  crm_trace("Assign %s before %s: fewer active instances on current node",
505  instance1->id, instance2->id);
506  return -1;
507 
508  } else if (node1->count > node2->count) {
509  crm_trace("Assign %s after %s: more active instances on current node",
510  instance1->id, instance2->id);
511  return 1;
512  }
513 
514  // Prefer instance that isn't failed
515  can1 = did_fail(instance1);
516  can2 = did_fail(instance2);
517  if (!can1 && can2) {
518  crm_trace("Assign %s before %s: not failed",
519  instance1->id, instance2->id);
520  return -1;
521  } else if (can1 && !can2) {
522  crm_trace("Assign %s after %s: failed",
523  instance1->id, instance2->id);
524  return 1;
525  }
526 
527  // Prefer instance with higher cumulative colocation score on current node
528  rc = cmp_instance_by_colocation(instance1, instance2);
529  if (rc != 0) {
530  return rc;
531  }
532 
533  // Prefer instance with lower instance number
534  rc = pcmk__cmp_instance_number(instance1, instance2);
535  if (rc < 0) {
536  crm_trace("Assign %s before %s: instance number",
537  instance1->id, instance2->id);
538  } else if (rc > 0) {
539  crm_trace("Assign %s after %s: instance number",
540  instance1->id, instance2->id);
541  } else {
542  crm_trace("No assignment preference for %s vs. %s",
543  instance1->id, instance2->id);
544  }
545  return rc;
546 }
547 
560 static bool
561 assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
562  int max_per_node)
563 {
564  pe_node_t *chosen = NULL;
565  pe_node_t *allowed = NULL;
566 
567  CRM_ASSERT(instance != NULL);
568  pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
569  ((prefer == NULL)? "no node" : prefer->details->uname));
570 
571  if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
572  // Instance is already assigned
573  return instance->fns->location(instance, NULL, FALSE) != NULL;
574  }
575 
576  if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
577  pe_rsc_debug(instance,
578  "Assignment loop detected involving %s colocations",
579  instance->id);
580  return false;
581  }
582 
583  if (prefer != NULL) { // Possible early assignment to preferred node
584 
585  // Get preferred node with instance's scores
586  allowed = g_hash_table_lookup(instance->allowed_nodes,
587  prefer->details->id);
588 
589  if ((allowed == NULL) || (allowed->weight < 0)) {
590  pe_rsc_trace(instance,
591  "Not assigning %s to preferred node %s: unavailable",
592  instance->id, pe__node_name(prefer));
593  return false;
594  }
595  }
596 
597  ban_unavailable_allowed_nodes(instance, max_per_node);
598 
599  if (prefer == NULL) { // Final assignment
600  chosen = instance->cmds->assign(instance, NULL);
601 
602  } else { // Possible early assignment to preferred node
603  GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
604 
605  chosen = instance->cmds->assign(instance, prefer);
606 
607  // Revert nodes if preferred node won't be assigned
608  if ((chosen != NULL) && (chosen->details != prefer->details)) {
609  crm_info("Not assigning %s to preferred node %s: %s is better",
610  instance->id, pe__node_name(prefer),
611  pe__node_name(chosen));
612  g_hash_table_destroy(instance->allowed_nodes);
613  instance->allowed_nodes = backup;
614  pcmk__unassign_resource(instance);
615  chosen = NULL;
616  } else if (backup != NULL) {
617  g_hash_table_destroy(backup);
618  }
619  }
620 
621  // The parent tracks how many instances have been assigned to each node
622  if (chosen != NULL) {
623  allowed = pcmk__top_allowed_node(instance, chosen);
624  if (allowed == NULL) {
625  /* The instance is allowed on the node, but its parent isn't. This
626  * shouldn't be possible if the resource is managed, and we won't be
627  * able to limit the number of instances assigned to the node.
628  */
630 
631  } else {
632  allowed->count++;
633  }
634  }
635  return chosen != NULL;
636 }
637 
646 static unsigned int
647 reset_allowed_node_counts(pe_resource_t *rsc)
648 {
649  unsigned int available_nodes = 0;
650  pe_node_t *node = NULL;
651  GHashTableIter iter;
652 
653  g_hash_table_iter_init(&iter, rsc->allowed_nodes);
654  while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
655  node->count = 0;
656  if (pcmk__node_available(node, false, false)) {
657  available_nodes++;
658  }
659  }
660  return available_nodes;
661 }
662 
673 static const pe_node_t *
674 preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
675  int optimal_per_node)
676 {
677  const pe_node_t *node = NULL;
678  const pe_node_t *parent_node = NULL;
679 
680  // Check whether instance is active, healthy, and not yet assigned
681  if ((instance->running_on == NULL)
682  || !pcmk_is_set(instance->flags, pe_rsc_provisional)
683  || pcmk_is_set(instance->flags, pe_rsc_failed)) {
684  return NULL;
685  }
686 
687  // Check whether instance's current node can run resources
688  node = pe__current_node(instance);
689  if (!pcmk__node_available(node, true, false)) {
690  pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
691  instance->id, pe__node_name(node));
692  return NULL;
693  }
694 
695  // Check whether node already has optimal number of instances assigned
696  parent_node = pcmk__top_allowed_node(instance, node);
697  if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
698  pe_rsc_trace(rsc,
699  "Not assigning %s to %s early "
700  "(optimal instances already assigned)",
701  instance->id, pe__node_name(node));
702  return NULL;
703  }
704 
705  return node;
706 }
707 
717 void
718 pcmk__assign_instances(pe_resource_t *collective, GList *instances,
719  int max_total, int max_per_node)
720 {
721  // Reuse node count to track number of assigned instances
722  unsigned int available_nodes = reset_allowed_node_counts(collective);
723 
724  int optimal_per_node = 0;
725  int assigned = 0;
726  GList *iter = NULL;
727  pe_resource_t *instance = NULL;
728  const pe_node_t *current = NULL;
729 
730  if (available_nodes > 0) {
731  optimal_per_node = max_total / available_nodes;
732  }
733  if (optimal_per_node < 1) {
734  optimal_per_node = 1;
735  }
736 
737  pe_rsc_debug(collective,
738  "Assigning up to %d %s instance%s to up to %u node%s "
739  "(at most %d per host, %d optimal)",
740  max_total, collective->id, pcmk__plural_s(max_total),
741  available_nodes, pcmk__plural_s(available_nodes),
742  max_per_node, optimal_per_node);
743 
744  // Assign as many instances as possible to their current location
745  for (iter = instances; (iter != NULL) && (assigned < max_total);
746  iter = iter->next) {
747  instance = (pe_resource_t *) iter->data;
748 
749  current = preferred_node(collective, instance, optimal_per_node);
750  if ((current != NULL)
751  && assign_instance(instance, current, max_per_node)) {
752  pe_rsc_trace(collective, "Assigned %s to current node %s",
753  instance->id, pe__node_name(current));
754  assigned++;
755  }
756  }
757 
758  pe_rsc_trace(collective, "Assigned %d of %d instance%s to current node",
759  assigned, max_total, pcmk__plural_s(max_total));
760 
761  for (iter = instances; iter != NULL; iter = iter->next) {
762  instance = (pe_resource_t *) iter->data;
763 
764  if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
765  continue; // Already assigned
766  }
767 
768  if (instance->running_on != NULL) {
769  current = pe__current_node(instance);
770  if (pcmk__top_allowed_node(instance, current) == NULL) {
771  const char *unmanaged = "";
772 
773  if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
774  unmanaged = "Unmanaged resource ";
775  }
776  crm_notice("%s%s is running on %s which is no longer allowed",
777  unmanaged, instance->id, pe__node_name(current));
778  }
779  }
780 
781  if (assigned >= max_total) {
782  pe_rsc_debug(collective,
783  "Not assigning %s because maximum %d instances "
784  "already assigned",
785  instance->id, max_total);
786  resource_location(instance, NULL, -INFINITY,
787  "collective_limit_reached", collective->cluster);
788 
789  } else if (assign_instance(instance, NULL, max_per_node)) {
790  assigned++;
791  }
792  }
793 
794  pe_rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
795  assigned, max_total, pcmk__plural_s(max_total),
796  collective->id);
797 }
798 
/* Bit flags describing what the instances of a collective are doing.
 * NOTE(review): the enum tag, the instance_restarting value, and the
 * instance_all aggregate were lost in extraction and reconstructed here from
 * their uses in check_instance_state() — confirm against upstream.
 */
enum instance_state {
    instance_starting   = (1 << 0), // At least one instance is starting
    instance_stopping   = (1 << 1), // At least one instance is stopping

    /* This indicates that some instance is restarting. It's not the same as
     * instance_starting|instance_stopping, which would indicate that some
     * instance is starting, and some instance (not necessarily the same one) is
     * stopping.
     */
    instance_restarting = (1 << 2),

    instance_active     = (1 << 3), // At least one instance is active

    // Convenience mask of all of the above
    instance_all        = instance_starting|instance_stopping
                          |instance_restarting|instance_active,
};
815 
823 static void
824 check_instance_state(const pe_resource_t *instance, uint32_t *state)
825 {
826  const GList *iter = NULL;
827  uint32_t instance_state = 0; // State of just this instance
828 
829  // No need to check further if all conditions have already been detected
830  if (pcmk_all_flags_set(*state, instance_all)) {
831  return;
832  }
833 
834  // If instance is a collective (a cloned group), check its children instead
835  if (instance->variant > pe_native) {
836  for (iter = instance->children;
837  (iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
838  iter = iter->next) {
839  check_instance_state((const pe_resource_t *) iter->data, state);
840  }
841  return;
842  }
843 
844  // If we get here, instance is a primitive
845 
846  if (instance->running_on != NULL) {
848  }
849 
850  // Check each of the instance's actions for runnable start or stop
851  for (iter = instance->actions;
852  (iter != NULL) && !pcmk_all_flags_set(instance_state,
855  iter = iter->next) {
856 
857  const pe_action_t *action = (const pe_action_t *) iter->data;
858  const bool optional = pcmk_is_set(action->flags, pe_action_optional);
859 
860  if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
861  if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
862  pe_rsc_trace(instance, "Instance is starting due to %s",
863  action->uuid);
865  } else {
866  pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
867  action->uuid, instance->id,
868  (optional? "optional" : "unrunnable"));
869  }
870 
871  } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
872  /* Only stop actions can be pseudo-actions for primitives. That
873  * indicates that the node they are on is being fenced, so the stop
874  * is implied rather than actually executed.
875  */
876  if (!optional
877  && pcmk_any_flags_set(action->flags,
879  pe_rsc_trace(instance, "Instance is stopping due to %s",
880  action->uuid);
882  } else {
883  pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
884  action->uuid, instance->id,
885  (optional? "optional" : "unrunnable"));
886  }
887  }
888  }
889 
890  if (pcmk_all_flags_set(instance_state,
893  }
894  *state |= instance_state;
895 }
896 
904 void
905 pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
906 {
907  uint32_t state = 0;
908 
909  pe_action_t *stop = NULL;
910  pe_action_t *stopped = NULL;
911 
912  pe_action_t *start = NULL;
913  pe_action_t *started = NULL;
914 
915  pe_rsc_trace(collective, "Creating collective instance actions for %s",
916  collective->id);
917 
918  // Create actions for each instance appropriate to its variant
919  for (GList *iter = instances; iter != NULL; iter = iter->next) {
920  pe_resource_t *instance = (pe_resource_t *) iter->data;
921 
922  instance->cmds->create_actions(instance);
923  check_instance_state(instance, &state);
924  }
925 
926  // Create pseudo-actions for rsc start and started
927  start = pe__new_rsc_pseudo_action(collective, RSC_START,
929  true);
930  started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
932  false);
933  started->priority = INFINITY;
934  if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
936  }
937 
938  // Create pseudo-actions for rsc stop and stopped
939  stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
941  true);
942  stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
944  true);
945  stopped->priority = INFINITY;
946  if (!pcmk_is_set(state, instance_restarting)) {
948  }
949 
950  if (collective->variant == pe_clone) {
951  pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
952  stopped);
953  }
954 }
955 
967 static inline GList *
968 get_instance_list(const pe_resource_t *rsc)
969 {
970  if (rsc->variant == pe_container) {
971  return pe__bundle_containers(rsc);
972  } else {
973  return rsc->children;
974  }
975 }
976 
984 static inline void
985 free_instance_list(const pe_resource_t *rsc, GList *list)
986 {
987  if (list != rsc->children) {
988  g_list_free(list);
989  }
990 }
991 
1005 bool
1006 pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
1007  enum rsc_role_e role, bool current)
1008 {
1009  pe_node_t *instance_node = NULL;
1010 
1011  CRM_CHECK((instance != NULL) && (node != NULL), return false);
1012 
1013  if ((role != RSC_ROLE_UNKNOWN)
1014  && (role != instance->fns->state(instance, current))) {
1015  pe_rsc_trace(instance,
1016  "%s is not a compatible instance (role is not %s)",
1017  instance->id, role2text(role));
1018  return false;
1019  }
1020 
1021  if (!is_set_recursive(instance, pe_rsc_block, true)) {
1022  // We only want instances that haven't failed
1023  instance_node = instance->fns->location(instance, NULL, current);
1024  }
1025 
1026  if (instance_node == NULL) {
1027  pe_rsc_trace(instance,
1028  "%s is not a compatible instance (not assigned to a node)",
1029  instance->id);
1030  return false;
1031  }
1032 
1033  if (instance_node->details != node->details) {
1034  pe_rsc_trace(instance,
1035  "%s is not a compatible instance (assigned to %s not %s)",
1036  instance->id, pe__node_name(instance_node),
1037  pe__node_name(node));
1038  return false;
1039  }
1040 
1041  return true;
1042 }
1043 
1057 static pe_resource_t *
1058 find_compatible_instance_on_node(const pe_resource_t *match_rsc,
1059  const pe_resource_t *rsc,
1060  const pe_node_t *node, enum rsc_role_e role,
1061  bool current)
1062 {
1063  GList *instances = NULL;
1064 
1065  instances = get_instance_list(rsc);
1066  for (GList *iter = instances; iter != NULL; iter = iter->next) {
1067  pe_resource_t *instance = (pe_resource_t *) iter->data;
1068 
1069  if (pcmk__instance_matches(instance, node, role, current)) {
1070  pe_rsc_trace(match_rsc, "Found %s %s instance %s compatible with %s on %s",
1071  role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
1072  rsc->id, instance->id, match_rsc->id,
1073  pe__node_name(node));
1074  free_instance_list(rsc, instances); // Only frees list, not contents
1075  return instance;
1076  }
1077  }
1078  free_instance_list(rsc, instances);
1079 
1080  pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
1081  ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
1082  rsc->id, match_rsc->id, pe__node_name(node));
1083  return NULL;
1084 }
1085 
1099 pe_resource_t *
1101  const pe_resource_t *rsc, enum rsc_role_e role,
1102  bool current)
1103 {
1104  pe_resource_t *instance = NULL;
1105  GList *nodes = NULL;
1106  const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
1107 
1108  // If match_rsc has a node, check only that node
1109  if (node != NULL) {
1110  return find_compatible_instance_on_node(match_rsc, rsc, node, role,
1111  current);
1112  }
1113 
1114  // Otherwise check for an instance matching any of match_rsc's allowed nodes
1115  nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
1116  NULL);
1117  for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
1118  iter = iter->next) {
1119  instance = find_compatible_instance_on_node(match_rsc, rsc,
1120  (pe_node_t *) iter->data,
1121  role, current);
1122  }
1123 
1124  if (instance == NULL) {
1125  pe_rsc_debug(rsc, "No %s instance found compatible with %s",
1126  rsc->id, match_rsc->id);
1127  }
1128  g_list_free(nodes);
1129  return instance;
1130 }
1131 
1144 static bool
1145 unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
1146  pe_resource_t *then_instance, uint32_t type, bool current)
1147 {
1148  // Allow "then" instance to go down even without an interleave match
1149  if (current) {
1150  pe_rsc_trace(then->rsc,
1151  "%s has no instance to order before stopping "
1152  "or demoting %s",
1153  first->rsc->id, then_instance->id);
1154 
1155  /* If the "first" action must be runnable, but there is no "first"
1156  * instance, the "then" instance must not be allowed to come up.
1157  */
1158  } else if (pcmk_any_flags_set(type, pe_order_runnable_left
1160  pe_rsc_info(then->rsc,
1161  "Inhibiting %s from being active "
1162  "because there is no %s instance to interleave",
1163  then_instance->id, first->rsc->id);
1164  return pcmk__assign_resource(then_instance, NULL, true);
1165  }
1166  return false;
1167 }
1168 
1184 static pe_action_t *
1185 find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
1186  const char *action_name, const pe_node_t *node,
1187  bool for_first)
1188 {
1189  const pe_resource_t *rsc = NULL;
1190  pe_action_t *matching_action = NULL;
1191 
1192  /* If instance is a bundle container, sometimes we should interleave the
1193  * action for the container itself, and sometimes for the containerized
1194  * resource.
1195  *
1196  * For example, given "start bundle A then bundle B", B likely requires the
1197  * service inside A's container to be active, rather than just the
1198  * container, so we should interleave the action for A's containerized
1199  * resource. On the other hand, it's possible B's container itself requires
1200  * something from A, so we should interleave the action for B's container.
1201  *
1202  * Essentially, for 'first', we should use the containerized resource for
1203  * everything except stop, and for 'then', we should use the container for
1204  * everything except promote and demote (which can only be performed on the
1205  * containerized resource).
1206  */
1207  if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
1208  CRMD_ACTION_STOPPED, NULL))
1209 
1210  || (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
1213  CRMD_ACTION_DEMOTED, NULL))) {
1214 
1215  rsc = pcmk__get_rsc_in_container(instance);
1216  }
1217  if (rsc == NULL) {
1218  rsc = instance; // No containerized resource, use instance itself
1219  } else {
1220  node = NULL; // Containerized actions are on bundle-created guest
1221  }
1222 
1223  matching_action = find_first_action(rsc->actions, NULL, action_name, node);
1224  if (matching_action != NULL) {
1225  return matching_action;
1226  }
1227 
1228  if (pcmk_is_set(instance->flags, pe_rsc_orphan)
1229  || pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
1230  crm_trace("No %s action found for %s%s",
1231  action_name,
1232  pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
1233  instance->id);
1234  } else {
1235  crm_err("No %s action found for %s to interleave (bug?)",
1236  action_name, instance->id);
1237  }
1238  return NULL;
1239 }
1240 
1254 static const char *
1255 orig_action_name(const pe_action_t *action)
1256 {
1257  const pe_resource_t *instance = action->rsc->children->data; // Any instance
1258  char *action_type = NULL;
1259  const char *action_name = action->task;
1260  enum action_tasks orig_task = no_action;
1261 
1263  CRMD_ACTION_NOTIFIED, NULL)) {
1264  // action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
1265  CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
1266  return task2text(no_action));
1267  action_name = strstr(action_type, "_notify_");
1268  CRM_CHECK(action_name != NULL, return task2text(no_action));
1269  action_name += strlen("_notify_");
1270  }
1271  orig_task = get_complex_task(instance, action_name);
1272  free(action_type);
1273  return task2text(orig_task);
1274 }
1275 
1296 static uint32_t
1297 update_interleaved_actions(pe_action_t *first, pe_action_t *then,
1298  const pe_node_t *node, uint32_t filter,
1299  uint32_t type)
1300 {
1301  GList *instances = NULL;
1302  uint32_t changed = pcmk__updated_none;
1303  const char *orig_first_task = orig_action_name(first);
1304 
1305  // Stops and demotes must be interleaved with instance on current node
1306  bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
1307  || pcmk__ends_with(first->uuid,
1308  "_" CRMD_ACTION_DEMOTED "_0");
1309 
1310  // Update the specified actions for each "then" instance individually
1311  instances = get_instance_list(then->rsc);
1312  for (GList *iter = instances; iter != NULL; iter = iter->next) {
1313  pe_resource_t *first_instance = NULL;
1314  pe_resource_t *then_instance = iter->data;
1315 
1316  pe_action_t *first_action = NULL;
1317  pe_action_t *then_action = NULL;
1318 
1319  // Find a "first" instance to interleave with this "then" instance
1320  first_instance = pcmk__find_compatible_instance(then_instance,
1321  first->rsc,
1323  current);
1324 
1325  if (first_instance == NULL) { // No instance can be interleaved
1326  if (unassign_if_mandatory(first, then, then_instance, type,
1327  current)) {
1329  }
1330  continue;
1331  }
1332 
1333  first_action = find_instance_action(first, first_instance,
1334  orig_first_task, node, true);
1335  if (first_action == NULL) {
1336  continue;
1337  }
1338 
1339  then_action = find_instance_action(then, then_instance, then->task,
1340  node, false);
1341  if (then_action == NULL) {
1342  continue;
1343  }
1344 
1345  if (order_actions(first_action, then_action, type)) {
1346  pcmk__set_updated_flags(changed, first,
1348  }
1349 
1350  changed |= then_instance->cmds->update_ordered_actions(
1351  first_action, then_action, node,
1352  first_instance->cmds->action_flags(first_action, node), filter,
1353  type, then->rsc->cluster);
1354  }
1355  free_instance_list(then->rsc, instances);
1356  return changed;
1357 }
1358 
1368 static bool
1369 can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
1370 {
1371  bool interleave = false;
1372  pe_resource_t *rsc = NULL;
1373 
1374  if ((first->rsc == NULL) || (then->rsc == NULL)) {
1375  crm_trace("Not interleaving %s with %s: not resource actions",
1376  first->uuid, then->uuid);
1377  return false;
1378  }
1379 
1380  if (first->rsc == then->rsc) {
1381  crm_trace("Not interleaving %s with %s: same resource",
1382  first->uuid, then->uuid);
1383  return false;
1384  }
1385 
1386  if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
1387  crm_trace("Not interleaving %s with %s: not clones or bundles",
1388  first->uuid, then->uuid);
1389  return false;
1390  }
1391 
1392  if (pcmk__ends_with(then->uuid, "_stop_0")
1393  || pcmk__ends_with(then->uuid, "_demote_0")) {
1394  rsc = first->rsc;
1395  } else {
1396  rsc = then->rsc;
1397  }
1398 
1399  interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
1401  pe_rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
1402  first->uuid, then->uuid, (interleave? "" : "not "), rsc->id);
1403  return interleave;
1404 }
1405 
1428 static uint32_t
1429 update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
1430  const pe_action_t *then, const pe_node_t *node,
1431  uint32_t flags, uint32_t filter, uint32_t type)
1432 {
1433  pe_action_t *instance_action = NULL;
1434  uint32_t instance_flags = 0;
1435  uint32_t changed = pcmk__updated_none;
1436 
1437  // Check whether instance has an equivalent of "then" action
1438  instance_action = find_first_action(instance->actions, NULL, then->task,
1439  node);
1440  if (instance_action == NULL) {
1441  return changed;
1442  }
1443 
1444  // Check whether action is runnable
1445  instance_flags = instance->cmds->action_flags(instance_action, node);
1446  if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
1447  return changed;
1448  }
1449 
1450  // If so, update actions for the instance
1451  changed = instance->cmds->update_ordered_actions(first, instance_action,
1452  node, flags, filter, type,
1453  instance->cluster);
1454 
1455  // Propagate any changes to later actions
1456  if (pcmk_is_set(changed, pcmk__updated_then)) {
1457  for (GList *after_iter = instance_action->actions_after;
1458  after_iter != NULL; after_iter = after_iter->next) {
1459  pe_action_wrapper_t *after = after_iter->data;
1460 
1462  }
1463  }
1464 
1465  return changed;
1466 }
1467 
1491 uint32_t
1493  const pe_node_t *node, uint32_t flags,
1494  uint32_t filter, uint32_t type,
1496 {
1497  if (then->rsc == NULL) {
1498  return pcmk__updated_none;
1499 
1500  } else if (can_interleave_actions(first, then)) {
1501  return update_interleaved_actions(first, then, node, filter, type);
1502 
1503  } else {
1504  uint32_t changed = pcmk__updated_none;
1505  GList *instances = get_instance_list(then->rsc);
1506 
1507  // Update actions for the clone or bundle resource itself
1508  changed |= pcmk__update_ordered_actions(first, then, node, flags,
1509  filter, type, data_set);
1510 
1511  // Update the 'then' clone instances or bundle containers individually
1512  for (GList *iter = instances; iter != NULL; iter = iter->next) {
1513  pe_resource_t *instance = iter->data;
1514 
1515  changed |= update_noninterleaved_actions(instance, first, then,
1516  node, flags, filter, type);
1517  }
1518  free_instance_list(then->rsc, instances);
1519  return changed;
1520  }
1521 }
1522 
// Clear a bit in a local action-flags summary variable, logging the change
// (via pcmk__clear_flags_as at trace level) against the action's resource ID
#define pe__clear_action_summary_flags(flags, action, flag) do {        \
        flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                     "Action summary", action->rsc->id, \
                                     flags, flag, #flag);               \
    } while (0)
1528 
1539 enum pe_action_flags
1541  const pe_node_t *node)
1542 {
1543  bool any_runnable = false;
1544  enum pe_action_flags flags;
1545  const char *action_name = orig_action_name(action);
1546 
1547  // Set original assumptions (optional and runnable may be cleared below)
1549 
1550  for (const GList *iter = instances; iter != NULL; iter = iter->next) {
1551  const pe_resource_t *instance = iter->data;
1552  const pe_node_t *instance_node = NULL;
1553  pe_action_t *instance_action = NULL;
1554  enum pe_action_flags instance_flags;
1555 
1556  // Node is relevant only to primitive instances
1557  if (instance->variant == pe_native) {
1558  instance_node = node;
1559  }
1560 
1561  instance_action = find_first_action(instance->actions, NULL,
1562  action_name, instance_node);
1563  if (instance_action == NULL) {
1564  pe_rsc_trace(action->rsc, "%s has no %s action on %s",
1565  instance->id, action_name, pe__node_name(node));
1566  continue;
1567  }
1568 
1569  pe_rsc_trace(action->rsc, "%s has %s for %s on %s",
1570  instance->id, instance_action->uuid, action_name,
1571  pe__node_name(node));
1572 
1573  instance_flags = instance->cmds->action_flags(instance_action, node);
1574 
1575  // If any instance action is mandatory, so is the collective action
1577  && !pcmk_is_set(instance_flags, pe_action_optional)) {
1578  pe_rsc_trace(instance, "%s is mandatory because %s is",
1579  action->uuid, instance_action->uuid);
1582  }
1583 
1584  // If any instance action is runnable, so is the collective action
1585  if (pcmk_is_set(instance_flags, pe_action_runnable)) {
1586  any_runnable = true;
1587  }
1588  }
1589 
1590  if (!any_runnable) {
1591  pe_rsc_trace(action->rsc,
1592  "%s is not runnable because no instance can run %s",
1593  action->uuid, action_name);
1595  if (node == NULL) {
1597  }
1598  }
1599 
1600  return flags;
1601 }
1602 
1613 void
1614 pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
1615  const pe_resource_t *collective,
1616  bool with_this)
1617 {
1618  const GList *colocations = NULL;
1619  bool everywhere = false;
1620 
1621  CRM_CHECK((list != NULL) && (instance != NULL), return);
1622 
1623  if (collective == NULL) {
1624  return;
1625  }
1626  switch (collective->variant) {
1627  case pe_clone:
1628  case pe_container:
1629  break;
1630  default:
1631  return;
1632  }
1633 
1634  everywhere = can_run_everywhere(collective);
1635 
1636  if (with_this) {
1637  colocations = collective->rsc_cons_lhs;
1638  } else {
1639  colocations = collective->rsc_cons;
1640  }
1641 
1642  for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
1643  const pcmk__colocation_t *colocation = iter->data;
1644 
1645  if (with_this
1646  && !pcmk__colocation_has_influence(colocation, instance)) {
1647  continue;
1648  }
1649  if (!everywhere || (colocation->score < 0)
1650  || (!with_this && (colocation->score == INFINITY))) {
1651 
1652  if (with_this) {
1653  pcmk__add_with_this(list, colocation);
1654  } else {
1655  pcmk__add_this_with(list, colocation);
1656  }
1657  }
1658  }
1659 }
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:235
const char * task2text(enum action_tasks task)
Definition: common.c:397
#define RSC_STOP
Definition: crm.h:202
gboolean parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
Definition: operations.c:96
#define crm_notice(fmt, args...)
Definition: logging.h:379
#define pe_rsc_debug(rsc, fmt, args...)
Definition: internal.h:49
#define INFINITY
Definition: crm.h:99
GList * rsc_cons
Definition: pe_types.h:389
#define pe__set_action_flags(action, flags_to_set)
Definition: internal.h:89
G_GNUC_INTERNAL uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set)
const char * pcmk_readable_score(int score)
Return a displayable static string for a score value.
Definition: scores.c:86
int priority
Definition: pe_types.h:431
bool pcmk__strcase_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:933
GList * children
Definition: pe_types.h:409
int count
Definition: pe_types.h:267
resource_alloc_functions_t * cmds
Definition: pe_types.h:359
pe_resource_t * rsc
Definition: pe_types.h:433
pe_action_t * find_first_action(const GList *input, const char *uuid, const char *task, const pe_node_t *on_node)
Definition: pe_actions.c:1298
#define CRMD_ACTION_NOTIFY
Definition: crm.h:185
GHashTable * meta
Definition: pe_types.h:405
resource_object_functions_t * fns
Definition: pe_types.h:358
pe_node_t * pe__copy_node(const pe_node_t *this_node)
Definition: utils.c:89
#define CRMD_ACTION_PROMOTE
Definition: crm.h:180
pe_resource_t * dependent
G_GNUC_INTERNAL GList * pcmk__sort_nodes(GList *nodes, pe_node_t *active_node)
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:219
gint pcmk__cmp_instance(gconstpointer a, gconstpointer b)
GList * rsc_cons_lhs
Definition: pe_types.h:388
pe_action_t * pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional, bool runnable)
Definition: pe_actions.c:1650
enum crm_ais_msg_types type
Definition: cpg.c:48
void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score, const char *tag, pe_working_set_t *data_set)
Definition: utils.c:398
#define RSC_START
Definition: crm.h:199
pe_action_t * action
Definition: pe_types.h:557
bool pcmk__ends_with(const char *s, const char *match)
Definition: strings.c:536
int pe__clone_max(const pe_resource_t *clone)
Definition: clone.c:62
const char * action
Definition: pcmk_fence.c:30
pe_node_t *(* assign)(pe_resource_t *rsc, const pe_node_t *prefer)
enum pe_action_flags(* action_flags)(pe_action_t *action, const pe_node_t *node)
gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
pe_node_t *(* active_node)(const pe_resource_t *rsc, unsigned int *count_all, unsigned int *count_clean)
Definition: pe_types.h:73
#define pe_rsc_provisional
Definition: pe_types.h:282
GList * pe__bundle_containers(const pe_resource_t *bundle)
Definition: bundle.c:1916
void pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance, const pe_resource_t *collective, bool with_this)
bool pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node, enum rsc_role_e role, bool current)
const char * role2text(enum rsc_role_e role)
Definition: common.c:450
#define CRMD_ACTION_DEMOTED
Definition: crm.h:183
pe_node_t *(* location)(const pe_resource_t *, GList **, int)
Definition: pe_types.h:55
#define CRMD_ACTION_STOP
Definition: crm.h:177
void pcmk__assign_instances(pe_resource_t *collective, GList *instances, int max_total, int max_per_node)
G_GNUC_INTERNAL void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation)
int weight
Definition: pe_types.h:265
#define crm_warn(fmt, args...)
Definition: logging.h:378
#define CRMD_ACTION_DEMOTE
Definition: crm.h:182
pe_action_flags
Definition: pe_types.h:316
#define pe_rsc_failed
Definition: pe_types.h:292
G_GNUC_INTERNAL bool pcmk__node_available(const pe_node_t *node, bool consider_score, bool consider_guest)
enum pe_action_flags pcmk__collective_action_flags(pe_action_t *action, const GList *instances, const pe_node_t *node)
pe_resource_t * primary
char * task
Definition: pe_types.h:437
G_GNUC_INTERNAL const pe_resource_t * pcmk__get_rsc_in_container(const pe_resource_t *instance)
GList * actions_after
Definition: pe_types.h:471
#define pe__clear_action_flags(action, flags_to_clear)
Definition: internal.h:98
#define pe__clear_action_summary_flags(flags, action, flag)
#define crm_trace(fmt, args...)
Definition: logging.h:383
#define pcmk_is_set(g, f)
Convenience alias for pcmk_all_flags_set(), to check single flag.
Definition: util.h:121
struct pe_node_shared_s * details
Definition: pe_types.h:268
void pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
gboolean order_actions(pe_action_t *lh_action, pe_action_t *rh_action, enum pe_ordering order)
Definition: utils.c:488
unsigned long long flags
Definition: pe_types.h:373
const char * uname
Definition: pe_types.h:232
pe_working_set_t * data_set
void(* create_actions)(pe_resource_t *rsc)
GList * actions
Definition: pe_types.h:391
uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set)
void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone, pe_action_t *start, pe_action_t *started, pe_action_t *stop, pe_action_t *stopped)
Definition: clone.c:1448
#define CRMD_ACTION_PROMOTED
Definition: crm.h:181
pe_node_t node1
bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
Definition: clone.c:555
char * uuid
Definition: pe_types.h:438
#define pe_rsc_allocating
Definition: pe_types.h:283
enum pe_obj_types variant
Definition: pe_types.h:356
bool pcmk__str_any_of(const char *s,...) G_GNUC_NULL_TERMINATED
Definition: strings.c:957
pe_node_t node2
G_GNUC_INTERNAL void pcmk__unassign_resource(pe_resource_t *rsc)
pe_resource_t * pcmk__find_compatible_instance(const pe_resource_t *match_rsc, const pe_resource_t *rsc, enum rsc_role_e role, bool current)
const char * id
Definition: pe_types.h:231
#define CRMD_ACTION_STOPPED
Definition: crm.h:178
enum rsc_role_e(* state)(const pe_resource_t *, gboolean)
Definition: pe_types.h:54
G_GNUC_INTERNAL void pcmk__update_action_for_orderings(pe_action_t *action, pe_working_set_t *data_set)
#define RSC_STARTED
Definition: crm.h:200
GHashTable * pcmk__strkey_table(GDestroyNotify key_destroy_func, GDestroyNotify value_destroy_func)
Definition: strings.c:611
G_GNUC_INTERNAL GHashTable * pcmk__copy_node_table(GHashTable *nodes)
#define pcmk__set_updated_flags(au_flags, action, flags_to_set)
#define crm_err(fmt, args...)
Definition: logging.h:377
#define CRM_ASSERT(expr)
Definition: results.h:42
G_GNUC_INTERNAL pe_node_t * pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
G_GNUC_INTERNAL void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation)
G_GNUC_INTERNAL bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
int pe__bundle_max(const pe_resource_t *rsc)
Definition: bundle.c:35
#define pcmk__plural_s(i)
rsc_role_e
Possible roles that a resource can be in.
Definition: common.h:92
GList * running_on
Definition: pe_types.h:398
#define pe_rsc_block
Definition: pe_types.h:274
pe_working_set_t * cluster
Definition: pe_types.h:353
const char * node_attribute
#define RSC_STOPPED
Definition: crm.h:203
enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name)
Definition: pe_actions.c:1265
gboolean crm_is_true(const char *s)
Definition: strings.c:416
#define CRMD_ACTION_NOTIFIED
Definition: crm.h:186
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:50
uint32_t(* update_ordered_actions)(pe_action_t *first, pe_action_t *then, const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type, pe_working_set_t *data_set)
#define XML_RSC_ATTR_INTERLEAVE
Definition: msg_xml.h:240
#define crm_info(fmt, args...)
Definition: logging.h:380
#define pe_rsc_managed
Definition: pe_types.h:273
#define pe_rsc_orphan
Definition: pe_types.h:272
uint64_t flags
Definition: remote.c:215
void(* add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id, GHashTable **nodes, const char *attr, float factor, uint32_t flags)
action_tasks
Definition: common.h:61
pe_resource_t * parent
Definition: pe_types.h:354
#define RSC_DEMOTE
Definition: crm.h:207
#define pe_rsc_info(rsc, fmt, args...)
Definition: internal.h:48
char * id
Definition: pe_types.h:347
GHashTable * allowed_nodes
Definition: pe_types.h:400