pacemaker  2.0.2-debe490
Scalable High-Availability cluster resource manager
bundle.c
1 /*
2  * Copyright 2004-2019 the Pacemaker project contributors
3  *
4  * The version control history for this file may have further details.
5  *
6  * This source code is licensed under the GNU Lesser General Public License
7  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
8  */
9 
10 #include <crm_internal.h>
11 
12 #include <ctype.h>
13 
14 #include <crm/pengine/rules.h>
15 #include <crm/pengine/status.h>
16 #include <crm/pengine/internal.h>
17 #include <unpack.h>
18 #include <crm/msg_xml.h>
19 
20 #define PE__VARIANT_BUNDLE 1
21 #include "./variant.h"
22 
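/* Return a newly allocated string with the IPv4 address following last_ip,
 * bumping the third octet and resetting the fourth when the fourth exceeds
 * 253; returns NULL if last_ip is not IPv4 (IPv6 is not handled yet) or the
 * third octet is already exhausted. */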
23 static char *
24 next_ip(const char *last_ip)
25 {
26  unsigned int oct1 = 0;
27  unsigned int oct2 = 0;
28  unsigned int oct3 = 0;
29  unsigned int oct4 = 0;
30  int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
31 
32  if (rc != 4) {
33  /* @TODO check for IPv6 */
34  return NULL;
35 
36  } else if (oct3 > 253) {
37  return NULL;
38 
39  } else if (oct4 > 253) {
40  ++oct3;
41  oct4 = 1;
42 
43  } else {
44  ++oct4;
45  }
46 
47  return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
48 }
49 
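/* Assign the next address from ip-range-start to this replica and write the
 * container-runtime option mapping the replica's hostname to that address
 * (--add-host or --hosts-entry, depending on the agent and add-host setting)
 * into buffer; returns the number of characters written, or 0 without a range. */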
50 static int
51 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
52  char *buffer, int max)
53 {
54  if(data->ip_range_start == NULL) {
55  return 0;
56 
57  } else if(data->ip_last) {
58  replica->ipaddr = next_ip(data->ip_last);
59 
60  } else {
61  replica->ipaddr = strdup(data->ip_range_start);
62  }
63 
64  data->ip_last = replica->ipaddr;
65  switch (data->agent_type) {
66  case PE__CONTAINER_AGENT_DOCKER:
67  case PE__CONTAINER_AGENT_PODMAN:
68  if (data->add_host) {
69  return snprintf(buffer, max, " --add-host=%s-%d:%s",
70  data->prefix, replica->offset,
71  replica->ipaddr);
72  }
73  case PE__CONTAINER_AGENT_RKT:
74  return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
75  replica->ipaddr, data->prefix, replica->offset);
76  default: // PE__CONTAINER_AGENT_UNKNOWN
77  break;
78  }
79  return 0;
80 }
81 
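/* Create a bare primitive resource XML element with the given id, provider,
 * and agent type. */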
82 static xmlNode *
83 create_resource(const char *name, const char *provider, const char *kind)
84 {
85  xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
86 
87  crm_xml_add(rsc, XML_ATTR_ID, name);
88  crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
89  crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
90  crm_xml_add(rsc, XML_ATTR_TYPE, kind);
91 
92  return rsc;
93 }
94 
107 static bool
108 valid_network(pe__bundle_variant_data_t *data)
109 {
110  if(data->ip_range_start) {
111  return TRUE;
112  }
113  if(data->control_port) {
114  if(data->nreplicas_per_host > 1) {
115  pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
116  data->nreplicas_per_host = 1;
117  /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
118  }
119  return TRUE;
120  }
121  return FALSE;
122 }
123 
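/* If an ip-range-start is configured, create an ocf:heartbeat:IPaddr2
 * primitive (with a 60s monitor) for this replica's address and add it as a
 * child of the bundle; returns FALSE only if unpacking the new resource fails. */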
124 static bool
125 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
126  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
127 {
128  if(data->ip_range_start) {
129  char *id = NULL;
130  xmlNode *xml_ip = NULL;
131  xmlNode *xml_obj = NULL;
132 
133  id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
134  crm_xml_sanitize_id(id);
135  xml_ip = create_resource(id, "heartbeat", "IPaddr2");
136  free(id);
137 
138  xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
139  crm_xml_set_id(xml_obj, "%s-attributes-%d",
140  data->prefix, replica->offset);
141 
142  crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
143  if(data->host_network) {
144  crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
145  }
146 
147  if(data->host_netmask) {
148  crm_create_nvpair_xml(xml_obj, NULL,
149  "cidr_netmask", data->host_netmask);
150 
151  } else {
152  crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
153  }
154 
155  xml_obj = create_xml_node(xml_ip, "operations");
156  crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
157 
158  // TODO: Other ops? Timeouts and intervals from underlying resource?
159 
160  if (!common_unpack(xml_ip, &replica->ip, parent, data_set)) {
161  return FALSE;
162  }
163 
164  parent->children = g_list_append(parent->children, replica->ip);
165  }
166  return TRUE;
167 }
168 
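/* Build the ocf:heartbeat:docker primitive that runs this replica's container,
 * assembling its run_opts (hostname, network, remote port, mounts, port
 * mappings, and extra options) from the bundle configuration. */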
169 static bool
170 create_docker_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
171  pe__bundle_replica_t *replica,
172  pe_working_set_t *data_set)
173 {
174  int offset = 0, max = 4096;
175  char *buffer = calloc(1, max+1);
176 
177  int doffset = 0, dmax = 1024;
178  char *dbuffer = calloc(1, dmax+1);
179 
180  char *id = NULL;
181  xmlNode *xml_container = NULL;
182  xmlNode *xml_obj = NULL;
183 
184  id = crm_strdup_printf("%s-docker-%d", data->prefix, replica->offset);
185  crm_xml_sanitize_id(id);
186  xml_container = create_resource(id, "heartbeat",
187  PE__CONTAINER_AGENT_DOCKER_S);
188  free(id);
189 
190  xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
191  crm_xml_set_id(xml_obj, "%s-attributes-%d",
192  data->prefix, replica->offset);
193 
194  crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
195  crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
196  crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
197  crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
198 
199  offset += snprintf(buffer+offset, max-offset, " --restart=no");
200 
201  /* Set a container hostname only if we have an IP to map it to.
202  * The user can set -h or --uts=host themselves if they want a nicer
203  * name for logs, but this helps applications that need their
204  * hostname to match the IP they bind to.
205  */
206  if (data->ip_range_start != NULL) {
207  offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
208  data->prefix, replica->offset);
209  }
210 
211  offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
212 
213  if (data->container_network) {
214 #if 0
215  offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
216  replica->ipaddr);
217 #endif
218  offset += snprintf(buffer+offset, max-offset, " --net=%s",
219  data->container_network);
220  }
221 
222  if(data->control_port) {
223  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
224  } else {
225  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
226  }
227 
228  for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
229  pe__bundle_mount_t *mount = pIter->data;
230 
231  if(mount->flags) {
232  char *source = crm_strdup_printf(
233  "%s/%s-%d", mount->source, data->prefix, replica->offset);
234 
235  if(doffset > 0) {
236  doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
237  }
238  doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
239  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
240  free(source);
241 
242  } else {
243  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
244  }
245  if(mount->options) {
246  offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
247  }
248  }
249 
250  for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
251  pe__bundle_port_t *port = pIter->data;
252 
253  if (replica->ipaddr) {
254  offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
255  replica->ipaddr, port->source,
256  port->target);
257  } else if(safe_str_neq(data->container_network, "host")) {
258  // No need to do port mapping if net=host
259  offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
260  }
261  }
262 
263  if (data->launcher_options) {
264  offset += snprintf(buffer+offset, max-offset, " %s",
265  data->launcher_options);
266  }
267 
268  if (data->container_host_options) {
269  offset += snprintf(buffer + offset, max - offset, " %s",
270  data->container_host_options);
271  }
272 
273  crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
274  free(buffer);
275 
276  crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
277  free(dbuffer);
278 
279  if (replica->child) {
280  if (data->container_command) {
281  crm_create_nvpair_xml(xml_obj, NULL,
282  "run_cmd", data->container_command);
283  } else {
284  crm_create_nvpair_xml(xml_obj, NULL,
285  "run_cmd", SBIN_DIR "/pacemaker-remoted");
286  }
287 
288  /* TODO: Allow users to specify their own?
289  *
290  * We just want to know if the container is alive, we'll
291  * monitor the child independently
292  */
293  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
294  /* } else if(child && data->untrusted) {
295  * Support this use-case?
296  *
297  * The ability to have resources started/stopped by us, but
298  * unable to set attributes, etc.
299  *
300  * Arguably better to control API access to this with ACLs like
301  * "normal" remote nodes
302  *
303  * crm_create_nvpair_xml(xml_obj, NULL,
304  * "run_cmd",
305  * "/usr/libexec/pacemaker/pacemaker-execd");
306  * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
307  * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
308  */
309  } else {
310  if (data->container_command) {
311  crm_create_nvpair_xml(xml_obj, NULL,
312  "run_cmd", data->container_command);
313  }
314 
315  /* TODO: Allow users to specify their own?
316  *
317  * We don't know what's in the container, so we just want
318  * to know if it is alive
319  */
320  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
321  }
322 
323 
324  xml_obj = create_xml_node(xml_container, "operations");
325  crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
326 
327  // TODO: Other ops? Timeouts and intervals from underlying resource?
328  if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
329  return FALSE;
330  }
331  parent->children = g_list_append(parent->children, replica->container);
332  return TRUE;
333 }
334 
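/* As create_docker_resource(), but for an ocf:heartbeat:podman primitive; the
 * generated options differ only where podman differs from docker (e.g. no
 * --restart policy). */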
335 static bool
336 create_podman_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
337  pe__bundle_replica_t *replica,
338  pe_working_set_t *data_set)
339 {
340  int offset = 0, max = 4096;
341  char *buffer = calloc(1, max+1);
342 
343  int doffset = 0, dmax = 1024;
344  char *dbuffer = calloc(1, dmax+1);
345 
346  char *id = NULL;
347  xmlNode *xml_container = NULL;
348  xmlNode *xml_obj = NULL;
349 
350  id = crm_strdup_printf("%s-podman-%d", data->prefix, replica->offset);
351  crm_xml_sanitize_id(id);
352  xml_container = create_resource(id, "heartbeat",
353  PE__CONTAINER_AGENT_PODMAN_S);
354  free(id);
355 
356  xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
357  crm_xml_set_id(xml_obj, "%s-attributes-%d",
358  data->prefix, replica->offset);
359 
360  crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
361  crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
362  crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
363  crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
364 
365  // FIXME: (bandini 2018-08) podman has no restart policies
366  //offset += snprintf(buffer+offset, max-offset, " --restart=no");
367 
368  /* Set a container hostname only if we have an IP to map it to.
369  * The user can set -h or --uts=host themselves if they want a nicer
370  * name for logs, but this helps applications that need their
371  * hostname to match the IP they bind to.
372  */
373  if (data->ip_range_start != NULL) {
374  offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
375  data->prefix, replica->offset);
376  }
377 
378  offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
379 
380  if (data->container_network) {
381 #if 0
382  // podman has no support for --link-local-ip
383  offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
384  replica->ipaddr);
385 #endif
386  offset += snprintf(buffer+offset, max-offset, " --net=%s",
387  data->container_network);
388  }
389 
390  if(data->control_port) {
391  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
392  } else {
393  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
394  }
395 
396  for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
397  pe__bundle_mount_t *mount = pIter->data;
398 
399  if(mount->flags) {
400  char *source = crm_strdup_printf(
401  "%s/%s-%d", mount->source, data->prefix, replica->offset);
402 
403  if(doffset > 0) {
404  doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
405  }
406  doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
407  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
408  free(source);
409 
410  } else {
411  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
412  }
413  if(mount->options) {
414  offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
415  }
416  }
417 
418  for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
419  pe__bundle_port_t *port = pIter->data;
420 
421  if (replica->ipaddr) {
422  offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
423  replica->ipaddr, port->source,
424  port->target);
425  } else if(safe_str_neq(data->container_network, "host")) {
426  // No need to do port mapping if net=host
427  offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
428  }
429  }
430 
431  if (data->launcher_options) {
432  offset += snprintf(buffer+offset, max-offset, " %s",
433  data->launcher_options);
434  }
435 
436  if (data->container_host_options) {
437  offset += snprintf(buffer + offset, max - offset, " %s",
438  data->container_host_options);
439  }
440 
441  crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
442  free(buffer);
443 
444  crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
445  free(dbuffer);
446 
447  if (replica->child) {
448  if (data->container_command) {
449  crm_create_nvpair_xml(xml_obj, NULL,
450  "run_cmd", data->container_command);
451  } else {
452  crm_create_nvpair_xml(xml_obj, NULL,
453  "run_cmd", SBIN_DIR "/pacemaker-remoted");
454  }
455 
456  /* TODO: Allow users to specify their own?
457  *
458  * We just want to know if the container is alive, we'll
459  * monitor the child independently
460  */
461  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
462  /* } else if(child && data->untrusted) {
463  * Support this use-case?
464  *
465  * The ability to have resources started/stopped by us, but
466  * unable to set attributes, etc.
467  *
468  * Arguably better to control API access to this with ACLs like
469  * "normal" remote nodes
470  *
471  * crm_create_nvpair_xml(xml_obj, NULL,
472  * "run_cmd",
473  * "/usr/libexec/pacemaker/pacemaker-execd");
474  * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
475  * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
476  */
477  } else {
478  if (data->container_command) {
479  crm_create_nvpair_xml(xml_obj, NULL,
480  "run_cmd", data->container_command);
481  }
482 
483  /* TODO: Allow users to specify their own?
484  *
485  * We don't know what's in the container, so we just want
486  * to know if it is alive
487  */
488  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
489  }
490 
491 
492  xml_obj = create_xml_node(xml_container, "operations");
493  crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
494 
495  // TODO: Other ops? Timeouts and intervals from underlying resource?
496  if (!common_unpack(xml_container, &replica->container, parent,
497  data_set)) {
498  return FALSE;
499  }
500  parent->children = g_list_append(parent->children, replica->container);
501  return TRUE;
502 }
503 
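/* As create_docker_resource(), but for an ocf:heartbeat:rkt primitive, using
 * rkt-style --volume/--mount, --port, and --environment options. */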
504 static bool
505 create_rkt_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
506  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
507 {
508  int offset = 0, max = 4096;
509  char *buffer = calloc(1, max+1);
510 
511  int doffset = 0, dmax = 1024;
512  char *dbuffer = calloc(1, dmax+1);
513 
514  char *id = NULL;
515  xmlNode *xml_container = NULL;
516  xmlNode *xml_obj = NULL;
517 
518  int volid = 0;
519 
520  id = crm_strdup_printf("%s-rkt-%d", data->prefix, replica->offset);
521  crm_xml_sanitize_id(id);
522  xml_container = create_resource(id, "heartbeat",
523  PE__CONTAINER_AGENT_RKT_S);
524  free(id);
525 
526  xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
527  crm_xml_set_id(xml_obj, "%s-attributes-%d",
528  data->prefix, replica->offset);
529 
530  crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
531  crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
532  crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
533  crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
534 
535  /* Set a container hostname only if we have an IP to map it to.
536  * The user can set -h or --uts=host themselves if they want a nicer
537  * name for logs, but this helps applications that need their
538  * hostname to match the IP they bind to.
539  */
540  if (data->ip_range_start != NULL) {
541  offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
542  data->prefix, replica->offset);
543  }
544 
545  offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
546 
547  if (data->container_network) {
548 #if 0
549  offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s",
550  replica->ipaddr);
551 #endif
552  offset += snprintf(buffer+offset, max-offset, " --net=%s",
553  data->container_network);
554  }
555 
556  if(data->control_port) {
557  offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
558  } else {
559  offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
560  }
561 
562  for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
563  pe__bundle_mount_t *mount = pIter->data;
564 
565  if(mount->flags) {
566  char *source = crm_strdup_printf(
567  "%s/%s-%d", mount->source, data->prefix, replica->offset);
568 
569  if(doffset > 0) {
570  doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
571  }
572  doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
573  offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
574  if(mount->options) {
575  offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
576  }
577  offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
578  free(source);
579 
580  } else {
581  offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
582  if(mount->options) {
583  offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
584  }
585  offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
586  }
587  volid++;
588  }
589 
590  for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
591  pe__bundle_port_t *port = pIter->data;
592 
593  if (replica->ipaddr) {
594  offset += snprintf(buffer+offset, max-offset,
595  " --port=%s:%s:%s", port->target,
596  replica->ipaddr, port->source);
597  } else {
598  offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
599  }
600  }
601 
602  if (data->launcher_options) {
603  offset += snprintf(buffer+offset, max-offset, " %s",
604  data->launcher_options);
605  }
606 
607  if (data->container_host_options) {
608  offset += snprintf(buffer + offset, max - offset, " %s",
609  data->container_host_options);
610  }
611 
612  crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
613  free(buffer);
614 
615  crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
616  free(dbuffer);
617 
618  if (replica->child) {
619  if (data->container_command) {
620  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
621  data->container_command);
622  } else {
623  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
624  SBIN_DIR "/pacemaker-remoted");
625  }
626 
627  /* TODO: Allow users to specify their own?
628  *
629  * We just want to know if the container is alive, we'll
630  * monitor the child independently
631  */
632  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
633  /* } else if(child && data->untrusted) {
634  * Support this use-case?
635  *
636  * The ability to have resources started/stopped by us, but
637  * unable to set attributes, etc.
638  *
639  * Arguably better to control API access to this with ACLs like
640  * "normal" remote nodes
641  *
642  * crm_create_nvpair_xml(xml_obj, NULL,
643  * "run_cmd",
644  * "/usr/libexec/pacemaker/pacemaker-execd");
645  * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
646  * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
647  */
648  } else {
649  if (data->container_command) {
650  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
651  data->container_command);
652  }
653 
654  /* TODO: Allow users to specify their own?
655  *
656  * We don't know what's in the container, so we just want
657  * to know if it is alive
658  */
659  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
660  }
661 
662 
663  xml_obj = create_xml_node(xml_container, "operations");
664  crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
665 
666  // TODO: Other ops? Timeouts and intervals from underlying resource?
667 
668  if (!common_unpack(xml_container, &replica->container, parent, data_set)) {
669  return FALSE;
670  }
671  parent->children = g_list_append(parent->children, replica->container);
672  return TRUE;
673 }
674 
681 static void
682 disallow_node(resource_t *rsc, const char *uname)
683 {
684  gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
685 
686  if (match) {
687  ((pe_node_t *) match)->weight = -INFINITY;
688  ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
689  }
690  if (rsc->children) {
691  GListPtr child;
692 
693  for (child = rsc->children; child != NULL; child = child->next) {
694  disallow_node((resource_t *) (child->data), uname);
695  }
696  }
697 }
698 
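/* If this replica runs pacemaker-remoted, create its implicit
 * ocf:pacemaker:remote connection resource and the corresponding guest node,
 * ensuring no other resources are allowed to run on that node. */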
699 static bool
700 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
701  pe__bundle_replica_t *replica,
702  pe_working_set_t *data_set)
703 {
704  if (replica->child && valid_network(data)) {
705  GHashTableIter gIter;
706  GListPtr rsc_iter = NULL;
707  node_t *node = NULL;
708  xmlNode *xml_remote = NULL;
709  char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
710  char *port_s = NULL;
711  const char *uname = NULL;
712  const char *connect_name = NULL;
713 
714  if (remote_id_conflict(id, data_set)) {
715  free(id);
716  // The biggest hammer we have
717  id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
718  replica->child->id, replica->offset);
719  CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
720  }
721 
722  /* REMOTE_CONTAINER_HACK: When the connection does not have its own IP,
723  * we use the magic string "#uname" as the server name, to support
724  * nested remotes (i.e. a bundle running on a remote node).
725  */
726  connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
727 
728  if (data->control_port == NULL) {
729  port_s = crm_itoa(DEFAULT_REMOTE_PORT);
730  }
731 
732  /* This sets replica->container as replica->remote's container, which is
733  * similar to what happens with guest nodes. This is how the PE knows
734  * that the bundle node is fenced by recovering the container, and that
735  * remote should be ordered relative to the container.
736  */
737  xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
738  NULL, NULL, NULL,
739  connect_name, (data->control_port?
740  data->control_port : port_s));
741  free(port_s);
742 
743  /* Abandon our created ID, and pull the copy from the XML, because we
744  * need something that will get freed during data set cleanup to use as
745  * the node ID and uname.
746  */
747  free(id);
748  id = NULL;
749  uname = ID(xml_remote);
750 
751  /* Ensure a node has been created for the guest (it may have already
752  * been, if it has a permanent node attribute), and ensure its weight is
753  * -INFINITY so no other resources can run on it.
754  */
755  node = pe_find_node(data_set->nodes, uname);
756  if (node == NULL) {
757  node = pe_create_node(uname, uname, "remote", "-INFINITY",
758  data_set);
759  } else {
760  node->weight = -INFINITY;
761  }
763 
764  /* unpack_remote_nodes() ensures that each remote node and guest node
765  * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
766  * Unfortunately, a bundle has to be mostly unpacked before it's obvious
767  * what nodes will be needed, so we do it just above.
768  *
769  * Worse, that means that the node may have been utilized while
770  * unpacking other resources, without our weight correction. The most
771  * likely place for this to happen is when common_unpack() calls
772  * resource_location() to set a default score in symmetric clusters.
773  * This adds a node *copy* to each resource's allowed nodes, and these
774  * copies will have the wrong weight.
775  *
776  * As a hacky workaround, fix those copies here.
777  *
778  * @TODO Possible alternative: ensure bundles are unpacked before other
779  * resources, so the weight is correct before any copies are made.
780  */
781  for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
782  disallow_node((resource_t *) (rsc_iter->data), uname);
783  }
784 
785  replica->node = node_copy(node);
786  replica->node->weight = 500;
787  replica->node->rsc_discover_mode = pe_discover_exclusive;
788 
789  /* Ensure the node shows up as allowed and with the correct discovery set */
790  if (replica->child->allowed_nodes != NULL) {
791  g_hash_table_destroy(replica->child->allowed_nodes);
792  }
793  replica->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
794  g_str_equal,
795  NULL, free);
796  g_hash_table_insert(replica->child->allowed_nodes,
797  (gpointer) replica->node->details->id,
798  node_copy(replica->node));
799 
800  {
801  node_t *copy = node_copy(replica->node);
802  copy->weight = -INFINITY;
803  g_hash_table_insert(replica->child->parent->allowed_nodes,
804  (gpointer) replica->node->details->id, copy);
805  }
806  if (!common_unpack(xml_remote, &replica->remote, parent, data_set)) {
807  return FALSE;
808  }
809 
810  g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
811  while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
812  if (pe__is_guest_or_remote_node(node)) {
813  /* Remote resources can only run on 'normal' cluster nodes */
814  node->weight = -INFINITY;
815  }
816  }
817 
818  replica->node->details->remote_rsc = replica->remote;
819 
820  // Ensure pe__is_guest_node() functions correctly immediately
821  replica->remote->container = replica->container;
822 
823  /* A bundle's #kind is closer to "container" (guest node) than the
824  * "remote" set by pe_create_node().
825  */
826  g_hash_table_insert(replica->node->details->attrs,
827  strdup(CRM_ATTR_KIND), strdup("container"));
828 
829  /* One effect of this is that setup_container() will add
830  * replica->remote to replica->container's fillers, which will make
831  * pe__resource_contains_guest_node() true for replica->container.
832  *
833  * replica->child does NOT get added to replica->container's fillers.
834  * The only noticeable effect if it did would be for its fail count to
835  * be taken into account when checking replica->container's migration
836  * threshold.
837  */
838  parent->children = g_list_append(parent->children, replica->remote);
839  }
840  return TRUE;
841 }
842 
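/* Create all per-replica resources (container, optional IP address, optional
 * remote connection) for one bundle replica, and allow the remote connection
 * to be placed on a different node than the container itself. */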
843 static bool
844 create_container(pe_resource_t *parent, pe__bundle_variant_data_t *data,
845  pe__bundle_replica_t *replica, pe_working_set_t *data_set)
846 {
847 
848  switch (data->agent_type) {
849  case PE__CONTAINER_AGENT_DOCKER:
850  if (!create_docker_resource(parent, data, replica, data_set)) {
851  return FALSE;
852  }
853  break;
854 
855  case PE__CONTAINER_AGENT_PODMAN:
856  if (!create_podman_resource(parent, data, replica, data_set)) {
857  return FALSE;
858  }
859  break;
860 
861  case PE__CONTAINER_AGENT_RKT:
862  if (!create_rkt_resource(parent, data, replica, data_set)) {
863  return FALSE;
864  }
865  break;
866  default: // PE__CONTAINER_AGENT_UNKNOWN
867  return FALSE;
868  }
869 
870  if (create_ip_resource(parent, data, replica, data_set) == FALSE) {
871  return FALSE;
872  }
873  if(create_remote_resource(parent, data, replica, data_set) == FALSE) {
874  return FALSE;
875  }
876  if (replica->child && replica->ipaddr) {
877  add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
878  }
879 
880  if (replica->remote) {
881  /*
882  * Allow the remote connection resource to be allocated to a
883  * different node than the one on which the container is active.
884  *
885  * This makes it possible to have Pacemaker Remote nodes running
886  * containers with pacemaker-remoted inside in order to start
887  * services inside those containers.
888  */
889  set_bit(replica->remote->flags, pe_rsc_allow_remote_remotes);
890  }
891 
892  return TRUE;
893 }
894 
895 static void
896 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
897  const char *target, const char *options, int flags)
898 {
899  pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
900 
901  mount->source = strdup(source);
902  mount->target = strdup(target);
903  if (options) {
904  mount->options = strdup(options);
905  }
906  mount->flags = flags;
907  bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
908 }
909 
910 static void
911 mount_free(pe__bundle_mount_t *mount)
912 {
913  free(mount->source);
914  free(mount->target);
915  free(mount->options);
916  free(mount);
917 }
918 
919 static void
920 port_free(pe__bundle_port_t *port)
921 {
922  free(port->source);
923  free(port->target);
924  free(port);
925 }
926 
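/* Find the bundle replica whose remote connection resource is the given
 * remote, by walking up to the bundle and scanning its replicas. */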
927 static pe__bundle_replica_t *
928 replica_for_remote(pe_resource_t *remote)
929 {
930  resource_t *top = remote;
931  pe__bundle_variant_data_t *bundle_data = NULL;
932 
933  if (top == NULL) {
934  return NULL;
935  }
936 
937  while (top->parent != NULL) {
938  top = top->parent;
939  }
940 
941  get_bundle_variant_data(bundle_data, top);
942  for (GList *gIter = bundle_data->replicas; gIter != NULL;
943  gIter = gIter->next) {
944  pe__bundle_replica_t *replica = gIter->data;
945 
946  if (replica->remote == remote) {
947  return replica;
948  }
949  }
950  CRM_LOG_ASSERT(FALSE);
951  return NULL;
952 }
953 
954 bool
955 pe__bundle_needs_remote_name(pe_resource_t *rsc)
956 {
957  const char *value;
958 
959  if (rsc == NULL) {
960  return FALSE;
961  }
962 
963  value = g_hash_table_lookup(rsc->parameters, XML_RSC_ATTR_REMOTE_RA_ADDR);
964  if (safe_str_eq(value, "#uname") == FALSE) {
965  return FALSE;
966 
967  } else {
968  const char *match[3][2] = {
969  { XML_ATTR_TYPE, "remote" },
970  { XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF },
971  { XML_AGENT_ATTR_PROVIDER, "pacemaker" },
972  };
973 
974  for (int m = 0; m < 3; m++) {
975  value = crm_element_value(rsc->xml, match[m][0]);
976  if (safe_str_neq(value, match[m][1])) {
977  return FALSE;
978  }
979  }
980  }
981  return TRUE;
982 }
983 
984 const char *
985 pe__add_bundle_remote_name(pe_resource_t *rsc, xmlNode *xml, const char *field)
986 {
987  // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
988 
989  pe_node_t *node = NULL;
990  pe__bundle_replica_t *replica = NULL;
991 
992  if (!pe__bundle_needs_remote_name(rsc)) {
993  return NULL;
994  }
995 
996  replica = replica_for_remote(rsc);
997  if (replica == NULL) {
998  return NULL;
999  }
1000 
1001  node = replica->container->allocated_to;
1002  if (node == NULL) {
1003  /* If it won't be running anywhere after the
1004  * transition, go with where it's running now.
1005  */
1006  node = pe__current_node(replica->container);
1007  }
1008 
1009  if(node == NULL) {
1010  crm_trace("Cannot determine address for bundle connection %s", rsc->id);
1011  return NULL;
1012  }
1013 
1014  crm_trace("Setting address for bundle connection %s to bundle host %s",
1015  rsc->id, node->details->uname);
1016  if(xml != NULL && field != NULL) {
1017  crm_xml_add(xml, field, node->details->uname);
1018  }
1019 
1020  return node->details->uname;
1021 }
1022 
1023 gboolean
1024 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
1025 {
1026  const char *value = NULL;
1027  xmlNode *xml_obj = NULL;
1028  xmlNode *xml_resource = NULL;
1029  pe__bundle_variant_data_t *bundle_data = NULL;
1030 
1031  CRM_ASSERT(rsc != NULL);
1032  pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
1033 
1034  bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
1035  rsc->variant_opaque = bundle_data;
1036  bundle_data->prefix = strdup(rsc->id);
1037 
1038  xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
1039  if (xml_obj != NULL) {
1040  bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
1041  } else {
1042  xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
1043  if (xml_obj != NULL) {
1044  bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
1045  } else {
1046  xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
1047  if (xml_obj != NULL) {
1048  bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
1049  } else {
1050  return FALSE;
1051  }
1052  }
1053  }
1054 
1055  value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
1056  if (value == NULL) {
1057  // @COMPAT deprecated since 2.0.0
1058  value = crm_element_value(xml_obj, "masters");
1059  }
1060  bundle_data->promoted_max = crm_parse_int(value, "0");
1061  if (bundle_data->promoted_max < 0) {
1062  pe_err("%s for %s must be nonnegative integer, using 0",
1063  XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
1064  bundle_data->promoted_max = 0;
1065  }
1066 
1067  value = crm_element_value(xml_obj, "replicas");
1068  if ((value == NULL) && bundle_data->promoted_max) {
1069  bundle_data->nreplicas = bundle_data->promoted_max;
1070  } else {
1071  bundle_data->nreplicas = crm_parse_int(value, "1");
1072  }
1073  if (bundle_data->nreplicas < 1) {
1074  pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
1075  bundle_data->nreplicas = 1;
1076  }
1077 
1078  /*
1079  * Communication between containers on the same host via the
1080  * floating IPs only works if the container is started with:
1081  * --userland-proxy=false --ip-masq=false
1082  */
1083  value = crm_element_value(xml_obj, "replicas-per-host");
1084  bundle_data->nreplicas_per_host = crm_parse_int(value, "1");
1085  if (bundle_data->nreplicas_per_host < 1) {
1086  pe_err("'replicas-per-host' for %s must be positive integer, using 1",
1087  rsc->id);
1088  bundle_data->nreplicas_per_host = 1;
1089  }
1090  if (bundle_data->nreplicas_per_host == 1) {
1091  clear_bit(rsc->flags, pe_rsc_unique);
1092  }
1093 
1094  bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
1095  bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
1096  bundle_data->image = crm_element_value_copy(xml_obj, "image");
1097  bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
1098 
1099  xml_obj = first_named_child(rsc->xml, "network");
1100  if(xml_obj) {
1101 
1102  bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
1103  bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
1104  bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
1105  bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
1106  value = crm_element_value(xml_obj, "add-host");
1107  if (check_boolean(value) == FALSE) {
1108  bundle_data->add_host = TRUE;
1109  } else {
1110  crm_str_to_boolean(value, &bundle_data->add_host);
1111  }
1112 
1113  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
1114  xml_child = __xml_next_element(xml_child)) {
1115 
1116  pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
1117  port->source = crm_element_value_copy(xml_child, "port");
1118 
1119  if(port->source == NULL) {
1120  port->source = crm_element_value_copy(xml_child, "range");
1121  } else {
1122  port->target = crm_element_value_copy(xml_child, "internal-port");
1123  }
1124 
1125  if(port->source != NULL && strlen(port->source) > 0) {
1126  if(port->target == NULL) {
1127  port->target = strdup(port->source);
1128  }
1129  bundle_data->ports = g_list_append(bundle_data->ports, port);
1130 
1131  } else {
1132  pe_err("Invalid port directive %s", ID(xml_child));
1133  port_free(port);
1134  }
1135  }
1136  }
1137 
1138  xml_obj = first_named_child(rsc->xml, "storage");
1139  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
1140  xml_child = __xml_next_element(xml_child)) {
1141 
1142  const char *source = crm_element_value(xml_child, "source-dir");
1143  const char *target = crm_element_value(xml_child, "target-dir");
1144  const char *options = crm_element_value(xml_child, "options");
1145  int flags = 0;
1146 
1147  if (source == NULL) {
1148  source = crm_element_value(xml_child, "source-dir-root");
1149  flags = 1;
1150  }
1151 
1152  if (source && target) {
1153  mount_add(bundle_data, source, target, options, flags);
1154  } else {
1155  pe_err("Invalid mount directive %s", ID(xml_child));
1156  }
1157  }
1158 
1159  xml_obj = first_named_child(rsc->xml, "primitive");
1160  if (xml_obj && valid_network(bundle_data)) {
1161  char *value = NULL;
1162  xmlNode *xml_set = NULL;
1163 
1164  xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
1165 
1166  /* @COMPAT We no longer use the <master> tag, but we need to keep it as
1167  * part of the resource name, so that bundles don't restart in a rolling
1168  * upgrade. (It also avoids needing to change regression tests.)
1169  */
1170  crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
1171  (bundle_data->promoted_max? "master"
1172  : (const char *)xml_resource->name));
1173 
1174  xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
1175  crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
1176 
1177  crm_create_nvpair_xml(xml_set, NULL,
1178  XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
1179 
1180  value = crm_itoa(bundle_data->nreplicas);
1181  crm_create_nvpair_xml(xml_set, NULL,
1182  XML_RSC_ATTR_INCARNATION_MAX, value);
1183  free(value);
1184 
1185  value = crm_itoa(bundle_data->nreplicas_per_host);
1186  crm_create_nvpair_xml(xml_set, NULL,
1187  XML_RSC_ATTR_INCARNATION_NODEMAX, value);
1188  free(value);
1189 
1190  crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
1191  (bundle_data->nreplicas_per_host > 1)?
1192  XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
1193 
1194  if (bundle_data->promoted_max) {
1195  crm_create_nvpair_xml(xml_set, NULL,
1196  XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
1197 
1198  value = crm_itoa(bundle_data->promoted_max);
1199  crm_create_nvpair_xml(xml_set, NULL,
1200  XML_RSC_ATTR_PROMOTED_MAX, value);
1201  free(value);
1202  }
1203 
1204  //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
1205  add_node_copy(xml_resource, xml_obj);
1206 
1207  } else if(xml_obj) {
1208  pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
1209  rsc->id, ID(xml_obj));
1210  return FALSE;
1211  }
1212 
1213  if(xml_resource) {
1214  int lpc = 0;
1215  GListPtr childIter = NULL;
1216  resource_t *new_rsc = NULL;
1217  pe__bundle_port_t *port = NULL;
1218 
1219  int offset = 0, max = 1024;
1220  char *buffer = NULL;
1221 
1222  if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
1223  pe_err("Failed unpacking resource %s", ID(rsc->xml));
1224  if (new_rsc != NULL && new_rsc->fns != NULL) {
1225  new_rsc->fns->free(new_rsc);
1226  }
1227  return FALSE;
1228  }
1229 
1230  bundle_data->child = new_rsc;
1231 
1232  /* Currently, we always map the default authentication key location
1233  * into the same location inside the container.
1234  *
1235  * Ideally, we would respect the host's PCMK_authkey_location, but:
1236  * - it may be different on different nodes;
1237  * - the actual connection will do extra checking to make sure the key
1238  * file exists and is readable, which we can't do here on the DC;
1239  * - tools such as crm_resource and crm_simulate may not have the same
1240  * environment variables as the cluster, causing operation digests to
1241  * differ
1242  *
1243  * Always using the default location inside the container is fine,
1244  * because we control the pacemaker_remote environment, and it avoids
1245  * having to pass another environment variable to the container.
1246  *
1247  * @TODO A better solution may be to have only pacemaker_remote use the
1248  * environment variable, and have the cluster nodes use a new
1249  * cluster option for key location. This would introduce the limitation
1250  * of the location being the same on all cluster nodes, but that's
1251  * reasonable.
1252  */
1253  mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
1254  DEFAULT_REMOTE_KEY_LOCATION, NULL, 0);
1255 
1256  mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL, 1);
1257 
1258  port = calloc(1, sizeof(pe__bundle_port_t));
1259  if(bundle_data->control_port) {
1260  port->source = strdup(bundle_data->control_port);
1261  } else {
1262  /* If we wanted to respect PCMK_remote_port, we could use
1263  * crm_default_remote_port() here and elsewhere in this file instead
1264  * of DEFAULT_REMOTE_PORT.
1265  *
1266  * However, it gains nothing, since we control both the container
1267  * environment and the connection resource parameters, and the user
1268  * can use a different port if desired by setting control-port.
1269  */
1270  port->source = crm_itoa(DEFAULT_REMOTE_PORT);
1271  }
1272  port->target = strdup(port->source);
1273  bundle_data->ports = g_list_append(bundle_data->ports, port);
1274 
1275  buffer = calloc(1, max+1);
1276  for (childIter = bundle_data->child->children; childIter != NULL;
1277  childIter = childIter->next) {
1278 
1279  pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
1280 
1281  replica->child = childIter->data;
1282  replica->child->exclusive_discover = TRUE;
1283  replica->offset = lpc++;
1284 
1285  // Ensure the child's notify gets set based on the underlying primitive's value
1286  if (is_set(replica->child->flags, pe_rsc_notify)) {
1287  set_bit(bundle_data->child->flags, pe_rsc_notify);
1288  }
1289 
1290  offset += allocate_ip(bundle_data, replica, buffer+offset,
1291  max-offset);
1292  bundle_data->replicas = g_list_append(bundle_data->replicas,
1293  replica);
1294  bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
1295  XML_RSC_ATTR_TARGET);
1296  }
1297  bundle_data->container_host_options = buffer;
1298  if (bundle_data->attribute_target) {
1299  g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
1300  strdup(bundle_data->attribute_target));
1301  g_hash_table_replace(bundle_data->child->meta,
1302  strdup(XML_RSC_ATTR_TARGET),
1303  strdup(bundle_data->attribute_target));
1304  }
1305 
1306  } else {
1307  // Just a naked container, no pacemaker-remote
1308  int offset = 0, max = 1024;
1309  char *buffer = calloc(1, max+1);
1310 
1311  for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
1312  pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
1313 
1314  replica->offset = lpc;
1315  offset += allocate_ip(bundle_data, replica, buffer+offset,
1316  max-offset);
1317  bundle_data->replicas = g_list_append(bundle_data->replicas,
1318  replica);
1319  }
1320  bundle_data->container_host_options = buffer;
1321  }
1322 
1323  for (GList *gIter = bundle_data->replicas; gIter != NULL;
1324  gIter = gIter->next) {
1325  pe__bundle_replica_t *replica = gIter->data;
1326 
1327  if (!create_container(rsc, bundle_data, replica, data_set)) {
1328  pe_err("Failed unpacking resource %s", rsc->id);
1329  rsc->fns->free(rsc);
1330  return FALSE;
1331  }
1332  }
1333 
1334  if (bundle_data->child) {
1335  rsc->children = g_list_append(rsc->children, bundle_data->child);
1336  }
1337  return TRUE;
1338 }
1339 
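/* Check one member of a replica: returns TRUE or FALSE when that member alone
 * decides the bundle's answer for this "all" mode, or -1 when the caller must
 * keep checking the remaining members. */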
1340 static int
1341 replica_resource_active(pe_resource_t *rsc, gboolean all)
1342 {
1343  if (rsc) {
1344  gboolean child_active = rsc->fns->active(rsc, all);
1345 
1346  if (child_active && !all) {
1347  return TRUE;
1348  } else if (!child_active && all) {
1349  return FALSE;
1350  }
1351  }
1352  return -1;
1353 }
1354 
1355 gboolean
1356 pe__bundle_active(pe_resource_t *rsc, gboolean all)
1357 {
1358  pe__bundle_variant_data_t *bundle_data = NULL;
1359  GListPtr iter = NULL;
1360 
1361  get_bundle_variant_data(bundle_data, rsc);
1362  for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
1363  pe__bundle_replica_t *replica = iter->data;
1364  int rsc_active;
1365 
1366  rsc_active = replica_resource_active(replica->ip, all);
1367  if (rsc_active >= 0) {
1368  return (gboolean) rsc_active;
1369  }
1370 
1371  rsc_active = replica_resource_active(replica->child, all);
1372  if (rsc_active >= 0) {
1373  return (gboolean) rsc_active;
1374  }
1375 
1376  rsc_active = replica_resource_active(replica->container, all);
1377  if (rsc_active >= 0) {
1378  return (gboolean) rsc_active;
1379  }
1380 
1381  rsc_active = replica_resource_active(replica->remote, all);
1382  if (rsc_active >= 0) {
1383  return (gboolean) rsc_active;
1384  }
1385  }
1386 
1387  /* If "all" is TRUE, we've already checked that no resources were inactive,
1388  * so return TRUE; if "all" is FALSE, we didn't find any active resources,
1389  * so return FALSE.
1390  */
1391  return all;
1392 }
1393 
1403 pe_resource_t *
1404 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
1405 {
1406  pe__bundle_variant_data_t *bundle_data = NULL;
1407  CRM_ASSERT(bundle && node);
1408 
1409  get_bundle_variant_data(bundle_data, bundle);
1410  for (GList *gIter = bundle_data->replicas; gIter != NULL;
1411  gIter = gIter->next) {
1412  pe__bundle_replica_t *replica = gIter->data;
1413 
1414  CRM_ASSERT(replica && replica->node);
1415  if (replica->node->details == node->details) {
1416  return replica->child;
1417  }
1418  }
1419  return NULL;
1420 }
1421 
1422 static void
1423 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
1424  void *print_data)
1425 {
1426  if (rsc != NULL) {
1427  if (options & pe_print_html) {
1428  status_print("<li>");
1429  }
1430  rsc->fns->print(rsc, pre_text, options, print_data);
1431  if (options & pe_print_html) {
1432  status_print("</li>\n");
1433  }
1434  }
1435 }
1436 
1437 static const char*
1438 container_agent_str(enum pe__container_agent t)
1439 {
1440  switch (t) {
1441  case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
1442  case PE__CONTAINER_AGENT_RKT: return PE__CONTAINER_AGENT_RKT_S;
1443  case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
1444  default: // PE__CONTAINER_AGENT_UNKNOWN
1445  break;
1446  }
1447  return PE__CONTAINER_AGENT_UNKNOWN_S;
1448 }
1449 
1450 static void
1451 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
1452  void *print_data)
1453 {
1454  pe__bundle_variant_data_t *bundle_data = NULL;
1455  char *child_text = NULL;
1456  CRM_CHECK(rsc != NULL, return);
1457 
1458  if (pre_text == NULL) {
1459  pre_text = "";
1460  }
1461  child_text = crm_concat(pre_text, " ", ' ');
1462 
1463  get_bundle_variant_data(bundle_data, rsc);
1464 
1465  status_print("%s<bundle ", pre_text);
1466  status_print("id=\"%s\" ", rsc->id);
1467  status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
1468  status_print("image=\"%s\" ", bundle_data->image);
1469  status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
1470  status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
1471  status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
1472  status_print(">\n");
1473 
1474  for (GList *gIter = bundle_data->replicas; gIter != NULL;
1475  gIter = gIter->next) {
1476  pe__bundle_replica_t *replica = gIter->data;
1477 
1478  CRM_ASSERT(replica);
1479  status_print("%s <replica id=\"%d\">\n", pre_text, replica->offset);
1480  print_rsc_in_list(replica->ip, child_text, options, print_data);
1481  print_rsc_in_list(replica->child, child_text, options, print_data);
1482  print_rsc_in_list(replica->container, child_text, options, print_data);
1483  print_rsc_in_list(replica->remote, child_text, options, print_data);
1484  status_print("%s </replica>\n", pre_text);
1485  }
1486  status_print("%s</bundle>\n", pre_text);
1487  free(child_text);
1488 }
1489 
1490 static void
1491 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
1492  long options, void *print_data)
1493 {
1494  node_t *node = NULL;
1495  pe_resource_t *rsc = replica->child;
1496 
1497  int offset = 0;
1498  char buffer[LINE_MAX];
1499 
1500  if(rsc == NULL) {
1501  rsc = replica->container;
1502  }
1503 
1504  if (replica->remote) {
1505  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
1506  rsc_printable_id(replica->remote));
1507  } else {
1508  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
1509  rsc_printable_id(replica->container));
1510  }
1511  if (replica->ipaddr) {
1512  offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
1513  replica->ipaddr);
1514  }
1515 
1516  node = pe__current_node(replica->container);
1517  common_print(rsc, pre_text, buffer, node, options, print_data);
1518 }
1519 
1520 void
1521 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
1522  void *print_data)
1523 {
1524  pe__bundle_variant_data_t *bundle_data = NULL;
1525  char *child_text = NULL;
1526  CRM_CHECK(rsc != NULL, return);
1527 
1528  if (options & pe_print_xml) {
1529  bundle_print_xml(rsc, pre_text, options, print_data);
1530  return;
1531  }
1532 
1533  get_bundle_variant_data(bundle_data, rsc);
1534 
1535  if (pre_text == NULL) {
1536  pre_text = " ";
1537  }
1538 
1539  status_print("%sContainer bundle%s: %s [%s]%s%s\n",
1540  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
1541  rsc->id, bundle_data->image,
1542  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
1543  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
1544  if (options & pe_print_html) {
1545  status_print("<br />\n<ul>\n");
1546  }
1547 
1548 
1549  for (GList *gIter = bundle_data->replicas; gIter != NULL;
1550  gIter = gIter->next) {
1551  pe__bundle_replica_t *replica = gIter->data;
1552 
1553  CRM_ASSERT(replica);
1554  if (options & pe_print_html) {
1555  status_print("<li>");
1556  }
1557 
1558  if (is_set(options, pe_print_implicit)) {
1559  child_text = crm_strdup_printf(" %s", pre_text);
1560  if(g_list_length(bundle_data->replicas) > 1) {
1561  status_print(" %sReplica[%d]\n", pre_text, replica->offset);
1562  }
1563  if (options & pe_print_html) {
1564  status_print("<br />\n<ul>\n");
1565  }
1566  print_rsc_in_list(replica->ip, child_text, options, print_data);
1567  print_rsc_in_list(replica->container, child_text, options, print_data);
1568  print_rsc_in_list(replica->remote, child_text, options, print_data);
1569  print_rsc_in_list(replica->child, child_text, options, print_data);
1570  if (options & pe_print_html) {
1571  status_print("</ul>\n");
1572  }
1573  } else {
1574  child_text = crm_strdup_printf("%s ", pre_text);
1575  print_bundle_replica(replica, child_text, options, print_data);
1576  }
1577  free(child_text);
1578 
1579  if (options & pe_print_html) {
1580  status_print("</li>\n");
1581  }
1582  }
1583  if (options & pe_print_html) {
1584  status_print("</ul>\n");
1585  }
1586 }
1587 
1588 static void
1589 free_bundle_replica(pe__bundle_replica_t *replica)
1590 {
1591  if (replica == NULL) {
1592  return;
1593  }
1594 
1595  if (replica->node) {
1596  free(replica->node);
1597  replica->node = NULL;
1598  }
1599 
1600  if (replica->ip) {
1601  free_xml(replica->ip->xml);
1602  replica->ip->xml = NULL;
1603  replica->ip->fns->free(replica->ip);
1604  replica->ip = NULL;
1605  }
1606  if (replica->container) {
1607  free_xml(replica->container->xml);
1608  replica->container->xml = NULL;
1609  replica->container->fns->free(replica->container);
1610  replica->container = NULL;
1611  }
1612  if (replica->remote) {
1613  free_xml(replica->remote->xml);
1614  replica->remote->xml = NULL;
1615  replica->remote->fns->free(replica->remote);
1616  replica->remote = NULL;
1617  }
1618  free(replica->ipaddr);
1619  free(replica);
1620 }
1621 
1622 void
1623 pe__free_bundle(pe_resource_t *rsc)
1624 {
1625  pe__bundle_variant_data_t *bundle_data = NULL;
1626  CRM_CHECK(rsc != NULL, return);
1627 
1628  get_bundle_variant_data(bundle_data, rsc);
1629  pe_rsc_trace(rsc, "Freeing %s", rsc->id);
1630 
1631  free(bundle_data->prefix);
1632  free(bundle_data->image);
1633  free(bundle_data->control_port);
1634  free(bundle_data->host_network);
1635  free(bundle_data->host_netmask);
1636  free(bundle_data->ip_range_start);
1637  free(bundle_data->container_network);
1638  free(bundle_data->launcher_options);
1639  free(bundle_data->container_command);
1640  free(bundle_data->container_host_options);
1641 
1642  g_list_free_full(bundle_data->replicas,
1643  (GDestroyNotify) free_bundle_replica);
1644  g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
1645  g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
1646  g_list_free(rsc->children);
1647 
1648  if(bundle_data->child) {
1649  free_xml(bundle_data->child->xml);
1650  bundle_data->child->xml = NULL;
1651  bundle_data->child->fns->free(bundle_data->child);
1652  }
1653  common_free(rsc);
1654 }
1655 
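/* Bundles have no role of their own, so the bundle's state is always reported
 * as RSC_ROLE_UNKNOWN regardless of the "current" flag. */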
1656 enum rsc_role_e
1657 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
1658 {
1659  enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
1660  return container_role;
1661 }
1662 
1670 int
1671 pe_bundle_replicas(const pe_resource_t *rsc)
1672 {
1673  if ((rsc == NULL) || (rsc->variant != pe_container)) {
1674  return 0;
1675  } else {
1676  pe__bundle_variant_data_t *bundle_data = NULL;
1677 
1678  get_bundle_variant_data(bundle_data, rsc);
1679  return bundle_data->nreplicas;
1680  }
1681 }