pacemaker  1.1.18-7fdfbbe
Scalable High-Availability cluster resource manager
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
container.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <crm_internal.h>
20 
21 #include <ctype.h>
22 
23 #include <crm/pengine/rules.h>
24 #include <crm/pengine/status.h>
25 #include <crm/pengine/internal.h>
26 #include <unpack.h>
27 #include <crm/msg_xml.h>
28 
29 #define VARIANT_CONTAINER 1
30 #include "./variant.h"
31 
32 void tuple_free(container_grouping_t *tuple);
33 
/*!
 * \internal
 * \brief Compute the IPv4 address one step after \p last_ip
 *
 * Parses a dotted-quad address and increments the final octet, rolling
 * over into the next /24 when the fourth octet passes 253 (the values
 * 0, 254 and 255 are never produced for the final octet).
 *
 * \param[in] last_ip  Previously allocated dotted-quad IPv4 address
 *
 * \return Newly allocated address string, or NULL if \p last_ip could not
 *         be parsed or the usable range is exhausted
 */
static char *
next_ip(const char *last_ip)
{
    unsigned int oct1 = 0, oct2 = 0, oct3 = 0, oct4 = 0;

    if (sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4) != 4) {
        /*@ TODO check for IPv6 */
        return NULL;
    }

    if (oct3 > 253) {
        // Third octet already at its limit: no more addresses available
        return NULL;
    }

    if (oct4 > 253) {
        // Roll into the next /24, restarting at .1
        ++oct3;
        oct4 = 1;
    } else {
        ++oct4;
    }

    return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}
60 
61 static int
62 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max)
63 {
64  if(data->ip_range_start == NULL) {
65  return 0;
66 
67  } else if(data->ip_last) {
68  tuple->ipaddr = next_ip(data->ip_last);
69 
70  } else {
71  tuple->ipaddr = strdup(data->ip_range_start);
72  }
73 
74  data->ip_last = tuple->ipaddr;
75 #if 0
76  return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
77  data->prefix, tuple->offset, tuple->ipaddr,
78  data->prefix, tuple->offset, data->prefix, tuple->offset);
79 #else
80  if (data->type == PE_CONTAINER_TYPE_DOCKER) {
81  return snprintf(buffer, max, " --add-host=%s-%d:%s",
82  data->prefix, tuple->offset, tuple->ipaddr);
83  } else if (data->type == PE_CONTAINER_TYPE_RKT) {
84  return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
85  tuple->ipaddr, data->prefix, tuple->offset);
86  } else {
87  return 0;
88  }
89 #endif
90 }
91 
/*!
 * \internal
 * \brief Create a bare XML node for an implicitly generated resource
 *
 * \param[in] name      Resource ID to set
 * \param[in] provider  Agent provider (e.g. "heartbeat")
 * \param[in] kind      Agent type (e.g. "docker", "IPaddr2")
 *
 * \return Newly created <primitive> XML node (caller assumes ownership)
 */
static xmlNode *
create_resource(const char *name, const char *provider, const char *kind)
{
    xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);

    crm_xml_add(rsc, XML_ATTR_ID, name);
    crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
    crm_xml_add(rsc, XML_ATTR_TYPE, kind);

    return rsc;
}
104 
117 static bool
118 valid_network(container_variant_data_t *data)
119 {
120  if(data->ip_range_start) {
121  return TRUE;
122  }
123  if(data->control_port) {
124  if(data->replicas_per_host > 1) {
125  pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
126  data->replicas_per_host = 1;
127  /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
128  }
129  return TRUE;
130  }
131  return FALSE;
132 }
133 
134 static bool
135 create_ip_resource(
136  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
137  pe_working_set_t * data_set)
138 {
139  if(data->ip_range_start) {
140  char *id = NULL;
141  xmlNode *xml_ip = NULL;
142  xmlNode *xml_obj = NULL;
143 
144  id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
146  xml_ip = create_resource(id, "heartbeat", "IPaddr2");
147  free(id);
148 
149  xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
150  crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
151 
152  crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
153  if(data->host_network) {
154  crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
155  }
156 
157  if(data->host_netmask) {
158  crm_create_nvpair_xml(xml_obj, NULL,
159  "cidr_netmask", data->host_netmask);
160 
161  } else {
162  crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
163  }
164 
165  xml_obj = create_xml_node(xml_ip, "operations");
166  crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
167 
168  // TODO: Other ops? Timeouts and intervals from underlying resource?
169 
170  crm_log_xml_trace(xml_ip, "Container-ip");
171  if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) {
172  return FALSE;
173  }
174 
175  parent->children = g_list_append(parent->children, tuple->ip);
176  }
177  return TRUE;
178 }
179 
/*!
 * \internal
 * \brief Create the implicit ocf:heartbeat:docker resource for a replica
 *
 * Builds the docker run options string (and the list of per-replica mount
 * points) from the bundle configuration, creates the XML for the docker
 * primitive, unpacks it, and adds it as a child of \p parent.
 *
 * \param[in,out] parent    Bundle resource the new primitive is added to
 * \param[in]     data      Bundle variant data
 * \param[in,out] tuple     Replica that receives the docker resource
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, FALSE if unpacking the new resource failed
 *
 * \note The order of the snprintf() calls below defines the exact layout
 *       of the generated "run_opts" string; do not reorder them.
 */
static bool
create_docker_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    // buffer accumulates docker run options; dbuffer the mount-point list
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "docker");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);

    // The cluster (not docker) is responsible for restarting the container
    offset += snprintf(buffer+offset, max-offset, " --restart=no");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            // Per-replica source directory: append "<prefix>-<offset>" to it
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
        }
        if(mount->options) {
            offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
        }
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            // Bind the published port to this replica's floating IP
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                               tuple->ipaddr, port->source, port->target);
        } else if(safe_str_neq(data->docker_network, "host")) {
            // No need to do port mapping if net=host
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        // Bundled child: run pacemaker_remoted (or a custom command) inside
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", SBIN_DIR "/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?
    crm_log_xml_trace(xml_docker, "Container-docker");
    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
336 
/*!
 * \internal
 * \brief Create the implicit ocf:heartbeat:rkt resource for a replica
 *
 * rkt counterpart of create_docker_resource(): builds the rkt run options
 * string (volumes use rkt's --volume/--mount pairing with generated volume
 * IDs) and the per-replica mount-point list, creates and unpacks the rkt
 * primitive, and adds it as a child of \p parent.
 *
 * \param[in,out] parent    Bundle resource the new primitive is added to
 * \param[in]     data      Bundle variant data
 * \param[in,out] tuple     Replica that receives the rkt resource
 * \param[in,out] data_set  Cluster working set
 *
 * \return TRUE on success, FALSE if unpacking the new resource failed
 *
 * \note The order of the snprintf() calls below defines the exact layout
 *       of the generated "run_opts" string; do not reorder them.
 */
static bool
create_rkt_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    // buffer accumulates rkt run options; dbuffer the mount-point list
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    // Sequential ID used to pair each --volume with its --mount
    int volid = 0;

    id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "rkt");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                           data->prefix, tuple->offset);
    }

    offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            // Per-replica source directory: append "<prefix>-<offset>" to it
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
        }
        volid++;
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            // Bind the published port to this replica's floating IP
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                               port->target, tuple->ipaddr, port->source);
        } else {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
        }
    }

    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        // Bundled child: run pacemaker_remoted (or a custom command) inside
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR"/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                  data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?

    crm_log_xml_trace(xml_docker, "Container-rkt");
    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
497 
504 static void
505 disallow_node(resource_t *rsc, const char *uname)
506 {
507  gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
508 
509  if (match) {
510  ((pe_node_t *) match)->weight = -INFINITY;
511  ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
512  }
513  if (rsc->children) {
514  GListPtr child;
515 
516  for (child = rsc->children; child != NULL; child = child->next) {
517  disallow_node((resource_t *) (child->data), uname);
518  }
519  }
520 }
521 
/*!
 * \internal
 * \brief Create the implicit remote-connection resource for a bundle replica
 *
 * When the bundle hosts a child resource and has a usable network
 * configuration, create a pacemaker-remote connection resource for the
 * replica, create (or adjust) the corresponding guest node entry, and add
 * the connection as a child of \p parent. No-op (successful) otherwise.
 *
 * \param[in,out] parent    Bundle resource the connection is added to
 * \param[in,out] data      Bundle variant data
 * \param[in,out] tuple     Replica that receives the remote connection
 * \param[in,out] data_set  Cluster working set (nodes may be added)
 *
 * \return TRUE on success, FALSE if unpacking the new resource failed
 */
static bool
create_remote_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if (tuple->child && valid_network(data)) {
        GHashTableIter gIter;
        GListPtr rsc_iter = NULL;
        node_t *node = NULL;
        xmlNode *xml_remote = NULL;
        char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
        char *port_s = NULL;
        const char *uname = NULL;
        const char *connect_name = NULL;

        if (remote_id_conflict(id, data_set)) {
            free(id);
            // The biggest hammer we have
            id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
            CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
        }

        /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
         * connection does not have its own IP is a magic string that we use to
         * support nested remotes (i.e. a bundle running on a remote node).
         */
        connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");

        if (data->control_port == NULL) {
            port_s = crm_itoa(DEFAULT_REMOTE_PORT);
        }

        /* This sets tuple->docker as tuple->remote's container, which is
         * similar to what happens with guest nodes. This is how the PE knows
         * that the bundle node is fenced by recovering docker, and that
         * remote should be ordered relative to docker.
         */
        xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                          XML_BOOLEAN_FALSE, NULL, "60s", NULL,
                                          NULL, connect_name,
                                          (data->control_port?
                                           data->control_port : port_s));
        free(port_s);

        /* Abandon our created ID, and pull the copy from the XML, because we
         * need something that will get freed during data set cleanup to use as
         * the node ID and uname.
         */
        free(id);
        id = NULL;
        uname = ID(xml_remote);

        /* Ensure a node has been created for the guest (it may have already
         * been, if it has a permanent node attribute), and ensure its weight is
         * -INFINITY so no other resources can run on it.
         */
        node = pe_find_node(data_set->nodes, uname);
        if (node == NULL) {
            node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                  data_set);
        } else {
            node->weight = -INFINITY;
        }

        /* unpack_remote_nodes() ensures that each remote node and guest node
         * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
         * Unfortunately, a bundle has to be mostly unpacked before it's obvious
         * what nodes will be needed, so we do it just above.
         *
         * Worse, that means that the node may have been utilized while
         * unpacking other resources, without our weight correction. The most
         * likely place for this to happen is when common_unpack() calls
         * resource_location() to set a default score in symmetric clusters.
         * This adds a node *copy* to each resource's allowed nodes, and these
         * copies will have the wrong weight.
         *
         * As a hacky workaround, fix those copies here.
         *
         * @TODO Possible alternative: ensure bundles are unpacked before other
         * resources, so the weight is correct before any copies are made.
         */
        for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
            disallow_node((resource_t *) (rsc_iter->data), uname);
        }

        // The replica's own copy prefers this node and uses exclusive probing
        tuple->node = node_copy(node);
        tuple->node->weight = 500;
        tuple->node->rsc_discover_mode = pe_discover_exclusive;

        /* Ensure the node shows up as allowed and with the correct discovery set */
        g_hash_table_destroy(tuple->child->allowed_nodes);
        tuple->child->allowed_nodes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
        g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));

        {
            // The child's parent (the bundle clone) must NOT run on the guest
            node_t *copy = node_copy(tuple->node);
            copy->weight = -INFINITY;
            g_hash_table_insert(tuple->child->parent->allowed_nodes, (gpointer) tuple->node->details->id, copy);
        }
        crm_log_xml_trace(xml_remote, "Container-remote");
        if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
            return FALSE;
        }

        g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
        while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
            if(is_remote_node(node)) {
                /* Remote resources can only run on 'normal' cluster node */
                node->weight = -INFINITY;
            }
        }

        tuple->node->details->remote_rsc = tuple->remote;
        tuple->remote->container = tuple->docker; // Ensures is_container_remote_node() functions correctly immediately

        /* A bundle's #kind is closer to "container" (guest node) than the
         * "remote" set by pe_create_node().
         */
        g_hash_table_insert(tuple->node->details->attrs,
                            strdup(CRM_ATTR_KIND), strdup("container"));

        /* One effect of this is that setup_container() will add
         * tuple->remote to tuple->docker's fillers, which will make
         * rsc_contains_remote_node() true for tuple->docker.
         *
         * tuple->child does NOT get added to tuple->docker's fillers.
         * The only noticeable effect if it did would be for its fail count to
         * be taken into account when checking tuple->docker's migration
         * threshold.
         */
        parent->children = g_list_append(parent->children, tuple->remote);
    }
    return TRUE;
}
657 
658 static bool
659 create_container(
660  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
661  pe_working_set_t * data_set)
662 {
663 
664  if (data->type == PE_CONTAINER_TYPE_DOCKER &&
665  create_docker_resource(parent, data, tuple, data_set) == FALSE) {
666  return FALSE;
667  }
668  if (data->type == PE_CONTAINER_TYPE_RKT &&
669  create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
670  return FALSE;
671  }
672 
673  if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
674  return FALSE;
675  }
676  if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
677  return FALSE;
678  }
679  if(tuple->child && tuple->ipaddr) {
680  add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
681  }
682 
683  if(tuple->remote) {
684  /*
685  * Allow the remote connection resource to be allocated to a
686  * different node than the one on which the docker container
687  * is active.
688  *
689  * Makes it possible to have remote nodes, running docker
690  * containers with pacemaker_remoted inside in order to start
691  * services inside those containers.
692  */
693  set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
694  }
695 
696  return TRUE;
697 }
698 
699 static void
700 mount_add(container_variant_data_t *container_data, const char *source,
701  const char *target, const char *options, int flags)
702 {
703  container_mount_t *mount = calloc(1, sizeof(container_mount_t));
704 
705  mount->source = strdup(source);
706  mount->target = strdup(target);
707  if (options) {
708  mount->options = strdup(options);
709  }
710  mount->flags = flags;
711  container_data->mounts = g_list_append(container_data->mounts, mount);
712 }
713 
714 static void mount_free(container_mount_t *mount)
715 {
716  free(mount->source);
717  free(mount->target);
718  free(mount->options);
719  free(mount);
720 }
721 
722 static void port_free(container_port_t *port)
723 {
724  free(port->source);
725  free(port->target);
726  free(port);
727 }
728 
729 static container_grouping_t *
730 tuple_for_remote(resource_t *remote)
731 {
732  resource_t *top = remote;
733  container_variant_data_t *container_data = NULL;
734 
735  if (top == NULL) {
736  return NULL;
737  }
738 
739  while (top->parent != NULL) {
740  top = top->parent;
741  }
742 
743  get_container_variant_data(container_data, top);
744  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
745  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
746  if(tuple->remote == remote) {
747  return tuple;
748  }
749  }
750  CRM_LOG_ASSERT(FALSE);
751  return NULL;
752 }
753 
754 bool
756 {
757  const char *name;
758  const char *value;
759  const char *attr_list[] = {
763  };
764  const char *value_list[] = {
765  "remote",
767  "pacemaker"
768  };
769 
770  if(rsc == NULL) {
771  return FALSE;
772  }
773 
774  name = "addr";
775  value = g_hash_table_lookup(rsc->parameters, name);
776  if (safe_str_eq(value, "#uname") == FALSE) {
777  return FALSE;
778  }
779 
780  for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
781  name = attr_list[lpc];
782  value = crm_element_value(rsc->xml, attr_list[lpc]);
783  if (safe_str_eq(value, value_list[lpc]) == FALSE) {
784  return FALSE;
785  }
786  }
787  return TRUE;
788 }
789 
790 const char *
791 container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
792 {
793  // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
794 
795  pe_node_t *node = NULL;
796  container_grouping_t *tuple = NULL;
797 
798  if(container_fix_remote_addr(rsc) == FALSE) {
799  return NULL;
800  }
801 
802  tuple = tuple_for_remote(rsc);
803  if(tuple == NULL) {
804  return NULL;
805  }
806 
807  node = tuple->docker->allocated_to;
808  if(node == NULL && tuple->docker->running_on) {
809  /* If it won't be running anywhere after the
810  * transition, go with where it's running now.
811  */
812  node = tuple->docker->running_on->data;
813  }
814 
815  if(node == NULL) {
816  crm_trace("Cannot fix address for %s", tuple->remote->id);
817  return NULL;
818  }
819 
820  crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname);
821  if(xml != NULL && field != NULL) {
822  crm_xml_add(xml, field, node->details->uname);
823  }
824 
825  return node->details->uname;
826 }
827 
828 gboolean
830 {
831  const char *value = NULL;
832  xmlNode *xml_obj = NULL;
833  xmlNode *xml_resource = NULL;
834  container_variant_data_t *container_data = NULL;
835 
836  CRM_ASSERT(rsc != NULL);
837  pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
838 
839  container_data = calloc(1, sizeof(container_variant_data_t));
840  rsc->variant_opaque = container_data;
841  container_data->prefix = strdup(rsc->id);
842 
843  xml_obj = first_named_child(rsc->xml, "docker");
844  if (xml_obj != NULL) {
845  container_data->type = PE_CONTAINER_TYPE_DOCKER;
846  } else {
847  xml_obj = first_named_child(rsc->xml, "rkt");
848  if (xml_obj != NULL) {
849  container_data->type = PE_CONTAINER_TYPE_RKT;
850  } else {
851  return FALSE;
852  }
853  }
854 
855  value = crm_element_value(xml_obj, "masters");
856  container_data->masters = crm_parse_int(value, "0");
857  if (container_data->masters < 0) {
858  pe_err("'masters' for %s must be nonnegative integer, using 0",
859  rsc->id);
860  container_data->masters = 0;
861  }
862 
863  value = crm_element_value(xml_obj, "replicas");
864  if ((value == NULL) && (container_data->masters > 0)) {
865  container_data->replicas = container_data->masters;
866  } else {
867  container_data->replicas = crm_parse_int(value, "1");
868  }
869  if (container_data->replicas < 1) {
870  pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
871  container_data->replicas = 1;
872  }
873 
874  /*
875  * Communication between containers on the same host via the
876  * floating IPs only works if docker is started with:
877  * --userland-proxy=false --ip-masq=false
878  */
879  value = crm_element_value(xml_obj, "replicas-per-host");
880  container_data->replicas_per_host = crm_parse_int(value, "1");
881  if (container_data->replicas_per_host < 1) {
882  pe_err("'replicas-per-host' for %s must be positive integer, using 1",
883  rsc->id);
884  container_data->replicas_per_host = 1;
885  }
886  if (container_data->replicas_per_host == 1) {
888  }
889 
890  container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
891  container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
892  container_data->image = crm_element_value_copy(xml_obj, "image");
893  container_data->docker_network = crm_element_value_copy(xml_obj, "network");
894 
895  xml_obj = first_named_child(rsc->xml, "network");
896  if(xml_obj) {
897 
898  container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
899  container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
900  container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
901  container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
902 
903  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
904  xml_child = __xml_next_element(xml_child)) {
905 
906  container_port_t *port = calloc(1, sizeof(container_port_t));
907  port->source = crm_element_value_copy(xml_child, "port");
908 
909  if(port->source == NULL) {
910  port->source = crm_element_value_copy(xml_child, "range");
911  } else {
912  port->target = crm_element_value_copy(xml_child, "internal-port");
913  }
914 
915  if(port->source != NULL && strlen(port->source) > 0) {
916  if(port->target == NULL) {
917  port->target = strdup(port->source);
918  }
919  container_data->ports = g_list_append(container_data->ports, port);
920 
921  } else {
922  pe_err("Invalid port directive %s", ID(xml_child));
923  port_free(port);
924  }
925  }
926  }
927 
928  xml_obj = first_named_child(rsc->xml, "storage");
929  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
930  xml_child = __xml_next_element(xml_child)) {
931 
932  const char *source = crm_element_value(xml_child, "source-dir");
933  const char *target = crm_element_value(xml_child, "target-dir");
934  const char *options = crm_element_value(xml_child, "options");
935  int flags = 0;
936 
937  if (source == NULL) {
938  source = crm_element_value(xml_child, "source-dir-root");
939  flags = 1;
940  }
941 
942  if (source && target) {
943  mount_add(container_data, source, target, options, flags);
944  } else {
945  pe_err("Invalid mount directive %s", ID(xml_child));
946  }
947  }
948 
949  xml_obj = first_named_child(rsc->xml, "primitive");
950  if (xml_obj && valid_network(container_data)) {
951  char *value = NULL;
952  xmlNode *xml_set = NULL;
953 
954  if(container_data->masters > 0) {
955  xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER);
956 
957  } else {
958  xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
959  }
960 
961  crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name);
962 
963  xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
964  crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
965 
966  crm_create_nvpair_xml(xml_set, NULL,
968 
969  value = crm_itoa(container_data->replicas);
970  crm_create_nvpair_xml(xml_set, NULL,
972  free(value);
973 
974  value = crm_itoa(container_data->replicas_per_host);
975  crm_create_nvpair_xml(xml_set, NULL,
977  free(value);
978 
980  (container_data->replicas_per_host > 1)?
982 
983  if(container_data->masters) {
984  value = crm_itoa(container_data->masters);
985  crm_create_nvpair_xml(xml_set, NULL,
986  XML_RSC_ATTR_MASTER_MAX, value);
987  free(value);
988  }
989 
990  //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
991  add_node_copy(xml_resource, xml_obj);
992 
993  } else if(xml_obj) {
994  pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
995  rsc->id, ID(xml_obj));
996  return FALSE;
997  }
998 
999  if(xml_resource) {
1000  int lpc = 0;
1001  GListPtr childIter = NULL;
1002  resource_t *new_rsc = NULL;
1003  container_port_t *port = NULL;
1004 
1005  int offset = 0, max = 1024;
1006  char *buffer = NULL;
1007 
1008  if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
1009  pe_err("Failed unpacking resource %s", ID(rsc->xml));
1010  if (new_rsc != NULL && new_rsc->fns != NULL) {
1011  new_rsc->fns->free(new_rsc);
1012  }
1013  return FALSE;
1014  }
1015 
1016  container_data->child = new_rsc;
1017 
1018  /* Currently, we always map the default authentication key location
1019  * into the same location inside the container.
1020  *
1021  * Ideally, we would respect the host's PCMK_authkey_location, but:
1022  * - it may be different on different nodes;
1023  * - the actual connection will do extra checking to make sure the key
1024  * file exists and is readable, that we can't do here on the DC
1025  * - tools such as crm_resource and crm_simulate may not have the same
1026  * environment variables as the cluster, causing operation digests to
1027  * differ
1028  *
1029  * Always using the default location inside the container is fine,
1030  * because we control the pacemaker_remote environment, and it avoids
1031  * having to pass another environment variable to the container.
1032  *
1033  * @TODO A better solution may be to have only pacemaker_remote use the
1034  * environment variable, and have the cluster nodes use a new
1035  * cluster option for key location. This would introduce the limitation
1036  * of the location being the same on all cluster nodes, but that's
1037  * reasonable.
1038  */
1039  mount_add(container_data, DEFAULT_REMOTE_KEY_LOCATION,
1040  DEFAULT_REMOTE_KEY_LOCATION, NULL, 0);
1041 
1042  mount_add(container_data, CRM_LOG_DIR "/bundles", "/var/log", NULL, 1);
1043 
1044  port = calloc(1, sizeof(container_port_t));
1045  if(container_data->control_port) {
1046  port->source = strdup(container_data->control_port);
1047  } else {
1048  /* If we wanted to respect PCMK_remote_port, we could use
1049  * crm_default_remote_port() here and elsewhere in this file instead
1050  * of DEFAULT_REMOTE_PORT.
1051  *
1052  * However, it gains nothing, since we control both the container
1053  * environment and the connection resource parameters, and the user
1054  * can use a different port if desired by setting control-port.
1055  */
1056  port->source = crm_itoa(DEFAULT_REMOTE_PORT);
1057  }
1058  port->target = strdup(port->source);
1059  container_data->ports = g_list_append(container_data->ports, port);
1060 
1061  buffer = calloc(1, max+1);
1062  for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
1063  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1064  tuple->child = childIter->data;
1065  tuple->child->exclusive_discover = TRUE;
1066  tuple->offset = lpc++;
1067 
1068  // Ensure the child's notify gets set based on the underlying primitive's value
1069  if(is_set(tuple->child->flags, pe_rsc_notify)) {
1070  set_bit(container_data->child->flags, pe_rsc_notify);
1071  }
1072 
1073  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1074  container_data->tuples = g_list_append(container_data->tuples, tuple);
1075  container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
1076  }
1077  container_data->docker_host_options = buffer;
1078  if(container_data->attribute_target) {
1079  g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1080  g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1081  }
1082 
1083  } else {
1084  // Just a naked container, no pacemaker-remote
1085  int offset = 0, max = 1024;
1086  char *buffer = calloc(1, max+1);
1087 
1088  for(int lpc = 0; lpc < container_data->replicas; lpc++) {
1089  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1090  tuple->offset = lpc;
1091  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1092  container_data->tuples = g_list_append(container_data->tuples, tuple);
1093  }
1094 
1095  container_data->docker_host_options = buffer;
1096  }
1097 
1098  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1099  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1100  if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
1101  pe_err("Failed unpacking resource %s", rsc->id);
1102  rsc->fns->free(rsc);
1103  return FALSE;
1104  }
1105  }
1106 
1107  if(container_data->child) {
1108  rsc->children = g_list_append(rsc->children, container_data->child);
1109  }
1110  return TRUE;
1111 }
1112 
1113 static int
1114 tuple_rsc_active(resource_t *rsc, gboolean all)
1115 {
1116  if (rsc) {
1117  gboolean child_active = rsc->fns->active(rsc, all);
1118 
1119  if (child_active && !all) {
1120  return TRUE;
1121  } else if (!child_active && all) {
1122  return FALSE;
1123  }
1124  }
1125  return -1;
1126 }
1127 
1128 gboolean
1129 container_active(resource_t * rsc, gboolean all)
1130 {
1131  container_variant_data_t *container_data = NULL;
1132  GListPtr iter = NULL;
1133 
1134  get_container_variant_data(container_data, rsc);
1135  for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
1136  container_grouping_t *tuple = (container_grouping_t *)(iter->data);
1137  int rsc_active;
1138 
1139  rsc_active = tuple_rsc_active(tuple->ip, all);
1140  if (rsc_active >= 0) {
1141  return (gboolean) rsc_active;
1142  }
1143 
1144  rsc_active = tuple_rsc_active(tuple->child, all);
1145  if (rsc_active >= 0) {
1146  return (gboolean) rsc_active;
1147  }
1148 
1149  rsc_active = tuple_rsc_active(tuple->docker, all);
1150  if (rsc_active >= 0) {
1151  return (gboolean) rsc_active;
1152  }
1153 
1154  rsc_active = tuple_rsc_active(tuple->remote, all);
1155  if (rsc_active >= 0) {
1156  return (gboolean) rsc_active;
1157  }
1158  }
1159 
1160  /* If "all" is TRUE, we've already checked that no resources were inactive,
1161  * so return TRUE; if "all" is FALSE, we didn't find any active resources,
1162  * so return FALSE.
1163  */
1164  return all;
1165 }
1166 
/* Find the bundle replica child corresponding to a given node, and rename it
 * to the requested stem.
 *
 * \param[in] stem  Base name the returned resource's clone_name is set to
 * \param[in] rsc   A resource inside the bundle (the bundle itself is found
 *                  via the resource's grandparent)
 * \param[in] node  Node whose replica child should be returned
 *
 * \return The matched replica child (or \p rsc unchanged if unique or no match)
 *
 * NOTE(review): assumes rsc's parent's parent is the bundle (asserted below),
 * and that each tuple has been assigned a node — tuple->node is dereferenced
 * without a NULL check; confirm callers only use this after allocation.
 */
resource_t *
find_container_child(const char *stem, resource_t * rsc, node_t *node)
{
    container_variant_data_t *container_data = NULL;
    resource_t *parent = uber_parent(rsc);
    CRM_ASSERT(parent->parent);

    /* The bundle is the grandparent of the supplied resource */
    parent = parent->parent;
    get_container_variant_data(container_data, parent);

    if (is_not_set(rsc->flags, pe_rsc_unique)) {
        /* Anonymous: map the node to the replica assigned to it */
        for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
            container_grouping_t *tuple = (container_grouping_t *)gIter->data;

            CRM_ASSERT(tuple);
            if(tuple->node->details == node->details) {
                rsc = tuple->child;
                break;
            }
        }
    }

    /* Rename the result to the requested stem if it differs */
    if (rsc && safe_str_neq(stem, rsc->id)) {
        free(rsc->clone_name);
        rsc->clone_name = strdup(stem);
    }

    return rsc;
}
1196 
1197 static void
1198 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
1199  void *print_data)
1200 {
1201  if (rsc != NULL) {
1202  if (options & pe_print_html) {
1203  status_print("<li>");
1204  }
1205  rsc->fns->print(rsc, pre_text, options, print_data);
1206  if (options & pe_print_html) {
1207  status_print("</li>\n");
1208  }
1209  }
1210 }
1211 
1212 static const char*
1213 container_type_as_string(enum container_type t)
1214 {
1215  if (t == PE_CONTAINER_TYPE_DOCKER) {
1216  return PE_CONTAINER_TYPE_DOCKER_S;
1217  } else if (t == PE_CONTAINER_TYPE_RKT) {
1218  return PE_CONTAINER_TYPE_RKT_S;
1219  } else {
1220  return PE_CONTAINER_TYPE_UNKNOWN_S;
1221  }
1222 }
1223 
/* Print a bundle and its replicas as an XML <bundle> element.
 *
 * \param[in] rsc         Bundle resource to print
 * \param[in] pre_text    Indentation prefix (NULL treated as "")
 * \param[in] options     Bitmask of pe_print_* options
 * \param[in] print_data  Opaque data passed through to status_print
 */
static void
container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (pre_text == NULL) {
        pre_text = "";
    }
    /* Child entries are indented one level deeper than the bundle itself */
    child_text = crm_concat(pre_text, " ", ' ');

    get_container_variant_data(container_data, rsc);

    status_print("%s<bundle ", pre_text);
    status_print("id=\"%s\" ", rsc->id);

    // Always lowercase the container technology type for use as XML value
    status_print("type=\"");
    for (const char *c = container_type_as_string(container_data->type);
         *c; ++c) {
        status_print("%c", tolower(*c));
    }
    status_print("\" ");

    status_print("image=\"%s\" ", container_data->image);
    status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
    status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
    status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
    status_print(">\n");

    /* One <replica> element per tuple, listing its member resources */
    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        status_print("%s <replica id=\"%d\">\n", pre_text, tuple->offset);
        print_rsc_in_list(tuple->ip, child_text, options, print_data);
        print_rsc_in_list(tuple->child, child_text, options, print_data);
        print_rsc_in_list(tuple->docker, child_text, options, print_data);
        print_rsc_in_list(tuple->remote, child_text, options, print_data);
        status_print("%s </replica>\n", pre_text);
    }
    status_print("%s</bundle>\n", pre_text);
    free(child_text);
}
1269 
/* Print a one-line summary of a single bundle replica.
 *
 * The printed name is the remote connection's (if any) or the container's,
 * optionally followed by the replica's assigned IP address.
 *
 * NOTE(review): tuple->docker is dereferenced unconditionally below
 * (rsc_printable_id() and running_on) — assumes every tuple has a container;
 * confirm against the unpack path.
 */
static void
tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
{
    node_t *node = NULL;
    resource_t *rsc = tuple->child;

    int offset = 0;
    char buffer[LINE_MAX];

    /* With no contained child, report status through the container itself */
    if(rsc == NULL) {
        rsc = tuple->docker;
    }

    if(tuple->remote) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
    } else {
        offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
    }
    if(tuple->ipaddr) {
        offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
    }

    /* Report the node the container is running on, if any */
    if (tuple->docker->running_on) {
        node = tuple->docker->running_on->data;
    }
    common_print(rsc, pre_text, buffer, node, options, print_data);
}
1297 
/* Print a bundle in text or HTML form (XML is delegated to
 * container_print_xml()).
 *
 * \param[in] rsc         Bundle resource to print
 * \param[in] pre_text    Indentation prefix (NULL treated as "   ")
 * \param[in] options     Bitmask of pe_print_* options
 * \param[in] print_data  Opaque data passed through to status_print
 */
void
container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    if (options & pe_print_xml) {
        container_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_container_variant_data(container_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    /* Header line: technology, id, image, and uniqueness/managed markers */
    status_print("%s%s container%s: %s [%s]%s%s\n",
                 pre_text, container_type_as_string(container_data->type),
                 container_data->replicas>1?" set":"", rsc->id, container_data->image,
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    if (options & pe_print_html) {
        status_print("<br />\n<ul>\n");
    }


    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if (options & pe_print_html) {
            status_print("<li>");
        }

        if (is_set(options, pe_print_implicit)) {
            /* Verbose form: list every member resource of each replica */
            child_text = crm_strdup_printf("     %s", pre_text);
            if(g_list_length(container_data->tuples) > 1) {
                status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
            }
            if (options & pe_print_html) {
                status_print("<br />\n<ul>\n");
            }
            print_rsc_in_list(tuple->ip, child_text, options, print_data);
            print_rsc_in_list(tuple->docker, child_text, options, print_data);
            print_rsc_in_list(tuple->remote, child_text, options, print_data);
            print_rsc_in_list(tuple->child, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</ul>\n");
            }
        } else {
            /* Compact form: one summary line per replica */
            child_text = crm_strdup_printf("%s  ", pre_text);
            tuple_print(tuple, child_text, options, print_data);
        }
        free(child_text);

        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
}
1363 
1364 void
1365 tuple_free(container_grouping_t *tuple)
1366 {
1367  if(tuple == NULL) {
1368  return;
1369  }
1370 
1371  if(tuple->node) {
1372  free(tuple->node);
1373  tuple->node = NULL;
1374  }
1375 
1376  if(tuple->ip) {
1377  free_xml(tuple->ip->xml);
1378  tuple->ip->xml = NULL;
1379  tuple->ip->fns->free(tuple->ip);
1380  tuple->ip = NULL;
1381  }
1382  if(tuple->docker) {
1383  free_xml(tuple->docker->xml);
1384  tuple->docker->xml = NULL;
1385  tuple->docker->fns->free(tuple->docker);
1386  tuple->docker = NULL;
1387  }
1388  if(tuple->remote) {
1389  free_xml(tuple->remote->xml);
1390  tuple->remote->xml = NULL;
1391  tuple->remote->fns->free(tuple->remote);
1392  tuple->remote = NULL;
1393  }
1394  free(tuple->ipaddr);
1395  free(tuple);
1396 }
1397 
1398 void
1400 {
1401  container_variant_data_t *container_data = NULL;
1402  CRM_CHECK(rsc != NULL, return);
1403 
1404  get_container_variant_data(container_data, rsc);
1405  pe_rsc_trace(rsc, "Freeing %s", rsc->id);
1406 
1407  free(container_data->prefix);
1408  free(container_data->image);
1409  free(container_data->control_port);
1410  free(container_data->host_network);
1411  free(container_data->host_netmask);
1412  free(container_data->ip_range_start);
1413  free(container_data->docker_network);
1414  free(container_data->docker_run_options);
1415  free(container_data->docker_run_command);
1416  free(container_data->docker_host_options);
1417 
1418  g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
1419  g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
1420  g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
1421  g_list_free(rsc->children);
1422 
1423  if(container_data->child) {
1424  free_xml(container_data->child->xml);
1425  container_data->child->xml = NULL;
1426  container_data->child->fns->free(container_data->child);
1427  }
1428  common_free(rsc);
1429 }
1430 
1431 enum rsc_role_e
1432 container_resource_state(const resource_t * rsc, gboolean current)
1433 {
1434  enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
1435  return container_role;
1436 }
1437 
1445 int
1447 {
1448  if ((rsc == NULL) || (rsc->variant != pe_container)) {
1449  return 0;
1450  } else {
1451  container_variant_data_t *container_data = NULL;
1452 
1453  get_container_variant_data(container_data, rsc);
1454  return container_data->replicas;
1455  }
1456 }
bool remote_id_conflict(const char *remote_name, pe_working_set_t *data)
Definition: unpack.c:418
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:164
GListPtr nodes
Definition: status.h:107
const char * uname
Definition: status.h:139
void container_free(resource_t *rsc)
Definition: container.c:1399
xmlNode * xml
Definition: status.h:259
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:150
#define INFINITY
Definition: crm.h:83
#define CRM_ATTR_KIND
Definition: crm.h:100
node_t * node_copy(const node_t *this_node)
Definition: utils.c:127
int weight
Definition: status.h:175
node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t *data_set)
Definition: unpack.c:356
#define XML_ATTR_TYPE
Definition: msg_xml.h:105
bool container_fix_remote_addr(resource_t *rsc)
Definition: container.c:755
#define XML_BOOLEAN_FALSE
Definition: msg_xml.h:118
gboolean common_unpack(xmlNode *xml_obj, resource_t **rsc, resource_t *parent, pe_working_set_t *data_set)
Definition: complex.c:465
enum pe_obj_types variant
Definition: status.h:265
void(* free)(resource_t *)
Definition: complex.h:51
void common_free(resource_t *rsc)
Definition: complex.c:911
#define status_print(fmt, args...)
Definition: unpack.h:79
int crm_parse_int(const char *text, const char *default_text)
Definition: strings.c:125
char * crm_element_value_copy(xmlNode *data, const char *name)
Definition: xml.c:3869
GListPtr resources
Definition: status.h:108
node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:301
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:150
char * clone_name
Definition: status.h:258
resource_t * uber_parent(resource_t *rsc)
Definition: complex.c:897
#define clear_bit(word, bit)
Definition: crm_internal.h:191
#define XML_RSC_ATTR_INCARNATION_MAX
Definition: msg_xml.h:212
GListPtr children
Definition: status.h:301
#define XML_RSC_ATTR_TARGET
Definition: msg_xml.h:203
#define pe_rsc_allow_remote_remotes
Definition: status.h:200
void crm_xml_sanitize_id(char *id)
Sanitize a string so it is usable as an XML ID.
Definition: xml.c:3021
char * id
Definition: status.h:257
GHashTable * parameters
Definition: status.h:298
#define DEFAULT_REMOTE_PORT
Definition: lrmd.h:54
#define DEFAULT_REMOTE_KEY_LOCATION
Definition: lrmd.h:52
#define CRM_LOG_DIR
Definition: config.h:59
#define XML_TAG_ATTR_SETS
Definition: msg_xml.h:184
gboolean container_unpack(resource_t *rsc, pe_working_set_t *data_set)
Definition: container.c:829
char uname[MAX_NAME]
Definition: internal.h:53
gboolean is_remote_node(node_t *node)
Definition: remote.c:62
gboolean(* active)(resource_t *, gboolean)
Definition: complex.h:48
struct node_shared_s * details
Definition: status.h:178
resource_t * find_container_child(const char *stem, resource_t *rsc, node_t *node)
Definition: container.c:1168
#define set_bit(word, bit)
Definition: crm_internal.h:190
#define PCMK_RESOURCE_CLASS_OCF
Definition: services.h:57
xmlNode * pe_create_remote_xml(xmlNode *parent, const char *uname, const char *container_id, const char *migrateable, const char *is_managed, const char *interval, const char *monitor_timeout, const char *start_timeout, const char *server, const char *port)
Definition: remote.c:158
#define XML_ATTR_ID
Definition: msg_xml.h:102
#define XML_CIB_TAG_RESOURCE
Definition: msg_xml.h:195
#define XML_BOOLEAN_TRUE
Definition: msg_xml.h:117
#define pe_rsc_failed
Definition: status.h:202
resource_object_functions_t * fns
Definition: status.h:266
GHashTable * allowed_nodes
Definition: status.h:292
void * variant_opaque
Definition: status.h:264
#define crm_trace(fmt, args...)
Definition: logging.h:254
xmlNode * add_node_copy(xmlNode *new_parent, xmlNode *xml_node)
Definition: xml.c:2405
xmlNode * crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, const char *interval, const char *timeout)
Create a CIB XML element for an operation.
Definition: operations.c:439
#define XML_AGENT_ATTR_PROVIDER
Definition: msg_xml.h:254
#define XML_RSC_ATTR_ORDERED
Definition: msg_xml.h:209
#define XML_TAG_META_SETS
Definition: msg_xml.h:185
const char * container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
Definition: container.c:791
xmlNode * create_xml_node(xmlNode *parent, const char *name)
Definition: xml.c:2588
const char * crm_element_value(xmlNode *data, const char *name)
Definition: xml.c:5165
unsigned long long flags
Definition: status.h:281
#define XML_RSC_ATTR_INCARNATION_NODEMAX
Definition: msg_xml.h:214
resource_t * parent
Definition: status.h:263
void free_xml(xmlNode *child)
Definition: xml.c:2706
int pe_bundle_replicas(const resource_t *rsc)
Get the number of configured replicas in a bundle.
Definition: container.c:1446
#define XML_RSC_ATTR_UNIQUE
Definition: msg_xml.h:220
void common_print(resource_t *rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data)
Definition: native.c:479
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Definition: xml.c:2490
#define XML_RSC_ATTR_MASTER_MAX
Definition: msg_xml.h:215
#define pe_rsc_unique
Definition: status.h:190
#define SBIN_DIR
Definition: config.h:686
GHashTable * meta
Definition: status.h:297
void tuple_free(container_grouping_t *tuple)
Definition: container.c:1365
#define XML_CIB_TAG_INCARNATION
Definition: msg_xml.h:197
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:423
void(* print)(resource_t *, const char *, long, void *)
Definition: complex.h:47
void crm_xml_set_id(xmlNode *xml, const char *format,...) __attribute__((__format__(__printf__
enum rsc_role_e container_resource_state(const resource_t *rsc, gboolean current)
Definition: container.c:1432
xmlNode * crm_create_nvpair_xml(xmlNode *parent, const char *id, const char *name, const char *value)
Create an XML name/value pair.
Definition: xml.c:4831
#define DIMOF(a)
Definition: crm.h:39
#define pe_rsc_managed
Definition: status.h:185
#define crm_str_hash
Definition: util.h:73
#define CRM_ASSERT(expr)
Definition: error.h:35
char data[0]
Definition: internal.h:58
void container_print(resource_t *rsc, const char *pre_text, long options, void *print_data)
Definition: container.c:1299
rsc_role_e
Definition: common.h:81
gboolean container_active(resource_t *rsc, gboolean all)
Definition: container.c:1129
#define XML_CIB_TAG_MASTER
Definition: msg_xml.h:198
int rsc_discover_mode
Definition: status.h:179
xmlNode * first_named_child(xmlNode *parent, const char *name)
Definition: xml.c:5053
#define crm_log_xml_trace(xml, text)
Definition: logging.h:262
Definition: status.h:174
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:26
char * crm_concat(const char *prefix, const char *suffix, char join)
Definition: strings.c:32
#define ID(x)
Definition: msg_xml.h:446
#define pe_err(fmt...)
Definition: internal.h:28
char * crm_itoa(int an_int)
Definition: strings.c:60
#define safe_str_eq(a, b)
Definition: util.h:72
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
GList * GListPtr
Definition: crm.h:218
#define pe_rsc_notify
Definition: status.h:189
void g_hash_destroy_str(gpointer data)
Definition: strings.c:74
const char * rsc_printable_id(resource_t *rsc)
Definition: utils.c:2083
uint64_t flags
Definition: remote.c:156
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:253