/**
 * Asynchronously deletes the PVC with the given name, in the namespace of the
 * given StatefulSet, by reconciling it to {@code null} (absent).
 *
 * @param ss the StatefulSet whose namespace the PVC lives in
 * @param pvcName the name of the PersistentVolumeClaim to delete
 * @return a Future that completes when the deletion has been reconciled,
 *         or fails with the reconciliation's cause
 */
public Future<Void> deletePvc(StatefulSet ss, String pvcName) {
    String namespace = ss.getMetadata().getNamespace();
    Future<Void> result = Future.future();
    // Reconciling to a null desired state deletes the resource.
    pvcOperations.reconcile(namespace, pvcName, null).setHandler(ar -> {
        if (ar.succeeded()) {
            result.complete();
        } else {
            result.fail(ar.cause());
        }
    });
    return result;
}
/**
 * Looks up the router StatefulSet for this infrastructure instance and builds a
 * {@link RouterCluster} view of it, including the applied infra config (if the
 * StatefulSet carries the corresponding annotation).
 *
 * @return the router cluster description
 * @throws IOException if the applied-infra-config annotation cannot be deserialized
 */
@Override
public RouterCluster getRouterCluster() throws IOException {
    // Router StatefulSets are named "qdrouterd-<infraUuid>" by this component's convention.
    StatefulSet s = client.apps().statefulSets().withName("qdrouterd-" + infraUuid).get();
    String appliedJson = s.getMetadata().getAnnotations() == null
            ? null
            : s.getMetadata().getAnnotations().get(AnnotationKeys.APPLIED_INFRA_CONFIG);
    StandardInfraConfig infraConfig = null;
    if (appliedJson != null) {
        infraConfig = mapper.readValue(appliedJson, StandardInfraConfig.class);
    }
    return new RouterCluster(s.getMetadata().getName(), s.getSpec().getReplicas(), infraConfig);
}
/**
 * Verifies that every required StatefulSet for the address space is ready; if any
 * is not, marks the address space status as not ready and appends a message
 * naming the missing StatefulSets.
 *
 * @param addressSpace the address space whose status is updated on failure
 * @param requiredResources all required resources; non-StatefulSets are ignored
 */
private void checkStatefulSetsReady(AddressSpace addressSpace, List<HasMetadata> requiredResources) {
    Set<String> ready = kubernetes.getReadyStatefulSets(addressSpace).stream()
            .map(ss -> ss.getMetadata().getName())
            .collect(Collectors.toSet());
    Set<String> required = requiredResources.stream()
            .filter(KubernetesHelper::isStatefulSet)
            .map(resource -> resource.getMetadata().getName())
            .collect(Collectors.toSet());
    if (!ready.containsAll(required)) {
        // Report exactly the required sets that are not yet ready.
        Set<String> notReady = new HashSet<>(required);
        notReady.removeAll(ready);
        addressSpace.getStatus().setReady(false);
        addressSpace.getStatus().appendMessage("The following stateful sets are not ready: " + notReady);
    }
}
/**
 * Decides whether a pod already reflects its StatefulSet's template, by comparing
 * the generation recorded on the pod with the generation of the StatefulSet.
 *
 * @param ss the StatefulSet the pod belongs to
 * @param pod the pod to check
 * @return {@code true} when the pod's generation matches the StatefulSet's
 */
private boolean isPodUpToDate(StatefulSet ss, Pod pod) {
    int generationOfSs = StatefulSetOperator.getSsGeneration(ss);
    int generationOfPod = StatefulSetOperator.getPodGeneration(pod);
    log.debug("Rolling update of {}/{}: pod {} has {}={}; ss has {}={}",
            ss.getMetadata().getNamespace(), ss.getMetadata().getName(),
            pod.getMetadata().getName(),
            StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, generationOfPod,
            StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, generationOfSs);
    return generationOfSs == generationOfPod;
}
/**
 * Asynchronously performs a rolling update of all the pods in the given StatefulSet,
 * returning a Future that completes when the rolling update is complete.
 * Starting with pod 0, each pod is deleted and re-created automatically by the
 * StatefulSet controller; once a pod has been recreated, the readiness of that pod
 * is polled before the process proceeds with the pod with the next higher number.
 *
 * @param ss the StatefulSet whose pods may be rolled
 * @param podRestart predicate deciding whether a given pod needs restarting
 * @return a Future completing when all pods have been considered (and rolled if needed)
 */
public Future<Void> maybeRollingUpdate(StatefulSet ss, Predicate<Pod> podRestart) {
    String ns = ss.getMetadata().getNamespace();
    String ssName = ss.getMetadata().getName();
    int podCount = ss.getSpec().getReplicas();
    log.debug("Considering rolling update of {}/{}", ns, ssName);
    // Chain the per-pod restarts sequentially: pod i+1 is only considered
    // after pod i's restart (if any) has completed.
    Future<Void> chain = Future.succeededFuture();
    for (int podIndex = 0; podIndex < podCount; podIndex++) {
        final String podName = ssName + "-" + podIndex;
        chain = chain.compose(v -> maybeRestartPod(ss, podName, podRestart));
    }
    return chain;
}
/**
 * Reconciliation step that performs manual pod cleaning for the Kafka StatefulSet:
 * if the StatefulSet exists, its pods (and their PVCs) may be deleted and recreated.
 *
 * @return a Future with this {@code ReconciliationState} for chaining
 */
Future<ReconciliationState> kafkaManualPodCleaning() {
    String reason = "manual pod cleaning";
    Future<StatefulSet> ssFuture = kafkaSetOperations.getAsync(namespace, KafkaCluster.kafkaClusterName(name));
    if (ssFuture == null) {
        // Defensive: nothing to do when no lookup future was produced.
        return Future.succeededFuture(this);
    }
    return ssFuture.compose(ss -> {
        if (ss == null) {
            // StatefulSet does not exist; nothing to clean.
            return Future.succeededFuture();
        }
        log.debug("{}: Cleaning Pods for StatefulSet {} to {}", reconciliation, ss.getMetadata().getName(), reason);
        return kafkaSetOperations.maybeDeletePodAndPvc(ss);
    }).map(ignored -> this);
}
/**
 * Reconciliation step that performs manual pod cleaning for the ZooKeeper StatefulSet:
 * if the StatefulSet exists, its pods (and their PVCs) may be deleted and recreated.
 *
 * @return a Future with this {@code ReconciliationState} for chaining
 */
Future<ReconciliationState> zkManualPodCleaning() {
    String reason = "manual pod cleaning";
    Future<StatefulSet> ssFuture = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name));
    if (ssFuture == null) {
        // Defensive: nothing to do when no lookup future was produced.
        return Future.succeededFuture(this);
    }
    return ssFuture.compose(ss -> {
        if (ss == null) {
            // StatefulSet does not exist; nothing to clean.
            return Future.succeededFuture();
        }
        log.debug("{}: Cleaning Pods for StatefulSet {} to {}", reconciliation, ss.getMetadata().getName(), reason);
        return zkSetOperations.maybeDeletePodAndPvc(ss);
    }).map(ignored -> this);
}
// NOTE(review): incomplete fragment of a StatefulSet diff-filtering loop — the
// `outer` label, the enclosing iteration over diffs (`d`, `pathValue`), and the
// closing braces all lie outside this view.
for (Pattern pattern : IGNORABLE_PATHS) {
    // If the diff path matches an ignorable pattern, skip this diff entirely.
    if (pattern.matcher(pathValue).matches()) {
        log.debug("StatefulSet {}/{} ignoring diff {}", current.getMetadata().getNamespace(), current.getMetadata().getName(), d);
        continue outer;
        // NOTE(review): the statements below directly follow `continue outer;`
        // and would be unreachable (a compile error in Java) as shown — this
        // looks like extraction garbling; in the original file they presumably
        // sit after the pattern loop, logging diffs that were NOT ignored.
        log.debug("StatefulSet {}/{} differs: {}", current.getMetadata().getNamespace(), current.getMetadata().getName(), d);
        log.debug("Current StatefulSet path {} has value {}", pathValue, getFromPath(current, pathValue));
        log.debug("Desired StatefulSet path {} has value {}", pathValue, getFromPath(desired, pathValue));
// NOTE(review): fragment of an anonymous visitor — the enclosing anonymous class
// and the call it is passed to are outside this view (closed by the trailing `});`).
// Visits a StatefulSet being built and, when triggers may be written for it,
// replaces its metadata with a copy enriched with trigger information.
@Override public void visit(StatefulSetBuilder o) {
    StatefulSet s = o.build();
    if (canWriteTriggers(s)) {
        // Swap in metadata that has the trigger data added.
        o.withMetadata(getMetaEnrichedWithTriggers(s.getMetadata(), o));
    }
} });
// NOTE(review): incomplete fragment — the enclosing method signature and the
// remainder of the loop body/method lie outside this view.
String namespace = ss.getMetadata().getNamespace();
String name = ss.getMetadata().getName();
final int replicas = ss.getSpec().getReplicas();
log.debug("Considering rolling update of {}/{}", namespace, name);
// Tracks whether ANY ZooKeeper pod requires a restart (OR-accumulated below).
boolean zkRoll = false;
ArrayList<Pod> pods = new ArrayList<>();
String cluster = ss.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
for (int i = 0; i < replicas; i++) {
    // Fetch each ZooKeeper pod by its conventional name and test whether it needs a restart.
    Pod pod = podOperations.get(ss.getMetadata().getNamespace(), KafkaResources.zookeeperPodName(cluster, i));
    zkRoll |= podRestart.test(pod);
    pods.add(pod);
/**
 * Extracts the environment variables of the container named "kafka" from the
 * given StatefulSet's pod template as a name-to-value map, preserving the
 * declaration order of the variables.
 *
 * @param ss the StatefulSet whose template is searched
 * @return the "kafka" container's environment as an ordered map (possibly empty)
 * @throws KafkaUpgradeException if no container named "kafka" exists in the template
 */
public static Map<String, String> getKafkaContainerEnv(StatefulSet ss) {
    for (Container c : ss.getSpec().getTemplate().getSpec().getContainers()) {
        if (!"kafka".equals(c.getName())) {
            continue;
        }
        // LinkedHashMap keeps the env vars in declaration order.
        LinkedHashMap<String, String> result = new LinkedHashMap<>(c.getEnv() == null ? 2 : c.getEnv().size());
        if (c.getEnv() != null) {
            for (EnvVar envVar : c.getEnv()) {
                result.put(envVar.getName(), envVar.getValue());
            }
        }
        return result;
    }
    throw new KafkaUpgradeException("Could not find 'kafka' container in StatefulSet " + ss.getMetadata().getName());
}
// NOTE(review): incomplete — only the opening of this method is visible here;
// the body continues past this view (no closing brace in sight).
// Considers, for each pod of the StatefulSet, whether it (and its PVC) should be
// manually deleted and restarted.
public Future<Void> maybeDeletePodAndPvc(StatefulSet ss) {
    String namespace = ss.getMetadata().getNamespace();
    String name = ss.getMetadata().getName();
    final int replicas = ss.getSpec().getReplicas();
    log.debug("Considering manual deletion and restart of pods for {}/{}", namespace, name);
    // Labels of the StatefulSet — presumably used below to select/recreate pods; confirm in full file.
    Map<String, String> ssLabels = ss.getMetadata().getLabels();
// NOTE(review): garbled/incomplete fragment — it begins with a bare `return`
// (the enclosing conditional/method is outside this view) and mixes statements
// that cannot all be sequential as shown; confirm against the original file.
return Future.succeededFuture(this);
// Determines whether the StatefulSet needs a Kafka version upgrade by comparing
// from/to version annotations against the currently configured Kafka version.
log.debug("Does SS {} need to be upgraded?", ss.getMetadata().getName());
Future<?> result;
log.debug("SS {} has current version {}", ss.getMetadata().getName(), currentVersion);
String fromVersionAnno = Annotations.annotations(ss).get(ANNO_STRIMZI_IO_FROM_VERSION);
KafkaVersion fromVersion;
// NOTE(review): fromVersionAnno is read but unused here — likely an elided
// branch parsed it; as shown, fromVersion just defaults to currentVersion.
fromVersion = currentVersion;
log.debug("SS {} is from version {}", ss.getMetadata().getName(), fromVersion);
String toVersionAnno = Annotations.annotations(ss).get(ANNO_STRIMZI_IO_TO_VERSION);
KafkaVersion toVersion;
// NOTE(review): toVersionAnno likewise read but unused in this view; the target
// version is taken from the Kafka CR spec.
toVersion = versions.version(kafkaAssembly.getSpec().getKafka().getVersion());
log.debug("SS {} is to version {}", ss.getMetadata().getName(), toVersion);
KafkaUpgrade upgrade = new KafkaUpgrade(fromVersion, toVersion);
log.debug("Kafka upgrade {}", upgrade);