@Override
public RemoteAction clean(AccountDeploymentDetails<KubernetesAccount> details,
    SpinnakerRuntimeSettings runtimeSettings) {
  // Remove every Spinnaker-labeled resource from the configured deploy location.
  DaemonTaskHandler.newStage("Invoking kubectl");
  DaemonTaskHandler.message("Deleting all 'svc,deploy,secret' resources with label 'app=spin'...");
  KubernetesSharedServiceSettings sharedSettings =
      new KubernetesSharedServiceSettings(details.getDeploymentConfiguration());
  KubernetesV2Executor executor =
      new KubernetesV2Executor(DaemonTaskHandler.getJobExecutor(), details.getAccount());
  executor.deleteSpinnaker(sharedSettings.getDeployLocation());
  return new RemoteAction();
}
@RequestMapping(value = "/currentDeployment", method = RequestMethod.GET)
DaemonTask<Halconfig, String> currentDeployment() {
  // Wrap the current-deployment lookup in a daemon task so the caller can poll for the result.
  StaticRequestBuilder<String> requestBuilder =
      new StaticRequestBuilder<>(configService::getCurrentDeployment);
  return DaemonTaskHandler.submitTask(requestBuilder::build, "Get current deployment");
}
/**
 * Polls the proxy's local port until a TCP connection succeeds or the retry budget
 * ({@code connectRetries}) is exhausted, sleeping 5 seconds between failed attempts.
 *
 * @param proxy the proxy whose local port should be probed
 * @return true once a connection to localhost:port was opened, false if all retries failed
 */
static private boolean checkIfProxyIsOpen(Proxy proxy) {
  boolean connected = false;
  int tries = 0;
  int port = proxy.getPort();
  while (!connected && tries < connectRetries) {
    tries++;
    try {
      log.info("Attempting to connect to localhost:" + port + "...");
      // try-with-resources guarantees the probe socket is closed even if a later call throws;
      // the original closed it manually and could leak the descriptor on an exception.
      try (Socket testSocket = new Socket("localhost", port)) {
        log.info("Connection opened");
        connected = testSocket.isConnected();
      }
    } catch (IOException e) {
      // Port not accepting connections yet — back off before the next attempt.
      DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(5));
    }
  }
  return connected;
}
/**
 * Submits a named task, inheriting the timeout of the currently executing task when one
 * exists and falling back to {@link TaskRepository#DEFAULT_TIMEOUT} otherwise.
 *
 * @param taskSupplier produces the task's response when the task runs
 * @param name human-readable task name
 * @return the submitted task handle
 */
public static <C, T> DaemonTask<C, T> submitTask(Supplier<DaemonResponse<T>> taskSupplier, String name) {
  // Wildcards instead of the original raw DaemonTask: only getTimeout() is needed here,
  // and the raw type generated an unchecked warning.
  DaemonTask<?, ?> task = getTask();
  long timeout = task != null ? task.getTimeout() : TaskRepository.DEFAULT_TIMEOUT;
  return submitTask(taskSupplier, name, timeout);
}
SpinnakerRuntimeSettings runtimeSettings, List<SpinnakerService.Type> serviceTypes) { DaemonTaskHandler.newStage("Checking if it is safe to roll back all services"); for (DistributedService distributedService : serviceProvider .getPrioritizedDistributedServices(serviceTypes)) { DaemonTaskHandler.newStage("Rolling back all updatable services"); for (DistributedService distributedService : serviceProvider .getPrioritizedDistributedServices(serviceTypes)) { .getDeployableService(SpinnakerService.Type.ORCA_BOOTSTRAP, Orca.class) .connectToPrimaryService(deploymentDetails, runtimeSettings); DaemonTaskHandler.message("Rolling back " + distributedService.getServiceName() + " via Spinnaker red/black"); rollbackService(deploymentDetails, orca, distributedService, runtimeSettings); .submitTask(builder::build, "Rollback " + distributedService.getServiceName()); DaemonTaskHandler.message("Waiting on rollbacks to complete"); DaemonTaskHandler.reduceChildren(null, (t1, t2) -> null, (t1, t2) -> null) .getProblemSet().throwifSeverityExceeds(Problem.Severity.WARNING);
DaemonTaskHandler.newStage("Deploying " + service.getServiceName() + " with kubectl"); KubernetesV2Executor executor = new KubernetesV2Executor(DaemonTaskHandler.getJobExecutor(), account); String namespaceDefinition = service.getNamespaceYaml(resolvedConfiguration); String serviceDefinition = service.getServiceYaml(resolvedConfiguration); if (((SpinnakerService) service).getType().equals(Type.REDIS) && executor.exists(resourceDefinition)) { DaemonTaskHandler.message("Redis deployment already exists... not redeploying..."); } else { DaemonTaskHandler.message("Running kubectl apply on the resource definition..."); executor.apply(resourceDefinition); DaemonTaskHandler.message("Waiting for service to be ready..."); while (!executor.isReady(service.getNamespace(settings), service.getServiceName())) { DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(5)); }); DaemonTaskHandler .submitTask(builder::build, "Deploy " + service.getServiceName(), TimeUnit.MINUTES.toMillis(10)); }); DaemonTaskHandler.message("Waiting on deployments to complete"); DaemonTaskHandler.reduceChildren(null, (t1, t2) -> null, (t1, t2) -> null) .getProblemSet().throwifSeverityExceeds(Problem.Severity.WARNING);
String jobId = proxy.jobId; if (StringUtils.isEmpty(jobId) || !jobExecutor.jobExists(jobId)) { DaemonTaskHandler.newStage("Connecting to the Kubernetes cluster in account \"" + account.getName() + "\""); List<String> command = kubectlAccountCommand(details); command.add("proxy"); DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(2)); status = jobExecutor.updateJob(proxy.jobId); proxy.setPort(Integer.valueOf(matcher.group(1))); proxyMap.put(Proxy.buildKey(details.getDeploymentName()), proxy); DaemonTaskHandler.message("Connected to kubernetes cluster for account " + account.getName() + " on port " + proxy.getPort()); DaemonTaskHandler.message("View the kube ui on http://localhost:" + proxy.getPort() + "/ui/"); } else { throw new HalException(Severity.FATAL,
// Convenience accessor: resolves the job executor from the current daemon task context.
default JobExecutor getJobExecutor() { return DaemonTaskHandler.getJobExecutor(); }
DaemonTaskHandler.message("Validating " + node.getNodeName() + " with " + validator.getClass().getSimpleName()); m.invoke(validator, psBuilder, node); return true;
@Override
public RemoteAction clean(AccountDeploymentDetails<KubernetesAccount> details,
    SpinnakerRuntimeSettings runtimeSettings) {
  // Delete every Spinnaker-labeled resource from the deploy location via kubectl.
  DaemonTaskHandler.newStage("Invoking kubectl");
  DaemonTaskHandler.message("Deleting all 'svc,deploy,secret' resources with label 'app=spin'...");
  KubernetesSharedServiceSettings sharedSettings =
      new KubernetesSharedServiceSettings(details.getDeploymentConfiguration());
  String deployLocation = sharedSettings.getDeployLocation();
  KubernetesV2Utils.deleteSpinnaker(details.getAccount(), deployLocation);
  return new RemoteAction();
}
static private Proxy openSshTunnel(String ip, int port, String keyFile) throws InterruptedException { JobExecutor jobExecutor = DaemonTaskHandler.getJobExecutor(); List<String> command = new ArrayList<>(); DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(1)); status = jobExecutor.updateJob(jobId);
/**
 * Builds a rollback pipeline for one distributed service, submits it to Orca, and
 * blocks until the pipeline completes.
 */
private <T extends Account> void rollbackService(AccountDeploymentDetails<T> details,
    Orca orca,
    DistributedService distributedService,
    SpinnakerRuntimeSettings runtimeSettings) {
  DaemonTaskHandler.newStage("Rolling back " + distributedService.getServiceName());
  Map<String, Object> rollbackPipeline =
      distributedService.buildRollbackPipeline(details, runtimeSettings);
  Supplier<String> pipelineIdSupplier =
      () -> (String) orca.orchestrate(rollbackPipeline).get("ref");
  orcaRunner.monitorPipeline(pipelineIdSupplier, orca);
}
/** * Returns the current halconfig stored at the halconfigPath. * * @return the fully parsed halconfig. * @see Halconfig */ public Halconfig getHalconfig() { Halconfig local = (Halconfig) DaemonTaskHandler.getContext(); if (local == null) { try { InputStream is = getHalconfigStream(); local = parseHalconfig(is); } catch (FileNotFoundException ignored) { // leave res as `null` } catch (ParserException e) { throw new ParseConfigException(e); } catch (ScannerException e) { throw new ParseConfigException(e); } catch (IllegalArgumentException e) { throw new ParseConfigException(e); } } local = transformHalconfig(local); DaemonTaskHandler.setContext(local); return local; }
/**
 * Undoes changes to the staged in-memory halconfig by clearing the cached copy
 * from the daemon task context, forcing the next read to re-parse from disk.
 */
public void undoChanges() {
  DaemonTaskHandler.setContext(null);
}
SpinnakerRuntimeSettings runtimeSettings, List<SpinnakerService.Type> serviceTypes) { DaemonTaskHandler.newStage("Checking if it is safe to roll back all services"); for (DistributedService distributedService : serviceProvider .getPrioritizedDistributedServices(serviceTypes)) { DaemonTaskHandler.newStage("Rolling back all updatable services"); for (DistributedService distributedService : serviceProvider .getPrioritizedDistributedServices(serviceTypes)) { .getDeployableService(SpinnakerService.Type.ORCA_BOOTSTRAP, Orca.class) .connectToPrimaryService(deploymentDetails, runtimeSettings); DaemonTaskHandler.message("Rolling back " + distributedService.getServiceName() + " via Spinnaker red/black"); rollbackService(deploymentDetails, orca, distributedService, runtimeSettings); .submitTask(builder::build, "Rollback " + distributedService.getServiceName()); DaemonTaskHandler.message("Waiting on rollbacks to complete"); DaemonTaskHandler.reduceChildren(null, (t1, t2) -> null, (t1, t2) -> null) .getProblemSet().throwifSeverityExceeds(Problem.Severity.WARNING);
String jobId = proxy.jobId; if (StringUtils.isEmpty(jobId) || !jobExecutor.jobExists(jobId)) { DaemonTaskHandler.newStage("Connecting to the Kubernetes cluster in account \"" + account.getName() + "\""); List<String> command = kubectlAccountCommand(details); command.add("proxy"); DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(2)); status = jobExecutor.updateJob(proxy.jobId); proxy.setPort(Integer.valueOf(matcher.group(1))); proxyMap.put(Proxy.buildKey(details.getDeploymentName()), proxy); DaemonTaskHandler.message("Connected to kubernetes cluster for account " + account.getName() + " on port " + proxy.getPort()); DaemonTaskHandler.message("View the kube ui on http://localhost:" + proxy.getPort() + "/ui/"); } else { throw new HalException(Severity.FATAL,
// Convenience accessor: resolves the job executor from the current daemon task context.
default JobExecutor getJobExecutor() { return DaemonTaskHandler.getJobExecutor(); }
DaemonTaskHandler.message("Validating " + node.getNodeName() + " with " + validator.getClass().getSimpleName()); m.invoke(validator, psBuilder, node); return true;
DaemonTaskHandler.newStage("Generating all Spinnaker profile files and endpoints"); log.info("Generating config from \"" + halconfigPath + "\" with deploymentName \"" + deploymentName + "\""); File spinnakerStaging = halconfigDirectoryStructure.getStagingPath(deploymentName).toFile(); DeploymentConfiguration deploymentConfiguration = deploymentService.getDeploymentConfiguration(deploymentName); DaemonTaskHandler.message("Building service endpoints"); SpinnakerServiceProvider<DeploymentDetails> serviceProvider = serviceProviderFactory.create(deploymentConfiguration); SpinnakerRuntimeSettings runtimeSettings = serviceProvider.buildRuntimeSettings(deploymentConfiguration); DaemonTaskHandler.message(profileMessage); mergeProfilesAndPreserveProperties(outputProfiles, processProfiles(spinnakerStaging, customProfiles));
static private Proxy openSshTunnel(String ip, int port, String keyFile) throws InterruptedException { JobExecutor jobExecutor = DaemonTaskHandler.getJobExecutor(); List<String> command = new ArrayList<>(); DaemonTaskHandler.safeSleep(TimeUnit.SECONDS.toMillis(1)); status = jobExecutor.updateJob(jobId);