/**
 * Same as {@link #createLogbackJar(Location)} except this method uses a local {@link File} instead.
 */
@Nullable
public static File createLogbackJar(File targetFile) throws IOException {
  Location logbackJar = createLogbackJar(Locations.toLocation(targetFile));
  return logbackJar == null ? null : new File(logbackJar.toURI());
}
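A minimal usage sketch. The temporary directory and the classpath list are hypothetical, and the null check reflects the @Nullable contract above (presumably no jar is created when no logback configuration is available):

// Hypothetical caller: package the current logback configuration for a launched program.
// Requires java.nio.file.Files; 'classpath' is an assumed List<File> built by the caller.
File tempDir = Files.createTempDirectory("logback").toFile();
File logbackJar = createLogbackJar(new File(tempDir, "logback.jar"));
if (logbackJar != null) {
  classpath.add(logbackJar);
}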
/**
 * Returns the list of {@link Service} to start before program execution and to shut down when the program completes.
 */
private Deque<Service> createCoreServices(Injector injector, ProgramOptions programOptions) {
  Deque<Service> services = new LinkedList<>();

  MetricsCollectionService metricsCollectionService = injector.getInstance(MetricsCollectionService.class);
  services.add(metricsCollectionService);
  services.add(injector.getInstance(ZKClientService.class));

  switch (ProgramRunners.getClusterMode(programOptions)) {
    case ON_PREMISE:
      addOnPremiseServices(injector, programOptions, metricsCollectionService, services);
      break;
    case ISOLATED:
      addIsolatedServices(injector, services);
      break;
    default:
      // This shouldn't happen. Just do nothing.
  }

  return services;
}
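Returning a Deque rather than a plain List suggests order-sensitive lifecycle management. A sketch of how a caller might use it, assuming the legacy Guava Service API (start()/startAndWait()) seen elsewhere in these excerpts:

Deque<Service> services = createCoreServices(injector, programOptions);
// Start in insertion order so base services (metrics, ZooKeeper) come up first.
for (Service service : services) {
  service.startAndWait();
}
// ... program runs ...
// Stop in reverse order so dependents shut down before their dependencies.
Iterator<Service> reverse = services.descendingIterator();
while (reverse.hasNext()) {
  reverse.next().stopAndWait();
}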
BasicWorkflowContext(WorkflowSpecification workflowSpec, WorkflowToken token, Program program,
                     ProgramOptions programOptions, CConfiguration cConf,
                     MetricsCollectionService metricsCollectionService, DatasetFramework datasetFramework,
                     TransactionSystemClient txClient, DiscoveryServiceClient discoveryServiceClient,
                     Map<String, WorkflowNodeState> nodeStates, @Nullable PluginInstantiator pluginInstantiator,
                     SecureStore secureStore, SecureStoreManager secureStoreManager,
                     MessagingService messagingService, @Nullable ConditionSpecification conditionSpecification,
                     MetadataReader metadataReader, MetadataPublisher metadataPublisher) {
  super(program, programOptions, cConf, new HashSet<>(), datasetFramework, txClient, discoveryServiceClient,
        false, metricsCollectionService,
        Collections.singletonMap(Constants.Metrics.Tag.WORKFLOW_RUN_ID,
                                 ProgramRunners.getRunId(programOptions).getId()),
        secureStore, secureStoreManager, messagingService, pluginInstantiator, metadataReader, metadataPublisher);
  this.workflowSpec = workflowSpec;
  this.conditionSpecification = conditionSpecification;
  this.token = token;
  this.nodeStates = nodeStates;
}
coreServices.add(serviceAnnouncer);

if (ProgramRunners.getClusterMode(programOptions) == ClusterMode.ON_PREMISE) {
  ProgramRunId programRunId = program.getId().run(ProgramRunners.getRunId(programOptions));
  ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
  // ... (remainder of the excerpt elided)
}
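The unchecked cast above assumes the bound DatasetFramework implementation is ProgramContextAware. A purely illustrative defensive variant, reusing the variables from the excerpt:

// Illustrative only: guard the cast instead of assuming the binding.
if (programDatasetFramework instanceof ProgramContextAware) {
  ((ProgramContextAware) programDatasetFramework).setContext(new BasicProgramContext(programRunId));
}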
RunId runId = ProgramRunners.getRunId(options);
ClusterMode clusterMode = ProgramRunners.getClusterMode(options);

Service mapReduceRuntimeService = new MapReduceRuntimeService(injector, cConf, hConf, mapReduce, spec, context,
                                                              program.getJarLocation(), locationFactory,
                                                              /* remaining arguments elided in the excerpt */);

if (/* condition elided in the excerpt */) {
  mapReduceRuntimeService.start();
} else {
  ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), mapReduceRuntimeService);
}
RunId runId = ProgramRunners.getRunId(options);
// ... (construction of sparkRuntimeService elided in the excerpt)

if (/* condition elided in the excerpt */) {
  sparkRuntimeService.start();
} else {
  ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), sparkRuntimeService);
}
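Both the MapReduce and Spark excerpts above share the same start pattern: run the service directly in one branch, or impersonate the configured HDFS user in the other. A hedged sketch of the shared idea; the helper name and the isLocal flag are assumptions, not the actual runner code:

// Sketch: start an in-process service directly, but impersonate the HDFS user
// when the program runs against a real cluster.
static void startRuntimeService(Service service, boolean isLocal, CConfiguration cConf)
    throws IOException, InterruptedException {
  if (isLocal) {
    service.start();
  } else {
    ProgramRunners.startAsUser(cConf.get(Constants.CFG_HDFS_USER), service);
  }
}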
super(program.getId());
this.artifactId = ProgramRunners.getArtifactId(programOptions);
this.program = program;
this.programOptions = programOptions;
this.cConf = cConf;
this.programRunId = program.getId().run(ProgramRunners.getRunId(programOptions));
this.triggeringScheduleInfo = getTriggeringScheduleInfo(programOptions);
this.discoveryServiceClient = discoveryServiceClient;
this.logicalStartTime = ProgramRunners.updateLogicalStartTime(runtimeArgs);
this.runtimeArguments = Collections.unmodifiableMap(runtimeArgs);
// ... (elided: call whose trailing argument follows)
                        program.getApplicationSpecification().getPlugins());
KerberosPrincipalId principalId = ProgramRunners.getApplicationPrincipal(programOptions);
this.admin = new DefaultAdmin(dsFramework, program.getId().getNamespaceId(), secureStoreManager,
                              new BasicMessagingAdmin(messagingService, program.getId().getNamespaceId()),
                              /* remaining arguments elided in the excerpt */);
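ProgramRunners.updateLogicalStartTime appears to both read and normalize the logical start time in the runtime arguments. A sketch of the assumed behavior; the argument key and the defaulting rule are assumptions, not confirmed by the excerpt:

// Assumed behavior only: prefer a launcher-supplied logical start time, otherwise
// record "now" back into the arguments so later readers see a consistent value.
static long updateLogicalStartTime(Map<String, String> runtimeArgs) {
  String value = runtimeArgs.get("logical.start.time"); // key name is an assumption
  long startTime = value == null ? System.currentTimeMillis() : Long.parseLong(value);
  runtimeArgs.put("logical.start.time", Long.toString(startTime));
  return startTime;
}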
/**
 * Impersonates the given user to start a guava {@link Service}.
 *
 * @param user the user to impersonate
 * @param service the guava service to start
 */
public static void startAsUser(String user, final Service service) throws IOException, InterruptedException {
  runAsUser(user, new Callable<ListenableFuture<Service.State>>() {
    @Override
    public ListenableFuture<Service.State> call() throws Exception {
      return service.start();
    }
  });
}
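runAsUser is not shown in the excerpt. On Hadoop it would plausibly delegate to UserGroupInformation; this is an assumption about the implementation, sketched here only for context:

// Hypothetical sketch of runAsUser; requires org.apache.hadoop.security.UserGroupInformation
// and java.security.PrivilegedExceptionAction. The real implementation is not in the excerpt.
static <T> T runAsUser(String user, Callable<T> callable) throws IOException, InterruptedException {
  return UserGroupInformation.createRemoteUser(user)
    .doAs((PrivilegedExceptionAction<T>) callable::call);
}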
private void createLocalDatasets() throws IOException, DatasetManagementException {
  final KerberosPrincipalId principalId = ProgramRunners.getApplicationPrincipal(programOptions);

  for (final Map.Entry<String, String> entry : datasetFramework.getDatasetNameMapping().entrySet()) {
    final String localInstanceName = entry.getValue();
    final DatasetId instanceId = new DatasetId(workflowRunId.getNamespace(), localInstanceName);
    final DatasetCreationSpec instanceSpec = workflowSpec.getLocalDatasetSpecs().get(entry.getKey());

    LOG.debug("Adding Workflow local dataset instance: {}", localInstanceName);
    try {
      Retries.callWithRetries(new Retries.Callable<Void, Exception>() {
        @Override
        public Void call() throws Exception {
          DatasetProperties properties = addLocalDatasetProperty(instanceSpec.getProperties(),
                                                                 keepLocal(entry.getKey()));
          // The addInstance overload that takes a principal can only be used when app impersonation is enabled.
          if (principalId != null) {
            datasetFramework.addInstance(instanceSpec.getTypeName(), instanceId, properties, principalId);
          } else {
            datasetFramework.addInstance(instanceSpec.getTypeName(), instanceId, properties);
          }
          return null;
        }
      }, RetryStrategies.fixDelay(Constants.Retry.LOCAL_DATASET_OPERATION_RETRY_DELAY_SECONDS, TimeUnit.SECONDS));
    } catch (IOException | DatasetManagementException e) {
      throw e;
    } catch (Exception e) {
      // This should never happen.
      throw new IllegalStateException(e);
    }
  }
}
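On Java 8 the anonymous Retries.Callable could collapse to a lambda, assuming Retries.Callable is a single-method interface in this version of the codebase (an explicit type witness may be needed for inference):

// Same call with a lambda; purely a readability variant, behavior is unchanged.
Retries.callWithRetries(() -> {
  DatasetProperties properties = addLocalDatasetProperty(instanceSpec.getProperties(), keepLocal(entry.getKey()));
  if (principalId != null) {
    datasetFramework.addInstance(instanceSpec.getTypeName(), instanceId, properties, principalId);
  } else {
    datasetFramework.addInstance(instanceSpec.getTypeName(), instanceId, properties);
  }
  return null;
}, RetryStrategies.fixDelay(Constants.Retry.LOCAL_DATASET_OPERATION_RETRY_DELAY_SECONDS, TimeUnit.SECONDS));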