/** * FLINK-6641 * * <p>Tests that the {@link ClusterClient} does not clean up HA data when being shut down. */ @Test public void testClusterClientShutdown() throws Exception { Configuration config = new Configuration(); HighAvailabilityServices highAvailabilityServices = mock(HighAvailabilityServices.class); StandaloneClusterClient clusterClient = new StandaloneClusterClient(config, highAvailabilityServices, false); clusterClient.shutdown(); // check that the client does not clean up HA data but closes the services verify(highAvailabilityServices, never()).closeAndCleanupAllData(); verify(highAvailabilityServices).close(); }
StandaloneClusterClient client = new StandaloneClusterClient(config); client.setDetached(true); client.run(packagedProgramMock, 1);
/**
 * Verifies that passing a malformed job id to the savepoint command
 * is rejected with a {@link CliArgsException}.
 */
@Test
public void testTriggerSavepointFailureIllegalJobID() throws Exception {
	replaceStdOutAndStdErr();

	try {
		final StandaloneClusterClient clusterClient = new StandaloneClusterClient(
			getConfiguration(),
			new TestingHighAvailabilityServices(),
			false);
		final CliFrontend frontend = new MockedCliFrontend(clusterClient);

		final String[] parameters = { "invalid job id" };

		try {
			frontend.savepoint(parameters);
			fail("Should have failed.");
		} catch (CliArgsException e) {
			assertThat(e.getMessage(), Matchers.containsString("Cannot parse JobID"));
		}
	} finally {
		restoreStdOutAndStdErr();
	}
}
private static void testFailureBehavior(final InetSocketAddress unreachableEndpoint) throws Exception { final Configuration config = new Configuration(); config.setString(AkkaOptions.ASK_TIMEOUT, ASK_STARTUP_TIMEOUT + " ms"); config.setString(AkkaOptions.LOOKUP_TIMEOUT, CONNECT_TIMEOUT + " ms"); config.setString(JobManagerOptions.ADDRESS, unreachableEndpoint.getHostName()); config.setInteger(JobManagerOptions.PORT, unreachableEndpoint.getPort()); StandaloneClusterClient client = new StandaloneClusterClient(config); try { // we have to query the cluster status to start the connection attempts client.getClusterStatus(); fail("This should fail with an exception since the endpoint is unreachable."); } catch (Exception e) { // check that we have failed with a LeaderRetrievalException which says that we could // not connect to the leading JobManager assertTrue(CommonTestUtils.containsCause(e, LeaderRetrievalException.class)); } }
/**
 * This test verifies correct job submission messaging logic and plan translation calls.
 */
@Test
public void shouldSubmitToJobClient() throws Exception {
	jobManagerSystem.actorOf(
		Props.create(SuccessReturningActor.class),
		JobMaster.JOB_MANAGER_NAME);

	final StandaloneClusterClient client = new StandaloneClusterClient(config);
	client.setDetached(true);

	final JobSubmissionResult result = client.run(program.getPlanWithJars(), 1);
	assertNotNull(result);

	program.deleteExtractedLibraries();
}
/** * This test verifies correct that the correct exception is thrown when the job submission fails. */ @Test public void shouldSubmitToJobClientFails() throws Exception { jobManagerSystem.actorOf( Props.create(FailureReturningActor.class), JobMaster.JOB_MANAGER_NAME); StandaloneClusterClient out = new StandaloneClusterClient(config); out.setDetached(true); try { out.run(program.getPlanWithJars(), 1); fail("This should fail with an exception"); } catch (ProgramInvocationException e) { // bam! } catch (Exception e) { fail("wrong exception " + e); } }
Props.create(SuccessReturningActor.class), JobMaster.JOB_MANAGER_NAME); StandaloneClusterClient out = new StandaloneClusterClient(config); out.setDetached(true);
StandaloneClusterClient client = new StandaloneClusterClient(configuration, highAvailabilityServices, true);
/**
 * Creates a client for the configured standalone cluster; the application id
 * is ignored since a standalone cluster is addressed purely via the configuration.
 */
@Override
public StandaloneClusterClient retrieve(String applicationID) {
	try {
		return new StandaloneClusterClient(config);
	} catch (Exception e) {
		throw new RuntimeException("Couldn't retrieve standalone cluster", e);
	}
}
/**
 * Creates a client for the configured standalone cluster.
 *
 * @throws ClusterRetrieveException if the client cannot be constructed
 */
@Override
public StandaloneClusterClient retrieve(StandaloneClusterId standaloneClusterId) throws ClusterRetrieveException {
	try {
		return new StandaloneClusterClient(config);
	} catch (Exception e) {
		throw new ClusterRetrieveException("Couldn't retrieve standalone cluster", e);
	}
}
/**
 * Retrieves a {@link StandaloneClusterClient} for the cluster described by {@code config};
 * the cluster id carries no additional addressing information for standalone setups.
 */
@Override
public StandaloneClusterClient retrieve(StandaloneClusterId standaloneClusterId) throws ClusterRetrieveException {
	try {
		return new StandaloneClusterClient(config);
	} catch (Exception e) {
		throw new ClusterRetrieveException("Couldn't retrieve standalone cluster", e);
	}
}
/**
 * Lazily creates the cluster client; starting an already-started executor
 * is an error.
 */
@Override
public void start() throws Exception {
	synchronized (lock) {
		// guard clause: a second start() is a programming error
		if (client != null) {
			throw new IllegalStateException("The remote executor was already started.");
		}

		client = new StandaloneClusterClient(clientConfiguration);
		client.setPrintStatusDuringExecution(isPrintingStatusDuringExecution());
	}
}
/**
 * Lazily creates the cluster client, choosing the legacy Akka-based client or
 * the REST client depending on the configured execution mode.
 */
@Override
public void start() throws Exception {
	synchronized (lock) {
		// guard clause: a second start() is a programming error
		if (client != null) {
			throw new IllegalStateException("The remote executor was already started.");
		}

		final String mode = clientConfiguration.getString(CoreOptions.MODE);
		if (CoreOptions.LEGACY_MODE.equals(mode)) {
			client = new StandaloneClusterClient(clientConfiguration);
		} else {
			client = new RestClusterClient<>(clientConfiguration, "RemoteExecutor");
		}

		client.setPrintStatusDuringExecution(isPrintingStatusDuringExecution());
		client.setJobListeners(this.jobListeners);
	}
}
/**
 * Kills the Storm topology with the given name by stopping the corresponding Flink job.
 *
 * <p>If {@code options} specifies a wait time, this method sleeps for that many seconds
 * before stopping the job.
 *
 * @param name    name of the topology to kill
 * @param options optional kill options carrying the wait time; may be {@code null}
 * @throws NotAliveException if no running topology with the given name is found
 * @throws RuntimeException  if the wait is interrupted, the job manager cannot be
 *                           reached, or the job cannot be stopped
 */
public void killTopologyWithOpts(final String name, final KillOptions options) throws NotAliveException {
	final JobID jobId = this.getTopologyJobId(name);
	if (jobId == null) {
		throw new NotAliveException("Storm topology with name " + name + " not found.");
	}

	if (options != null) {
		try {
			// use a long literal so the seconds-to-millis conversion cannot overflow int
			Thread.sleep(1000L * options.get_wait_secs());
		} catch (final InterruptedException e) {
			// restore the interrupt flag so callers further up the stack can observe it
			Thread.currentThread().interrupt();
			throw new RuntimeException(e);
		}
	}

	final Configuration configuration = GlobalConfiguration.loadConfiguration();
	configuration.setString(JobManagerOptions.ADDRESS, this.jobManagerHost);
	configuration.setInteger(JobManagerOptions.PORT, this.jobManagerPort);

	final StandaloneClusterClient client;
	try {
		client = new StandaloneClusterClient(configuration);
	} catch (final Exception e) {
		throw new RuntimeException("Could not establish a connection to the job manager", e);
	}

	try {
		client.stop(jobId);
	} catch (final Exception e) {
		throw new RuntimeException("Cannot stop job.", e);
	}
}
client = new StandaloneClusterClient(configuration); client.setPrintStatusDuringExecution(getConfig().isSysoutLoggingEnabled());
client = new StandaloneClusterClient(configuration); } catch (final Exception e) { throw new RuntimeException("Could not establish a connection to the job manager", e);
client = new StandaloneClusterClient(configuration); } else { client = new RestClusterClient<>(configuration, "RemoteStreamEnvironment");
private void startLegacyMiniCluster() throws Exception { final Configuration configuration = new Configuration(miniClusterResourceConfiguration.getConfiguration()); configuration.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, miniClusterResourceConfiguration.getNumberTaskManagers()); configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS, miniClusterResourceConfiguration.getNumberSlotsPerTaskManager()); configuration.setString(CoreOptions.TMP_DIRS, temporaryFolder.newFolder().getAbsolutePath()); final LocalFlinkMiniCluster flinkMiniCluster = TestBaseUtils.startCluster( configuration, !enableClusterClient); // the cluster client only works if separate actor systems are used jobExecutorService = flinkMiniCluster; if (enableClusterClient) { clusterClient = new StandaloneClusterClient(configuration, flinkMiniCluster.highAvailabilityServices(), true); } Configuration restClientConfig = new Configuration(); restClientConfig.setInteger(JobManagerOptions.PORT, flinkMiniCluster.getLeaderRPCPort()); this.restClusterClientConfig = new UnmodifiableConfiguration(restClientConfig); if (flinkMiniCluster.webMonitor().isDefined()) { webUIPort = flinkMiniCluster.webMonitor().get().getServerPort(); } }