static <R> CheckedSupplier<R> checked(Supplier<R> supplier) {
    return () -> {
        try {
            return supplier.get();
        } catch (RuntimeException e) {
            throw new FlinkException(e);
        }
    };
}

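A minimal usage sketch for the adapter above; the CheckedSupplier definition and the registry.lookup(...) call below are hypothetical stand-ins for illustration, not actual Flink types or APIs.

// Hypothetical stand-in for the CheckedSupplier used above (assumption, not Flink's definition).
@FunctionalInterface
interface CheckedSupplier<R> {
    R get() throws FlinkException;
}

// The wrapper turns a RuntimeException from the plain Supplier into a checked FlinkException.
CheckedSupplier<String> safeLookup = checked(() -> registry.lookup("jobId")); // registry is made up
try {
    String result = safeLookup.get();
} catch (FlinkException e) {
    LOG.warn("Lookup failed", e);
}
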
static FlinkException createAndLogException(PyException pe, Logger log) {
    StringWriter sw = new StringWriter();
    try (PrintWriter pw = new PrintWriter(sw)) {
        pe.printStackTrace(pw);
    }
    String pythonStackTrace = sw.toString().trim();

    log.error("Python function failed: " + System.lineSeparator() + pythonStackTrace);
    return new FlinkException("Python function failed: " + pythonStackTrace);
}

private static <R> OptionalFailure<R> wrapUnchecked(String name, Supplier<R> supplier) {
    return OptionalFailure.createFrom(() -> {
        try {
            return supplier.get();
        } catch (RuntimeException ex) {
            LOG.error("Unexpected error while handling accumulator [" + name + "]", ex);
            throw new FlinkException(ex);
        }
    });
}

private PackagedProgram createPackagedProgram() throws FlinkException {
    try {
        final Class<?> mainClass = getClass().getClassLoader().loadClass(jobClassName);
        return new PackagedProgram(mainClass, programArguments);
    } catch (ClassNotFoundException | ProgramInvocationException e) {
        throw new FlinkException("Could not load the provided entrypoint class.", e);
    }
}

/**
 * Returns the stored value if present.
 *
 * @return the stored value
 * @throws FlinkException wrapping the {@code failureCause} if this represents a failure
 */
public T get() throws FlinkException {
    if (value != null) {
        return value;
    }
    checkNotNull(failureCause);
    throw new FlinkException(failureCause);
}

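A short sketch of how a caller might consume this get(), reusing the wrapUnchecked helper shown earlier; the accumulator object and the "numRecords" name are made up for illustration.

// Illustrative only: wrapUnchecked(...) is the helper above; accumulator and "numRecords" are assumptions.
OptionalFailure<Long> maybeCount = wrapUnchecked("numRecords", () -> accumulator.getLocalValue());
try {
    long count = maybeCount.get(); // returns the value, or rethrows the recorded failure as FlinkException
    LOG.info("Accumulator value: {}", count);
} catch (FlinkException e) {
    LOG.error("Accumulator could not be read", e);
}
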
default void close() throws Exception {
    try {
        closeAsync().get();
    } catch (ExecutionException e) {
        throw new FlinkException("Could not close resource.", ExceptionUtils.stripExecutionException(e));
    }
}

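The default close() above bridges the asynchronous shutdown into ordinary try-with-resources; the sketch below shows that pattern with a hypothetical resource type, not an actual Flink class.

// MyAsyncResource is assumed to implement the interface carrying the default close() above.
try (MyAsyncResource resource = new MyAsyncResource()) {
    resource.doWork(); // hypothetical work method
} catch (Exception e) {
    // A failed closeAsync() surfaces here as a FlinkException carrying the unwrapped cause.
    LOG.error("Resource could not be closed cleanly", e);
}
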
@Override
public void invoke() throws Exception {
    if (hasFailed.compareAndSet(false, true)) {
        throw new FlinkException("One time failure.");
    }
}

@Override
public void killCluster(ApplicationId applicationId) throws FlinkException {
    try {
        yarnClient.killApplication(applicationId);
        Utils.deleteApplicationFiles(Collections.singletonMap(
            YarnConfigKeys.FLINK_YARN_FILES,
            getYarnFilesDir(applicationId).toUri().toString()));
    } catch (YarnException | IOException e) {
        throw new FlinkException("Could not kill the Yarn Flink cluster with id " + applicationId + '.', e);
    }
}

@Override
public void triggerCheckpoint(long checkpointId) throws FlinkException {
    // TODO - we need to see how to derive those. We should probably not encode this in the
    // TODO - source's trigger message, but do a handshake in this task between the trigger
    // TODO - message from the master, and the source's trigger notification
    final CheckpointOptions checkpointOptions = CheckpointOptions.forCheckpointWithDefaultLocation();
    final long timestamp = System.currentTimeMillis();

    final CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, timestamp);

    try {
        SourceStreamTask.super.triggerCheckpoint(checkpointMetaData, checkpointOptions);
    } catch (RuntimeException | FlinkException e) {
        throw e;
    } catch (Exception e) {
        throw new FlinkException(e.getMessage(), e);
    }
}

@Override
public List<MesosWorkerStore.Worker> recoverWorkers() throws Exception {
    synchronized (startStopLock) {
        verifyIsRunning();

        List<Tuple2<RetrievableStateHandle<Worker>, String>> handles = workersInZooKeeper.getAllAndLock();

        if (handles.isEmpty()) {
            return Collections.emptyList();
        } else {
            List<MesosWorkerStore.Worker> workers = new ArrayList<>(handles.size());

            for (Tuple2<RetrievableStateHandle<Worker>, String> handle : handles) {
                final Worker worker;

                try {
                    worker = handle.f0.retrieveState();
                } catch (ClassNotFoundException cnfe) {
                    throw new FlinkException("Could not retrieve Mesos worker from state handle under " + handle.f1 +
                        ". This indicates that you are trying to recover from state written by an " +
                        "older Flink version which is not compatible. Try cleaning the state handle store.", cnfe);
                } catch (IOException ioe) {
                    throw new FlinkException("Could not retrieve Mesos worker from state handle under " + handle.f1 +
                        ". This indicates that the retrieved state handle is broken. Try cleaning " +
                        "the state handle store.", ioe);
                }

                workers.add(worker);
            }

            return workers;
        }
    }
}

throw new FlinkException("Stopping restore attempts for already cancelled task.", collectedException); throw new FlinkException("Could not restore " + logDescription + " from any of the " + restoreOptions.size() + " provided restore options.", collectedException);
@Test
public void testInvalidExceptionStripping() {
    final FlinkException expectedException = new FlinkException(new RuntimeException(new FlinkException("inner exception")));

    final Throwable strippedException = ExceptionUtils.stripException(expectedException, RuntimeException.class);

    assertThat(strippedException, is(equalTo(expectedException)));
}

/**
 * Creates a new ApplicationClient actor or returns an existing one. May start an ActorSystem.
 *
 * @return ActorRef of the ApplicationClient actor
 */
public ActorRef get() throws FlinkException {
    if (applicationClient == null) {
        // start application client
        LOG.info("Start application client.");

        final ActorSystem actorSystem;

        try {
            actorSystem = actorSystemLoader.get();
        } catch (FlinkException fle) {
            throw new FlinkException("Could not start the ClusterClient's ActorSystem.", fle);
        }

        try {
            applicationClient = actorSystem.actorOf(
                Props.create(
                    ApplicationClient.class,
                    flinkConfig,
                    highAvailabilityServices.getJobManagerLeaderRetriever(HighAvailabilityServices.DEFAULT_JOB_ID)),
                "applicationClient");
        } catch (Exception e) {
            throw new FlinkException("Could not start the ApplicationClient.", e);
        }
    }

    return applicationClient;
}

private Configuration applyYarnProperties(Configuration configuration) throws FlinkException {
    final Configuration effectiveConfiguration = new Configuration(configuration);

    // configure the default parallelism from YARN
    String propParallelism = yarnPropertiesFile.getProperty(YARN_PROPERTIES_PARALLELISM);
    if (propParallelism != null) { // maybe the property is not set
        try {
            int parallelism = Integer.parseInt(propParallelism);
            effectiveConfiguration.setInteger(CoreOptions.DEFAULT_PARALLELISM, parallelism);

            logAndSysout("YARN properties set default parallelism to " + parallelism);
        } catch (NumberFormatException e) {
            throw new FlinkException("Error while parsing the YARN properties: " +
                "Property " + YARN_PROPERTIES_PARALLELISM + " is not an integer.", e);
        }
    }

    // handle the YARN client's dynamic properties
    String dynamicPropertiesEncoded = yarnPropertiesFile.getProperty(YARN_PROPERTIES_DYNAMIC_PROPERTIES_STRING);
    Map<String, String> dynamicProperties = getDynamicProperties(dynamicPropertiesEncoded);

    for (Map.Entry<String, String> dynamicProperty : dynamicProperties.entrySet()) {
        effectiveConfiguration.setString(dynamicProperty.getKey(), dynamicProperty.getValue());
    }

    return effectiveConfiguration;
}

        timeout);
} catch (LeaderRetrievalException lre) {
    throw new FlinkException("Could not find out our own hostname by connecting to the " +
        "leading JobManager. Please make sure that the Flink cluster has been started.", lre);

        log);
} catch (Exception e) {
    throw new FlinkException("Could not start the ActorSystem lazily.", e);

/**
 * Returns the {@link ActorGateway} of the current job manager leader using
 * the {@link LeaderRetrievalService}.
 *
 * @return ActorGateway of the current job manager leader
 * @throws FlinkException if the leading JobManager could not be retrieved
 */
public ActorGateway getJobManagerGateway() throws FlinkException {
    log.debug("Looking up JobManager");

    try {
        return LeaderRetrievalUtils.retrieveLeaderGateway(
            highAvailabilityServices.getJobManagerLeaderRetriever(HighAvailabilityServices.DEFAULT_JOB_ID),
            actorSystemLoader.get(),
            lookupTimeout);
    } catch (LeaderRetrievalException lre) {
        throw new FlinkException("Could not connect to the leading JobManager. Please check that the " +
            "JobManager is running.", lre);
    }
}

throw new CompletionException(new FlinkException("Failed to fetch jar list.", e));
throw new FlinkException("Failed to retrieve job list.", cause);
/**
 * Validates the cluster specification before deploying it; throws a {@link FlinkException}
 * if the {@link ClusterSpecification} is invalid.
 *
 * @param clusterSpecification cluster specification to check against the configuration of the
 *                             AbstractYarnClusterDescriptor
 * @throws FlinkException if the cluster cannot be started with the provided {@link ClusterSpecification}
 */
private void validateClusterSpecification(ClusterSpecification clusterSpecification) throws FlinkException {
    try {
        final long taskManagerMemorySize = clusterSpecification.getTaskManagerMemoryMB();
        // We do the validation by calling the calculation methods here.
        // Internally these methods will check whether the cluster can be started with the provided
        // ClusterSpecification and the configured memory requirements.
        final long cutoff = ContaineredTaskManagerParameters.calculateCutoffMB(flinkConfiguration, taskManagerMemorySize);
        TaskManagerServices.calculateHeapSizeMB(taskManagerMemorySize - cutoff, flinkConfiguration);
    } catch (IllegalArgumentException iae) {
        throw new FlinkException("Cannot fulfill the minimum memory requirements with the provided " +
            "cluster specification. Please increase the memory of the cluster.", iae);
    }
}

@Test
public void testExceptionStripping() {
    final FlinkException expectedException = new FlinkException("test exception");

    final Throwable strippedException = ExceptionUtils.stripException(
        new RuntimeException(new RuntimeException(expectedException)), RuntimeException.class);

    assertThat(strippedException, is(equalTo(expectedException)));
}

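Read together with testInvalidExceptionStripping above, these two tests suggest that stripException(t, type) only peels off wrappers while the outermost throwable is itself an instance of the given type; a brief sketch of that reading (my interpretation of the tests, not library documentation):

FlinkException inner = new FlinkException("boom");

// Outer layers are RuntimeExceptions, so both are peeled off and the inner FlinkException is returned.
Throwable a = ExceptionUtils.stripException(new RuntimeException(new RuntimeException(inner)), RuntimeException.class);
// a == inner

// The outermost throwable is a FlinkException, not a RuntimeException, so nothing is stripped.
Throwable b = ExceptionUtils.stripException(new FlinkException(new RuntimeException(inner)), RuntimeException.class);
// b is the outer FlinkException, untouched
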