/**
 * Closes all {@link AutoCloseable} objects in the parameter, suppressing exceptions. Exception
 * will be emitted after calling close() on every object.
 *
 * @param closeables iterable with closeables to close.
 * @throws Exception collected exceptions that occurred during closing
 */
public static void closeAll(Iterable<? extends AutoCloseable> closeables) throws Exception {
	if (null != closeables) {

		Exception collectedExceptions = null;

		for (AutoCloseable closeable : closeables) {
			try {
				if (null != closeable) {
					closeable.close();
				}
			} catch (Exception e) {
				// BUGFIX: the arguments were swapped — firstOrSuppressed(collectedExceptions, e)
				// passes null as the "new" exception on the first failure, so the accumulator
				// attempts addSuppressed(null) (an NPE per the Throwable.addSuppressed contract)
				// and the real close() error is lost. The correct order, matching every other
				// call site in this file, is (newException, previous).
				collectedExceptions = ExceptionUtils.firstOrSuppressed(e, collectedExceptions);
			}
		}

		if (null != collectedExceptions) {
			throw collectedExceptions;
		}
	}
}
/**
 * Disposes the parent and then stops this instance's own resources, collecting
 * failures from both steps and rethrowing the first one with the rest suppressed.
 * The interrupt flag is restored whenever an {@link InterruptedException} is caught.
 */
@Override
public void dispose() throws Exception {
	Exception firstError = null;

	// Dispose the parent first; remember (but do not yet rethrow) any failure.
	try {
		super.dispose();
	} catch (InterruptedException interrupted) {
		Thread.currentThread().interrupt();
		firstError = interrupted;
	} catch (Exception e) {
		firstError = e;
	}

	// Stop our own resources even if parent disposal failed.
	try {
		stopResources(false);
	} catch (InterruptedException interrupted) {
		Thread.currentThread().interrupt();
		firstError = ExceptionUtils.firstOrSuppressed(interrupted, firstError);
	} catch (Exception e) {
		firstError = ExceptionUtils.firstOrSuppressed(e, firstError);
	}

	if (firstError != null) {
		throw firstError;
	}
}
/**
 * This method supplies all elements from the input to the consumer. Exceptions that happen on
 * elements are suppressed until all elements are processed. If exceptions happened for one or
 * more of the inputs, they are reported in a combining suppressed exception.
 *
 * @param inputs iterator for all inputs to the throwingConsumer.
 * @param throwingConsumer this consumer will be called for all elements delivered by the input iterator.
 * @param <T> the type of input.
 * @throws Exception collected exceptions that happened during the invocation of the consumer on the input elements.
 */
public static <T> void applyToAllWhileSuppressingExceptions(
	Iterable<T> inputs,
	ThrowingConsumer<T, ? extends Exception> throwingConsumer) throws Exception {

	// Nothing to do when either argument is missing.
	if (inputs == null || throwingConsumer == null) {
		return;
	}

	Exception collected = null;

	for (T element : inputs) {
		// Null elements are skipped, matching the contract of ignoring absent inputs.
		if (element == null) {
			continue;
		}
		try {
			throwingConsumer.accept(element);
		} catch (Exception ex) {
			collected = ExceptionUtils.firstOrSuppressed(ex, collected);
		}
	}

	if (collected != null) {
		throw collected;
	}
}
/**
 * Shuts down the actor system and the artifact server, attempting both even if
 * the first fails; any collected failure is rethrown wrapped in a {@link FlinkException}.
 */
@Override
public void close(boolean cleanup) throws Exception {
	Throwable firstError = null;

	try {
		actorSystem.shutdown();
	} catch (Throwable t) {
		firstError = ExceptionUtils.firstOrSuppressed(t, firstError);
	}

	try {
		artifactServer.stop();
	} catch (Throwable t) {
		firstError = ExceptionUtils.firstOrSuppressed(t, firstError);
	}

	if (firstError != null) {
		throw new FlinkException("Could not properly shut down the Mesos services.", firstError);
	}
}
}
@Override public void close(boolean cleanup) throws Exception { Throwable exception = null; try { // this also closes the underlying CuratorFramework instance zooKeeperUtilityFactory.close(cleanup); } catch (Throwable t) { exception = ExceptionUtils.firstOrSuppressed(t, exception); } try { super.close(cleanup); } catch (Throwable t) { exception = ExceptionUtils.firstOrSuppressed(t, exception); } if (exception != null) { throw new FlinkException("Could not properly shut down the Mesos services.", exception); } } }
exception = ExceptionUtils.firstOrSuppressed(interrupted, exception); exception = ExceptionUtils.firstOrSuppressed(e, exception);
reportedException = ExceptionUtils.firstOrSuppressed(ioe, reportedException); } catch (InterruptedException e) { Thread.currentThread().interrupt(); reportedException = ExceptionUtils.firstOrSuppressed(e, reportedException);
/**
 * Closes the blob store service and the Hadoop file system exactly once.
 * Failures from both close calls are collected; the first one is rethrown
 * (with the other suppressed) after both resources have been attempted.
 */
@Override
public void close() throws Exception {
	lock.lock();
	try {
		// close only once
		if (closed) {
			return;
		}
		closed = true;

		Throwable exception = null;

		try {
			blobStoreService.close();
		} catch (Throwable t) {
			exception = t;
		}

		// NOTE(review): a failure here IS collected and rethrown below together with
		// any blob store failure — the previous comment claiming exceptions are only
		// logged was stale and did not match the code.
		try {
			hadoopFileSystem.close();
		} catch (Throwable t) {
			exception = ExceptionUtils.firstOrSuppressed(t, exception);
		}

		if (exception != null) {
			ExceptionUtils.rethrowException(exception, "Could not properly close the YarnHighAvailabilityServices.");
		}
	} finally {
		lock.unlock();
	}
}
@Override protected void internalDeregisterApplication( ApplicationStatus finalStatus, @Nullable String diagnostics) throws ResourceManagerException { LOG.info("Shutting down and unregistering as a Mesos framework."); Exception exception = null; try { // unregister the framework, which implicitly removes all tasks. schedulerDriver.stop(false); } catch (Exception ex) { exception = new Exception("Could not unregister the Mesos framework.", ex); } try { workerStore.stop(true); } catch (Exception ex) { exception = ExceptionUtils.firstOrSuppressed( new Exception("Could not stop the Mesos worker store.", ex), exception); } if (exception != null) { throw new ResourceManagerException("Could not properly shut down the Mesos application.", exception); } }
private void cleanup() throws Exception { LOG.debug( "Cleanup AsyncCheckpointRunnable for checkpoint {} of {}.", checkpointMetaData.getCheckpointId(), owner.getName()); Exception exception = null; // clean up ongoing operator snapshot results and non partitioned state handles for (OperatorSnapshotFutures operatorSnapshotResult : operatorSnapshotsInProgress.values()) { if (operatorSnapshotResult != null) { try { operatorSnapshotResult.cancel(); } catch (Exception cancelException) { exception = ExceptionUtils.firstOrSuppressed(cancelException, exception); } } } if (null != exception) { throw exception; } }
exception = ExceptionUtils.firstOrSuppressed(t, exception); exception = firstOrSuppressed(t, exception);
StateUtil.discardStateFuture(getOperatorStateManagedFuture()); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed( new Exception("Could not properly cancel managed operator state future.", e), exception); StateUtil.discardStateFuture(getKeyedStateRawFuture()); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed( new Exception("Could not properly cancel raw keyed state future.", e), exception); StateUtil.discardStateFuture(getOperatorStateRawFuture()); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed( new Exception("Could not properly cancel raw operator state future.", e), exception);
backendInstance.close(); } catch (IOException closeEx) { ex = ExceptionUtils.firstOrSuppressed(closeEx, ex); backendInstance.dispose(); } catch (Exception disposeEx) { ex = ExceptionUtils.firstOrSuppressed(disposeEx, ex);
/**
 * Closes this streamer: the socket, the sender and receiver, and the external process.
 * Each resource gets its own close attempt; collected failures are rethrown as the
 * first error with the rest suppressed.
 *
 * @throws IOException if closing any of the underlying resources fails
 */
public void close() throws IOException {
	Throwable throwable = null;

	// BUGFIX: the original closed socket, sender and receiver in one try block,
	// so a failure in socket.close() leaked sender and receiver. Close each in
	// its own try block and collect the failures instead.
	try {
		socket.close();
	} catch (Throwable t) {
		throwable = t;
	}
	try {
		sender.close();
	} catch (Throwable t) {
		throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
	}
	try {
		receiver.close();
	} catch (Throwable t) {
		throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
	}

	try {
		destroyProcess(process);
	} catch (Throwable t) {
		throwable = ExceptionUtils.firstOrSuppressed(t, throwable);
	}

	ShutdownHookUtil.removeShutdownHook(shutdownThread, getClass().getSimpleName(), LOG);

	// rethrows the collected error as IOException (or wraps it), if present
	ExceptionUtils.tryRethrowIOException(throwable);
}
FileUtils.deleteDirectory(directory); } catch (IOException delEx) { ex = ExceptionUtils.firstOrSuppressed(delEx, ex);
private void takeDBNativeCheckpoint(@Nonnull SnapshotDirectory outputDirectory) throws Exception { // create hard links of living files in the output path try ( ResourceGuard.Lease ignored = rocksDBResourceGuard.acquireResource(); Checkpoint checkpoint = Checkpoint.create(db)) { checkpoint.createCheckpoint(outputDirectory.getDirectory().getPath()); } catch (Exception ex) { try { outputDirectory.cleanup(); } catch (IOException cleanupEx) { ex = ExceptionUtils.firstOrSuppressed(cleanupEx, ex); } throw ex; } }
/**
 * Cancels the consumer, waits for the discovery loop to finish, then closes the
 * partition discoverer and the parent. Both close attempts run even if the first
 * fails; collected failures are rethrown with the rest suppressed.
 */
@Override
public void close() throws Exception {
	cancel();

	joinDiscoveryLoopThread();

	Exception failure = null;

	if (partitionDiscoverer != null) {
		try {
			partitionDiscoverer.close();
		} catch (Exception e) {
			failure = e;
		}
	}

	try {
		super.close();
	} catch (Exception e) {
		failure = ExceptionUtils.firstOrSuppressed(e, failure);
	}

	if (failure != null) {
		throw failure;
	}
}
@Override public void close() throws FlinkKafkaException { final FlinkKafkaProducer.KafkaTransactionState currentTransaction = currentTransaction(); if (currentTransaction != null) { // to avoid exceptions on aborting transactions with some pending records flush(currentTransaction); // normal abort for AT_LEAST_ONCE and NONE do not clean up resources because of producer reusing, thus // we need to close it manually switch (semantic) { case EXACTLY_ONCE: break; case AT_LEAST_ONCE: case NONE: currentTransaction.producer.close(); break; } } try { super.close(); } catch (Exception e) { asyncException = ExceptionUtils.firstOrSuppressed(e, asyncException); } // make sure we propagate pending errors checkErroneous(); pendingTransactions().forEach(transaction -> IOUtils.closeQuietly(transaction.getValue().producer) ); }
@Override public void close() throws FlinkKafka011Exception { final KafkaTransactionState currentTransaction = currentTransaction(); if (currentTransaction != null) { // to avoid exceptions on aborting transactions with some pending records flush(currentTransaction); // normal abort for AT_LEAST_ONCE and NONE do not clean up resources because of producer reusing, thus // we need to close it manually switch (semantic) { case EXACTLY_ONCE: break; case AT_LEAST_ONCE: case NONE: currentTransaction.producer.close(); break; } } try { super.close(); } catch (Exception e) { asyncException = ExceptionUtils.firstOrSuppressed(e, asyncException); } // make sure we propagate pending errors checkErroneous(); pendingTransactions().forEach(transaction -> IOUtils.closeQuietly(transaction.getValue().producer) ); }
@Override public CompletableFuture<Void> postStop() { // shut down all components Throwable firstException = null; if (resourceManagerClient != null) { try { resourceManagerClient.stop(); } catch (Throwable t) { firstException = t; } } if (nodeManagerClient != null) { try { nodeManagerClient.stop(); } catch (Throwable t) { firstException = ExceptionUtils.firstOrSuppressed(t, firstException); } } final CompletableFuture<Void> terminationFuture = super.postStop(); if (firstException != null) { return FutureUtils.completedExceptionally(new FlinkException("Error while shutting down YARN resource manager", firstException)); } else { return terminationFuture; } }