/**
 * Interrupted callback: records the InterruptedException on the deferred
 * exception collector so it is reported later rather than lost.
 *
 * NOTE(review): does not restore the interrupt flag, unlike the
 * waitForCompletion() handlers elsewhere in this file — presumably the
 * interrupt occurred on a different thread; confirm against the listener
 * contract.
 */
@Override
public void interrupted(final InterruptedException e) {
  exception.addException(e);
} }
/**
 * Records an RPC failure on the shared deferred-exception collector instead of
 * throwing, so the failure surfaces when the collector is drained.
 */
@Override
public void failed(final RpcException failure) {
  exception.addException(failure);
}
/**
 * Funnels an arbitrary Throwable into the Exception-based collector:
 * Exceptions are recorded as-is, while Errors and other Throwables are
 * wrapped in a RuntimeException first.
 *
 * @param throwable the throwable to record; must not be null
 */
public void addThrowable(final Throwable throwable) {
  Preconditions.checkNotNull(throwable);
  final Exception recorded = (throwable instanceof Exception)
      ? (Exception) throwable
      : new RuntimeException(throwable);
  addException(recorded);
}
/**
 * Blocks until the completion latch is released. If the wait is interrupted,
 * the interrupt status is restored and the InterruptedException is recorded
 * on the deferred exception collector instead of being thrown.
 */
@Override
public void waitForCompletion() {
  try {
    completionLatch.await();
  } catch (InterruptedException ex) {
    // Restore the interrupt flag and defer the exception rather than throwing.
    Thread.currentThread().interrupt();
    exception.addException(ex);
  }
}
/**
 * Closes the given AutoCloseable while suppressing anything it throws; a
 * thrown exception is recorded via {@link #addException(Exception)} instead
 * of propagating to the caller.
 *
 * @param autoCloseable the resource to close; a null argument is tolerated
 */
public void suppressingClose(final AutoCloseable autoCloseable) {
  // Complain about use-after-close regardless of whether a resource was
  // actually passed, so convention violations are caught even for nulls.
  Preconditions.checkState(!isClosed);
  if (autoCloseable != null) {
    try {
      autoCloseable.close();
    } catch (final Exception e) {
      addException(e);
    }
  }
}
/**
 * Blocks until the completion latch is released, then rethrows any deferred
 * failure (without clearing it) as a runtime exception.
 */
@Override
public void waitForCompletion() {
  try {
    completionLatch.await();
  } catch (InterruptedException ex) {
    // Restore the interrupt flag and defer the exception rather than throwing.
    Thread.currentThread().interrupt();
    exception.addException(ex);
  }
  // Surface any accumulated failure to the caller.
  exception.throwNoClearRuntime();
}
@Override public void success(Ack value, ByteBuf buffer) { if (value.getOk()) { return; } logger.error("Data not accepted downstream. Stopping future sends."); // if we didn't get ack ok, we'll need to kill the query. exception.addException(new RpcException("Data not accepted downstream.")); }
/**
 * Records the job failure on the deferred-exception holder and releases the
 * completion latch so any waiter wakes up.
 */
@Override
public void jobFailed(Exception cause) {
  ex.addException(cause);
  latch.countDown();
}
/**
 * Loads a window of job results, waiting for the job to complete first.
 * Rethrows any deferred failure before touching the results store.
 */
@Override
public RecordBatches load(int offset, int limit) {
  try {
    completionLatch.await();
  } catch (InterruptedException ex) {
    // Restore interrupt status and defer the exception.
    Thread.currentThread().interrupt();
    exception.addException(ex);
  }
  // Surface any deferred failure (without clearing it) before loading data.
  exception.throwNoClearRuntime();
  return jobResultsStore.loadJobData(id, store.get(id), offset, limit);
}
/**
 * Records a job cancellation as a failure and releases the completion latch.
 *
 * @param reason human-readable cancellation reason reported by the engine
 */
@Override
public void jobCancelled(String reason) {
  // Include the reason in the message; the original dropped it, discarding
  // the only diagnostic information this callback receives.
  ex.addException(new RuntimeException("Job cancelled. Reason: " + reason));
  latch.countDown();
}
/**
 * Deletes the contents of every cached Lucene index except those whose names
 * appear in {@code skipNames}. Per-index failures are accumulated so one bad
 * index does not stop the sweep; they are rethrown together at the end.
 *
 * @param skipNames index names to leave untouched
 * @throws IOException if any index deletion failed
 */
void deleteEverything(Set<String> skipNames) throws IOException {
  final DeferredException deleteException = new DeferredException();
  for (Entry<String, LuceneSearchIndex> index : indexes.asMap().entrySet()) {
    if (!skipNames.contains(index.getKey())) {
      try {
        index.getValue().deleteEverything();
      } catch (IOException e) {
        // Best-effort: keep sweeping the remaining indices.
        deleteException.addException(e);
      }
    }
  }
  try {
    deleteException.close();
  } catch (IOException ex) {
    throw ex;
  } catch (Exception ex) {
    // Fixed typo in the original message ("indeices" -> "indices").
    throw new IOException("Failure deleting indices.", ex);
  }
}
/**
 * Starts execution of the given plan. A ForemanException from fragment
 * startup is deferred; the finally-block close() then rethrows whatever has
 * been accumulated (startup failures included).
 *
 * @throws Exception if fragment startup failed or a deferred failure exists
 */
public void start(ExecutionPlan plan, AttemptObserver observer) throws Exception {
  try {
    startFragments(plan, observer);
  } catch (ForemanException ex) {
    exception.addException(ex);
  } finally {
    exception.close();
  }
}
/**
 * Handles a fragment-submission RPC failure. Intermediate fragments (latch
 * present) record the failure and release the latch; leaf fragments defer an
 * exception to be delivered once the AttemptManager is ready.
 */
@Override
public void failed(final RpcException ex) {
  if (latch != null) { // this block only applies to intermediate fragments
    fragmentSubmitFailures.addFailure(endpoint, ex);
    latch.countDown();
  } else { // this block only applies to leaf fragments
    // since this won't be waited on, we can wait to deliver this event once the AttemptManager is ready
    exception.addException(new RpcException(String.format("Failure sending leaf fragment to %s:%d.", endpoint.getAddress(), endpoint.getFabricPort()), ex));
  }
}
/**
 * Completion callback: records any failure carried by the result, then
 * releases the latch so waiters proceed regardless of outcome.
 */
@Override
public void execCompletion(UserResult result) {
  if (result.hasException()) {
    exception.addException(result.getException());
  }
  latch.countDown();
}
/**
 * Closes this store exactly once: unregisters its metrics, releases open
 * iterators, flushes the RocksDB column family, and closes its handle.
 * Flush failures are deferred on the collector rather than thrown.
 */
@Override
public void close() throws IOException {
  // compareAndSet guarantees the close work runs at most once.
  if (!closed.compareAndSet(false, true)) {
    return;
  }
  if (COLLECT_METRICS) {
    MetricUtils.removeAllMetricsThatStartWith(MetricRegistry.name(METRICS_PREFIX, name));
  }
  exclusively((deferred) -> {
    deleteAllIterators(deferred);
    // Synchronous flush so data is durable before the handle is closed.
    try (FlushOptions options = new FlushOptions()) {
      options.setWaitForFlush(true);
      db.flush(options, handle);
    } catch (RocksDBException ex) {
      deferred.addException(ex);
    }
    deferred.suppressingClose(handle);
  });
}
/**
 * Data-arrival callback that discards the batch: every buffer is released,
 * any release failure is deferred, and the sender is always acked so it does
 * not stall waiting on us.
 */
@Override
public void execDataArrived(RpcOutcomeListener<Ack> outcomeListener, QueryWritableBatch result) {
  try {
    // Lambda replaces the original anonymous Function class; the file already
    // uses Java 8 lambdas elsewhere.
    AutoCloseables.close(
        FluentIterable.of(result.getBuffers())
            .transform(input -> (AutoCloseable) new CloseableByteBuf(input))
            .toList());
  } catch (Exception e) {
    exception.addException(e);
  }
  outcomeListener.success(Acks.OK, null);
} };
/**
 * Data-arrival callback that discards the batch: every buffer is released,
 * any release failure is deferred, and the sender is always acked so it does
 * not stall waiting on us.
 */
@Override
public void execDataArrived(RpcOutcomeListener<Ack> outcomeListener, QueryWritableBatch result) {
  try {
    // Lambda replaces the original anonymous Function class; the file already
    // uses Java 8 lambdas elsewhere.
    AutoCloseables.close(
        FluentIterable.of(result.getBuffers())
            .transform(input -> (AutoCloseable) new CloseableByteBuf(input))
            .toList());
  } catch (Exception e) {
    exception.addException(e);
  }
  outcomeListener.success(Acks.OK, null);
}
/**
 * Runs the parameterized test for every data row, wrapping each failure with
 * the row that caused it. In forgiving mode failures are deferred and thrown
 * together at the end; otherwise the first failure propagates immediately.
 */
@Override
public void test() throws Exception {
  for (final String[] data : datum) {
    final String query = buildQuery(data);
    final E expected = buildResult(data);
    try {
      doTest(query, expected);
    } catch (final Exception | Error ex) {
      // Attach the offending row to the failure for diagnosis.
      final ParameterizedTestFailure failure = new ParameterizedTestFailure(Joiner.on(" -- ").join(data), ex);
      if (!forgiving) {
        throw failure;
      }
      exception.addException(failure);
    }
  }
  // Throw (and clear) everything deferred in forgiving mode.
  exception.throwAndClear();
} }
/**
 * Planning-complete callback: captures the output batch schema (deferring any
 * failure), records acceleration details, transitions the attempt to
 * STARTING, persists the job, notifies external listeners, and finalizes
 * metadata.
 */
@Override
public void planCompleted(final ExecutionPlan plan) {
  if (plan != null) {
    try {
      builder.addBatchSchema(RootSchemaFinder.getSchema(plan.getRootOperator(), contextProvider.get().getFunctionImplementationRegistry()));
    } catch (Exception e) {
      // Schema capture is best-effort; defer rather than fail the attempt.
      exception.addException(e);
    }
  }
  job.getJobAttempt().setAccelerationDetails(
      ByteString.copyFrom(detailsPopulator.computeAcceleration()));
  job.getJobAttempt().setState(STARTING);
  storeJob(job);
  if (externalListenerManager != null) {
    externalListenerManager.queryUpdate(job);
  }
  // plan is parallelized after physical planning is done so we need to finalize metadata here
  finalizeMetadata();
}
/**
 * Attempt-completion callback: for COMPLETED queries, records acceleration
 * details and join analysis on the job; always records the attempt outcome.
 * IOExceptions from persisting the attempt are deferred on the collector.
 */
@Override
public void attemptCompletion(UserResult result) {
  try {
    final QueryState queryState = result.getState();
    if (queryState == QueryState.COMPLETED) {
      detailsPopulator.attemptCompleted(result.getProfile());
      JoinAnalyzer joinAnalyzer = new JoinAnalyzer(result.getProfile(), detailsPopulator.getFinalPrel());
      JoinAnalysis joinAnalysis = joinAnalyzer.computeJoinAnalysis();
      // Only attach join analysis when the analyzer produced one.
      if (joinAnalysis != null) {
        job.getJobAttempt().getInfo().setJoinAnalysis(joinAnalysis);
      }
    }
    addAttemptToJob(job, queryState, result.getProfile());
  } catch (IOException e) {
    exception.addException(e);
  }
}