public ResultT getBlockingResult() {
  try {
    return getAsyncResult().get();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    cancel();
    throw Status.CANCELLED.withCause(e).asRuntimeException();
  } catch (ExecutionException e) {
    cancel();
    throw Status.fromThrowable(e).asRuntimeException();
  }
}
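// Illustrative usage only, not part of this class: a minimal sketch of how a caller might
// consume the blocking result and distinguish cancellation from other failures. The enclosing
// operation type name (AbstractRetryingOperation) and the helper below are assumptions not
// shown in these snippets; io.grpc.Status and io.grpc.StatusRuntimeException are assumed to be
// imported.
static <T> T getResultOrNull(AbstractRetryingOperation<?, ?, T> operation) {
  try {
    // Blocks until the underlying future completes, including any retries.
    return operation.getBlockingResult();
  } catch (StatusRuntimeException e) {
    if (e.getStatus().getCode() == Status.Code.CANCELLED) {
      // getBlockingResult() re-interrupted the thread and cancelled the RPC.
      return null;
    }
    throw e;
  }
}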
/** {@inheritDoc} */
@Override
public void onClose(Status status, Metadata trailers) {
  try (Scope scope = TRACER.withSpan(operationSpan)) {
    callWrapper.resetCall();
    rpcTimerContext.close();

    // The RPC completed successfully; let the subclass process the trailers.
    if (status.isOk()) {
      if (onOK(trailers)) {
        finalizeStats(status);
      }
    } else {
      onError(status, trailers);
    }
  } catch (Exception e) {
    setException(e);
  }
}
/**
 * Creates a {@link CallOptions} with a fail-safe RPC deadline to make sure that unary
 * operations don't hang. This will have to be overridden for streaming RPCs like read rows.
 *
 * <p>The logic is as follows:
 * <ol>
 *   <li>If the user provided a deadline, use that deadline.</li>
 *   <li>Else, if this is a streaming read, don't set an explicit deadline; the
 *       {@link com.google.cloud.bigtable.grpc.io.Watchdog} will handle hanging.</li>
 *   <li>Else, set a deadline of {@link #UNARY_DEADLINE_MINUTES} minutes.</li>
 * </ol>
 *
 * @see com.google.cloud.bigtable.grpc.io.Watchdog Watchdog, which handles hanging for streaming
 *     reads.
 *
 * @return a {@link CallOptions}
 */
protected CallOptions getRpcCallOptions() {
  if (callOptions.getDeadline() != null || isStreamingRead()) {
    // If the user set a deadline, honor it. If this is a streaming read, the Watchdog will
    // take effect and ensure that the call does not hang.
    return getOperationCallOptions();
  } else {
    // Unary calls should fail after 6 minutes if there is no response from the server.
    return callOptions.withDeadlineAfter(UNARY_DEADLINE_MINUTES, TimeUnit.MINUTES);
  }
}
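// Worked illustration of the fall-through above (hypothetical helper, not part of the class):
// only a unary call without a user-supplied deadline gets the fail-safe deadline; user deadlines
// are honored as-is and streaming reads are left to the Watchdog.
static boolean needsFailSafeDeadline(CallOptions options, boolean streamingRead) {
  return options.getDeadline() == null && !streamingRead;
}
// e.g. needsFailSafeDeadline(CallOptions.DEFAULT, false)                                  -> true  (6-minute fail safe)
//      needsFailSafeDeadline(CallOptions.DEFAULT.withDeadlineAfter(30, TimeUnit.SECONDS),
//          false)                                                                         -> false (user deadline honored)
//      needsFailSafeDeadline(CallOptions.DEFAULT, true)                                   -> false (Watchdog guards hangs)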
private void onError(Status status, Metadata trailers) {
  Code code = status.getCode();

  // An explicit user cancellation is not considered a failure.
  if (code == Code.CANCELLED) {
    setException(status.asRuntimeException());
    finalizeStats(status);
    return;
  }

  String channelId = ChannelPool.extractIdentifier(trailers);

  // Non-retryable case: retries are disabled, the status code is not retryable, or the request
  // itself cannot be safely retried.
  if (!retryOptions.enableRetries()
      || !retryOptions.isRetryable(code)
      // UNAUTHENTICATED and UNAVAILABLE mean the request never reached the server, so it can
      // be retried even if the request itself is not idempotent.
      || !(isRequestRetryable() || code == Code.UNAUTHENTICATED || code == Code.UNAVAILABLE)) {
    LOG.error("Could not complete RPC. Failure #%d, got: %s on channel %s.\nTrailers: %s",
        status.getCause(), failedCount, status, channelId, trailers);
    rpc.getRpcMetrics().markFailure();
    finalizeStats(status);
    setException(status.asRuntimeException());
    return;
  }

  // Attempt a retry with backoff.
  Long nextBackOff = getNextBackoff();
  failedCount += 1;

  if (nextBackOff == null) {
    // The retry budget is exhausted.
    LOG.error("All retries were exhausted. Failure #%d, got: %s on channel %s.\nTrailers: %s",
        status.getCause(), failedCount, status, channelId, trailers);
    setException(getExhaustedRetriesException(status));
  } else {
    LOG.warn("Retrying failed call. Failure #%d, got: %s on channel %s.\nTrailers: %s",
        status.getCause(), failedCount, status, channelId, trailers);
    performRetry(nextBackOff);
  }
}
/**
 * Calls {@link BigtableAsyncRpc#newCall(CallOptions)} and
 * {@link BigtableAsyncRpc#start(Object, io.grpc.ClientCall.Listener, Metadata, ClientCall)}
 * with this as the listener so that retries happen correctly.
 */
protected void run() {
  try (Scope scope = TRACER.withSpan(operationSpan)) {
    rpcTimerContext = rpc.getRpcMetrics().timeRpc();
    operationSpan.addAnnotation(Annotation.fromDescriptionAndAttributes("rpcStart",
        ImmutableMap.of("attempt", AttributeValue.longAttributeValue(failedCount))));
    Metadata metadata = new Metadata();
    metadata.merge(originalMetadata);
    callWrapper.setCallAndStart(rpc, getRpcCallOptions(), getRetryRequest(), this, metadata);
  } catch (Exception e) {
    setException(e);
  }
}
this.originalMetadata = originalMetadata;
this.completionFuture = new GrpcFuture<>();
String spanName = makeSpanName("Operation", rpc.getMethodDescriptor().getFullMethodName());
this.operationSpan = TRACER.spanBuilder(spanName).setRecordEvents(true).startSpan();
this.clock = clock;
this.exponentialRetryAlgorithm = createRetryAlgorithm(clock);
this.callWrapper = new CallController<>();
/**
 * Creates the {@link ExponentialRetryAlgorithm} that drives the retry backoff for this
 * operation.
 *
 * @return an {@link ExponentialRetryAlgorithm} object.
 */
private ExponentialRetryAlgorithm createRetryAlgorithm(ApiClock clock) {
  long timeoutMs = retryOptions.getMaxElapsedBackoffMillis();
  Deadline deadline = getOperationCallOptions().getDeadline();
  if (deadline != null) {
    timeoutMs = deadline.timeRemaining(TimeUnit.MILLISECONDS);
  }
  RetrySettings retrySettings = RetrySettings.newBuilder()
      .setJittered(true)
      // How long should the sleep be between an RPC failure and the next retry?
      .setInitialRetryDelay(toDuration(retryOptions.getInitialBackoffMillis()))
      // How fast should the retry delay increase?
      .setRetryDelayMultiplier(retryOptions.getBackoffMultiplier())
      // What is the maximum amount of sleep time between retries? There needs to be some
      // sane cap on the retry delay; one minute was chosen in the absence of a better number.
      .setMaxRetryDelay(Duration.of(1, ChronoUnit.MINUTES))
      // How long should we keep retrying after the first failure before giving up?
      .setTotalTimeout(toDuration(timeoutMs))
      .build();
  return new ExponentialRetryAlgorithm(retrySettings, clock);
}
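// Worked example (illustrative numbers, not values read from RetryOptions): with
// initialRetryDelay = 5 ms, retryDelayMultiplier = 2.0 and maxRetryDelay = 1 minute, the
// un-jittered sleeps grow 5 ms, 10 ms, 20 ms, 40 ms, ... until they hit the 60 000 ms cap;
// setJittered(true) then randomizes each sleep below that ceiling. Retries stop once the
// elapsed time exceeds totalTimeout, i.e. maxElapsedBackoffMillis or, when a deadline is set,
// the time remaining on that deadline.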
@Override
protected void finalizeStats(Status status) {
  // Add an annotation for the total number of rows that were returned across all responses.
  operationSpan.addAnnotation("Total Rows Processed",
      ImmutableMap.of("rowCount", AttributeValue.longAttributeValue(totalRowsProcessed)));
  super.finalizeStats(status);
}
/**
 * Cancels the RPC.
 */
public void cancel() {
  cancel("User requested cancellation.");
}
protected boolean isRequestRetryable() {
  return rpc.isRetryable(getRetryRequest());
}
protected void performRetry(long nextBackOff) {
  operationSpan.addAnnotation("retryWithBackoff",
      ImmutableMap.of("backoff", AttributeValue.longAttributeValue(nextBackOff)));
  rpc.getRpcMetrics().markRetry();
  retryExecutorService.schedule(getRunnable(), nextBackOff, TimeUnit.MILLISECONDS);
}
protected BigtableRetriesExhaustedException getExhaustedRetriesException(Status status) {
  operationSpan.addAnnotation("exhaustedRetries");
  rpc.getRpcMetrics().markRetriesExhasted();
  finalizeStats(status);
  String message = String.format("Exhausted retries after %d failures.", failedCount);
  return new BigtableRetriesExhaustedException(message, status.asRuntimeException());
}