/**
 * Initial execution of the RPC.
 */
public ListenableFuture<ResultT> getAsyncResult() {
  Preconditions.checkState(operationTimerContext == null);
  operationTimerContext = rpc.getRpcMetrics().timeOperation();
  run();
  return completionFuture;
}
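/*
 * A minimal usage sketch, not part of this class: a caller would typically request the future
 * once and attach a callback via Guava's Futures. The names "operation" and "callbackExecutor"
 * are assumptions for illustration only.
 *
 *   ListenableFuture<ResultT> future = operation.getAsyncResult();
 *   Futures.addCallback(future, new FutureCallback<ResultT>() {
 *     @Override public void onSuccess(ResultT result) {
 *       // Consume the completed result.
 *     }
 *     @Override public void onFailure(Throwable t) {
 *       // Surface the RPC failure (e.g. BigtableRetriesExhaustedException).
 *     }
 *   }, callbackExecutor);
 */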
/**
 * Schedules the next attempt after the supplied backoff and records the retry in the
 * metrics and the operation span.
 */
protected void performRetry(long nextBackOff) {
  operationSpan.addAnnotation("retryWithBackoff",
      ImmutableMap.of("backoff", AttributeValue.longAttributeValue(nextBackOff)));
  rpc.getRpcMetrics().markRetry();
  retryExecutorService.schedule(getRunnable(), nextBackOff, TimeUnit.MILLISECONDS);
}
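/*
 * For illustration only: the schedule-and-retry pattern above, reduced to plain JDK types.
 * "RetrySketch" and its members are assumptions, not part of this client.
 *
 *   class RetrySketch {
 *     private final ScheduledExecutorService retryExecutorService =
 *         Executors.newSingleThreadScheduledExecutor();
 *
 *     void retryLater(Runnable attempt, long backoffMs) {
 *       // Re-run the attempt after the computed backoff instead of blocking a thread.
 *       retryExecutorService.schedule(attempt, backoffMs, TimeUnit.MILLISECONDS);
 *     }
 *   }
 */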
protected BigtableRetriesExhaustedException getExhaustedRetriesException(Status status) {
  operationSpan.addAnnotation("exhaustedRetries");
  rpc.getRpcMetrics().markRetriesExhasted();
  finalizeStats(status);
  String message = String.format("Exhausted retries after %d failures.", failedCount);
  return new BigtableRetriesExhaustedException(message, status.asRuntimeException());
}
/**
 * Calls {@link BigtableAsyncRpc#newCall(CallOptions)} and
 * {@link BigtableAsyncRpc#start(Object, io.grpc.ClientCall.Listener, Metadata, ClientCall)}
 * with this as the listener so that retries happen correctly.
 */
protected void run() {
  try (Scope scope = TRACER.withSpan(operationSpan)) {
    rpcTimerContext = rpc.getRpcMetrics().timeRpc();
    operationSpan.addAnnotation(Annotation.fromDescriptionAndAttributes("rpcStart",
        ImmutableMap.of("attempt", AttributeValue.longAttributeValue(failedCount))));
    Metadata metadata = new Metadata();
    metadata.merge(originalMetadata);
    callWrapper.setCallAndStart(rpc, getRpcCallOptions(), getRetryRequest(), this, metadata);
  } catch (Exception e) {
    setException(e);
  }
}
@Override
protected boolean onOK(Metadata trailers) {
  ProcessingStatus status = requestManager.onOK();

  if (status == ProcessingStatus.INVALID) {
    // There was a problem with the data found in onMessage(), so fail the RPC with an exception.
    onError(INVALID_RESPONSE, trailers);
    return true;
  }

  if (status == ProcessingStatus.SUCCESS || status == ProcessingStatus.NOT_RETRYABLE) {
    // Set the response for both successful and non-retryable outcomes.
    completionFuture.set(Arrays.asList(requestManager.buildResponse()));
    return true;
  }

  // Perform a partial retry, if the backoff policy allows it.
  Long nextBackOff = getNextBackoff();
  if (nextBackOff == null) {
    // Retries are exhausted; return the response as is and don't retry.
    rpc.getRpcMetrics().markRetriesExhasted();
    completionFuture.set(Arrays.asList(requestManager.buildResponse()));
    operationSpan.addAnnotation("MutationCount", ImmutableMap.of("failureCount",
        AttributeValue.longAttributeValue(requestManager.getRetryRequest().getEntriesCount())));
    return true;
  }

  performRetry(nextBackOff);
  operationSpan.addAnnotation("MutationCount", ImmutableMap.of("retryCount",
      AttributeValue.longAttributeValue(requestManager.getRetryRequest().getEntriesCount())));
  return false;
}
/**
 * Special retry handling for watchdog timeouts, which uses its own fail counter.
 */
private void handleTimeoutError(Status status) {
  Preconditions.checkArgument(status.getCause() instanceof StreamWaitTimeoutException,
      "status is not caused by a StreamWaitTimeoutException");
  StreamWaitTimeoutException e = (StreamWaitTimeoutException) status.getCause();

  // Stop the timer for the current rpc attempt.
  rpcTimerContext.close();
  failedCount++;

  // Check whether this request can be retried.
  int maxRetries = retryOptions.getMaxScanTimeoutRetries();
  if (retryOptions.enableRetries() && ++timeoutRetryCount <= maxRetries) {
    LOG.warn("The client could not get a response in %d ms. Retrying the scan.",
        e.getWaitTimeMs());
    resetStatusBasedBackoff();
    performRetry(0);
  } else {
    LOG.warn("The client could not get a response after %d tries, giving up.",
        timeoutRetryCount);
    rpc.getRpcMetrics().markFailure();
    finalizeStats(status);
    setException(getExhaustedRetriesException(status));
  }
}
LOG.error("Could not complete RPC. Failure #%d, got: %s on channel %s.\nTrailers: %s", status.getCause(), failedCount, status, channelId, trailers); rpc.getRpcMetrics().markFailure(); finalizeStats(status); setException(status.asRuntimeException());