/**
 * Builds a {@link RetryingUnaryOperation} for a unary RPC against the given table.
 *
 * @param request the request proto to send
 * @param rpc the async RPC descriptor/invoker
 * @param tableName fully qualified table name used to build the request metadata
 * @return a retrying wrapper ready to be started
 */
private <ReqT, RespT> RetryingUnaryOperation<ReqT, RespT> createUnaryListener(
    ReqT request, BigtableAsyncRpc<ReqT, RespT> rpc, String tableName) {
  Metadata requestMetadata = createMetadata(tableName);
  CallOptions options = getCallOptions(rpc.getMethodDescriptor(), request);
  return new RetryingUnaryOperation<>(
      retryOptions, request, rpc, options, retryExecutorService, requestMetadata, CLOCK);
}
/**
 * Initial execution of the RPC: starts the per-operation timer and kicks off the
 * first attempt.
 *
 * @return the future that completes when the operation (including retries) finishes
 */
public ListenableFuture<ResultT> getAsyncResult() {
  // Guard against this operation being started more than once.
  Preconditions.checkState(operationTimerContext == null);
  operationTimerContext = rpc.getRpcMetrics().timeOperation();
  run();
  return completionFuture;
}
/** Delegates to the RPC to decide whether the current retry request may be retried. */
protected boolean isRequestRetryable() {
  return rpc.isRetryable(getRetryRequest());
}
synchronized void setCallAndStart(BigtableAsyncRpc<RequestT, ResponseT> rpc, CallOptions callOptions, RequestT request, ClientCall.Listener<ResponseT> listener, Metadata metadata) { // There's a subtle race condition in RetryingStreamOperation which requires a separate // newCall/start split. The call variable needs to be set before onMessage() happens; that // usually will occur, but some unit tests broke with a merged newCall and start. this.call = rpc.newCall(callOptions); rpc.start(request, listener, metadata, call); }
/**
 * Records a retry in the span and metrics, then schedules the next attempt.
 *
 * @param nextBackOff delay before the next attempt, in milliseconds
 */
protected void performRetry(long nextBackOff) {
  ImmutableMap<String, AttributeValue> attributes =
      ImmutableMap.of("backoff", AttributeValue.longAttributeValue(nextBackOff));
  operationSpan.addAnnotation("retryWithBackoff", attributes);
  rpc.getRpcMetrics().markRetry();
  retryExecutorService.schedule(getRunnable(), nextBackOff, TimeUnit.MILLISECONDS);
}
/**
 * Builds a {@link RetryingStreamOperation} for a server-streaming RPC against the given table.
 *
 * @param request the request proto to send
 * @param rpc the async RPC descriptor/invoker
 * @param tableName fully qualified table name used to build the request metadata
 * @return a retrying wrapper ready to be started
 */
private <ReqT, RespT> RetryingStreamOperation<ReqT, RespT> createStreamingListener(
    ReqT request, BigtableAsyncRpc<ReqT, RespT> rpc, String tableName) {
  Metadata requestMetadata = createMetadata(tableName);
  CallOptions options = getCallOptions(rpc.getMethodDescriptor(), request);
  return new RetryingStreamOperation<>(
      retryOptions, request, rpc, options, retryExecutorService, requestMetadata, CLOCK);
}
/**
 * Finalizes stats and builds the exception reported once the retry budget is used up.
 *
 * @param status the final gRPC status that exhausted the retries
 * @return an exception wrapping the status, suitable for completing the operation
 */
protected BigtableRetriesExhaustedException getExhaustedRetriesException(Status status) {
  operationSpan.addAnnotation("exhaustedRetries");
  // NOTE: markRetriesExhasted (sic) is the metrics API's actual, misspelled method name;
  // do not "fix" the call here without changing RpcMetrics.
  rpc.getRpcMetrics().markRetriesExhasted();
  finalizeStats(status);
  return new BigtableRetriesExhaustedException(
      String.format("Exhausted retries after %d failures.", failedCount),
      status.asRuntimeException());
}
/**
 * Builds a {@link RetryingReadRowsOperation} that streams {@link FlatRow}s to the observer.
 *
 * @param request the ReadRows request; its table name is used to build the request metadata
 * @param observer receives each row as it arrives
 * @return a retrying read-rows operation ready to be started
 */
private RetryingReadRowsOperation createReadRowsRetryListener(
    ReadRowsRequest request, StreamObserver<FlatRow> observer) {
  CallOptions options = getCallOptions(readRowsAsync.getMethodDescriptor(), request);
  Metadata requestMetadata = createMetadata(request.getTableName());
  return new RetryingReadRowsOperation(
      observer,
      retryOptions,
      request,
      readRowsAsync,
      options,
      retryExecutorService,
      requestMetadata,
      CLOCK);
}
/**
 * Calls {@link BigtableAsyncRpc#newCall(CallOptions)} and
 * {@link BigtableAsyncRpc#start(Object, io.grpc.ClientCall.Listener, Metadata, ClientCall)}
 * with this as the listener so that retries happen correctly.
 */
protected void run() {
  // Run the attempt inside the operation's tracing span so child events attach to it.
  try (Scope scope = TRACER.withSpan(operationSpan)) {
    rpcTimerContext = rpc.getRpcMetrics().timeRpc();
    // Annotate which attempt this is; failedCount is incremented on each failure.
    operationSpan.addAnnotation(Annotation.fromDescriptionAndAttributes("rpcStart",
        ImmutableMap.of("attempt", AttributeValue.longAttributeValue(failedCount))));
    // Copy the caller-supplied metadata into a fresh instance for this attempt.
    Metadata metadata = new Metadata();
    metadata.merge(originalMetadata);
    callWrapper.setCallAndStart(rpc, getRpcCallOptions(), getRetryRequest(), this, metadata);
  } catch (Exception e) {
    // Any synchronous failure while starting the call fails the whole operation.
    setException(e);
  }
}
/**
 * Builds a {@link RetryingMutateRowsOperation} for the given bulk-mutation request,
 * substituting the client-wide app profile id when the request's profile should be
 * overridden.
 *
 * @param request the MutateRows request; its table name is used to build the metadata
 * @return a retrying mutate-rows operation ready to be started
 */
private RetryingMutateRowsOperation createMutateRowsOperation(MutateRowsRequest request) {
  if (shouldOverrideAppProfile(request.getAppProfileId())) {
    request = request.toBuilder().setAppProfileId(clientDefaultAppProfileId).build();
  }
  Metadata requestMetadata = createMetadata(request.getTableName());
  CallOptions options = getCallOptions(mutateRowsRpc.getMethodDescriptor(), request);
  return new RetryingMutateRowsOperation(
      retryOptions, request, mutateRowsRpc, options, retryExecutorService, requestMetadata, CLOCK);
}
/**
 * Handles a successful stream completion by inspecting the per-entry results.
 *
 * @param trailers the trailing metadata from the completed call
 * @return true if the operation is finished (success, non-retryable failure, or retries
 *     exhausted); false if a partial retry has been scheduled
 */
@Override
protected boolean onOK(Metadata trailers) {
  ProcessingStatus status = requestManager.onOK();

  if (status == ProcessingStatus.INVALID) {
    // There was a problem in the data found in onMessage(), so fail the RPC.
    onError(INVALID_RESPONSE, trailers);
    return true;
  }

  if (status == ProcessingStatus.SUCCESS || status == ProcessingStatus.NOT_RETRYABLE) {
    // Set the response, with either success, or non-retryable responses.
    completionFuture.set(Arrays.asList(requestManager.buildResponse()));
    return true;
  }

  // Perform a partial retry, if the backoff policy allows it.
  Long nextBackOff = getNextBackoff();
  if (nextBackOff == null) {
    // Retries are exhausted: return the (partially failed) response as is, and don't retry.
    rpc.getRpcMetrics().markRetriesExhasted();
    completionFuture.set(Arrays.asList(requestManager.buildResponse()));
    operationSpan.addAnnotation("MutationCount", ImmutableMap.of("failureCount",
        AttributeValue.longAttributeValue(requestManager.getRetryRequest().getEntriesCount())));
    return true;
  }

  // Retry only the failed entries; annotate how many are being retried.
  performRetry(nextBackOff);
  operationSpan.addAnnotation("MutationCount", ImmutableMap.of("retryCount",
      AttributeValue.longAttributeValue(requestManager.getRetryRequest().getEntriesCount())));
  return false;
}
// NOTE(review): constructor-body fragment — the enclosing constructor's signature is not
// visible in this chunk.
this.originalMetadata = originalMetadata;
this.completionFuture = new GrpcFuture<>();
// Name the operation's tracing span after the RPC's full method name.
String spanName = makeSpanName("Operation", rpc.getMethodDescriptor().getFullMethodName());
this.operationSpan = TRACER.spanBuilder(spanName).setRecordEvents(true).startSpan();
this.clock = clock;
/**
 * Special retry handling for watchdog timeouts, which uses its own fail counter
 * (timeoutRetryCount) separate from the regular backoff-based retry accounting.
 *
 * <p>Schedules an immediate retry when the timeout-retry budget allows; otherwise marks
 * the operation failed and completes it exceptionally.
 *
 * @param status a status whose cause must be a {@link StreamWaitTimeoutException}
 */
private void handleTimeoutError(Status status) {
  Preconditions.checkArgument(status.getCause() instanceof StreamWaitTimeoutException,
      "status is not caused by a StreamWaitTimeoutException");
  StreamWaitTimeoutException e = ((StreamWaitTimeoutException) status.getCause());

  // Close out the timer for the attempt that timed out before deciding whether to retry.
  rpcTimerContext.close();
  failedCount++;

  // Can this request be retried?
  int maxRetries = retryOptions.getMaxScanTimeoutRetries();
  if (retryOptions.enableRetries() && ++timeoutRetryCount <= maxRetries) {
    LOG.warn("The client could not get a response in %d ms. Retrying the scan.",
        e.getWaitTimeMs());
    // A timeout resets the status-based backoff and retries with no delay.
    resetStatusBasedBackoff();
    performRetry(0);
  } else {
    LOG.warn("The client could not get a response after %d tries, giving up.",
        timeoutRetryCount);
    rpc.getRpcMetrics().markFailure();
    finalizeStats(status);
    setException(getExhaustedRetriesException(status));
  }
}
// NOTE(review): method-body fragment — the enclosing method's signature (apparently a
// failure handler with status, trailers and channelId in scope) is not visible here.
// NOTE(review): status.getCause() precedes the format arguments, which matches a
// Logger.error(String, Throwable, Object...) overload (4 specifiers, 4 remaining args) —
// confirm against the Logger API in use.
LOG.error("Could not complete RPC. Failure #%d, got: %s on channel %s.\nTrailers: %s",
    status.getCause(), failedCount, status, channelId, trailers);
rpc.getRpcMetrics().markFailure();
finalizeStats(status);
// Fail the operation's completion future with the gRPC status.
setException(status.asRuntimeException());