/**
 * Inspects a bulk response from the full ETL import and handles per-item failures.
 * <p>
 * Items that failed with {@link RestStatus#NOT_FOUND} are only logged as warnings;
 * any other failure aborts the import by throwing.
 *
 * @param bulkResponse the bulk response to inspect
 * @param hasParent    whether the documents use a parent mapping (not read in this method)
 * @throws RuntimeException if any item failed with a status other than NOT_FOUND
 */
private void processFailBulkResponse(BulkResponse bulkResponse, boolean hasParent) {
    for (BulkItemResponse response : bulkResponse.getItems()) {
        if (!response.isFailed()) {
            continue;
        }
        if (response.getFailure().getStatus() == RestStatus.NOT_FOUND) {
            logger.warn(response.getFailureMessage());
        } else {
            logger.error("全量导入数据有误 {}", response.getFailureMessage());
            // Attach the original failure cause instead of dropping it, so the
            // root stack trace survives in the thrown exception.
            throw new RuntimeException("全量数据 etl 异常: " + response.getFailureMessage(),
                    response.getFailure().getCause());
        }
    }
}
/**
 * Extracts the failure cause from a single bulk item response.
 *
 * @param bulkItemResponse the per-item bulk response to inspect
 * @return the underlying failure cause, or {@code null} if the item succeeded
 */
@Override
public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
    return bulkItemResponse.isFailed() ? bulkItemResponse.getFailure().getCause() : null;
}
/**
 * Commits the accumulated bulk request and logs any per-item failures.
 * <p>
 * Items that failed with {@code NOT_FOUND} are logged at warn level only; every
 * other failure is logged at error level. An empty request counts as success.
 *
 * @param bulkRequestBuilder the builder holding the pending bulk actions
 * @return {@code true} if there was nothing to send or every item succeeded
 */
private static boolean commitBulkRequest(BulkRequestBuilder bulkRequestBuilder) {
    if (bulkRequestBuilder.numberOfActions() <= 0) {
        // nothing queued, nothing to do
        return true;
    }
    final BulkResponse response = bulkRequestBuilder.execute().actionGet();
    if (!response.hasFailures()) {
        return true;
    }
    for (BulkItemResponse itemResponse : response.getItems()) {
        if (!itemResponse.isFailed()) {
            continue;
        }
        if (itemResponse.getFailure().getStatus() == RestStatus.NOT_FOUND) {
            logger.warn(itemResponse.getFailureMessage());
        } else {
            logger.error("ES sync commit error: {}", itemResponse.getFailureMessage());
        }
    }
    return false;
}
/**
 * Returns the cause of a failed bulk item, if any.
 *
 * @param bulkItemResponse the per-item bulk response to inspect
 * @return the failure's cause, or {@code null} when the item did not fail
 */
@Override
public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
    if (bulkItemResponse.isFailed()) {
        return bulkItemResponse.getFailure().getCause();
    }
    return null;
}
response.getType(), response.getIndex(), response.getFailureMessage() ); if(response.getFailure()!=null && response.getFailure().getStatus() == RestStatus.TOO_MANY_REQUESTS){ hasTooManyRequests =true;
/**
 * Pulls the underlying cause out of a failed bulk item response.
 *
 * @param bulkItemResponse the per-item bulk response to inspect
 * @return the cause of the failure, or {@code null} for a successful item
 */
@Override
public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
    final boolean failed = bulkItemResponse.isFailed();
    return failed ? bulkItemResponse.getFailure().getCause() : null;
}
for(BulkItemResponse response : bulkItemResponses.getItems()) { if(response.isFailed() && response.getFailure().getStatus() != RestStatus.NOT_FOUND) { log.error("Failed to execute ES query {}", response.getFailureMessage()); actualFailure = true;
/**
 * Extracts the failure cause from a single bulk item response.
 *
 * @param bulkItemResponse the per-item bulk response to inspect
 * @return an exception describing the failure (with the original cause chained),
 *         or {@code null} if the item succeeded
 */
@Nullable
@Override
public Throwable extractFailureCauseFromBulkItemResponse(BulkItemResponse bulkItemResponse) {
    if (!bulkItemResponse.isFailed()) {
        return null;
    }
    // Chain the original cause so the real root failure (and its stack trace)
    // is not lost when only the message is wrapped.
    return new Exception(bulkItemResponse.getFailure().getMessage(),
            bulkItemResponse.getFailure().getCause());
}
private void setupElasticsearchToFail() throws IOException { final String errorMessage = "error message"; final Exception cause = new Exception("test exception"); final boolean isFailed = true; final int itemID = 0; // define the item failure BulkItemResponse.Failure failure = mock(BulkItemResponse.Failure.class); when(failure.getCause()).thenReturn(cause); when(failure.getMessage()).thenReturn(errorMessage); // define the item level response BulkItemResponse itemResponse = mock(BulkItemResponse.class); when(itemResponse.isFailed()).thenReturn(isFailed); when(itemResponse.getItemId()).thenReturn(itemID); when(itemResponse.getFailure()).thenReturn(failure); when(itemResponse.getFailureMessage()).thenReturn("error message"); List<BulkItemResponse> itemsResponses = Collections.singletonList(itemResponse); // define the bulk response to indicate failure BulkResponse response = mock(BulkResponse.class); when(response.iterator()).thenReturn(itemsResponses.iterator()); when(response.hasFailures()).thenReturn(isFailed); // have the client return the mock response when(highLevelClient.bulk(any(BulkRequest.class))).thenReturn(response); }
if (item.isFailed()) { logger.warn("Failed to insert {} into Elasticsearch due to {}, transferring to failure", new Object[]{flowFile, item.getFailure().getMessage()}); session.transfer(flowFile, REL_FAILURE);
/** * Handles the {@link BulkResponse} received from Elasticsearch. * @param bulkResponse The response received from Elasticsearch. * @param documents The documents included in the bulk request. * @param results The writer results. */ private void handleBulkResponse(BulkResponse bulkResponse, List<Indexable> documents, BulkDocumentWriterResults<D> results) { if (bulkResponse.hasFailures()) { // interrogate the response to distinguish between those that succeeded and those that failed for(BulkItemResponse response: bulkResponse) { if(response.isFailed()) { // request failed D failed = getDocument(response.getItemId()); Exception cause = response.getFailure().getCause(); String message = response.getFailureMessage(); results.addFailure(failed, cause, message); } else { // request succeeded D success = getDocument(response.getItemId()); success.setDocumentID(response.getResponse().getId()); results.addSuccess(success); } } } else { // all requests succeeded for(Indexable success: documents) { results.addSuccess(success.document); } } }
if (responses[i].isFailed()) { logger.error("Failed to insert {} into Elasticsearch due to {}, transferring to failure", new Object[]{flowFile, responses[i].getFailure().getMessage()}); session.transfer(flowFile, REL_FAILURE);
/**
 * Checks whether the given bulk item response represents an aborted item.
 *
 * @param response the per-item response to check; may be {@code null}
 * @return {@code true} only when the item failed and that failure was an abort
 */
private static boolean isAborted(BulkItemResponse response) {
    if (response == null || !response.isFailed()) {
        return false;
    }
    return response.getFailure().isAborted();
}
/**
 * Abort this request, and store a {@link org.elasticsearch.action.bulk.BulkItemResponse.Failure} response.
 *
 * @param index The concrete index that was resolved for this request
 * @param cause The cause of the rejection (may not be null)
 * @throws IllegalStateException If a response already exists for this request
 */
public void abort(String index, Exception cause) {
    if (primaryResponse == null) {
        // No response recorded yet: synthesize an "aborted" failure response
        // (the trailing true flags the failure as aborted).
        final BulkItemResponse.Failure failure = new BulkItemResponse.Failure(index, request.type(), request.id(),
                Objects.requireNonNull(cause), true);
        setPrimaryResponse(new BulkItemResponse(id, request.opType(), failure));
    } else {
        assert primaryResponse.isFailed() && primaryResponse.getFailure().isAborted()
                : "response [" + Strings.toString(primaryResponse) + "]; cause [" + cause + "]";
        if (primaryResponse.isFailed() && primaryResponse.getFailure().isAborted()) {
            // Already aborted: keep the first abort failure and attach this cause
            // to it as a suppressed exception.
            primaryResponse.getFailure().getCause().addSuppressed(cause);
        } else {
            // A real (non-aborted) response already exists; aborting now would
            // discard a processed result, so fail loudly.
            throw new IllegalStateException(
                    "aborting item that with response [" + primaryResponse + "] that was previously processed", cause);
        }
    }
}
/**
 * Adapts a listener expecting a single write response into a listener for a
 * {@link BulkResponse} that must contain exactly one item.
 *
 * @param listener the listener to notify with the unwrapped per-item result
 * @return a bulk-response listener forwarding the single item's success or failure
 */
public static <Response extends ReplicationResponse & WriteResponse> ActionListener<BulkResponse> wrapBulkResponse(ActionListener<Response> listener) {
    return ActionListener.wrap(bulkResponse -> {
        assert bulkResponse.getItems().length == 1 : "expected only one item in bulk request";
        final BulkItemResponse item = bulkResponse.getItems()[0];
        if (item.isFailed()) {
            listener.onFailure(item.getFailure().getCause());
        } else {
            final DocWriteResponse docWriteResponse = item.getResponse();
            listener.onResponse((Response) docWriteResponse);
        }
    }, listener::onFailure);
}
/** * Determines whether a bulk item request should be executed on the replica. * * @return {@link ReplicaItemExecutionMode#NORMAL} upon normal primary execution with no failures * {@link ReplicaItemExecutionMode#FAILURE} upon primary execution failure after sequence no generation * {@link ReplicaItemExecutionMode#NOOP} upon primary execution failure before sequence no generation or * when primary execution resulted in noop (only possible for write requests from pre-6.0 nodes) */ static ReplicaItemExecutionMode replicaItemExecutionMode(final BulkItemRequest request, final int index) { final BulkItemResponse primaryResponse = request.getPrimaryResponse(); assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request [" + request.request() + "]"; if (primaryResponse.isFailed()) { return primaryResponse.getFailure().getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO ? ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op : ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication } else { // TODO: once we know for sure that every operation that has been processed on the primary is assigned a seq# // (i.e., all nodes on the cluster are on v6.0.0 or higher) we can use the existence of a seq# to indicate whether // an operation should be processed or be treated as a noop. This means we could remove this method and the // ReplicaItemExecutionMode enum and have a simple boolean check for seq != UNASSIGNED_SEQ_NO which will work for // both failures and indexing operations. return primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP ? ReplicaItemExecutionMode.NORMAL // execution successful on primary : ReplicaItemExecutionMode.NOOP; // ignore replication } }
/**
 * Applies a bulk shard request on a replica shard, replaying each item according
 * to how it was resolved on the primary.
 *
 * @param request the bulk shard request whose items carry their primary responses
 * @param replica the replica shard to apply the operations to
 * @return the translog location of the last applied operation, or {@code null} if none
 * @throws Exception if applying any operation fails
 */
public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
    Translog.Location location = null;
    final BulkItemRequest[] items = request.items();
    for (int slot = 0; slot < items.length; slot++) {
        final BulkItemRequest item = items[slot];
        final DocWriteRequest docWriteRequest = item.request();
        final Engine.Result operationResult;
        switch (replicaItemExecutionMode(item, slot)) {
            case NORMAL:
                // replay the primary's successful operation on this replica
                final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
                operationResult = performOpOnReplica(primaryResponse, docWriteRequest, replica);
                assert operationResult != null : "operation result must never be null when primary response has no failure";
                location = syncOperationResultOrThrow(operationResult, location);
                break;
            case NOOP:
                // nothing to replicate for this item
                break;
            case FAILURE:
                // the primary failed after assigning a seq no; record it as a no-op
                final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure();
                assert failure.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO : "seq no must be assigned";
                operationResult = replica.markSeqNoAsNoop(failure.getSeqNo(), failure.getMessage());
                assert operationResult != null : "operation result must never be null when primary response has no failure";
                location = syncOperationResultOrThrow(operationResult, location);
                break;
            default:
                throw new IllegalStateException("illegal replica item execution mode for: " + docWriteRequest);
        }
    }
    return location;
}
/**
 * Executes a single-document write on the primary by delegating to the bulk
 * shard action with a one-item bulk request, then unwraps the single item.
 *
 * @param request the single-document write request
 * @param primary the primary shard to execute on
 * @return the primary result carrying either the unwrapped response or the failure cause
 * @throws Exception if the underlying bulk execution fails
 */
@Override
protected WritePrimaryResult<Request, Response> shardOperationOnPrimary(
        Request request, final IndexShard primary) throws Exception {
    // capture the caller's refresh policy for the bulk request and disable it
    // on the wrapped write request itself
    WriteRequest.RefreshPolicy refreshPolicy = request.getRefreshPolicy();
    request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE);
    BulkItemRequest[] itemRequests = { new BulkItemRequest(0, ((DocWriteRequest) request)) };
    BulkShardRequest bulkShardRequest = new BulkShardRequest(request.shardId(), refreshPolicy, itemRequests);
    WritePrimaryResult<BulkShardRequest, BulkShardResponse> bulkResult =
            shardBulkAction.shardOperationOnPrimary(bulkShardRequest, primary);
    assert bulkResult.finalResponseIfSuccessful.getResponses().length == 1 : "expected only one bulk shard response";
    BulkItemResponse itemResponse = bulkResult.finalResponseIfSuccessful.getResponses()[0];
    final Response response;
    final Exception failure;
    if (itemResponse.isFailed()) {
        response = null;
        failure = itemResponse.getFailure().getCause();
    } else {
        failure = null;
        response = (Response) itemResponse.getResponse();
    }
    return new WritePrimaryResult<>(request, response, bulkResult.location, failure, primary, logger);
}
processUpdateResponse(updateRequest, context.getConcreteIndex(), executionResult, updateResult)); } else if (executionResult.isFailed()) { final Exception failure = executionResult.getFailure().getCause(); final DocWriteRequest docWriteRequest = context.getCurrent(); if (TransportShardBulkAction.isConflictException(failure)) {
DocWriteResponse.Result translatedResult = translate.getResponseResult(); if (operationResponse.isFailed()) { response = new BulkItemResponse(operationResponse.getItemId(), DocWriteRequest.OpType.UPDATE, operationResponse.getFailure()); } else { final UpdateResponse updateResponse;