@Override
public void execute() throws Exception {
    try {
        // Flush the accumulated bulk actions and surface any item failure loudly.
        final BulkResponse bulkResponse = bulkRequestBuilder.execute().actionGet();
        if (bulkResponse.hasFailures()) {
            throw new EventDeliveryException(bulkResponse.buildFailureMessage());
        }
    } finally {
        // Always start a fresh builder, even on failure, so a bad batch is not re-sent.
        bulkRequestBuilder = client.prepareBulk();
    }
}
// NOTE(review): this fragment is truncated and internally garbled — it references
// identifiers (`mutation`, `docid`, `storename`, `ttl`, `needUpsert`) that are never
// declared in the visible span, its braces are unbalanced, and it appears to splice
// together several variants of the same method. Left byte-identical pending the
// complete source; do not attempt to compile as-is.
@Override public void mutate(Map<String, Map<String, IndexMutation>> mutations, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException { BulkRequestBuilder brb = client.prepareBulk(); if (mutation.isDeleted()) { log.trace("Deleting entire document {}", docid); brb.add(new DeleteRequest(indexName, storename, docid)); } else { String script = getDeletionScript(informations, storename, mutation); brb.add(client.prepareUpdate(indexName, storename, docid).setScript(script, ScriptService.ScriptType.INLINE)); log.trace("Adding script {}", script); brb.add(new IndexRequest(indexName, storename, docid) .source(getNewDocument(mutation.getAdditions(), informations.get(storename), ttl))); UpdateRequestBuilder update = client.prepareUpdate(indexName, storename, docid).setScript(script, ScriptService.ScriptType.INLINE); if (needUpsert) { XContentBuilder doc = getNewDocument(mutation.getAdditions(), informations.get(storename), ttl); BulkResponse bulkItemResponses = brb.execute().actionGet(); if (bulkItemResponses.hasFailures()) { boolean actualFailure = false; for(BulkItemResponse response : bulkItemResponses.getItems()) { if(response.isFailed() && response.getFailure().getStatus() != RestStatus.NOT_FOUND) { log.error("Failed to execute ES query {}", response.getFailureMessage()); actualFailure = true; throw new Exception(bulkItemResponses.buildFailureMessage());
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    // Any item failure is treated as fatal for the river: record the failed
    // status, drop the pending requests, and shut everything down.
    if (response.hasFailures()) {
        logger.error("Bulk processor failed. {}", response.buildFailureMessage());
        MongoDBRiverHelper.setRiverStatus(client, definition.getRiverName(), Status.IMPORT_FAILED);
        request.requests().clear();
        bulkProcessor.close();
        river.close();
    } else {
        documentCount.addAndGet(response.getItems().length);
        logStatistics(response.getTookInMillis());
        // Reset the per-batch counters for the next bulk.
        deletedDocuments.set(0);
        updatedDocuments.set(0);
        insertedDocuments.set(0);
        if (logger.isTraceEnabled()) {
            logger.trace("afterBulk - bulk [{}] success [{} items] [{} ms] total [{}]",
                    executionId, response.getItems().length,
                    response.getTookInMillis(), documentCount.get());
        }
    }
}
};
/**
 * Throws an {@code ElasticsearchException} describing every failed item of the
 * given bulk response; does nothing when the response has no failures.
 */
private void checkForBulkUpdateFailure(BulkResponse bulkResponse) {
    if (!bulkResponse.hasFailures()) {
        return;
    }
    // Collect id -> failure message for each failed item so callers can inspect them.
    final Map<String, String> failedDocuments = new HashMap<>();
    for (final BulkItemResponse item : bulkResponse.getItems()) {
        if (item.isFailed()) {
            failedDocuments.put(item.getId(), item.getFailureMessage());
        }
    }
    throw new ElasticsearchException(
            "Bulk indexing has failures. Use ElasticsearchException.getFailedDocuments() for detailed messages ["
                    + failedDocuments + "]",
            failedDocuments);
}
/**
 * Submits the accumulated bulk request to Elasticsearch.
 *
 * @param bulkRequestBuilder builder holding the pending actions
 * @return {@code true} when every action succeeded (or there was nothing to send)
 */
private static boolean commitBulkRequest(BulkRequestBuilder bulkRequestBuilder) {
    // Nothing queued — treat as success.
    if (bulkRequestBuilder.numberOfActions() == 0) {
        return true;
    }
    final BulkResponse response = bulkRequestBuilder.execute().actionGet();
    if (response.hasFailures()) {
        for (final BulkItemResponse itemResponse : response.getItems()) {
            if (!itemResponse.isFailed()) {
                continue;
            }
            // A missing document (e.g. delete of an absent doc) is only a warning;
            // everything else is a real sync error.
            if (itemResponse.getFailure().getStatus() == RestStatus.NOT_FOUND) {
                logger.warn(itemResponse.getFailureMessage());
            } else {
                logger.error("ES sync commit error: {}", itemResponse.getFailureMessage());
            }
        }
    }
    return !response.hasFailures();
}
/**
 * Permanently deletes index documents in a single bulk request, retrying the
 * whole batch via {@code RETRY_ES_PUBLISH} and reporting per-item failures.
 *
 * @param type index type
 * @param ids  entity ids to delete
 */
private void hardDeleteDoc(final String type, final List<String> ids) {
    try {
        RETRY_ES_PUBLISH.call(() -> {
            final BulkRequestBuilder bulkRequest = client.prepareBulk();
            for (final String id : ids) {
                bulkRequest.add(client.prepareDelete(esIndex, type, id));
            }
            final BulkResponse bulkResponse = bulkRequest.execute().actionGet(esBulkCallTimeout);
            log.info("Deleting metadata of type {} with count {}", type, ids.size());
            if (bulkResponse.hasFailures()) {
                // Report each failed item individually; the batch is not retried here.
                for (final BulkItemResponse item : bulkResponse.getItems()) {
                    if (item.isFailed()) {
                        handleException("ElasticSearchUtil.bulkDelete.item", type, item.getId(),
                                item.getFailure().getCause(),
                                Metrics.CounterElasticSearchDelete.getMetricName());
                    }
                }
            }
            return null;
        });
    } catch (Exception e) {
        handleException("ElasticSearchUtil.bulkDelete", type, ids, e,
                Metrics.CounterElasticSearchBulkDelete.getMetricName());
    }
}
/**
 * Creates the index and, if creation is acknowledged, sends the accumulated
 * JSON bulk payload with an immediate refresh.
 *
 * @param client Elasticsearch client to execute against
 * @return always {@code null}
 * @throws EsSetupRuntimeException on bulk-item failures or any underlying error
 */
@Override
public Void execute(final Client client) throws ElasticsearchException {
    BulkRequestBuilder bulkRequestBuilder = null;
    try {
        if ((bulks != null) && (!bulks.isEmpty())) {
            bulkRequestBuilder = client.prepareBulk();
            for (JSONProvider jsonProvider : bulks) {
                // Charset constant instead of getBytes("UTF-8"): same bytes, no
                // checked UnsupportedEncodingException to swallow.
                byte[] content = jsonProvider.toJson().getBytes(java.nio.charset.StandardCharsets.UTF_8);
                bulkRequestBuilder.add(content, 0, content.length, true);
            }
        }
        CreateIndexResponse response = client.admin().indices().create(request).get();
        if ((response.isAcknowledged()) && (bulkRequestBuilder != null)) {
            BulkResponse bulkResponse = bulkRequestBuilder.setRefresh(true).execute().actionGet();
            if (bulkResponse.hasFailures()) {
                throw new EsSetupRuntimeException("Bulk request has failures: " + bulkResponse.buildFailureMessage());
            }
        }
    } catch (EsSetupRuntimeException e) {
        // Bug fix: previously the broad catch below re-wrapped our own exception,
        // burying the bulk failure message one level deep. Rethrow it unchanged.
        throw e;
    } catch (Exception e) {
        throw new EsSetupRuntimeException(e);
    }
    return null;
}
// NOTE(review): truncated fragment — the statement after actionGet() is a dangling
// argument list (likely the tail of a log/format call whose head was cut), and the
// trailing "} finally { try {" opens blocks whose bodies are not visible. Left
// byte-identical pending the complete source.
BulkResponse response = client.prepareBulk() .add(buffer, 0, buffer.length, true, elasticsearchBulkRequest.defaultIndexName(), elasticsearchBulkRequest.defaultTypeName()) .setRefresh(true) .execute() .actionGet(); response.getTookInMillis(), response.hasFailures() ? "" : "no ")); } finally { try {
/**
 * Flushes every queued tuple to Elasticsearch as one batch using the
 * BulkRequestBuilder API; rethrows the aggregated failure message if any
 * item in the batch fails.
 */
private void processBatch() {
    final BulkRequestBuilder bulkRequestBuilder = new BulkRequestBuilder(store.client);
    // Drain the queue, turning each tuple into an index request.
    while (!tupleBatch.isEmpty()) {
        final T nextTuple = tupleBatch.remove();
        bulkRequestBuilder.add(getIndexRequestBuilder(nextTuple));
    }
    final BulkResponse bulkResponse = bulkRequestBuilder.execute().actionGet();
    if (bulkResponse.hasFailures()) {
        DTThrowable.rethrow(new Exception(bulkResponse.buildFailureMessage()));
    }
}
// NOTE(review): mangled fragment — a local-variable declaration as the sole body of
// an unbraced `if` does not compile in Java, and `bulkResponse` is then used outside
// that scope; the original braces were evidently lost in extraction. Left
// byte-identical pending the complete source.
BulkRequestBuilder bulkBuilder = support.client().prepareBulk(); if( bulkBuilder.numberOfActions() > 0 ) BulkResponse bulkResponse = bulkBuilder.execute().actionGet(); if( bulkResponse.hasFailures() ) throw new ElasticSearchIndexingException( bulkResponse.buildFailureMessage() ); LOGGER.debug( "Indexing changed Entity states took {}ms", bulkResponse.getTookInMillis() ); support.client().admin().indices().prepareRefresh( support.index() ).execute().actionGet();
// NOTE(review): truncated fragment with unbalanced braces — the tail repeats the
// success-transfer statements with two different variable names (`flowFile` / `ff`),
// suggesting two variants of the same NiFi processor were spliced together. Left
// byte-identical pending the complete source.
final BulkRequestBuilder bulk = esClient.get().prepareBulk(); if (bulk.numberOfActions() > 0) { final BulkResponse response = bulk.execute().actionGet(); if (response.hasFailures()) { BulkItemResponse[] responses = response.getItems(); if (responses != null && responses.length > 0) { for (int i = responses.length - 1; i >= 0; i--) { final BulkItemResponse item = responses[i]; final FlowFile flowFile = flowFilesToTransfer.get(item.getItemId()); if (item.isFailed()) { logger.warn("Failed to insert {} into Elasticsearch due to {}, transferring to failure", new Object[]{flowFile, item.getFailure().getMessage()}); session.transfer(flowFile, REL_FAILURE); session.getProvenanceReporter().send(flowFile, response.remoteAddress().getAddress()); session.transfer(flowFile, REL_SUCCESS); session.getProvenanceReporter().send(ff, response.remoteAddress().getAddress()); session.transfer(ff, REL_SUCCESS);
// NOTE(review): truncated fragment — braces are unbalanced (the for-loop over
// jsonMapList and the inner if-blocks are never closed), so the span cannot be
// restyled safely. Left byte-identical pending the complete source.
BulkRequestBuilder bulkRequest = elasticsearchClient.prepareBulk(); for (BulkWriteEntry be: jsonMapList) { if (be.getId() == null) continue; bulkRequest.add( elasticsearchClient.prepareIndex(indexName, be.getType(), be.getId()).setSource(be.getJsonMap()) .setVersion(be.getVersion() == null ? 1 : be.getVersion().longValue()) .setVersionType(be.getVersion() == null ? VersionType.FORCE : VersionType.EXTERNAL)); BulkResponse bulkResponse = bulkRequest.get(); BulkWriteResult result = new BulkWriteResult(); for (BulkItemResponse r: bulkResponse.getItems()) { String id = r.getId(); ActionWriteResponse response = r.getResponse(); if (response instanceof IndexResponse) { if (((IndexResponse) response).isCreated()) result.getCreated().add(id); String err = r.getFailureMessage(); if (err != null) { result.getErrors().put(id, err);
// NOTE(review): truncated fragment — braces are unbalanced (the failure-handling
// loop and surrounding if-blocks are never closed), so the span cannot be restyled
// safely. Left byte-identical pending the complete source.
final BulkRequestBuilder bulk = esClient.get().prepareBulk(); if (authToken != null) { bulk.putHeader("Authorization", authToken); final BulkResponse response = bulk.execute().actionGet(); if (response.hasFailures()) { BulkItemResponse[] responses = response.getItems(); if (responses != null && responses.length > 0) { for (int i = responses.length - 1; i >= 0; i--) { final FlowFile flowFile = flowFilesToTransfer.get(i); if (responses[i].isFailed()) { logger.error("Failed to insert {} into Elasticsearch due to {}, transferring to failure", new Object[]{flowFile, responses[i].getFailure().getMessage()}); session.transfer(flowFile, REL_FAILURE); session.getProvenanceReporter().send(flowFile, context.getProperty(HOSTS).evaluateAttributeExpressions().getValue() + "/" + responses[i].getIndex()); session.transfer(flowFile, REL_SUCCESS);
/**
 * Bulk-loads test data into the given index/type — one JSON document per line
 * of the data file — refreshing the index so documents are searchable at once.
 *
 * @param index    target index name
 * @param type     target mapping type
 * @param dataFile location of the data file, resolved by {@code getFileAsString}
 * @throws Exception        if the data file cannot be read
 * @throws RuntimeException if any bulk item fails
 */
public static void populateELServer( String index, String type, String dataFile ) throws Exception {
    String data = getFileAsString( dataFile );
    BulkRequestBuilder bulkRequestBuilder = client.prepareBulk().setRefresh(true);
    // Split on any line-ending convention (CRLF, LF, or CR).
    String[] lines = data.split("\\r\\n|\\n|\\r");
    for ( String line : lines ) {
        bulkRequestBuilder.add(client.prepareIndex( index, type ).setSource( line ));
    }
    BulkResponse response = bulkRequestBuilder.execute().actionGet();
    if ( response.hasFailures() ) {
        // Include the per-item failure details, not just the index name, so the
        // test failure is diagnosable from the exception alone.
        throw new RuntimeException( "Error when performing test index's data bulk operation. "
                + "Index=[" + index + "]: " + response.buildFailureMessage() );
    }
}
/**
 * Indexes every entity in one bulk request and returns copies of the entities
 * with the ids assigned by Elasticsearch.
 *
 * @param entities entities to persist; returned as-is when empty
 * @return the saved entities, each carrying its bulk-item id
 */
@Override
public List<T> saveAll(final List<T> entities) {
    if (entities.isEmpty()) {
        return entities;
    }
    final BulkRequestBuilder bulk = client
            .prepareBulk()
            .setRefreshPolicy(policy.get());
    for (final T entity : entities) {
        final IndexRequestBuilder request = client
                .prepareIndex(index, type)
                .setSource(serializer.apply(entity), JSON);
        // Keep the caller-supplied id when the entity carries a non-empty one.
        ofNullable(emptyToNull(entity.getId())).ifPresent(request::setId);
        bulk.add(request);
    }
    // NOTE(review): bulk failures are not checked here; a failed item is still
    // mapped back onto its entity below — confirm this is intentional.
    final BulkResponse response = bulk.execute().actionGet();
    final BulkItemResponse[] items = response.getItems();
    final ImmutableList.Builder<T> saved = ImmutableList.builder();
    for (int i = 0; i < items.length; i++) {
        @SuppressWarnings("unchecked")
        final T entity = (T) entities.get(i).withId(items[i].getId());
        saved.add(entity);
    }
    return saved.build();
}
/**
 * Indexes the given documents into the index/type in one immediate-refresh
 * bulk request, failing fast if any item fails.
 */
public void putDocuments(IndexType indexType, Map<String, Object>... docs) {
    try {
        final BulkRequestBuilder bulkBuilder = SHARED_NODE.client().prepareBulk()
                .setRefreshPolicy(REFRESH_IMMEDIATE);
        for (final Map<String, Object> doc : docs) {
            final IndexRequest indexRequest =
                    new IndexRequest(indexType.getIndex(), indexType.getType()).source(doc);
            bulkBuilder.add(indexRequest);
        }
        final BulkResponse bulkResponse = bulkBuilder.get();
        if (bulkResponse.hasFailures()) {
            throw new IllegalStateException(bulkResponse.buildFailureMessage());
        }
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
/**
 * Deletes every given id in a single bulk request and returns the ids of the
 * items reported back in the bulk response.
 *
 * @param ids ids to delete; an empty collection short-circuits to an empty list
 * @return the ids from the bulk response items
 */
@Override
public List<String> deleteAllIds(final Collection<String> ids) {
    if (ids.isEmpty()) {
        return ImmutableList.of();
    }
    final BulkRequestBuilder bulk = client
            .prepareBulk()
            .setRefreshPolicy(policy.get());
    for (final String id : ids) {
        bulk.add(client.prepareDelete(index, type, id));
    }
    final BulkResponse response = bulk.execute().actionGet();
    final ImmutableList.Builder<String> deletedIds = ImmutableList.builder();
    for (final BulkItemResponse item : response.getItems()) {
        deletedIds.add(item.getId());
    }
    return deletedIds.build();
}
/**
 * Executes the bulk request and logs a warning for every failed item.
 * Failures are reported but not rethrown.
 */
protected void executeBulkRequest(BulkRequestBuilder bulkRequest) {
    // Nothing to do for an empty batch.
    if (bulkRequest.numberOfActions() == 0) {
        return;
    }
    final BulkResponse bulkResponse = bulkRequest.execute().actionGet();
    if (!bulkResponse.hasFailures()) {
        return;
    }
    for (final BulkItemResponse response : bulkResponse) {
        if (response.isFailed()) {
            LOG.warning(String.format("Unable to save Entity %s in %s/%s, cause: %s",
                    response.getId(), response.getIndex(), response.getType(),
                    response.getFailureMessage()));
        }
    }
}
/**
 * Indexes all queries in one bulk request, throwing an
 * {@code ElasticsearchException} that maps each failed document id to its
 * failure message when any item fails.
 *
 * @param queries index queries to execute as a single bulk
 */
@Override
public void bulkIndex(List<IndexQuery> queries) {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (IndexQuery query : queries) {
        bulkRequest.add(prepareIndex(query));
    }
    BulkResponse bulkResponse = bulkRequest.execute().actionGet();
    if (bulkResponse.hasFailures()) {
        // Use the getter-style accessors (getItems/isFailed/getFailureMessage),
        // consistent with checkForBulkUpdateFailure; the bare items()/failed()
        // forms are the deprecated legacy aliases.
        Map<String, String> failedDocuments = new HashMap<>();
        for (BulkItemResponse item : bulkResponse.getItems()) {
            if (item.isFailed()) {
                failedDocuments.put(item.getId(), item.getFailureMessage());
            }
        }
        throw new ElasticsearchException(
                "Bulk indexing has failures. Use ElasticsearchException.getFailedDocuments() for detailed messages ["
                        + failedDocuments + "]", failedDocuments);
    }
}
/**
 * Writes all accumulated percolate rules in one bulk request.
 *
 * @return {@code true} if all rules were added; {@code false} indicates one or
 *         more rules failed.
 * @throws RuntimeException if no rules have been added yet
 */
public boolean writePercolateRules() {
    // Bug fix: the original guard was "< 0", which can never be true for a
    // count, so the "no rules" error was unreachable. "<= 0" matches the
    // message's intent of rejecting an empty rule set.
    if (this.numOfPercolateRules() <= 0) {
        throw new RuntimeException("No Rules Have been added!");
    }
    BulkResponse response = this.bulkBuilder.execute().actionGet();
    for (BulkItemResponse r : response.getItems()) {
        if (r.isFailed()) {
            LOGGER.error(r.getId() + "\t" + r.getFailureMessage());
        }
    }
    return !response.hasFailures();
}