/**
 * Returns {@code true} when the buffered bulk request has reached either
 * configured flush threshold. A threshold of {@code -1} means that limit
 * is disabled and never trips.
 */
private boolean isOverTheLimit() {
    final boolean tooManyActions =
            bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions;
    if (tooManyActions) {
        return true;
    }
    final boolean tooManyBytes =
            bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize;
    return tooManyBytes;
}
/**
 * Whether the pending bulk request exceeds the action-count or byte-size
 * limit; {@code -1} disables the corresponding limit.
 */
private boolean isOverTheLimit() {
    return (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions)
            || (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize);
}
/**
 * Checks the two configured flush limits in turn: first the action count,
 * then the estimated payload size. A limit set to {@code -1} is disabled.
 */
private boolean isOverTheLimit() {
    if (bulkActions != -1) {
        if (bulkRequest.numberOfActions() >= bulkActions) {
            return true;
        }
    }
    return bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize;
}
/**
 * Reports whether either bulk flush threshold has been reached.
 * Disabled thresholds are signalled by {@code -1}.
 */
private boolean isOverTheLimit() {
    final boolean actionLimitEnabled = bulkActions != -1;
    if (actionLimitEnabled && bulkRequest.numberOfActions() >= bulkActions) {
        return true;
    }
    final boolean sizeLimitEnabled = bulkSize != -1;
    return sizeLimitEnabled && bulkRequest.estimatedSizeInBytes() >= bulkSize;
}
/**
 * True once the buffered request trips the action-count limit or, failing
 * that, the byte-size limit. Either limit may be disabled with {@code -1}.
 */
private boolean isOverTheLimit() {
    boolean over = false;
    if (bulkActions != -1 && bulkRequest.numberOfActions() >= bulkActions) {
        over = true;
    } else if (bulkSize != -1 && bulkRequest.estimatedSizeInBytes() >= bulkSize) {
        over = true;
    }
    return over;
}
/**
 * Bulk-processor hook invoked just before a bulk request executes.
 * Adds the request's action count to the in-flight counter and, when debug
 * logging is on, records the request's size statistics.
 */
@Override
public void beforeBulk(final long executionId, final BulkRequest request) {
    inflightRequestCount.getAndAdd(request.numberOfActions());
    if (!log.isDebugEnabled()) {
        return;
    }
    log.debug("index update starting, executionId: {}, request count: {}, request size (bytes): {}",
            executionId,
            request.numberOfActions(),
            request.estimatedSizeInBytes());
}
// Bulk-processor hook invoked when a bulk request failed with a transport-level
// Throwable. Releases the in-flight action count (negated add) and always logs
// the failure with the original request's statistics and the cause.
// NOTE(review): the final '}' closes an enclosing declaration (likely an
// anonymous listener class) that is not visible in this chunk — confirm against
// the full file.
@Override public void afterBulk(final long executionId, final BulkRequest request, final Throwable failure) { inflightRequestCount.getAndAdd(-request.numberOfActions()); log.error("index update failure, executionId: {}, request count: {}, request size (bytes): {}; " + "this may indicate that not enough CPU is available to effectively index repository content", executionId, request.numberOfActions(), request.estimatedSizeInBytes(), failure); } }
/**
 * Bulk-processor hook invoked after a bulk request completed without a
 * transport-level failure. Releases the in-flight action count and, at debug
 * level, logs request/response statistics. Note the response may still carry
 * per-item failures ({@code hasFailures}); they are only logged here.
 */
@Override
public void afterBulk(final long executionId, final BulkRequest request, final BulkResponse response) {
    inflightRequestCount.getAndAdd(-request.numberOfActions());
    if (!log.isDebugEnabled()) {
        return;
    }
    log.debug("index update success, executionId: {}, request count: {}, request size (bytes): {}; "
                    + "response took: {}, response hasFailures: {}",
            executionId,
            request.numberOfActions(),
            request.estimatedSizeInBytes(),
            response.getTook(),
            response.hasFailures());
}
/**
 * Trace-logs the execution id, action count, and estimated byte size of a
 * bulk request that is about to run.
 */
@Override
public void beforeBulk(long executionId, BulkRequest request) {
    if (logger.isTraceEnabled() == false) {
        return;
    }
    logger.trace("[{}] executing [{}]/[{}]",
            executionId,
            request.numberOfActions(),
            new ByteSizeValue(request.estimatedSizeInBytes()));
}
/**
 * Sends one bulk request through the retry helper, backing off on retryable
 * failures. If the parent task was cancelled before the send, finishes the
 * task early instead of sending.
 */
void sendBulkRequest(TimeValue thisBatchStartTime, BulkRequest request) {
    if (logger.isDebugEnabled()) {
        logger.debug("[{}]: sending [{}] entry, [{}] bulk request",
                task.getId(),
                request.requests().size(),
                new ByteSizeValue(request.estimatedSizeInBytes()));
    }
    if (task.isCancelled()) {
        logger.debug("[{}]: finishing early because the task was cancelled", task.getId());
        finishHim(null);
        return;
    }
    // Route the response/failure back into this batch's bookkeeping.
    final ActionListener<BulkResponse> bulkListener = new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse response) {
            onBulkResponse(thisBatchStartTime, response);
        }

        @Override
        public void onFailure(Exception e) {
            finishHim(e);
        }
    };
    bulkRetry.withBackoff(client::bulk, request, bulkListener);
}
protected void append(IndexRequest indexRequest) { if (bulkRequest.estimatedSizeInBytes() + indexRequest.source().length() > MAX_RECORD_SIZE) { if (bulkRequest.numberOfActions() > 0) { // Create multiple elastic bulk requests when we exceed the record size bulkRequests.add(bulkRequest); bulkRequest = new BulkRequest(); } if (indexRequest.source().length() > MAX_RECORD_SIZE) { log.warn(String.format("Indexing request for doc: %s, is too large: %d, max record size: %d", indexRequest.id(), indexRequest.source().length(), MAX_RECORD_SIZE)); } } bulkRequest.add(indexRequest); }
// Flushes the buffered bulk request once it reaches MAX_BATCH_COUNT actions or
// exceeds MAX_BATCH_SIZE estimated bytes.
// NOTE(review): this fragment's 'if' block is not closed within this view, and
// the debug/totalCount/doBulkRequest sequence appears twice back-to-back —
// looks like paste duplication; confirm against the full file.
// NOTE(review): the debug calls use printf-style %d placeholders — confirm
// LOGGER formats printf-style (SLF4J-style loggers expect {} placeholders).
if (bulkRequest.numberOfActions() >= MAX_BATCH_COUNT || bulkRequest.estimatedSizeInBytes() > MAX_BATCH_SIZE) { LOGGER.debug("adding elements... %d (est size %d)", bulkRequest.numberOfActions(), bulkRequest.estimatedSizeInBytes()); totalCount += bulkRequest.numberOfActions(); doBulkRequest(bulkRequest); LOGGER.debug("adding elements... %d (est size %d)", bulkRequest.numberOfActions(), bulkRequest.estimatedSizeInBytes()); totalCount += bulkRequest.numberOfActions(); doBulkRequest(bulkRequest);
// Flushes the buffered bulk request at MAX_BATCH_COUNT actions or past
// MAX_BATCH_SIZE estimated bytes.
// NOTE(review): incomplete fragment — the 'if' is never closed in this view and
// the debug/totalCount/doBulkRequest statements are duplicated verbatim;
// likely copy-paste — verify against the full file before relying on this.
// NOTE(review): %d placeholders in LOGGER.debug — confirm the logger is
// printf-style rather than {}-style.
if (bulkRequest.numberOfActions() >= MAX_BATCH_COUNT || bulkRequest.estimatedSizeInBytes() > MAX_BATCH_SIZE) { LOGGER.debug("adding elements... %d (est size %d)", bulkRequest.numberOfActions(), bulkRequest.estimatedSizeInBytes()); totalCount += bulkRequest.numberOfActions(); doBulkRequest(bulkRequest); LOGGER.debug("adding elements... %d (est size %d)", bulkRequest.numberOfActions(), bulkRequest.estimatedSizeInBytes()); totalCount += bulkRequest.numberOfActions(); doBulkRequest(bulkRequest);
// Debug-logs the entry count and estimated byte size of a bulk request before
// it is sent.
// NOTE(review): incomplete fragment — the 'if' block is not closed within this
// view; the closing brace presumably follows in the full file.
if (logger.isDebugEnabled()) { logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(), new ByteSizeValue(request.estimatedSizeInBytes()));
// Debug-logs a bulk request's entry count and estimated size (duplicate of the
// fragment above — confirm both occurrences are intended in the full file).
// NOTE(review): incomplete fragment — the 'if' block is not closed in this view.
if (logger.isDebugEnabled()) { logger.debug("sending [{}] entry, [{}] bulk request", request.requests().size(), new ByteSizeValue(request.estimatedSizeInBytes()));
/**
 * Bulk-processor hook invoked after a bulk request completed. Trace-logs the
 * execution statistics and emits a warning when the response carried
 * per-item failures.
 */
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (logger.isTraceEnabled()) {
        logger.trace("[{}] executed [{}]/[{}], took [{}]",
                executionId,
                request.numberOfActions(),
                new ByteSizeValue(request.estimatedSizeInBytes()),
                response.getTook());
    }
    if (response.hasFailures() == false) {
        return;
    }
    logger.warn("[{}] failed to execute bulk request: {}", executionId, response.buildFailureMessage());
}