if (data.length() > 0) { int totalDocs = data.entries(); int docsSent = 0; int docsSkipped = 0; stats.docsRetried += data.entries(); stats.bytesRetried += data.length(); stats.bulkRetries++; stats.bulkRetriesTotalTime += bar.getTimeSpent(); stats.bytesAccepted += data.length(); stats.docsAccepted += data.entries(); retryOperation = false; bulkResult = BulkResponse.complete(bar.getResponseCode(), totalTime, totalDocs, totalDocs, 0); stats.bytesAccepted += data.length(trackingBytesPosition); stats.docsAccepted += 1; docsSent += 1; data.remove(trackingBytesPosition); } else { BytesArray document = data.entry(trackingBytesPosition); data.remove(trackingBytesPosition); data.copyFrom(newEntry); data.remove(trackingBytesPosition); docsSkipped += 1;
/**
 * Appends the full contents of the given array to this tracking array,
 * registering it as a single new entry.
 *
 * @param source bytes to append; its {@code size} becomes the new entry's length
 */
public void copyFrom(BytesArray source) {
    // Register an entry spanning the source's size, then append its bytes
    // into the backing storage.
    addEntry(source.size);
    source.copyTo(data);
}
// length() must account for removals: each remove(i) subtracts that entry's
// byte size, and later entries shift down one index.
@Test
public void testRemoveSize() throws Exception {
    assertEquals(0, data.length());

    data.copyFrom(new BytesArray("a"));
    data.copyFrom(new BytesArray("bb"));
    data.copyFrom(new BytesArray("ccc"));
    data.copyFrom(new BytesArray("dddd"));
    assertEquals(10, data.length());

    // Removes "bb" (2 bytes)...
    data.remove(1);
    assertEquals(8, data.length());

    // ...then "ccc" (3 bytes), which shifted into index 1 after the first remove.
    data.remove(1);
    assertEquals(5, data.length());
}
// After removing the middle entry ("bb"), writing the array out should yield
// the concatenation of the surviving entries, in insertion order.
@Test
public void testWriteAfterRemoving() throws Exception {
    data.copyFrom(new BytesArray("a"));
    data.copyFrom(new BytesArray("bb"));
    data.copyFrom(new BytesArray("ccc"));

    // Drop entry at index 1 ("bb"); "ccc" shifts down to index 1.
    data.remove(1);

    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    data.writeTo(sink);
    assertEquals("accc", sink.toString());
}
// length() should grow by the byte size of each copied-in entry.
@Test
public void testAddArraySize() throws Exception {
    assertEquals(0, data.length());

    data.copyFrom(new BytesArray("one"));      // +3
    assertEquals(3, data.length());

    data.copyFrom(new BytesArray("two"));      // +3
    assertEquals(6, data.length());

    data.copyFrom(new BytesArray("three"));    // +5
    assertEquals(11, data.length());
}
/**
 * Renders the tracked bytes as a single UTF-8 string by concatenating each
 * live entry's slice of the backing buffer, in entry order.
 */
@Override
public String toString() {
    // Pre-size to the total tracked length to avoid StringBuilder growth.
    StringBuilder rendered = new StringBuilder((int) length());
    for (Entry slice : entries) {
        rendered.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return rendered.toString();
}
}
// pop() should hand back the first (oldest) entry and shrink the tracked
// length by that entry's byte size.
@Test
public void testPopData() throws Exception {
    assertEquals(0, data.length());

    data.copyFrom(new BytesArray("a"));
    data.copyFrom(new BytesArray("bb"));
    data.copyFrom(new BytesArray("ccc"));
    data.copyFrom(new BytesArray("dddd"));
    assertEquals(10, data.length());

    // First pop returns "a" (1 byte): 10 -> 9.
    BytesArray popped = data.pop();
    assertEquals(9, data.length());
    assertEquals(1, popped.length());

    // Second pop returns "bb" (2 bytes): 9 -> 7.
    popped = data.pop();
    assertEquals(7, data.length());
    assertEquals(2, popped.length());
}
}
data.copyFrom(payload);
/**
 * Sends {@code data} as a bulk request to {@code resource}, retrying the whole
 * request under the configured retry policy while rejected entries remain.
 * The loop condition suggests {@code retryFailedEntries} trims accepted entries
 * out of {@code data} between attempts — confirm against its implementation.
 *
 * @param resource target of the bulk request
 * @param data bulk request body; shrinks across retries as entries are accepted
 * @return positions of entries still left over after retries (per {@code leftoversPosition})
 */
public BitSet bulk(Resource resource, TrackingBytesArray data) {
    Retry retry = retryPolicy.init();
    int httpStatus = 0;

    boolean isRetry = false;

    do {
        // NB: dynamically get the stats since the transport can change
        long start = network.transportStats().netTotalTime;
        Response response = execute(PUT, resource.bulk(), data);
        long spent = network.transportStats().netTotalTime - start;

        stats.bulkTotal++;
        stats.docsSent += data.entries();
        stats.bulkTotalTime += spent;
        // bytes will be counted by the transport layer
        if (isRetry) {
            // Everything still present in `data` on a retry pass is being resent,
            // so the whole remaining payload counts toward retry stats.
            stats.docsRetried += data.entries();
            stats.bytesRetried += data.length();
            stats.bulkRetries++;
            stats.bulkRetriesTotalTime += spent;
        }

        // Any subsequent iteration of this loop is, by definition, a retry.
        isRetry = true;

        httpStatus = (retryFailedEntries(response, data) ? HttpStatus.SERVICE_UNAVAILABLE : HttpStatus.OK);
    } while (data.length() > 0 && retry.retry(httpStatus));

    return data.leftoversPosition();
}
// Gives each test a fresh tracking array over a new 256-byte backing store.
@Before
public void init() {
    BytesArray backing = new BytesArray(256);
    data = new TrackingBytesArray(backing);
}
/** * Executes a single bulk operation against the provided resource, using the passed data as the request body. * This method will retry bulk requests if the entire bulk request fails, but will not retry singular * document failures. * * @param resource target of the bulk request. * @param data bulk request body. This body will be cleared of entries on any successful bulk request. * @return a BulkActionResponse object that will detail if there were failing documents that should be retried. */ public BulkActionResponse bulk(Resource resource, TrackingBytesArray data) { // NB: dynamically get the stats since the transport can change long start = network.transportStats().netTotalTime; Response response = execute(PUT, resource.bulk(), data); long spent = network.transportStats().netTotalTime - start; stats.bulkTotal++; stats.docsSent += data.entries(); stats.bulkTotalTime += spent; // bytes will be counted by the transport layer return new BulkActionResponse(parseBulkActionResponse(response), response.status(), spent); }
/** Discards all buffered bulk data without sending it and zeroes the entry count. */
public void discard() { data.reset(); dataEntries = 0; }
/**
 * Logs flushing messages and performs backoff waiting if there is a wait time for retry.
 *
 * @param bulkLoggingID logging correlation id for this bulk operation
 * @param retryOperation true when this flush resends previously rejected entries
 * @param retriedDocs number of entries being retried (used for logging only)
 * @param waitTime backoff in milliseconds to sleep before retrying; no wait when {@code <= 0}
 * @throws EsHadoopException if the backoff sleep is interrupted
 */
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers up the stack can still observe
                // the interruption after we abort with an exception.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
data.copyFrom(payload);
/**
 * Builds a bulk processor around the given client/resource, wiring flush
 * thresholds, retry limits, the backing byte buffer, and the chain of
 * bulk-write error handlers from {@code settings}.
 */
public BulkProcessor(RestClient restClient, Resource resource, Settings settings) {
    this.restClient = restClient;
    this.resource = resource;
    this.settings = settings;

    // Flushing bounds
    this.autoFlush = !settings.getBatchFlushManual();
    this.bufferEntriesThreshold = settings.getBatchSizeInEntries();
    this.requiresRefreshAfterBulk = settings.getBatchRefreshAfterWrite();

    // Negative retry count means that we're going to retry forever in the retry handler.
    int retryCount = settings.getBatchWriteRetryCount();
    // Negative retry limit means that we'll let retry handlers retry forever if need be.
    int limit = settings.getBatchWriteRetryLimit();
    // Set the processors retry limit to a smart value based on both the configured limit and the
    // configured retry count (an unlimited retry count wins; otherwise the smaller bound applies).
    this.retryLimit = (limit < retryCount || retryCount < 0) ? retryCount : limit;

    // Backing data array, sized by the configured batch byte limit.
    this.ba = new BytesArray(new byte[settings.getBatchSizeInBytes()], 0);
    this.data = new TrackingBytesArray(ba);

    // Create error handlers
    BulkWriteErrorHandler httpRetryHandler = new HttpRetryHandler(settings);
    BulkWriteHandlerLoader handlerLoader = new BulkWriteHandlerLoader();
    handlerLoader.setSettings(settings);

    // Order up the handlers: the built-in HTTP retry handler runs first,
    // followed by any user-configured handlers from the loader.
    this.documentBulkErrorHandlers = new ArrayList<IBulkWriteErrorHandler>();
    this.documentBulkErrorHandlers.add(httpRetryHandler);
    this.documentBulkErrorHandlers.addAll(handlerLoader.loadHandlers());

    // Error Extractor
    this.errorExtractor = new ErrorExtractor(settings.getInternalVersionOrThrow());
}
/** * Executes a single bulk operation against the provided resource, using the passed data as the request body. * This method will retry bulk requests if the entire bulk request fails, but will not retry singular * document failures. * * @param resource target of the bulk request. * @param data bulk request body. This body will be cleared of entries on any successful bulk request. * @return a BulkActionResponse object that will detail if there were failing documents that should be retried. */ public BulkActionResponse bulk(Resource resource, TrackingBytesArray data) { // NB: dynamically get the stats since the transport can change long start = network.transportStats().netTotalTime; Response response = execute(PUT, resource.bulk(), data); long spent = network.transportStats().netTotalTime - start; stats.bulkTotal++; stats.docsSent += data.entries(); stats.bulkTotalTime += spent; // bytes will be counted by the transport layer return new BulkActionResponse(parseBulkActionResponse(response), response.status(), spent); }
if (data.length() > 0) { int totalDocs = data.entries(); int docsSent = 0; int docsSkipped = 0; stats.docsRetried += data.entries(); stats.bytesRetried += data.length(); stats.bulkRetries++; stats.bulkRetriesTotalTime += bar.getTimeSpent(); stats.bytesAccepted += data.length(); stats.docsAccepted += data.entries(); retryOperation = false; bulkResult = BulkResponse.complete(bar.getResponseCode(), totalTime, totalDocs, totalDocs, 0); stats.bytesAccepted += data.length(trackingBytesPosition); stats.docsAccepted += 1; docsSent += 1; data.remove(trackingBytesPosition); } else { BytesArray document = data.entry(trackingBytesPosition); data.remove(trackingBytesPosition); data.copyFrom(newEntry); data.remove(trackingBytesPosition); docsSkipped += 1;