@Override
public String toString() {
    // Decode every tracked slice out of the shared backing byte array and
    // join them in entry order; pre-size the builder to the known byte length.
    StringBuilder buffer = new StringBuilder((int) length());
    for (Entry slice : entries) {
        buffer.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return buffer.toString();
}
}
/**
 * Logs flushing messages and performs backoff waiting if there is a wait time for retry.
 *
 * @param bulkLoggingID identifier used to correlate debug log lines for this bulk request
 * @param retryOperation true when this flush retries previously rejected entries
 * @param retriedDocs number of entries being retried (logging only)
 * @param waitTime backoff in milliseconds to sleep before retrying; values <= 0 skip the wait
 * @throws EsHadoopException if the backoff sleep is interrupted
 */
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt status so code further up the stack can still
                // observe the interruption; swallowing it would hide the signal.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
@Test
public void testAddArraySize() throws Exception {
    // Length starts at zero and grows by the byte size of each copied payload.
    assertEquals(0, data.length());
    int expected = 0;
    for (String payload : new String[] { "one", "two", "three" }) {
        data.copyFrom(new BytesArray(payload));
        expected += payload.length();
        assertEquals(expected, data.length());
    }
}
@Test
public void testRemoveSize() throws Exception {
    assertEquals(0, data.length());
    for (String payload : new String[] { "a", "bb", "ccc", "dddd" }) {
        data.copyFrom(new BytesArray(payload));
    }
    assertEquals(10, data.length());
    // Removing index 1 drops "bb" (2 bytes), then the shifted index 1 drops "ccc" (3 bytes).
    data.remove(1);
    assertEquals(8, data.length());
    data.remove(1);
    assertEquals(5, data.length());
}
// NOTE(review): this fragment is truncated — its closing braces are not visible here,
// so the surrounding control flow cannot be confirmed from this view.
if (data.length() > 0) {
    int totalDocs = data.entries();
    int docsSent = 0;
    // presumably accumulating retry statistics for a bulk flush attempt — TODO confirm against the full method
    stats.bytesRetried += data.length();
    stats.bulkRetries++;
    stats.bulkRetriesTotalTime += bar.getTimeSpent();
    stats.bytesAccepted += data.length();
    stats.docsAccepted += data.entries();
    retryOperation = false;
    // NOTE(review): data.length(trackingBytesPosition) looks like a single entry's byte size,
    // and bytesAccepted/docsAccepted appear to be bumped both in bulk and per-document — verify intent.
    stats.bytesAccepted += data.length(trackingBytesPosition);
    stats.docsAccepted += 1;
    docsSent += 1;
@Test
public void testPopData() throws Exception {
    assertEquals(0, data.length());
    for (String payload : new String[] { "a", "bb", "ccc", "dddd" }) {
        data.copyFrom(new BytesArray(payload));
    }
    assertEquals(10, data.length());
    // pop() hands back entries front-first: "a" (1 byte), then "bb" (2 bytes),
    // shrinking the tracked length by each popped entry's size.
    BytesArray popped = data.pop();
    assertEquals(9, data.length());
    assertEquals(1, popped.length());
    popped = data.pop();
    assertEquals(7, data.length());
    assertEquals(2, popped.length());
}
}
@Test
public void testAddRefSize() throws Exception {
    // A BytesRef aggregates several arrays; copying it in adds their combined size (3 + 5 = 8).
    BytesRef ref = new BytesRef();
    ref.add(new BytesArray("one"));
    ref.add(new BytesArray("three"));
    data.copyFrom(ref);
    assertEquals(8, data.length());
}
@Override
public String toString() {
    // Decode every tracked slice out of the shared backing byte array and
    // join them in entry order; pre-size the builder to the known byte length.
    StringBuilder buffer = new StringBuilder((int) length());
    for (Entry slice : entries) {
        buffer.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return buffer.toString();
}
}
@Override
public String toString() {
    // Decode every tracked slice out of the shared backing byte array and
    // join them in entry order; pre-size the builder to the known byte length.
    StringBuilder buffer = new StringBuilder((int) length());
    for (Entry slice : entries) {
        buffer.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return buffer.toString();
}
}
@Override
public String toString() {
    // Decode every tracked slice out of the shared backing byte array and
    // join them in entry order; pre-size the builder to the known byte length.
    StringBuilder buffer = new StringBuilder((int) length());
    for (Entry slice : entries) {
        buffer.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return buffer.toString();
}
}
@Override
public String toString() {
    // Decode every tracked slice out of the shared backing byte array and
    // join them in entry order; pre-size the builder to the known byte length.
    StringBuilder buffer = new StringBuilder((int) length());
    for (Entry slice : entries) {
        buffer.append(new String(data.bytes, slice.offset, slice.length, StringUtils.UTF_8));
    }
    return buffer.toString();
}
}
public BitSet tryFlush() { BitSet bulkResult = EMPTY; try { // double check data - it might be a false flush (called on clean-up) if (data.length() > 0) { if (log.isDebugEnabled()) { log.debug(String.format("Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries)); } bulkResult = client.bulk(resourceW, data); executedBulkWrite = true; } } catch (EsHadoopException ex) { hadWriteErrors = true; throw ex; } // discard the data buffer, only if it was properly sent/processed //if (bulkResult.isEmpty()) { // always discard data since there's no code path that uses the in flight data discard(); //} return bulkResult; }
/**
 * Logs flushing messages and performs backoff waiting if there is a wait time for retry.
 *
 * @param bulkLoggingID identifier used to correlate debug log lines for this bulk request
 * @param retryOperation true when this flush retries previously rejected entries
 * @param retriedDocs number of entries being retried (logging only)
 * @param waitTime backoff in milliseconds to sleep before retrying; values <= 0 skip the wait
 * @throws EsHadoopException if the backoff sleep is interrupted
 */
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt status so code further up the stack can still
                // observe the interruption; swallowing it would hide the signal.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
/**
 * Logs flushing messages and performs backoff waiting if there is a wait time for retry.
 *
 * @param bulkLoggingID identifier used to correlate debug log lines for this bulk request
 * @param retryOperation true when this flush retries previously rejected entries
 * @param retriedDocs number of entries being retried (logging only)
 * @param waitTime backoff in milliseconds to sleep before retrying; values <= 0 skip the wait
 * @throws EsHadoopException if the backoff sleep is interrupted
 */
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt status so code further up the stack can still
                // observe the interruption; swallowing it would hide the signal.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
/**
 * Logs flushing messages and performs backoff waiting if there is a wait time for retry.
 *
 * @param bulkLoggingID identifier used to correlate debug log lines for this bulk request
 * @param retryOperation true when this flush retries previously rejected entries
 * @param retriedDocs number of entries being retried (logging only)
 * @param waitTime backoff in milliseconds to sleep before retrying; values <= 0 skip the wait
 * @throws EsHadoopException if the backoff sleep is interrupted
 */
private void initFlushOperation(String bulkLoggingID, boolean retryOperation, long retriedDocs, long waitTime) {
    if (retryOperation) {
        if (waitTime > 0L) {
            debugLog(bulkLoggingID, "Retrying [%d] entries after backing off for [%s] ms", retriedDocs, TimeValue.timeValueMillis(waitTime));
            try {
                Thread.sleep(waitTime);
            } catch (InterruptedException e) {
                // Restore the interrupt status so code further up the stack can still
                // observe the interruption; swallowing it would hide the signal.
                Thread.currentThread().interrupt();
                debugLog(bulkLoggingID, "Thread interrupted - giving up on retrying...");
                throw new EsHadoopException("Thread interrupted - giving up on retrying...", e);
            }
        } else {
            debugLog(bulkLoggingID, "Retrying [%d] entries immediately (without backoff)", retriedDocs);
        }
    } else {
        debugLog(bulkLoggingID, "Sending batch of [%d] bytes/[%s] entries", data.length(), dataEntries);
    }
}
public BitSet bulk(Resource resource, TrackingBytesArray data) { Retry retry = retryPolicy.init(); int httpStatus = 0; boolean isRetry = false; do { // NB: dynamically get the stats since the transport can change long start = network.transportStats().netTotalTime; Response response = execute(PUT, resource.bulk(), data); long spent = network.transportStats().netTotalTime - start; stats.bulkTotal++; stats.docsSent += data.entries(); stats.bulkTotalTime += spent; // bytes will be counted by the transport layer if (isRetry) { stats.docsRetried += data.entries(); stats.bytesRetried += data.length(); stats.bulkRetries++; stats.bulkRetriesTotalTime += spent; } isRetry = true; httpStatus = (retryFailedEntries(response, data) ? HttpStatus.SERVICE_UNAVAILABLE : HttpStatus.OK); } while (data.length() > 0 && retry.retry(httpStatus)); return data.leftoversPosition(); }
// NOTE(review): this fragment is truncated — its closing braces are not visible here,
// so the surrounding control flow cannot be confirmed from this view.
if (data.length() > 0) {
    int totalDocs = data.entries();
    int docsSent = 0;
    // presumably accumulating retry statistics for a bulk flush attempt — TODO confirm against the full method
    stats.bytesRetried += data.length();
    stats.bulkRetries++;
    stats.bulkRetriesTotalTime += bar.getTimeSpent();
    stats.bytesAccepted += data.length();
    stats.docsAccepted += data.entries();
    retryOperation = false;
    // NOTE(review): data.length(trackingBytesPosition) looks like a single entry's byte size,
    // and bytesAccepted/docsAccepted appear to be bumped both in bulk and per-document — verify intent.
    stats.bytesAccepted += data.length(trackingBytesPosition);
    stats.docsAccepted += 1;
    docsSent += 1;
// NOTE(review): this fragment is truncated — its closing braces are not visible here,
// so the surrounding control flow cannot be confirmed from this view.
if (data.length() > 0) {
    int totalDocs = data.entries();
    int docsSent = 0;
    // presumably accumulating retry statistics for a bulk flush attempt — TODO confirm against the full method
    stats.bytesRetried += data.length();
    stats.bulkRetries++;
    stats.bulkRetriesTotalTime += bar.getTimeSpent();
    stats.bytesAccepted += data.length();
    stats.docsAccepted += data.entries();
    retryOperation = false;
    // NOTE(review): data.length(trackingBytesPosition) looks like a single entry's byte size,
    // and bytesAccepted/docsAccepted appear to be bumped both in bulk and per-document — verify intent.
    stats.bytesAccepted += data.length(trackingBytesPosition);
    stats.docsAccepted += 1;
    docsSent += 1;
// NOTE(review): this fragment is truncated — its closing braces are not visible here,
// so the surrounding control flow cannot be confirmed from this view.
if (data.length() > 0) {
    int totalDocs = data.entries();
    int docsSent = 0;
    // presumably accumulating retry statistics for a bulk flush attempt — TODO confirm against the full method
    stats.bytesRetried += data.length();
    stats.bulkRetries++;
    stats.bulkRetriesTotalTime += bar.getTimeSpent();
    stats.bytesAccepted += data.length();
    stats.docsAccepted += data.entries();
    retryOperation = false;
    // NOTE(review): data.length(trackingBytesPosition) looks like a single entry's byte size,
    // and bytesAccepted/docsAccepted appear to be bumped both in bulk and per-document — verify intent.
    stats.bytesAccepted += data.length(trackingBytesPosition);
    stats.docsAccepted += 1;
    docsSent += 1;