/**
 * Common failure path for a flush: removes the pending buffer from the
 * unflushed queue (only if the write lock can still be acquired, i.e. the
 * operation was not aborted), then notifies the completion handler of the
 * error and records it in the flush statistics.
 */
private void handleError(Exception e, AtomicBoolean aborted) {
  boolean acquired = lockOrAbort(aborted);
  if (acquired) {
    try {
      unflushed.removeFirstOccurrence(buffer);
    } finally {
      // lockOrAbort returned true, so the write lock is held; always release.
      rwlock.writeLock().unlock();
    }
  }
  // Error notification and stats accounting happen regardless of whether the
  // buffer could be removed from the queue.
  handler.error(e);
  factory.getConfiguration().getStatistics().getFlush().error(start);
}
/**
 * Invoked when this flush task is cancelled before it ever runs: signals
 * completion on the handler and closes out the flush timing statistic.
 */
@Override
public void abortBeforeRun() {
  // The handler is completed even though no data was flushed, so that any
  // party observing this task sees it finish.
  handler.complete();
  // NOTE(review): assumes `start` holds the begin() timestamp for this
  // flush statistic — confirm pairing with the begin() call site.
  factory.getConfiguration().getStatistics().getFlush().end(start);
}
/** * Validates two buckets belonging to same region update the same stats */ public void testRegionBucketShareStats() throws Exception { HoplogOrganizer bucket1 = regionManager.create(0); HoplogOrganizer bucket2 = regionManager.create(1); // validate flush stats assertEquals(0, stats.getFlush().getCount()); assertEquals(0, stats.getActiveFileCount()); ArrayList<TestEvent> items = new ArrayList<TestEvent>(); for (int i = 0; i < 100; i++) { items.add(new TestEvent("key-" + i, "value-" + System.nanoTime())); } bucket1.flush(items.iterator(), 100); assertEquals(1, stats.getFlush().getCount()); assertEquals(1, stats.getActiveFileCount()); items.clear(); for (int i = 0; i < 100; i++) { items.add(new TestEvent("key-" + i, "value-" + System.nanoTime())); } bucket2.flush(items.iterator(), 100); assertEquals(2, stats.getFlush().getCount()); assertEquals(2, stats.getActiveFileCount()); }
// NOTE(review): fragment of a statistics test — these assertions appear to be
// collected from several points of one test method; the loop variable `j` and
// all setup/flush calls are outside this view.
assertEquals(0, stats.getFlush().getCount());      // no flush has happened yet
assertEquals(0, stats.getFlush().getBytes());
assertEquals(0, stats.getActiveFileCount());
int bytesSent = 0;                                 // baseline for the byte-growth check below
assertEquals(j + 1, stats.getFlush().getCount());  // presumably one flush per loop iteration — confirm
assertTrue(stats.getFlush().getBytes() > bytesSent);
assertEquals(j + 1, stats.getActiveFileCount());
assertEquals(1, stats.getActiveFileCount());       // NOTE(review): likely after a compaction collapsed files — confirm against caller
assertEquals(0, stats.getInactiveFileCount());
// Compaction byte count should match what flush wrote — TODO confirm intent.
assertEquals(stats.getMinorCompaction().getBytes(), stats.getFlush()
    .getBytes());
// NOTE(review): fragment of a flush method — the enclosing method header and
// closing braces are outside this view, so braces below are unbalanced here.
long start = stats.getFlush().begin();  // start the flush timing statistic
int byteCount = 0;                      // bytes written so far, reported on end()
if (writer == null) {
  // Serialize into a heap buffer (initial capacity 1024, no version).
  HeapDataOutputStream out = new HeapDataOutputStream(1024, null);
  if (abortFlush) {
    // Abort path: close out the timer before signalling the caller.
    stats.getFlush().end(byteCount, start);
    throw new CacheClosedException("Either the region has been cleared "
        + "or closed. Aborting the ongoing flush operation.");
    // NOTE(review): the two calls below cannot follow a throw — they come
    // from other branches (error vs. success accounting) of the same method
    // whose context is not visible in this excerpt.
    stats.getFlush().error(start);
    stats.getFlush().end(byteCount, start);
// NOTE(review): fragment of a flush method (iterator-driven variant) — the
// enclosing method header and closing braces are outside this view.
long start = stats.getFlush().begin();  // start the flush timing statistic
int byteCount = 0;                      // bytes written so far, reported on end()
if (writer == null) {
  // Drain the buffered entries; abort check happens inside the loop.
  while (bufferIter.hasNext()) {
    if (abortFlush) {
      // Abort path: close out the timer before signalling the caller.
      stats.getFlush().end(byteCount, start);
      throw new CacheClosedException("Either the region has been cleared "
          + "or closed. Aborting the ongoing flush operation.");
      // NOTE(review): the two calls below cannot follow a throw — they come
      // from other branches (error vs. success accounting) of the same
      // method whose context is not visible in this excerpt.
      stats.getFlush().error(start);
      stats.getFlush().end(byteCount, start);
@Override public void run() { try { compactor.add(soplog); compactor.compact(false, null); unflushed.removeFirstOccurrence(buffer); // TODO need to invoke this while NOT holding write lock handler.complete(); factory.getConfiguration().getStatistics().getFlush().end(buffer.dataSize(), start); } catch (Exception e) { handleError(e, aborted); return; } } };
// NOTE(review): fragment — the enclosing method header and surrounding try
// block are not visible; braces are unbalanced within this excerpt, and the
// nested catches reusing `e` suggest these lines were stitched from
// different scopes of the original method.
long start = stats.getFlush().begin();  // begin flush timing
int byteCount = 0;                      // bytes flushed, reported on end()
writer = null;
} catch (IOException e) {
  stats.getFlush().error(start);
  try {
    // Attempt HDFS-specific recovery; may replace the exception instance.
    e = handleWriteHdfsIOError(writer, so, e);
    stats.getFlush().error(start);
    deleteTmpFile(writer, so);
    writer = null;
    stats.getFlush().end(byteCount, start);
    incrementDiskUsage(so.getSize());
  } catch (BucketMovedException e) {
    // Bucket relocated mid-flush: record the error, clean up the partial
    // tmp file, and rethrow for the caller to handle.
    stats.getFlush().error(start);
    deleteTmpFile(writer, so);
    writer = null;
    throw e;
  } catch (IOException e) {
    // Recovery itself failed: record, log, and propagate.
    stats.getFlush().error(start);
    logger.warn(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
    throw e;
// NOTE(review): fragment — near-duplicate of the preceding excerpt but using
// logger.warning (GemFire LogWriter style) instead of logger.warn, so it is
// presumably from a different file/variant. Method header and surrounding
// try block are not visible; braces are unbalanced here.
long start = stats.getFlush().begin();  // begin flush timing
int byteCount = 0;                      // bytes flushed, reported on end()
writer = null;
} catch (IOException e) {
  stats.getFlush().error(start);
  try {
    // Attempt HDFS-specific recovery; may replace the exception instance.
    e = handleWriteHdfsIOError(writer, so, e);
    stats.getFlush().error(start);
    deleteTmpFile(writer, so);
    writer = null;
    stats.getFlush().end(byteCount, start);
    incrementDiskUsage(so.getSize());
  } catch (BucketMovedException e) {
    // Bucket relocated mid-flush: record the error, clean up the partial
    // tmp file, and rethrow for the caller to handle.
    stats.getFlush().error(start);
    deleteTmpFile(writer, so);
    writer = null;
    throw e;
  } catch (IOException e) {
    // Recovery itself failed: record, log, and propagate.
    stats.getFlush().error(start);
    logger.warning(LocalizedStrings.HOPLOG_FLUSH_OPERATION_FAILED, e);
    throw e;