/**
 * Gets a snapshot of the statistics for the internal Write Queue.
 *
 * @return The current {@link QueueStats} reported by the Write Queue.
 */
@Override
public QueueStats getQueueStatistics() {
    QueueStats queueStats = this.writes.getStatistics();
    return queueStats;
}
/** * Collects an ordered list of Writes to execute to BookKeeper. * * @return The list of Writes to execute. */ private List<Write> getWritesToExecute() { // Calculate how much estimated space there is in the current ledger. final long maxTotalSize = this.config.getBkLedgerMaxSize() - getWriteLedger().ledger.getLength(); // Get the writes to execute from the queue. List<Write> toExecute = this.writes.getWritesToExecute(maxTotalSize); // Check to see if any writes executed on closed ledgers, in which case they either need to be failed (if deemed // appropriate, or retried). if (handleClosedLedgers(toExecute)) { // If any changes were made to the Writes in the list, re-do the search to get a more accurate list of Writes // to execute (since some may have changed Ledgers, more writes may not be eligible for execution). toExecute = this.writes.getWritesToExecute(maxTotalSize); } return toExecute; }
/** * Gets a snapshot of the queue internals. * * @return The snapshot, including Queue Size, Item Fill Rate and elapsed time of the oldest item. */ synchronized QueueStats getStatistics() { int size = this.writes.size(); double fillRatio = calculateFillRatio(this.totalLength, size); int processingTime = this.lastDurationMillis; if (processingTime == 0 && size > 0) { // We get in here when this method is invoked prior to any operation being completed. Since lastDurationMillis // is only set when an item is completed, in this special case we just estimate based on the amount of time // the first item in the queue has been added. processingTime = (int) ((this.timeSupplier.get() - this.writes.peekFirst().getQueueAddedTimestamp()) / AbstractTimer.NANOS_TO_MILLIS); } return new QueueStats(size, fillRatio, processingTime); }
// NOTE(review): fragment of a close()-behavior test — the method signature, the closing brace of the
// for-loop, and the start of the first assertThrows call are missing from this view, so this snippet
// does not compile as-is. Code tokens are kept byte-identical; only comments were added.
val q = new WriteQueue();
val expectedWrites = new ArrayList<Write>();
for (int i = 0; i < ITEM_COUNT; i++) {
    val w = new Write(new ByteArraySegment(new byte[i]), new TestWriteLedger(i), CompletableFuture.completedFuture(null));
    q.add(w);
    expectedWrites.add(w);
    // NOTE(review): the loop's closing brace appears to be missing here in the visible source.
    // close() presumably drains the queue and returns the removed writes — TODO confirm against WriteQueue.
    val removedWrites = q.close();
    AssertExtensions.assertListEquals("Unexpected writes removed.", expectedWrites, removedWrites, Object::equals);
    // After close(), the statistics should report an empty queue.
    val clearStats = q.getStatistics();
    Assert.assertEquals("Unexpected getSize after clear.", 0, clearStats.getSize());
    Assert.assertEquals("Unexpected getAverageFillRate after clear.", 0, clearStats.getAverageItemFillRatio(), 0);
    // NOTE(review): the enclosing AssertExtensions.assertThrows(...) call for the following two lambdas
    // is cut off in this view — these check that add() fails once the queue is closed.
    () -> q.add(new Write(new ByteArraySegment(new byte[1]), new TestWriteLedger(0), CompletableFuture.completedFuture(null))),
    ex -> ex instanceof ObjectClosedException);
    AssertExtensions.assertThrows(
            "getWritesToExecute() worked after close().",
            () -> q.getWritesToExecute(1),
            ex -> ex instanceof ObjectClosedException);
    // NOTE(review): the snippet ends mid-call; the remainder of this assertThrows is outside this view.
    AssertExtensions.assertThrows(
// NOTE(review): incomplete excerpt of testGetWritesToExecute — several defining lines are missing from
// this view (e.g. the loop creating `w`, the declarations of `sizeLimit` and `beginIndex`, and multiple
// closing braces), so this snippet does not compile as-is. Tokens are kept byte-identical.
public void testGetWritesToExecute() {
    final int ledgerChangeIndex = ITEM_COUNT - 5;
    val q = new WriteQueue();
    val writes = new ArrayList<Write>();
    int ledgerId = 0;
    // NOTE(review): `w` is not defined in the visible source — a write-creation loop is presumably cut here.
    q.add(w);
    writes.add(w);
    // NOTE(review): `sizeLimit` is not defined in the visible source — TODO confirm its origin.
    val maxSizeResult = q.getWritesToExecute(sizeLimit);
    val expectedMaxSizeResult = new ArrayList<Write>();
    for (Write w : writes) {
        // NOTE(review): the body of this loop appears to be cut in the visible source.
        writes.get(0).complete();
        writes.get(1).beginAttempt();
        val result1 = q.getWritesToExecute(Long.MAX_VALUE);
        val result2 = q.getWritesToExecute(Long.MAX_VALUE);
        // An in-progress write at the head should block fetching of subsequent writes.
        Assert.assertEquals("Unexpected writes fetched when in-progress writes exist after non-in-progress writes.", 0, result2.size());
        q.removeFinishedWrites();
        val result3 = q.getWritesToExecute(Long.MAX_VALUE);
        // NOTE(review): `beginIndex` is not defined in the visible source.
        AssertExtensions.assertListEquals("Unexpected writes fetched when ledger changed.",
                writes.subList(beginIndex, ledgerChangeIndex), result3, Object::equals);
        q.removeFinishedWrites();
        val result4 = q.getWritesToExecute(Long.MAX_VALUE);
        // NOTE(review): the snippet ends mid-call; the remainder is outside this view.
        AssertExtensions.assertListEquals("Unexpected writes fetched from the end, after ledger changed.",
// NOTE(review): incomplete excerpt of a removeFinishedWrites() test — the method signature and the lines
// defining `w`, `expectedElapsed` and `w3` are missing from this view, so this snippet does not compile
// as-is. Tokens are kept byte-identical; only comments were added.
final int timeIncrement = 1234 * 1000; // Just over 1ms.
AtomicLong time = new AtomicLong();
val q = new WriteQueue(time::get);
val writes = new ArrayDeque<Write>();
for (int i = 0; i < ITEM_COUNT; i++) {
    // NOTE(review): `w` is not defined in the visible source — its creation line is presumably cut here.
    q.add(w);
    writes.addLast(w);
    val write = writes.pollFirst();
    if (!write.isDone()) {
        // A not-yet-done write at the head should leave the queue non-empty and the size unchanged.
        val result1 = q.removeFinishedWrites();
        Assert.assertEquals("Unexpected value from removeFinishedWrites when there were writes left in the queue.",
                WriteQueue.CleanupStatus.QueueNotEmpty, result1);
        val stats1 = q.getStatistics();
        Assert.assertEquals("Unexpected size after removeFinishedWrites with no effect.", writes.size() + 1, stats1.getSize());
        // NOTE(review): a brace / completion step appears to be missing here in the visible source.
        val result2 = q.removeFinishedWrites();
        val expectedResult = writes.isEmpty() ? WriteQueue.CleanupStatus.QueueEmpty : WriteQueue.CleanupStatus.QueueNotEmpty;
        Assert.assertEquals("Unexpected result from removeFinishedWrites.", expectedResult, result2);
        val stats2 = q.getStatistics();
        Assert.assertEquals("Unexpected size after removeFinishedWrites.", writes.size(), stats2.getSize());
        // NOTE(review): `expectedElapsed` is not defined in the visible source.
        Assert.assertEquals("Unexpected getExpectedProcessingTimeMillis after clear.", expectedElapsed, stats2.getExpectedProcessingTimeMillis());
        // NOTE(review): `w3` is not defined in the visible source — a failed write is added to verify the
        // WriteFailed cleanup status.
        q.add(w3);
        w3.fail(new IntentionalException(), true);
        val result3 = q.removeFinishedWrites();
        Assert.assertEquals("Unexpected value from removeFinishedWrites when there were failed writes.",
                WriteQueue.CleanupStatus.WriteFailed, result3);
/** * Tests the basic functionality of the add() method. */ @Test public void testAdd() { final int timeIncrement = 1234 * 1000; // Just over 1ms. AtomicLong time = new AtomicLong(); val q = new WriteQueue(time::get); val initialStats = q.getStatistics(); Assert.assertEquals("Unexpected getSize on empty queue.", 0, initialStats.getSize()); Assert.assertEquals("Unexpected getAverageFillRate on empty queue.", 0, initialStats.getAverageItemFillRatio(), 0); Assert.assertEquals("Unexpected getExpectedProcessingTimeMillis on empty queue.", 0, initialStats.getExpectedProcessingTimeMillis()); int expectedSize = 0; long firstItemTime = 0; for (int i = 0; i < ITEM_COUNT; i++) { time.addAndGet(timeIncrement); if (i == 0) { firstItemTime = time.get(); } int writeSize = i * 10000; q.add(new Write(new ByteArraySegment(new byte[writeSize]), new TestWriteLedger(i), CompletableFuture.completedFuture(null))); expectedSize += writeSize; val stats = q.getStatistics(); val expectedFillRatio = (double) expectedSize / stats.getSize() / BookKeeperConfig.MAX_APPEND_LENGTH; val expectedProcTime = (time.get() - firstItemTime) / AbstractTimer.NANOS_TO_MILLIS; Assert.assertEquals("Unexpected getSize.", i + 1, stats.getSize()); Assert.assertEquals("Unexpected getAverageFillRate.", expectedFillRatio, stats.getAverageItemFillRatio(), 0.01); Assert.assertEquals("Unexpected getExpectedProcessingTimeMillis.", expectedProcTime, stats.getExpectedProcessingTimeMillis()); } }
@Override public void close() { if (!this.closed.getAndSet(true)) { this.metricReporter.cancel(true); this.metrics.close(); this.rolloverProcessor.close(); this.writeProcessor.close(); // Close active ledger. WriteLedger writeLedger; synchronized (this.lock) { writeLedger = this.writeLedger; this.writeLedger = null; this.logMetadata = null; } // Close the write queue and cancel the pending writes. this.writes.close().forEach(w -> w.fail(new CancellationException("BookKeeperLog has been closed."), true)); if (writeLedger != null) { try { Ledgers.close(writeLedger.ledger); } catch (DurableDataLogException bkEx) { log.error("{}: Unable to close LedgerHandle for Ledger {}.", this.traceObjectId, writeLedger.ledger.getId(), bkEx); } } log.info("{}: Closed.", this.traceObjectId); } }
/** * Creates a new instance of the BookKeeper log class. * * @param containerId The Id of the Container whose BookKeeperLog to open. * @param zkClient A reference to the CuratorFramework client to use. * @param bookKeeper A reference to the BookKeeper client to use. * @param config Configuration to use. * @param executorService An Executor to use for async operations. */ BookKeeperLog(int containerId, CuratorFramework zkClient, BookKeeper bookKeeper, BookKeeperConfig config, ScheduledExecutorService executorService) { Preconditions.checkArgument(containerId >= 0, "containerId must be a non-negative integer."); this.zkClient = Preconditions.checkNotNull(zkClient, "zkClient"); this.bookKeeper = Preconditions.checkNotNull(bookKeeper, "bookKeeper"); this.config = Preconditions.checkNotNull(config, "config"); this.executorService = Preconditions.checkNotNull(executorService, "executorService"); this.closed = new AtomicBoolean(); this.logNodePath = HierarchyUtils.getPath(containerId, this.config.getZkHierarchyDepth()); this.traceObjectId = String.format("Log[%d]", containerId); this.writes = new WriteQueue(); val retry = createRetryPolicy(this.config.getMaxWriteAttempts(), this.config.getBkWriteTimeoutMillis()); this.writeProcessor = new SequentialAsyncProcessor(this::processWritesSync, retry, this::handleWriteProcessorFailures, this.executorService); this.rolloverProcessor = new SequentialAsyncProcessor(this::rollover, retry, this::handleRolloverFailure, this.executorService); this.metrics = new BookKeeperMetrics.BookKeeperLog(containerId); this.metricReporter = this.executorService.scheduleWithFixedDelay(this::reportMetrics, REPORT_INTERVAL, REPORT_INTERVAL, TimeUnit.MILLISECONDS); }
// NOTE(review): two-line fragment — the enclosing method and the body of this if-statement are not
// visible in this chunk. Presumably this is where the log reacts to a failed write detected during
// queue cleanup — TODO confirm against the full method.
val cs = this.writes.removeFinishedWrites();
if (cs == WriteQueue.CleanupStatus.WriteFailed) {
@Override public CompletableFuture<LogAddress> append(ArrayView data, Duration timeout) { ensurePreconditions(); long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "append", data.getLength()); if (data.getLength() > getMaxAppendLength()) { return Futures.failedFuture(new WriteTooLongException(data.getLength(), getMaxAppendLength())); } Timer timer = new Timer(); // Queue up the write. CompletableFuture<LogAddress> result = new CompletableFuture<>(); this.writes.add(new Write(data, getWriteLedger(), result)); // Trigger Write Processor. this.writeProcessor.runAsync(); // Post append tasks. We do not need to wait for these to happen before returning the call. result.whenCompleteAsync((address, ex) -> { if (ex != null) { handleWriteException(ex); } else { // Update metrics and take care of other logging tasks. this.metrics.writeCompleted(timer.getElapsed()); LoggerHelpers.traceLeave(log, this.traceObjectId, "append", traceId, data.getLength(), address); } }, this.executorService); return result; }
/**
 * Publishes the current Ledger count and Write Queue statistics to the metrics subsystem.
 */
private void reportMetrics() {
    int ledgerCount = getLogMetadata().getLedgers().size();
    this.metrics.ledgerCount(ledgerCount);
    this.metrics.queueStats(this.writes.getStatistics());
}