void invalidateLedgerHandle(ReadHandle ledgerHandle, Throwable t) { long ledgerId = ledgerHandle.getId(); if (currentLedger != null && ledgerId != currentLedger.getId()) { // remove handle from ledger cache since we got a (read) error ledgerCache.remove(ledgerId); if (log.isDebugEnabled()) { log.debug("[{}] Removed ledger {} from cache (after read error)", name, ledgerId, t); } } else { if (log.isDebugEnabled()) { log.debug("[{}] Ledger that encountered read error is current ledger", name, t); } } }
@Override public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boolean isSlowestReader, final ReadEntriesCallback callback, Object ctx) { try { asyncReadEntry0(lh, firstEntry, lastEntry, isSlowestReader, callback, ctx); } catch (Throwable t) { log.warn("failed to read entries for {}--{}-{}", lh.getId(), firstEntry, lastEntry, t); // invalidate all entries related to ledger from the cache (it might happen if entry gets corrupt // (entry.data is already deallocate due to any race-condition) so, invalidate cache and next time read from // the bookie) invalidateAllEntries(lh.getId()); callback.readEntriesFailed(createManagedLedgerException(t), ctx); } }
@Override public void asyncReadEntry(ReadHandle lh, PositionImpl position, final ReadEntryCallback callback, final Object ctx) { try { asyncReadEntry0(lh, position, callback, ctx); } catch (Throwable t) { log.warn("failed to read entries for {}-{}", lh.getId(), position, t); // invalidate all entries related to ledger from the cache (it might happen if entry gets corrupt // (entry.data is already deallocate due to any race-condition) so, invalidate cache and next time read from // the bookie) invalidateAllEntries(lh.getId()); callback.readEntryFailed(createManagedLedgerException(t), ctx); } }
/**
 * Test stub: records the offload attempt for the ledger. The first attempt per ledger id
 * succeeds; any repeat attempt fails with an "already exists" error.
 *
 * @param ledger        ledger being offloaded
 * @param uuid          offload attempt identifier
 * @param extraMetadata unused by this stub
 * @return a future completed immediately (success or failure)
 */
@Override
public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid, Map<String, String> extraMetadata) {
    final CompletableFuture<Void> promise = new CompletableFuture<>();
    final boolean firstAttempt = offloads.putIfAbsent(ledger.getId(), uuid) == null;
    if (firstAttempt) {
        promise.complete(null);
    } else {
        promise.completeExceptionally(new Exception("Already exists exception"));
    }
    return promise;
}
/**
 * Test stub: fails the offload (once) for each ledger listed in {@code errorLedgers},
 * recording the failed attempt in {@code failedOffloads}; removing the id from the error
 * set means a retry of the same ledger will succeed. Other ledgers delegate to the real
 * implementation.
 */
@Override
public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid, Map<String, String> extraMetadata) {
    return errorLedgers.thenCompose((errors) -> {
        // remove(): each ledger fails at most once, then falls through to super on retry.
        if (!errors.remove(ledger.getId())) {
            return super.offload(ledger, uuid, extraMetadata);
        }
        failedOffloads.add(Pair.of(ledger.getId(), uuid));
        final CompletableFuture<Void> failure = new CompletableFuture<>();
        failure.completeExceptionally(new Exception("Some kind of error"));
        return failure;
    });
}
/**
 * Test stub: fails the offload with a generic error for every ledger listed in
 * {@code errorLedgers} (the set is only inspected, never mutated, so the failure is
 * permanent for those ledgers); all other ledgers delegate to the real implementation.
 */
@Override
public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid, Map<String, String> extraMetadata) {
    return errorLedgers.thenCompose( (errors) -> {
        if (errors.contains(ledger.getId())) {
            // Simulate a failed offload for this ledger.
            CompletableFuture<Void> future = new CompletableFuture<>();
            future.completeExceptionally(new Exception("Some kind of error"));
            return future;
        } else {
            return super.offload(ledger, uuid, extraMetadata);
        }
    });
}
// NOTE: this brace closes the enclosing class, whose opening is outside this chunk.
}
if (ledger.getId() == lastPosition.getLedgerId()) { if (log.isDebugEnabled()) { log.debug("[{}] No more messages to read from ledger={} lastEntry={} readEntry={}", name, ledger.getId(), lastEntryInLedger, firstEntry); if (currentLedger == null || ledger.getId() != currentLedger.getId()) { Long nextLedgerId = ledgers.ceilingKey(ledger.getId() + 1); if (nextLedgerId != null) { opReadEntry.updateReadPosition(new PositionImpl(nextLedgerId, 0)); } else { opReadEntry.updateReadPosition(new PositionImpl(ledger.getId() + 1, 0)); log.debug("[{}] Reading entries from ledger {} - first={} last={}", name, ledger.getId(), firstEntry, lastEntry); final PositionImpl lastReadPosition = PositionImpl.get(ledger.getId(), lastEntry); discardEntriesFromCache(cursor, lastReadPosition);
/**
 * Test stub: signals (via {@code offloadStarted}) that offload began, then waits on
 * {@code blocker}. When the blocker completes with the id of the ledger being offloaded,
 * the offload fails with {@code BKNoSuchLedgerExistsException} (simulating the ledger
 * having been trimmed while the offload was in flight); otherwise it delegates to the
 * real implementation.
 */
@Override
public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid, Map<String, String> extraMetadata) {
    // Let the test know the offload has started before blocking on `blocker`.
    offloadStarted.countDown();
    return blocker.thenCompose( (trimmedLedger) -> {
        if (trimmedLedger == ledger.getId()) {
            // The ledger was "trimmed" concurrently: surface the BK not-found error.
            CompletableFuture<Void> future = new CompletableFuture<>();
            future.completeExceptionally(new BKException.BKNoSuchLedgerExistsException());
            return future;
        } else {
            return super.offload(ledger, uuid, extraMetadata);
        }
    });
}
// NOTE: closes the enclosing anonymous class, whose opening is outside this chunk.
};
/**
 * Reads the entry range [firstEntry, lastEntry] through the entry cache, optionally
 * guarded by a read timeout (config.getReadEntryTimeoutSeconds(); 0 or negative disables
 * the timeout and the call goes straight to the cache with the caller's callback).
 *
 * When the timeout is enabled, the callback is wrapped in a recyclable
 * ReadEntryCallbackWrapper and a timer task is scheduled that fails the read with
 * BKException.Code.TimeoutException if it has not completed in time.
 */
protected void asyncReadEntry(ReadHandle ledger, long firstEntry, long lastEntry, boolean isSlowestReader,
        OpReadEntry opReadEntry, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, ledger.getId(), firstEntry,
                opReadEntry, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {}-{} after {} sec", this.name, ledger.getId(),
                        firstEntry, lastEntry, timeout);
                // Fail through the wrapper; passing readOpCount lets it reject a stale/recycled wrapper.
                readCallback.readEntriesFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // Hand the timer task to the wrapper so a completing read can cancel it.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, readCallback, readOpCount);
    } else {
        // No timeout configured: pass the caller's callback straight through.
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, opReadEntry, ctx);
    }
}
@SuppressWarnings({ "unchecked", "rawtypes" }) private void asyncReadEntry0(ReadHandle lh, long firstEntry, long lastEntry, boolean isSlowestReader, final ReadEntriesCallback callback, Object ctx) { final long ledgerId = lh.getId(); final int entriesToRead = (int) (lastEntry - firstEntry) + 1; final PositionImpl firstPosition = PositionImpl.get(lh.getId(), firstEntry); final PositionImpl lastPosition = PositionImpl.get(lh.getId(), lastEntry);
/**
 * Verifies a read of [0..9] succeeds when only the tail (entries 3..9) is cached:
 * the cache must fetch the missing prefix from the ledger and return all 10 entries.
 */
@Test(timeOut = 5000)
void testReadMissingBefore() throws Exception {
    ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn(0L);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    byte[] data = new byte[10];
    // Populate only entries 3..9; 0..2 must come from the (mocked) ledger.
    for (int i = 3; i < 10; i++) {
        entryCache.insert(EntryImpl.create(0, i, data));
    }

    final CountDownLatch counter = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            assertEquals(entries.size(), 10);
            // FIX: release the ref-counted entries so pooled buffers are not leaked
            // (matches the release done in testRead).
            entries.forEach(Entry::release);
            counter.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            Assert.fail("should not have failed");
        }
    }, null);
    counter.await();
}
/**
 * Verifies a read of [0..9] succeeds when only the head (entries 0..7) is cached:
 * the cache must fetch the missing suffix from the ledger and return all 10 entries.
 */
@Test(timeOut = 5000)
void testReadMissingAfter() throws Exception {
    ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn(0L);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    byte[] data = new byte[10];
    // Populate only entries 0..7; 8..9 must come from the (mocked) ledger.
    for (int i = 0; i < 8; i++) {
        entryCache.insert(EntryImpl.create(0, i, data));
    }

    final CountDownLatch counter = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            assertEquals(entries.size(), 10);
            // FIX: release the ref-counted entries so pooled buffers are not leaked
            // (matches the release done in testRead).
            entries.forEach(Entry::release);
            counter.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            Assert.fail("should not have failed");
        }
    }, null);
    counter.await();
}
@Test(timeOut = 5000) void testRead() throws Exception { ReadHandle lh = getLedgerHandle(); when(lh.getId()).thenReturn((long) 0); EntryCacheManager cacheManager = factory.getEntryCacheManager(); EntryCache entryCache = cacheManager.getEntryCache(ml); byte[] data = new byte[10]; for (int i = 0; i < 10; i++) { entryCache.insert(EntryImpl.create(0, i, data)); } final CountDownLatch counter = new CountDownLatch(1); entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() { public void readEntriesComplete(List<Entry> entries, Object ctx) { assertEquals(entries.size(), 10); entries.forEach(e -> e.release()); counter.countDown(); } public void readEntriesFailed(ManagedLedgerException exception, Object ctx) { Assert.fail("should not have failed"); } }, null); counter.await(); // Verify no entries were read from bookkeeper verify(lh, never()).readAsync(anyLong(), anyLong()); }
/**
 * Verifies that a bookkeeper read failure is propagated: the handle's readAsync is
 * stubbed to fail with BKNoSuchLedgerExistsException, so reading [0..9] (with only
 * entry 2 cached) must invoke readEntriesFailed rather than complete.
 */
@Test(timeOut = 5000)
void testReadWithError() throws Exception {
    final ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn(0L);

    // Every bookkeeper read fails immediately.
    doAnswer((invocation) -> {
        CompletableFuture<LedgerEntries> failed = new CompletableFuture<>();
        failed.completeExceptionally(new BKNoSuchLedgerExistsException());
        return failed;
    }).when(lh).readAsync(anyLong(), anyLong());

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    // A single cached entry forces the cache to go to the ledger for the rest.
    byte[] payload = new byte[10];
    entryCache.insert(EntryImpl.create(0, 2, payload));

    final CountDownLatch failedLatch = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            Assert.fail("should not complete");
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            failedLatch.countDown();
        }
    }, null);
    failedLatch.await();
}
/**
 * Builds an in-memory copy of another ledger handle: snapshots its id, all entry
 * buffers up to the last-add-confirmed entry, and its metadata.
 *
 * @param toCopy the handle whose contents are copied
 * @throws Exception if reading the source handle fails
 */
MockOffloadReadHandle(ReadHandle toCopy) throws Exception {
    id = toCopy.getId();
    final long lastConfirmed = toCopy.getLastAddConfirmed();
    // retainedSlice() gives this copy its own reference to each entry buffer, so the
    // buffers stay valid after the LedgerEntries container is closed below.
    try (LedgerEntries sourceEntries = toCopy.read(0, lastConfirmed)) {
        for (LedgerEntry sourceEntry : sourceEntries) {
            entries.add(sourceEntry.getEntryBuffer().retainedSlice());
        }
    }
    metadata = new MockMetadata(toCopy.getLedgerMetadata());
}
/**
 * Reads the single entry at {@code position} through the entry cache, optionally guarded
 * by a read timeout (config.getReadEntryTimeoutSeconds(); 0 or negative disables the
 * timeout and the call goes straight to the cache with the caller's callback).
 *
 * When the timeout is enabled, the callback is wrapped in a recyclable
 * ReadEntryCallbackWrapper and a timer task is scheduled that fails the read with
 * BKException.Code.TimeoutException if it has not completed in time.
 */
protected void asyncReadEntry(ReadHandle ledger, PositionImpl position, ReadEntryCallback callback, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, position.getLedgerId(),
                position.getEntryId(), callback, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {} after {} sec", this.name, ledger.getId(),
                        position, timeout);
                // Fail through the wrapper; passing readOpCount lets it reject a stale/recycled wrapper.
                readCallback.readEntryFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // Hand the timer task to the wrapper so a completing read can cancel it.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, position, readCallback, readOpCount);
    } else {
        // No timeout configured: pass the caller's callback straight through.
        entryCache.asyncReadEntry(ledger, position, callback, ctx);
    }
}
/**
 * Verifies a read of [0..9] succeeds when only scattered entries (0, 2, 5, 8) are
 * cached: the cache must fill every gap from the ledger and return all 10 entries.
 */
@Test(timeOut = 5000)
void testReadMissingMultiple() throws Exception {
    ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn(0L);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    byte[] data = new byte[10];
    // Cache a sparse subset; the remaining entries must come from the (mocked) ledger.
    entryCache.insert(EntryImpl.create(0, 0, data));
    entryCache.insert(EntryImpl.create(0, 2, data));
    entryCache.insert(EntryImpl.create(0, 5, data));
    entryCache.insert(EntryImpl.create(0, 8, data));

    final CountDownLatch counter = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            assertEquals(entries.size(), 10);
            // FIX: release the ref-counted entries so pooled buffers are not leaked
            // (matches the release done in testRead).
            entries.forEach(Entry::release);
            counter.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            Assert.fail("should not have failed");
        }
    }, null);
    counter.await();
}
/**
 * Verifies a read of [0..9] succeeds when only the edges (entries 0, 1, 8, 9) are
 * cached: the cache must fetch the missing middle from the ledger and return all 10.
 */
@Test(timeOut = 5000)
void testReadMissingMiddle() throws Exception {
    ReadHandle lh = getLedgerHandle();
    when(lh.getId()).thenReturn(0L);

    EntryCacheManager cacheManager = factory.getEntryCacheManager();
    EntryCache entryCache = cacheManager.getEntryCache(ml);

    byte[] data = new byte[10];
    // Cache only the two ends; 2..7 must come from the (mocked) ledger.
    entryCache.insert(EntryImpl.create(0, 0, data));
    entryCache.insert(EntryImpl.create(0, 1, data));
    entryCache.insert(EntryImpl.create(0, 8, data));
    entryCache.insert(EntryImpl.create(0, 9, data));

    final CountDownLatch counter = new CountDownLatch(1);
    entryCache.asyncReadEntry(lh, 0, 9, false, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            assertEquals(entries.size(), 10);
            // FIX: release the ref-counted entries so pooled buffers are not leaked
            // (matches the release done in testRead).
            entries.forEach(Entry::release);
            counter.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            Assert.fail("should not have failed");
        }
    }, null);
    counter.await();
}
private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEntryCallback callback, final Object ctx) { if (log.isDebugEnabled()) { log.debug("[{}] Reading entry ledger {}: {}", ml.getName(), lh.getId(), position.getEntryId());
/**
 * Reads the single entry at {@code position}, delegating to {@link #asyncReadEntry0}.
 * Any throwable escaping the delegate is converted into a callback failure after the
 * ledger's cached entries are purged, so the next read goes to the bookie.
 *
 * @param lh       ledger to read from
 * @param position ledger/entry position to read
 * @param callback completion callback (exactly one of its methods is invoked)
 * @param ctx      opaque context passed through to the callback
 */
@Override
public void asyncReadEntry(ReadHandle lh, PositionImpl position, final ReadEntryCallback callback,
        final Object ctx) {
    try {
        asyncReadEntry0(lh, position, callback, ctx);
    } catch (Throwable t) {
        log.warn("failed to read entries for {}-{}", lh.getId(), position, t);
        // invalidate all entries related to ledger from the cache (it might happen if entry gets
        // corrupt (entry.data is already deallocate due to any race-condition) so, invalidate cache
        // and next time read from the bookie)
        invalidateAllEntries(lh.getId());
        callback.readEntryFailed(createManagedLedgerException(t), ctx);
    }
}