/**
 * Defers trimming of consumed ledgers to a background task executed after a
 * short back-off, completing {@code promise} when the trim finishes.
 *
 * @param promise future completed (or failed) by the background trim
 */
private void scheduleDeferredTrimming(CompletableFuture<?> promise) {
    // 100ms back-off before retrying the trim in the background.
    final long backoffMillis = 100;
    Runnable trimTask = safeRun(() -> trimConsumedLedgersInBackground(promise));
    scheduledExecutor.schedule(trimTask, backoffMillis, TimeUnit.MILLISECONDS);
}
private void updateLedgersListAfterRollover(MetaStoreCallback<Void> callback) { if (!ledgersListMutex.tryLock()) { // Defer update for later scheduledExecutor.schedule(() -> updateLedgersListAfterRollover(callback), 100, TimeUnit.MILLISECONDS); return; } if (log.isDebugEnabled()) { log.debug("[{}] Updating ledgers ids with new ledger. version={}", name, ledgersStat); } store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, callback); }
/**
 * Asynchronously deletes a ledger from BookKeeper, retrying with a fixed back-off
 * on failure until {@code retry} attempts are exhausted.
 *
 * @param ledgerId id of the ledger to delete
 * @param retry    remaining delete attempts; gives up when it reaches 0
 */
private void asyncDeleteLedger(long ledgerId, long retry) {
    if (retry <= 0) {
        log.warn("[{}] Failed to delete ledger after retries {}", name, ledgerId);
        return;
    }
    bookKeeper.asyncDeleteLedger(ledgerId, (rc, ctx) -> {
        if (rc == BKException.Code.NoSuchLedgerExistsException) {
            // Already gone — treat as success, nothing to retry.
            log.warn("[{}] Ledger was already deleted {}", name, ledgerId);
        } else if (rc != BKException.Code.OK) {
            // FIX: original message had only 2 placeholders for 3 arguments, so the
            // BKException message (a String, not a Throwable) was silently dropped.
            log.error("[{}] Error deleting ledger {} : {}", name, ledgerId, BKException.getMessage(rc));
            scheduledExecutor.schedule(safeRun(() -> {
                asyncDeleteLedger(ledgerId, retry - 1);
            }), DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        } else {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Deleted ledger {}", name, ledgerId);
            }
        }
    }, null);
}
/**
 * Create ledger async and schedule a timeout task to check ledger-creation is complete else it fails the callback
 * with TimeoutException.
 *
 * NOTE(review): the {@code emptyMap} parameter is ignored and {@code Collections.emptyMap()}
 * is passed instead — kept as-is for interface compatibility; confirm whether callers
 * rely on passing custom metadata here.
 *
 * @param bookKeeper bookkeeper client used to create the ledger
 * @param config     managed-ledger config supplying quorum sizes, password and timeout
 * @param digestType digest type for the new ledger
 * @param cb         completion callback; invoked exactly once (creation result or timeout)
 * @param emptyMap   unused (see note above)
 */
protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType,
        CreateCallback cb, Map<Object, Object> emptyMap) {
    AtomicBoolean ledgerCreated = new AtomicBoolean(false);
    bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), config.getAckQuorumSize(),
            digestType, config.getPassword(), cb, ledgerCreated, Collections.emptyMap());
    scheduledExecutor.schedule(() -> {
        // FIX: use compareAndSet instead of get()+set() — the original check-then-act
        // allowed the bk create-callback to interleave between the two operations,
        // potentially firing the callback twice. Exactly one of {create-callback,
        // timeout} should win. (Assumes the create path also honors ledgerCreated —
        // confirm against the CreateCallback wrapper.)
        if (ledgerCreated.compareAndSet(false, true)) {
            cb.createComplete(BKException.Code.TimeoutException, null, null);
        }
    }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
}
if (!ledgersListMutex.tryLock()) { scheduledExecutor.schedule( safeRun(() -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise)), 100, TimeUnit.MILLISECONDS);
private void maybeOffload(CompletableFuture<PositionImpl> finalPromise) { if (!offloadMutex.tryLock()) { scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)), 100, TimeUnit.MILLISECONDS); } else {
ledger.getScheduledExecutor().schedule(safeRun(() -> { if (log.isDebugEnabled()) { log.debug("[{}] [{}] Re-trying the read at position {}", ledger.getName(), name, op.readPosition);
/**
 * Deletes the given cursor ledger asynchronously, retrying with a fixed back-off
 * when the delete fails for a reason other than the ledger already being gone.
 *
 * @param lh    handle of the ledger to delete; no-op when {@code null}
 * @param retry remaining attempts; logs a warning and gives up at 0
 */
private void asyncDeleteLedger(final LedgerHandle lh, int retry) {
    // Guard clause: nothing to do, or retries exhausted.
    if (lh == null || retry <= 0) {
        if (lh != null) {
            log.warn("[{}-{}] Failed to delete ledger after retries {}", ledger.getName(), name, lh.getId());
        }
        return;
    }

    ledger.mbean.startCursorLedgerDeleteOp();
    bookkeeper.asyncDeleteLedger(lh.getId(), (rc, ctx) -> {
        ledger.mbean.endCursorLedgerDeleteOp();
        if (rc == BKException.Code.OK) {
            log.info("[{}][{}] Successfully closed & deleted ledger {} in cursor", ledger.getName(), name,
                    lh.getId());
            return;
        }
        log.warn("[{}] Failed to delete ledger {}: {}", ledger.getName(), lh.getId(), BKException.getMessage(rc));
        // A missing ledger is final — only retry other failures.
        if (rc != BKException.Code.NoSuchLedgerExistsException) {
            ledger.getScheduledExecutor().schedule(
                    safeRun(() -> asyncDeleteLedger(lh, retry - 1)),
                    DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        }
    }, null);
}
/**
 * Marks this cursor Closed and deletes its backing ledger asynchronously,
 * retrying with a fixed back-off on transient failures.
 *
 * @param retry remaining attempts; logs a warning and gives up at 0
 */
private void asyncDeleteCursorLedger(int retry) {
    STATE_UPDATER.set(this, State.Closed);

    // Guard clause: no ledger to delete, or retries exhausted.
    if (cursorLedger == null || retry <= 0) {
        if (cursorLedger != null) {
            log.warn("[{}-{}] Failed to delete ledger after retries {}", ledger.getName(), name,
                    cursorLedger.getId());
        }
        return;
    }

    ledger.mbean.startCursorLedgerDeleteOp();
    bookkeeper.asyncDeleteLedger(cursorLedger.getId(), (rc, ctx) -> {
        ledger.mbean.endCursorLedgerDeleteOp();
        if (rc == BKException.Code.OK) {
            log.info("[{}][{}] Deleted cursor ledger {}", ledger.getName(), name, cursorLedger.getId());
            return;
        }
        log.warn("[{}][{}] Failed to delete ledger {}: {}", ledger.getName(), name, cursorLedger.getId(),
                BKException.getMessage(rc));
        // A missing ledger is final — only retry other failures.
        if (rc != BKException.Code.NoSuchLedgerExistsException) {
            ledger.getScheduledExecutor().schedule(
                    safeRun(() -> asyncDeleteCursorLedger(retry - 1)),
                    DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        }
    }, null);
}
/**
 * Reads the entry range [firstEntry, lastEntry] from the given ledger via the entry cache.
 * When the configured read timeout is positive, the read is wrapped in a
 * {@code ReadEntryCallbackWrapper} and a timeout task is scheduled that fails the
 * callback with a TimeoutException if the read has not completed in time.
 *
 * @param ledger          handle of the ledger to read from
 * @param firstEntry      first entry id of the range (inclusive)
 * @param lastEntry       last entry id of the range (inclusive)
 * @param isSlowestReader passed through to the entry cache
 * @param opReadEntry     caller's read operation / callback
 * @param ctx             opaque context passed back to the callback
 */
protected void asyncReadEntry(ReadHandle ledger, long firstEntry, long lastEntry, boolean isSlowestReader,
        OpReadEntry opReadEntry, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    // Timeout handling is only active when a positive timeout is configured.
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, ledger.getId(), firstEntry,
                opReadEntry, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {}-{} after {} sec", this.name, ledger.getId(),
                        firstEntry, lastEntry, timeout);
                readCallback.readEntriesFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // Keep the task so the wrapper can cancel it when the read completes first.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, readCallback, readOpCount);
    } else {
        // No timeout configured: read directly with the caller's callback.
        entryCache.asyncReadEntry(ledger, firstEntry, lastEntry, isSlowestReader, opReadEntry, ctx);
    }
}
/**
 * Reads a single entry at the given position from the ledger via the entry cache.
 * When the configured read timeout is positive, the read is wrapped in a
 * {@code ReadEntryCallbackWrapper} and a timeout task is scheduled that fails the
 * callback with a TimeoutException if the read has not completed in time.
 *
 * @param ledger   handle of the ledger to read from
 * @param position ledger/entry position of the entry to read
 * @param callback caller's read callback
 * @param ctx      opaque context passed back to the callback
 */
protected void asyncReadEntry(ReadHandle ledger, PositionImpl position, ReadEntryCallback callback, Object ctx) {
    long timeout = config.getReadEntryTimeoutSeconds();
    // Timeout handling is only active when a positive timeout is configured.
    boolean checkTimeout = timeout > 0;
    if (checkTimeout) {
        // set readOpCount to uniquely validate if ReadEntryCallbackWrapper is already recycled
        long readOpCount = READ_OP_COUNT_UPDATER.incrementAndGet(this);
        ReadEntryCallbackWrapper readCallback = ReadEntryCallbackWrapper.create(name, position.getLedgerId(),
                position.getEntryId(), callback, readOpCount, ctx);
        final ScheduledFuture<?> task = scheduledExecutor.schedule(() -> {
            // validate ReadEntryCallbackWrapper object is not recycled by bk-client callback (by validating
            // readOpCount) and fail the callback if read is not completed yet
            if (readCallback.readOpCount == readOpCount
                    && ReadEntryCallbackWrapper.READ_COMPLETED_UPDATER.get(readCallback) == FALSE) {
                log.warn("[{}]-{} read entry timeout for {} after {} sec", this.name, ledger.getId(), position,
                        timeout);
                readCallback.readEntryFailed(createManagedLedgerException(BKException.Code.TimeoutException),
                        readOpCount);
            }
        }, timeout, TimeUnit.SECONDS);
        // Keep the task so the wrapper can cancel it when the read completes first.
        readCallback.task = task;
        entryCache.asyncReadEntry(ledger, position, readCallback, readOpCount);
    } else {
        // No timeout configured: read directly with the caller's callback.
        entryCache.asyncReadEntry(ledger, position, callback, ctx);
    }
}
/**
 * Schedules {@code r} to run after {@code ms} milliseconds, logging (but not
 * propagating) rejection if the scheduler is shutting down.
 *
 * @param r  task to run
 * @param ms delay in milliseconds
 */
private void scheduleTask(Runnable r, long ms) {
    try {
        scheduler.schedule(r, ms, TimeUnit.MILLISECONDS);
    } catch (RejectedExecutionException ree) {
        // SLF4J varargs form — equivalent to the explicit Object[]; the trailing
        // exception is logged with its stack trace.
        logger.error("Task {} scheduled in {} ms is rejected : ", r, ms, ree);
    }
}
/**
 * Schedules {@code r} to run after {@code ms} milliseconds, unless the scheduler
 * has been halted because a duplicated log was detected for this namespace.
 * Rejection by a shutting-down scheduler is logged, not propagated.
 *
 * @param r  task to run
 * @param ms delay in milliseconds
 */
private void scheduleTask(Runnable r, long ms) {
    // Halted state: refuse all further scheduling for this namespace.
    if (duplicatedLogFound.get()) {
        logger.error("Scheduler is halted for federated namespace {} as duplicated log found", namespace);
        return;
    }
    try {
        scheduler.schedule(r, ms, TimeUnit.MILLISECONDS);
    } catch (RejectedExecutionException ree) {
        // SLF4J varargs form — equivalent to the explicit Object[]; the trailing
        // exception is logged with its stack trace.
        logger.error("Task {} scheduled in {} ms is rejected : ", r, ms, ree);
    }
}
/**
 * Schedules {@code trimConsumedLedgersInBackground} to run after a 100ms back-off,
 * completing {@code promise} when the trim finishes.
 *
 * @param promise future completed (or failed) by the background trim
 */
private void scheduleDeferredTrimming(CompletableFuture<?> promise) {
    scheduledExecutor.schedule(safeRun(() -> trimConsumedLedgersInBackground(promise)), 100,
            TimeUnit.MILLISECONDS);
}
@Override public void onFailure(Throwable cause) { if (cause instanceof LogNotFoundException || cause instanceof LogSegmentNotFoundException || cause instanceof UnexpectedException) { // indicate some inconsistent behavior, abort METADATA_EXCEPTION_UPDATER.compareAndSet(BKLogReadHandler.this, null, (IOException) cause); // notify the reader that read handler is in error state notifyReaderOnError(cause); return; } scheduler.schedule(new Runnable() { @Override public void run() { onSegmentsUpdated(segments); } }, conf.getZKRetryBackoffMaxMillis(), TimeUnit.MILLISECONDS); }
private void updateLedgersListAfterRollover(MetaStoreCallback<Void> callback) { if (!ledgersListMutex.tryLock()) { // Defer update for later scheduledExecutor.schedule(() -> updateLedgersListAfterRollover(callback), 100, TimeUnit.MILLISECONDS); return; } if (log.isDebugEnabled()) { log.debug("[{}] Updating ledgers ids with new ledger. version={}", name, ledgersStat); } store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, callback); }
/**
 * Asynchronously deletes a ledger from BookKeeper, retrying with a fixed back-off
 * on failure until {@code retry} attempts are exhausted.
 *
 * @param ledgerId id of the ledger to delete
 * @param retry    remaining delete attempts; gives up when it reaches 0
 */
private void asyncDeleteLedger(long ledgerId, long retry) {
    if (retry <= 0) {
        log.warn("[{}] Failed to delete ledger after retries {}", name, ledgerId);
        return;
    }
    bookKeeper.asyncDeleteLedger(ledgerId, (rc, ctx) -> {
        if (rc == BKException.Code.NoSuchLedgerExistsException) {
            // Already gone — treat as success, nothing to retry.
            log.warn("[{}] Ledger was already deleted {}", name, ledgerId);
        } else if (rc != BKException.Code.OK) {
            // FIX: original message had only 2 placeholders for 3 arguments, so the
            // BKException message (a String, not a Throwable) was silently dropped.
            log.error("[{}] Error deleting ledger {} : {}", name, ledgerId, BKException.getMessage(rc));
            scheduledExecutor.schedule(safeRun(() -> {
                asyncDeleteLedger(ledgerId, retry - 1);
            }), DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        } else {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Deleted ledger {}", name, ledgerId);
            }
        }
    }, null);
}
@Override public void onFailure(Throwable cause) { if (cause instanceof LogNotFoundException || cause instanceof LogSegmentNotFoundException || cause instanceof UnexpectedException) { // indicate some inconsistent behavior, abort METADATA_EXCEPTION_UPDATER.compareAndSet(BKLogReadHandler.this, null, (IOException) cause); // notify the reader that read handler is in error state notifyReaderOnError(cause); FutureUtils.completeExceptionally(promise, cause); return; } scheduler.schedule(new Runnable() { @Override public void run() { asyncStartFetchLogSegments(promise); } }, conf.getZKRetryBackoffMaxMillis(), TimeUnit.MILLISECONDS); }
/**
 * Create ledger async and schedule a timeout task to check ledger-creation is complete else it fails the callback
 * with TimeoutException.
 *
 * NOTE(review): the {@code emptyMap} parameter is ignored and {@code Collections.emptyMap()}
 * is passed instead — kept as-is for interface compatibility; confirm whether callers
 * rely on passing custom metadata here.
 *
 * @param bookKeeper bookkeeper client used to create the ledger
 * @param config     managed-ledger config supplying quorum sizes, password and timeout
 * @param digestType digest type for the new ledger
 * @param cb         completion callback; invoked exactly once (creation result or timeout)
 * @param emptyMap   unused (see note above)
 */
protected void asyncCreateLedger(BookKeeper bookKeeper, ManagedLedgerConfig config, DigestType digestType,
        CreateCallback cb, Map<Object, Object> emptyMap) {
    AtomicBoolean ledgerCreated = new AtomicBoolean(false);
    bookKeeper.asyncCreateLedger(config.getEnsembleSize(), config.getWriteQuorumSize(), config.getAckQuorumSize(),
            digestType, config.getPassword(), cb, ledgerCreated, Collections.emptyMap());
    scheduledExecutor.schedule(() -> {
        // FIX: use compareAndSet instead of get()+set() — the original check-then-act
        // allowed the bk create-callback to interleave between the two operations,
        // potentially firing the callback twice. Exactly one of {create-callback,
        // timeout} should win. (Assumes the create path also honors ledgerCreated —
        // confirm against the CreateCallback wrapper.)
        if (ledgerCreated.compareAndSet(false, true)) {
            cb.createComplete(BKException.Code.TimeoutException, null, null);
        }
    }, config.getMetadataOperationsTimeoutSeconds(), TimeUnit.SECONDS);
}
/**
 * Deletes the given cursor ledger asynchronously, retrying with a fixed back-off
 * when the delete fails for a reason other than the ledger already being gone.
 *
 * @param lh    handle of the ledger to delete; no-op when {@code null}
 * @param retry remaining attempts; logs a warning and gives up at 0
 */
private void asyncDeleteLedger(final LedgerHandle lh, int retry) {
    // Guard clause: nothing to do, or retries exhausted.
    if (lh == null || retry <= 0) {
        if (lh != null) {
            log.warn("[{}-{}] Failed to delete ledger after retries {}", ledger.getName(), name, lh.getId());
        }
        return;
    }

    ledger.mbean.startCursorLedgerDeleteOp();
    bookkeeper.asyncDeleteLedger(lh.getId(), (rc, ctx) -> {
        ledger.mbean.endCursorLedgerDeleteOp();
        if (rc == BKException.Code.OK) {
            log.info("[{}][{}] Successfully closed & deleted ledger {} in cursor", ledger.getName(), name,
                    lh.getId());
            return;
        }
        log.warn("[{}] Failed to delete ledger {}: {}", ledger.getName(), lh.getId(), BKException.getMessage(rc));
        // A missing ledger is final — only retry other failures.
        if (rc != BKException.Code.NoSuchLedgerExistsException) {
            ledger.getScheduledExecutor().schedule(
                    safeRun(() -> asyncDeleteLedger(lh, retry - 1)),
                    DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
        }
    }, null);
}