private void failDiskOffThread(final IOException e) {
    parent.jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            parent.failOnDiskError(e);
            return true;
        }

    });
}
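/* The method above is one instance of the pattern repeated throughout this listing: wrap the real
 * work in a PersistentJob and hand it to queueNormalOrDrop() so it runs on the persistence job
 * runner instead of the calling thread. The sketch below is illustrative only: OffThreadHelper is
 * a hypothetical convenience wrapper, not part of the codebase, and it assumes that returning true
 * from PersistentJob.run() requests a checkpoint of persistent state while false marks the job as
 * transient, which is how the surrounding snippets appear to use the return value. It also assumes
 * the same PersistentJob, PersistentJobRunner and ClientContext types used above are imported. */
final class OffThreadHelper {

    private final PersistentJobRunner jobRunner;

    OffThreadHelper(PersistentJobRunner jobRunner) {
        this.jobRunner = jobRunner;
    }

    /** Queue work off-thread; needsCheckpoint mirrors the boolean returned by run(). */
    void queueOffThread(final Runnable work, final boolean needsCheckpoint) {
        jobRunner.queueNormalOrDrop(new PersistentJob() {

            @Override
            public boolean run(ClientContext context) {
                work.run();
                return needsCheckpoint;
            }

        });
    }

}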
@Override
public void run() {
    jobRunner.queueNormalOrDrop(writeMetadataJob);
}
private void failOffThread(final FetchException e) {
    parent.jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            parent.fail(e);
            return true;
        }

    });
}
public void failedBlock() {
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            fetcher.onFailedBlock();
            return false;
        }

    });
}
public void restartedAfterDataCorruption(boolean wasCorrupt) {
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            maybeClearCooldown();
            fetcher.restartedAfterDataCorruption();
            return false;
        }

    });
}
@Override
public void run() {
    jobRunner.queueNormalOrDrop(writeMetadataJob);
}
/** Called on a normal non-truncation completion. Frees the storage file off-thread. */
private void closeOffThread() {
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            // ATOMICITY/DURABILITY: This will run after the checkpoint after completion.
            // So after restart, even if the checkpoint failed, we will be in a valid state.
            // This is why this is queue() not queueInternal().
            close();
            return true;
        }

    });
}
private void callSuccessOffThread() {
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            synchronized(SplitFileFetcherStorage.this) {
                // Race conditions are possible, make sure we only call it once.
                if(succeeded) return false;
                succeeded = true;
            }
            fetcher.onSuccess();
            return true;
        }

    });
}
public void failOnDiskError(final IOException e) {
    Logger.error(this, "Failing on disk error: "+e, e);
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            fetcher.failOnDiskError(e);
            return true;
        }

    });
}
public void failOnDiskError(final ChecksumFailedException e) {
    Logger.error(this, "Failing on unrecoverable corrupt data: "+e, e);
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            fetcher.failOnDiskError(e);
            return true;
        }

    });
}
public synchronized void lazyWriteMetadata() {
    if(!persistent) return;
    if(LAZY_WRITE_METADATA_DELAY != 0) {
        // The Runnable must be the same object for de-duplication.
        ticker.queueTimedJob(wrapLazyWriteMetadata, "Write metadata for splitfile",
                LAZY_WRITE_METADATA_DELAY, false, true);
    } else {
        // Must still be off-thread, multiple segments, possible locking issues...
        jobRunner.queueNormalOrDrop(writeMetadataJob);
    }
}
public void lazyWriteMetadata() {
    if(!persistent) return;
    if(LAZY_WRITE_METADATA_DELAY != 0) {
        // The Runnable must be the same object for de-duplication.
        ticker.queueTimedJob(wrapLazyWriteMetadata, "Write metadata for splitfile",
                LAZY_WRITE_METADATA_DELAY, false, true);
    } else {
        // Must still be off-thread, multiple segments, possible locking issues...
        jobRunner.queueNormalOrDrop(writeMetadataJob);
    }
}
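/* Sketch of the de-duplication the "same object" comment above depends on. The field names match
 * the snippets in this listing, but the declarations here are reconstructed for illustration, not
 * copied from the repository, and writeMetadata() is a hypothetical name for the actual on-disk
 * write. Because every call to lazyWriteMetadata() passes the identical wrapLazyWriteMetadata
 * instance to the Ticker with de-duplication enabled, repeated calls within
 * LAZY_WRITE_METADATA_DELAY collapse into a single delayed metadata write. */
private final PersistentJob writeMetadataJob = new PersistentJob() {

    @Override
    public boolean run(ClientContext context) {
        writeMetadata();   // hypothetical: perform the actual metadata write
        return false;
    }

};

private final Runnable wrapLazyWriteMetadata = new Runnable() {

    @Override
    public void run() {
        // Hand off to the job runner; compare the identical run() methods earlier in this listing.
        jobRunner.queueNormalOrDrop(writeMetadataJob);
    }

};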
/** Fail the request, off-thread. The callback will call cancel etc, so it won't immediately
 * shut down the storage.
 * @param e */
public void fail(final FetchException e) {
    if(logMINOR) Logger.minor(this, "Failing "+this+" with error "+e+" and codes "+errors);
    jobRunner.queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            fetcher.fail(e);
            return true;
        }

    });
}
@Override
public void onFailure(final LowLevelPutException e, ClientContext context) {
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            try {
                ((SendableInsert) request).onFailure(e, token, context);
            } finally {
                sched.removeRunningInsert((SendableInsert)(request), token.getKey());
                // Something might be waiting for a request to complete (e.g. if we have two
                // requests for the same key), so wake the starter thread.
            }
            sched.wakeStarter();
            return false;
        }

    });
}
/** Notify clients by calling innerNotifyClients off-thread. */
public final void notifyClients(ClientContext context) {
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            innerNotifyClients(context);
            return false;
        }

    });
}
@Override
public void onSplitfileCompatibilityMode(final CompatibilityMode min, final CompatibilityMode max,
        final byte[] customSplitfileKey, final boolean dontCompress, final boolean bottomLayer,
        final boolean definitiveAnyway, ClientContext context) {
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new SplitfileCompatibilityModeEvent(min, max,
                    customSplitfileKey, dontCompress, bottomLayer || definitiveAnyway), context);
            return false;
        }

    });
}
@Override
public void clearWakeupTime(ClientContext context) {
    super.clearWakeupTime(context);
    if(this.parent instanceof WantsCooldownCallback) {
        context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

            @Override
            public boolean run(ClientContext context) {
                ((WantsCooldownCallback)parent).clearCooldown(getClientGetState());
                return false;
            }

        });
    }
}
/** Called when we have some idea of the length of the final data */
@Override
public void onExpectedSize(final long size, ClientContext context) {
    if(finalizedMetadata) return;
    if(finalBlocksRequired != 0) return;
    expectedSize = size;
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new ExpectedFileSizeEvent(size), context);
            return false;
        }

    });
}
@Override
public boolean reduceWakeupTime(final long wakeupTime, ClientContext context) {
    boolean ret = super.reduceWakeupTime(wakeupTime, context);
    if(this.parent instanceof WantsCooldownCallback) {
        context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

            @Override
            public boolean run(ClientContext context) {
                ((WantsCooldownCallback)parent).enterCooldown(getClientGetState(), wakeupTime, context);
                return false;
            }

        });
    }
    return ret;
}
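/* Hypothetical illustration of the receiving side of the two overrides above. The method
 * signatures are inferred from the calls in those overrides; the class itself and its bookkeeping
 * are invented for illustration and do not exist in the repository. A parent that wants cooldown
 * notifications would provide methods along these lines. */
class CooldownAwareRequester {

    private volatile long cooldownWakeupTime = -1;

    public void enterCooldown(ClientGetState state, long wakeupTime, ClientContext context) {
        // Remember when the request may be retried, e.g. so a UI can display the cooldown.
        cooldownWakeupTime = wakeupTime;
    }

    public void clearCooldown(ClientGetState state) {
        // The request is runnable again.
        cooldownWakeupTime = -1;
    }

}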
/**
 * Notify clients that some part of the request has been sent to the network i.e. we have finished
 * checking the datastore for at least some part of the request. Sent once only for any given request.
 */
@Override
protected void innerToNetwork(ClientContext context) {
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new SendingToNetworkEvent(), context);
            return false;
        }

    });
}