@Override public void onFailure(final LowLevelPutException e, ClientContext context) { context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() { @Override public boolean run(ClientContext context) { try { ((SendableInsert) request).onFailure(e, token, context); } finally { sched.removeRunningInsert((SendableInsert)(request), token.getKey()); // Something might be waiting for a request to complete (e.g. if we have two requests for the same key), // so wake the starter thread. } sched.wakeStarter(); return false; } }); }
@Override public void onFetchSuccess(ClientContext context) { context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() { @Override public boolean run(ClientContext context) { try { sched.succeeded((SendableGet)request, false); } finally { sched.removeFetchingKey(key); } // Something might be waiting for a request to complete (e.g. if we have two requests for the same key), // so wake the starter thread. sched.wakeStarter(); return false; } }); }
@Override
public void onFailed(final InsertException e) {
    // Tear down the insert off-thread: stop sending, release storage, then notify the callback.
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            unregisterSender();
            raf.close();
            raf.free();
            originalData.close();
            if (freeData)
                originalData.free();
            cb.onFailure(e, SplitFileInserter.this, context);
            return true;
        }

    });
}
@Override public void onInsertSuccess(final ClientKey key, ClientContext context) { context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() { @Override public boolean run(ClientContext context) { try { ((SendableInsert) request).onSuccess(token, key, context); } finally { sched.removeRunningInsert((SendableInsert)(request), token.getKey()); } // Something might be waiting for a request to complete (e.g. if we have two requests for the same key), // so wake the starter thread. sched.wakeStarter(); return false; } }); }
@Override public void onFailure(final LowLevelGetException e, ClientContext context) { context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() { @Override public boolean run(ClientContext context) { try { ((SendableGet) request).onFailure(e, token, context); } finally { sched.removeFetchingKey(key); } // Something might be waiting for a request to complete (e.g. if we have two requests for the same key), // so wake the starter thread. sched.wakeStarter(); return false; } }); }
@Override
public void onSucceeded(final Metadata metadata) {
    // Finish the insert off-thread: report metadata (unless already reported early),
    // notify the callback, then release storage.
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            if (logMINOR)
                Logger.minor(this, "Succeeding on " + SplitFileInserter.this);
            unregisterSender();
            // With earlyEncode/getCHKOnly the metadata is presumably reported from
            // onHasKeys() already — so don't report it a second time here.
            if (!(ctx.earlyEncode || ctx.getCHKOnly)) {
                reportMetadata(metadata);
            }
            cb.onSuccess(SplitFileInserter.this, context);
            raf.close();
            raf.free();
            originalData.close();
            if (freeData)
                originalData.free();
            return true;
        }

    });
}
@Override
public void onHasKeys() {
    // Only relevant when the client asked for keys ahead of data (early encode / CHK-only).
    if (!(ctx.earlyEncode || ctx.getCHKOnly))
        return;
    context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            try {
                Metadata metadata = storage.encodeMetadata();
                reportMetadata(metadata);
                // In CHK-only mode there is nothing further to insert.
                if (ctx.getCHKOnly)
                    onSucceeded(metadata);
            } catch (IOException e) {
                storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null));
            } catch (MissingKeyException e) {
                storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR,
                        "Lost one or more keys", e, null));
            }
            return false;
        }

    });
}
/** Notify clients by calling innerNotifyClients off-thread. */
public final void notifyClients(ClientContext context) {
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            innerNotifyClients(context);
            return false;
        }

    });
}
@Override
public void onSplitfileCompatibilityMode(final CompatibilityMode min, final CompatibilityMode max,
        final byte[] customSplitfileKey, final boolean dontCompress, final boolean bottomLayer,
        final boolean definitiveAnyway, ClientContext context) {
    // Forward the compatibility-mode information to listeners as an event, off-thread.
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            boolean definitive = bottomLayer || definitiveAnyway;
            ctx.eventProducer.produceEvent(new SplitfileCompatibilityModeEvent(min, max,
                    customSplitfileKey, dontCompress, definitive), context);
            return false;
        }

    });
}
@Override
public void clearWakeupTime(ClientContext context) {
    super.clearWakeupTime(context);
    // If the parent wants cooldown notifications, tell it off-thread that the
    // cooldown has been cleared.
    if (parent instanceof WantsCooldownCallback) {
        context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

            @Override
            public boolean run(ClientContext context) {
                ((WantsCooldownCallback) parent).clearCooldown(getClientGetState());
                return false;
            }

        });
    }
}
/** Called when we have some idea of the length of the final data */
@Override
public void onExpectedSize(final long size, ClientContext context) {
    // Ignore the estimate once the metadata is finalized, or once a final block
    // count has been set (presumably the size is then already exact — confirm).
    if (finalizedMetadata || finalBlocksRequired != 0)
        return;
    expectedSize = size;
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new ExpectedFileSizeEvent(size), context);
            return false;
        }

    });
}
/**
 * Notify clients that some part of the request has been sent to the network i.e. we have finished
 * checking the datastore for at least some part of the request. Sent once only for any given request.
 */
@Override
protected void innerToNetwork(ClientContext context) {
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new SendingToNetworkEvent(), context);
            return false;
        }

    });
}
@Override
public boolean reduceWakeupTime(final long wakeupTime, ClientContext context) {
    final boolean reduced = super.reduceWakeupTime(wakeupTime, context);
    // If the parent wants cooldown notifications, tell it off-thread that we have
    // entered cooldown until wakeupTime.
    if (parent instanceof WantsCooldownCallback) {
        context.getJobRunner(persistent).queueNormalOrDrop(new PersistentJob() {

            @Override
            public boolean run(ClientContext context) {
                ((WantsCooldownCallback) parent).enterCooldown(getClientGetState(), wakeupTime, context);
                return false;
            }

        });
    }
    return reduced;
}
@Override
public void onHashes(HashResult[] hashes, ClientContext context) {
    synchronized (this) {
        // Hashes may only be set once; a second, different set is an error.
        if (this.hashes != null) {
            if (!HashResult.strictEquals(hashes, this.hashes))
                Logger.error(this, "Two sets of hashes?!");
            return;
        }
        this.hashes = hashes;
    }
    // For a persistent request, hand the event a copy — presumably so the queued
    // job does not share the stored array (TODO confirm).
    final HashResult[] eventHashes = persistent() ? HashResult.copy(hashes) : hashes;
    context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {

        @Override
        public boolean run(ClientContext context) {
            ctx.eventProducer.produceEvent(new ExpectedHashesEvent(eventHashes), context);
            return false;
        }

    });
}
checkCompatibleExtension(mime); context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {
CHKBlock block = clientBlock.getBlock(); final ClientCHK key = clientBlock.getClientKey(); context.getJobRunner(request.isPersistent()).queueNormalOrDrop(new PersistentJob() { return true; } catch (final IOException e) { context.getJobRunner(request.isPersistent()).queueNormalOrDrop(new PersistentJob() {
context.getJobRunner(persistent).queueInternal(new PersistentJob() {
context.getJobRunner(persistent), context.ticker, context.getChkInsertScheduler(realTime).fetchingKeys(), topDontCompress, topRequiredBlocks, topTotalBlocks, origDataSize, origCompressedDataSize);
} else { context.getJobRunner(persistent()).queueInternal(new PersistentJob() {
originalData.onResume(context); this.storage = new SplitFileInserterStorage(raf, originalData, this, context.fastWeakRandom, context.memoryLimitedJobRunner, context.getJobRunner(true), context.ticker, context.getChkInsertScheduler(realTime).fetchingKeys(), context.persistentFG, context.persistentFileTracker, context.getPersistentMasterSecret());