@Override public ClientGetter fetchFromMetadata(Bucket initialMetadata, ClientGetCallback callback, FetchContext fctx, short prio) throws FetchException { if(initialMetadata == null) throw new NullPointerException(); ClientGetter get = new ClientGetter(callback, FreenetURI.EMPTY_CHK_URI, fctx, prio, null, null, initialMetadata); try { core.clientContext.start(get); } catch (PersistenceDisabledException e) { // Impossible } return get; }
/**
 * Restart this request, optionally redirecting it to a new URI.
 *
 * @param redirect URI to fetch instead of the original one; may be null to reuse the old URI.
 * @param filterData Whether to content-filter the fetched data; stored on the fetch context
 *        before restarting.
 * @param context The database. We must be on the database thread! See ClientContext for
 *        convenience methods.
 * @return True on a successful restart, false if restarting is not possible.
 * @throws FetchException If something went wrong.
 */
public boolean restart(FreenetURI redirect, boolean filterData, ClientContext context) throws FetchException {
	// The filter flag must be in place before start() re-schedules the request.
	ctx.filterData = filterData;
	boolean restarted = start(true, redirect, context);
	return restarted;
}
@Override
public void maybeAddToBinaryBlob(ClientCHKBlock block) {
	// Only a top-level ClientGetter maintains a binary blob; any other parent
	// type silently ignores the block.
	if (!(parent instanceof ClientGetter)) {
		return;
	}
	((ClientGetter) parent).addKeyToBinaryBlob(block, context);
}
/**
 * Start a request. A transient request is started immediately on the calling
 * thread; a persistent request is queued as a job on the database thread.
 *
 * @param getter The request to start.
 * @throws FetchException If the request is transient and failed to start.
 * @throws PersistenceDisabledException If the request is persistent and the database is disabled.
 */
public void start(final ClientGetter getter) throws FetchException, PersistenceDisabledException {
	if (!getter.persistent()) {
		// Transient: start right away, letting FetchException propagate to the caller.
		getter.start(this);
		return;
	}
	jobRunner.queue(new PersistentJob() {
		@Override
		public boolean run(ClientContext context) {
			try {
				getter.start(context);
			} catch (FetchException e) {
				// On the database thread we cannot rethrow to the caller, so
				// report the failure through the client callback instead.
				getter.clientCallback.onFailure(e, getter);
			}
			return true;
		}
	}, NativeThread.NORM_PRIORITY);
}
// NOTE(review): fragment — the anonymous ClientGetCallback body and the opening of
// the enclosing try block are missing from this view; do not edit without the full method.
if(logMINOR) Logger.minor(this, "Prefetching content for background fetch for edition "+l+" on "+key);
// Clone the shared fetch context so this prefetch cannot mutate shared settings.
FetchContext fctx = new FetchContext(realFetchContext, FetchContext.IDENTICAL_MASK);
final ClientGetter get = new ClientGetter(new ClientGetCallback() {
get.start(context);
} catch (FetchException e) {
// Prefetching is best-effort: a failure is only logged at MINOR, never propagated.
if(logMINOR) Logger.minor(this, "Prefetch failed: "+e, e);
// NOTE(review): fragment — the catch body and the enclosing method are not visible here.
try {
	// 2097152 = 2 MiB maximum fetch size.
	ClientGetter clientGetter = client.fetch(uri, 2097152, callback, fetchContext, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
	clientGetter.setMetaSnoop(snoop);
	// Restart after attaching the snoop — presumably so the snoop observes the
	// metadata from the first pass of the (re)started fetch. TODO confirm against
	// the behavior of client.fetch().
	clientGetter.restart(uri, fetchContext.filterData, node.clientCore.clientContext);
} catch (FetchException fe) {
// NOTE(review): fragment — the enclosing method is not visible here.
FetchContext context = client.getFetchContext();
FetchWaiter fw = new FetchWaiter((RequestClient)client);
// No return bucket and no blob writer: the interesting output is what the
// DumperSnoopMetadata snoop prints, not the fetched data itself.
ClientGetter get = new ClientGetter(fw, uri, context, RequestStarter.INTERACTIVE_PRIORITY_CLASS, null, null, null);
get.setMetaSnoop(new DumperSnoopMetadata());
get.start(n.clientCore.clientContext);
// Blocks the calling thread until the fetch finishes (or fails).
FetchResult result = fw.waitForCompletion();
ClientMetadata cm = result.getMetadata();
// NOTE(review): fragment — the head of the surrounding condition and the tail of the
// ClientGetter constructor call are truncated; reconstruct from the original file before editing.
!(revocationGetter.isCancelled() || revocationGetter.isFinished())) {
// An active revocation fetcher already exists, so don't queue another.
if(logMINOR) Logger.minor(this, "Not queueing another revocation fetcher yet, old one still running");
reset = false;
if(revocationGetter != null && logMINOR) Logger.minor(this, "revocation fetcher: cancelled="+revocationGetter.isCancelled()+", finished="+revocationGetter.isFinished());
// Aggressive mode bumps the revocation check to maximum priority.
cg = revocationGetter = new ClientGetter(this, manager.getRevocationURI(), ctxRevocation, aggressive ? RequestStarter.MAXIMUM_PRIORITY_CLASS : RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS,
toCancel.cancel(core.clientContext);
if(cg != null) {
core.clientContext.start(cg);
// NOTE(review): fragment — ClientGetter start internals with several statements and
// closing braces missing (e.g. the SingleFileFetcher.create call is truncated mid-argument);
// do not edit without the full method.
Logger.minor(this, "Starting "+this+" persistent="+persistent()+" for "+uri);
try {
synchronized(this) {
// A restart wipes the per-request counters before re-scheduling.
if(restart)
clearCountersOnRestart();
if(overrideURI != null) uri = overrideURI;
if(finished) {
finalBlocksRequired = 0;
finalBlocksTotal = 0;
resetBlocks();
currentState = SingleFileFetcher.create(this, this, uri, ctx, actx, ctx.maxNonSplitfileRetries, 0, false, -1, true,
expectedMIME = overrideMIME;
// Re-check cancellation around scheduling: cancel() may race with start().
if(cancelled) cancel();
currentState.schedule(context);
if(cancelled) cancel();
} catch (MalformedURLException e) {
// A malformed URI surfaces to callers as an INVALID_URI fetch failure.
throw new FetchException(FetchExceptionMode.INVALID_URI, e);
// NOTE(review): fragment — the enclosing method and the code between the constructor
// and the final cancel call are missing; do not edit without the full method.
if((cg == null) || cg.isCancelled()) {
if(logMINOR) Logger.minor(this, "Scheduling request for " + URI.setSuggestedEdition(availableVersion));
FreenetURI uri = URI.setSuggestedEdition(availableVersion);
// Convert the USK hint to a direct SSK fetch of that specific edition.
uri = uri.sskForUSK();
// The fetched blocks are mirrored into tempBlobFile via the BinaryBlobWriter.
cg = new ClientGetter(this, uri, ctx, RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, null, new BinaryBlobWriter(new FileBucket(tempBlobFile, false, false, false, false)), null);
cancelled.cancel(core.clientContext);
/**
 * Helper: stubs the callback's shouldCancel() for the given MIME type, starts a
 * background fetch, captures the snoop that was registered on the getter, and
 * asserts that feeding it metadata with that MIME type yields the expected
 * cancel decision.
 */
private void verifySnoopCancelsRequestForMimeType(String mimeType, boolean cancel) {
	when(backgroundFetchCallback.shouldCancel(eq(uri), eq(mimeType), anyLong())).thenReturn(cancel);
	freenetInterface.startFetch(uri, backgroundFetchCallback);
	// Capture the SnoopMetadata that startFetch installed on the getter.
	ArgumentCaptor<SnoopMetadata> snoopCaptor = forClass(SnoopMetadata.class);
	verify(clientGetter).setMetaSnoop(snoopCaptor.capture());
	// Drive the captured snoop with mocked metadata of the requested MIME type.
	Metadata mockMetadata = mock(Metadata.class);
	when(mockMetadata.getMIMEType()).thenReturn(mimeType);
	boolean snoopDecision = snoopCaptor.getValue().snoopMetadata(mockMetadata, mock(ClientContext.class));
	assertThat(snoopDecision, is(cancel));
}
// NOTE(review): fragment — ClientGetter success-path internals with many intervening
// statements and braces missing; do not edit without the full method.
context.uskManager.checkUSK(uri, persistent(), false);
try {
// Flush and close the binary blob, if one was being written and finalization wasn't suppressed.
if (binaryBlobWriter != null && !dontFinalizeBlobWriter) binaryBlobWriter.finalizeBucket();
} catch (IOException ioe) {
onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream: "+ioe), null, context);
return;
} catch (BinaryBlobAlreadyClosedException e) {
onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream, already closed: "+e, e), null, context);
return;
// A forced extension requires a known MIME type to validate against.
onFailure(new FetchException(FetchExceptionMode.MIME_INCOMPATIBLE_WITH_EXTENSION, "No MIME type but need specific extension \""+forceCompatibleExtension+"\""), null, context);
return;
checkCompatibleExtension(mimeType);
} catch (FetchException e) {
onFailure(e, null, context);
return;
// Use the caller-supplied bucket when available, otherwise allocate one.
if(returnBucket == null) finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
else finalResult = returnBucket;
if(logMINOR)
Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
onFailure(ex, state, context, true);
// Only free the bucket we allocated ourselves, never the caller's.
if(finalResult != null && finalResult != returnBucket) {
finalResult.free();
context.getJobRunner(persistent()).setCheckpointASAP();
clientCallback.onSuccess(result, ClientGetter.this);
// NOTE(review): fragment — the bodies of these nested blocks are not visible here.
if(getter.restart(redirect, fctx.filterData, context)) {
synchronized(this) {
if(redirect != null) {
public void stop() {
	// Snapshot the flag under the lock; once the data has been fetched there
	// is nothing left to cancel.
	boolean alreadyFetched;
	synchronized(this) {
		alreadyFetched = fetched;
	}
	if (alreadyFetched) {
		return;
	}
	// As in the original: cancellation happens outside the lock.
	cg.cancel(context);
}
// NOTE(review): fragment — the anonymous PersistentJob body is missing from this view.
checkCompatibleExtension(mime);
// Hand the remaining work to the appropriate job runner (persistent or transient).
context.getJobRunner(persistent()).queueNormalOrDrop(new PersistentJob() {
/**
 * Called when no more blocks will be added to the total, and therefore we can
 * confidently display a percentage for the overall progress. Clients are
 * notified with a SplitfileProgressEvent via blockSetFinalized().
 */
@Override
public void onBlockSetFinished(ClientGetState state, ClientContext context) {
	if (logMINOR) {
		// The throwaway exception captures a stack trace purely for debug logging.
		Logger.minor(this, "Set finished", new Exception("debug"));
	}
	blockSetFinalized(context);
}
@Override
public boolean canRestart() {
	// Only a finished, unsuccessful request may be restarted; the underlying
	// getter gets the final say.
	if (finished) {
		if (succeeded) {
			Logger.minor(this, "Cannot restart because succeeded for "+identifier);
			return false;
		}
		return getter.canRestart();
	}
	Logger.minor(this, "Cannot restart because not finished for "+identifier);
	return false;
}
/** * A non-authoritative hint that a specific edition *might* exist. At the moment, * we just fetch the block. We do not fetch the contents, and it is possible that * USKFetcher's are also fetching the block. FIXME would it be more efficient to * pass it along to a USKFetcher? * @param usk * @param edition * @param context */ public void hintUpdate(USK usk, long edition, ClientContext context) { if(edition < lookupLatestSlot(usk)) return; FreenetURI uri = usk.copy(edition).getURI().sskForUSK(); final ClientGetter get = new ClientGetter(new NullClientCallback(rcBulk), uri, new FetchContext(backgroundFetchContext, FetchContext.IDENTICAL_MASK), RequestStarter.UPDATE_PRIORITY_CLASS, new NullBucket(), null, null); try { get.start(context); } catch (FetchException e) { // Ignore } }
// Verifies that starting a background fetch both installs a metadata snoop on the
// getter and restarts the request so the snoop sees the metadata.
@Test
public void backgroundFetchRegistersSnoopAndRestartsTheRequest() throws Exception {
	freenetInterface.startFetch(uri, backgroundFetchCallback);
	// startFetch must attach some SnoopMetadata to the getter...
	verify(clientGetter).setMetaSnoop(any(SnoopMetadata.class));
	// ...and then restart the fetch for the same URI.
	verify(clientGetter).restart(eq(uri), anyBoolean(), any(ClientContext.class));
}
// NOTE(review): fragment — the if-branch matching this else and the surrounding
// method are missing; do not edit without the full method.
e.expectedSize = expectedSize;
context.getJobRunner(persistent()).setCheckpointASAP();
else {
try {
start(context);
} catch (FetchException e1) {
// Carry the new failure forward in place of the original.
e = e1;