/** Choose a random key to fetch. Must not modify anything that is persisted. */
@Override
public SendableRequestItem chooseKey(KeysFetchingLocally keys, ClientContext context) {
    // The storage layer owns the key bookkeeping; delegate the choice to it.
    return storage.chooseRandomKey();
}
@Override public boolean run(ClientContext context) { // ATOMICITY/DURABILITY: This will run after the checkpoint after completion. // So after restart, even if the checkpoint failed, we will be in a valid state. // This is why this is queue() not queueInternal(). close(); return true; }
// NOTE(review): this fragment looks garbled by extraction — a bare "return;"
// precedes the cb.onSuccess(...) call (which would make it unreachable), and
// storage.finishedFetcher() appears both before and after it. Compare against
// the original callback method before relying on this statement order.
storage.finishedFetcher(); return; cb.onSuccess(storage.streamGenerator(), storage.clientMetadata, storage.decompressors, this, context); storage.finishedFetcher();
private void maybeComplete() { if(allSucceeded()) { callSuccessOffThread(); } else if(allFinished() && !allSucceeded()) { // Some failed. fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors)); } }
/**
 * Called when a cross-segment has finished decoding. A cross-segment does not
 * necessarily have a "finished" state, except if it was cancelled, so we just
 * check whether everything is now done.
 */
void finishedEncoding(SplitFileFetcherCrossSegmentStorage segment) {
    if(logMINOR)
        Logger.minor(this, "Successfully decoded "+segment+" for "+this+" for "+fetcher);
    // Only escalate once every segment has reached a terminal state.
    if(allFinished())
        finishedEncoding();
}
// NOTE(review): fragment with unbalanced braces and an unreachable
// closeOffThread() after "return;" — almost certainly a mid-method extraction
// artifact. Verify the real control flow (fail vs. close+maybeComplete) in the
// original source before editing; the completeViaTruncation/raf.close() branch
// presumably closes the backing file only when truncation completes the
// download — TODO confirm.
if(allFinished() && !allSucceeded()) { fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors)); } else { if(completeViaTruncation) raf.close(); maybeComplete(); return; closeOffThread();
// NOTE(review): test scaffold fragment (no enclosing method visible).
// Builds a transient SplitFileFetcherStorage from metadata m1 with the test
// callback fcb, starts it non-resumed, signals that the fetcher side is done,
// then blocks until the callback reports the storage freed. The long positional
// argument list is fragile — verify each flag against the constructor's
// declaration before changing anything here.
SplitFileFetcherStorage fetcherStorage = new SplitFileFetcherStorage(m1, fcb, new ArrayList<COMPRESSOR_TYPE>(), new ClientMetadata(), false, cmode, fctx, false, salt, URI, URI, true, new byte[0], r, smallBucketFactory, smallRAFFactory, jobRunner, ticker, memoryLimitedJobRunner, checker, false, null, null, keysFetching); fetcherStorage.start(false); fetcherStorage.finishedFetcher(); fcb.waitForFree();
// NOTE(review): heavily garbled fragment — on-disk layout offset computation,
// cross-segment block allocation, and checksummed-stream setup from what look
// like several different methods spliced together (the trailing "totalLength ="
// is cut off mid-statement). Do not edit in place; recover the original
// method boundaries first. The offset arithmetic (each offset = previous
// offset + previous section's length) must stay consistent with the reader
// side — TODO confirm against the storage format's documentation.
this.offsetSegmentStatus = offsetKeyList + storedKeysLength; byte[] generalProgress = encodeGeneralProgress(); for(int j=0;j<segLen;j++) { allocateCrossDataBlock(seg, crossSegmentRandom); allocateCrossCheckBlock(seg, crossSegmentRandom); OutputStream cos = checksumOutputStream(os); BufferedOutputStream bos = new BufferedOutputStream(cos); try { offsetOriginalDetails = offsetOriginalMetadata + metadataLength; encodedURI = encodeAndChecksumOriginalDetails(thisKey, origKey, clientDetails, isFinalFetch); this.offsetBasicSettings = offsetOriginalDetails + encodedURI.length; encodeBasicSettings(splitfileDataBlocks, splitfileCheckBlocks, crossCheckBlocks * segments.length); totalLength =
// NOTE(review): fragment cut mid-expression — the SplitFileFetcherStorage
// constructor call is never closed and an "if" is spliced into its argument
// list. Presumably this is the resume path: rebuild storage from the persisted
// RAF, then schedule the getter only if start(resumed) says fetching should
// proceed — TODO confirm against the complete original method.
KeySalter salter = getSalter(); raf.onResume(context); this.storage = new SplitFileFetcherStorage(raf, realTimeFlag, this, blockFetchContext, context.random, context.jobRunner, context.getChkFetchScheduler(realTimeFlag).fetchingKeys(), context.ticker, if (storage.start(resumed)) { getter.schedule(context, storage.hasCheckedStore());
/**
 * Exercises decoding a segment from its data blocks alone: every check block
 * is reported as a non-fatal failure, then all data blocks are delivered, and
 * the fully decoded output is verified end to end.
 */
private void testDataBlocksOnly(TestSplitfile test) throws IOException, CHKEncodeException, FetchException, MetadataParseException {
    StorageCallback cb = test.createStorageCallback();
    SplitFileFetcherStorage storage = test.createStorage(cb);
    SplitFileFetcherSegmentStorage segment = storage.segments[0];
    // Fail every check block non-fatally; check blocks sit after the data
    // blocks in the segment's block numbering.
    for(int checkNo = 0; checkNo < test.checkBlocks.length; checkNo++) {
        segment.onNonFatalFailure(test.dataBlocks.length + checkNo);
    }
    // Deliver every data block. Decode must not start before the last one.
    for(int dataNo = 0; dataNo < test.dataBlocks.length; dataNo++) {
        assertFalse(segment.hasStartedDecode());
        assertTrue(segment.onGotKey(test.dataKeys[dataNo].getNodeCHK(), test.encodeDataBlock(dataNo)));
        cb.markDownloadedBlock(dataNo);
    }
    cb.checkFailed();
    assertTrue(segment.hasStartedDecode());
    cb.checkFailed();
    waitForDecode(segment);
    cb.checkFailed();
    cb.waitForFinished();
    cb.checkFailed();
    test.verifyOutput(storage);
    cb.checkFailed();
    storage.finishedFetcher();
    cb.checkFailed();
    waitForFinished(segment);
    cb.checkFailed();
    cb.waitForFree(storage);
    cb.checkFailed();
}
// NOTE(review): fragment starting mid-try and ending mid-method. Appears to be
// a loop iteration where iteration i == 2 is expected to throw FetchException
// (assertTrue(i != 2) guards the non-throwing path, the catch re-throws for
// any other iteration). After the expected failure, no key should be
// choosable and the callback must report failure — TODO confirm the loop
// context in the original test.
storage = test.createStorage(cb, ctx, cb.getRAF()); assertTrue(i != 2); storage.start(false); } catch (FetchException e) { if(i != 2) throw e; // Already failed on the final iteration, otherwise is an error. assertEquals(storage.chooseRandomKey(), null); cb.waitForFailed();
// NOTE(review): statement-run fragment (enclosing test method not visible).
// Recreates the storage from the persisted RAF, restarts it, verifies the
// decoded output, then drives the fetcher-finished/segment-finished sequence,
// checking the callback for failures between every step.
storage = test.createStorage(cb, test.makeFetchContext(), cb.getRAF()); segment = storage.segments[0]; storage.start(false); test.verifyOutput(storage); cb.checkFailed(); storage.finishedFetcher(); cb.checkFailed(); waitForFinished(segment);
// NOTE(review): garbled fragment — a "throw new FetchException(TOO_BIG, ...)"
// is spliced into the middle of the SplitFileFetcherStorage constructor's
// argument list, so at least two separate code paths were fused by extraction.
// Recover the original method before editing; the TOO_BIG failure presumably
// belongs to a length check that precedes storage construction — TODO confirm.
storage = new SplitFileFetcherStorage(metadata, this, decompressors, clientMetadata, topDontCompress, topCompatibilityMode, fetchContext, realTimeFlag, getSalter(), thisKey, parent.getURI(), isFinalFetch, parent.getClientDetail(checker), throw new FetchException(FetchExceptionMode.TOO_BIG, eventualLength, true, clientMetadata.getMIMEType()); getter = new SplitFileFetcherGet(this, storage); raf = storage.getRAF(); if(logMINOR) Logger.minor(this, "Created "+(persistent?"persistent" : "transient")+" download for "+
/**
 * Streams the decoded splitfile into a fresh bucket and asserts that it is
 * byte-identical to the original data.
 *
 * @param storage the fetcher storage whose decoded output is checked
 * @throws IOException if streaming or bucket I/O fails
 */
public void verifyOutput(SplitFileFetcherStorage storage) throws IOException {
    StreamGenerator g = storage.streamGenerator();
    Bucket out = bf.makeBucket(-1);
    OutputStream os = out.getOutputStream();
    try {
        g.writeTo(os, null);
    } finally {
        // Close in finally so the stream is not leaked if writeTo throws.
        os.close();
    }
    assertTrue(BucketTools.equalBuckets(originalData, out));
    out.free();
}
/** Restore a splitfile fetcher from a file.
 * @throws StorageFormatException
 * @throws IOException
 * @throws FetchException */
public SplitFileFetcherStorage createStorage(StorageCallback cb, FetchContext ctx,
        LockableRandomAccessBuffer raf) throws IOException, StorageFormatException, FetchException {
    // Restoring from disk only makes sense for a persistent request.
    assertTrue(persistent);
    SplitFileFetcherStorage restored = new SplitFileFetcherStorage(raf, false, cb, ctx,
            random, jobRunner, fetchingKeys, ticker, memoryLimitedJobRunner,
            new CRCChecksumChecker(), false, null, false, false);
    return restored;
}
@Override
public void schedule(ClientContext context) {
    // Only hand the request to the getter once the storage has started.
    boolean started = storage.start(false);
    if(started)
        getter.schedule(context, false);
}
// NOTE(review): byte-identical duplicate of the same scaffold earlier in this
// file — a candidate for extraction into a shared helper once the enclosing
// test methods are visible. Builds a transient SplitFileFetcherStorage, starts
// it non-resumed, marks the fetcher finished, and waits for the free callback.
SplitFileFetcherStorage fetcherStorage = new SplitFileFetcherStorage(m1, fcb, new ArrayList<COMPRESSOR_TYPE>(), new ClientMetadata(), false, cmode, fctx, false, salt, URI, URI, true, new byte[0], r, smallBucketFactory, smallRAFFactory, jobRunner, ticker, memoryLimitedJobRunner, checker, false, null, null, keysFetching); fetcherStorage.start(false); fetcherStorage.finishedFetcher(); fcb.waitForFree();
// NOTE(review): statement-run fragment (enclosing test method not visible).
// Verifies decoded output, then drives fetcher-finished and waits for the
// segment to finish, checking the callback for recorded failures after each
// step.
test.verifyOutput(storage); cb.checkFailed(); storage.finishedFetcher(); cb.checkFailed(); waitForFinished(segment);
/** Called when a segment has finished encoding. It is possible that it has simply restarted;
 * it is not guaranteed to have encoded all blocks etc. But we still need the callback in case
 * e.g. we are in the process of failing, and can't proceed until all the encode jobs have
 * finished. */
void finishedEncoding(SplitFileFetcherSegmentStorage segment) {
    // Log message fixed: this is the *encoding* callback; "Successfully
    // decoded" was a copy-paste from the cross-segment decode callback.
    if(logMINOR) Logger.minor(this, "Finished encoding "+segment+" for "+this+" for "+fetcher);
    if(!allFinished()) return;
    finishedEncoding();
}
/**
 * Streams the decoded splitfile into a fresh bucket and asserts that it is
 * byte-identical to the supplied original data.
 *
 * @param storage the fetcher storage whose decoded output is checked
 * @param originalData the expected plaintext contents
 * @throws IOException if streaming or bucket I/O fails
 */
private void verifyOutput(SplitFileFetcherStorage storage, Bucket originalData) throws IOException {
    StreamGenerator g = storage.streamGenerator();
    Bucket out = smallBucketFactory.makeBucket(-1);
    OutputStream os = out.getOutputStream();
    try {
        g.writeTo(os, null);
    } finally {
        // Close in finally so the stream is not leaked if writeTo throws.
        os.close();
    }
    assertTrue(BucketTools.equalBuckets(originalData, out));
    out.free();
}