@Override
public boolean run(ClientContext context) {
    // Encode the splitfile metadata and hand it to the callback; in
    // getCHKOnly mode the insert is considered finished as soon as the
    // metadata exists. Failures are routed through storage.fail() as
    // BUCKET_ERROR inserts. Always returns false (job does not reschedule).
    try {
        Metadata m = storage.encodeMetadata();
        reportMetadata(m);
        if (ctx.getCHKOnly) {
            onSucceeded(m);
        }
    } catch (IOException e) {
        // Disk/bucket trouble while encoding.
        storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null));
    } catch (MissingKeyException e) {
        // One or more block keys could not be read back.
        storage.fail(new InsertException(InsertExceptionMode.BUCKET_ERROR, "Lost one or more keys", e, null));
    }
    return false;
}
// NOTE(review): truncated fragment — encodes metadata and begins a
// synchronized transition of `status` to SUCCEEDED; the remainder of the
// synchronized block (and the enclosing method) is not visible here.
// Left byte-identical; verify against the original file.
Metadata metadata = encodeMetadata(); synchronized(this) { status = Status.SUCCEEDED;
// NOTE(review): garbled fragment — test statements spliced together with a
// dangling constructor argument list ("memoryLimitedJobRunner, ..., null);")
// whose call prefix is missing, and two redeclarations of `metadata`/bucket
// variables from different scopes. Left byte-identical; reconstruct from the
// original file before editing.
cb.waitForHasKeys(); executor.waitForIdle(); Metadata metadata = storage.encodeMetadata(); assertTrue(storage.getStatus() == Status.ENCODED); Bucket mBucket1 = bigBucketFactory.makeBucket(-1); memoryLimitedJobRunner, jobRunner, ticker, keys, fg, persistentFileTracker, null); Metadata metadata2 = storage.encodeMetadata(); Bucket mBucket2 = bigBucketFactory.makeBucket(-1); os = new DataOutputStream(mBucket2.getOutputStream());
public void testSmallSplitfileHasKeys() throws IOException, InsertException, MissingKeyException { Random r = new Random(12121); long size = 65536; // Exact multiple, so no last block LockableRandomAccessBuffer data = generateData(r, size); HashResult[] hashes = getHashes(data); MyCallback cb = new MyCallback(); InsertContext context = baseContext.clone(); context.earlyEncode = true; KeysFetchingLocally keys = new MyKeysFetchingLocally(); SplitFileInserterStorage storage = new SplitFileInserterStorage(data, size, cb, null, new ClientMetadata(), false, null, smallRAFFactory, false, context, cryptoAlgorithm, cryptoKey, null, hashes, smallBucketFactory, checker, r, memoryLimitedJobRunner, jobRunner, ticker, keys, false, 0, 0, 0, 0); storage.start(); cb.waitForFinishedEncode(); assertEquals(storage.segments.length, 1); assertEquals(storage.segments[0].dataBlockCount, 2); assertEquals(storage.segments[0].checkBlockCount, 3); assertEquals(storage.segments[0].crossCheckBlockCount, 0); cb.waitForHasKeys(); for(int i=0;i<storage.segments[0].dataBlockCount+storage.segments[0].checkBlockCount+storage.segments[0].crossCheckBlockCount;i++) storage.segments[0].readKey(i); storage.encodeMetadata(); assertTrue(storage.getStatus() == Status.ENCODED); }
private void testEncodeAfterShutdownCrossSegment(long size) throws InsertException, IOException, MissingKeyException, StorageFormatException, ChecksumFailedException, ResumeFailedException, MetadataUnresolvedException { Random r = new Random(12121); LockableRandomAccessBuffer data = generateData(r, size); HashResult[] hashes = getHashes(data); MyCallback cb = new MyCallback(); MyKeysFetchingLocally keys = new MyKeysFetchingLocally(); SplitFileInserterStorage storage = new SplitFileInserterStorage(data, size, cb, null, new ClientMetadata(), false, null, smallRAFFactory, true, baseContext.clone(), cryptoAlgorithm, cryptoKey, null, hashes, smallBucketFactory, checker, r, memoryLimitedJobRunner, jobRunner, ticker, keys, false, 0, 0, 0, 0); executor.waitForIdle(); // Has not encoded anything. for(SplitFileInserterSegmentStorage segment : storage.segments) assert(!segment.isFinishedEncoding()); SplitFileInserterStorage resumed = new SplitFileInserterStorage(storage.getRAF(), data, cb, r, memoryLimitedJobRunner, jobRunner, ticker, keys, fg, persistentFileTracker, null); resumed.start(); cb.waitForFinishedEncode(); cb.waitForHasKeys(); executor.waitForIdle(); resumed.encodeMetadata(); assertTrue(resumed.getStatus() == Status.ENCODED); resumed.originalData.free(); resumed.getRAF().free(); }
// NOTE(review): the four lines below are detached statement fragments,
// apparently from four separate test methods whose bodies are not visible
// here (each encodes metadata and/or asserts status ENCODED). Left
// byte-identical; verify against the original file before editing.
Metadata metadata = storage.encodeMetadata(); assertTrue(storage.getStatus() == Status.ENCODED);
Metadata metadata = storage.encodeMetadata(); assertTrue(storage.getStatus() == Status.ENCODED);
Metadata metadata = storage.encodeMetadata();
cb.waitForHasKeys(); executor.waitForIdle(); resumed.encodeMetadata(); assertTrue(resumed.getStatus() == Status.ENCODED); resumed.originalData.free();