@NotNull
@Override
public CompositeData cancelActiveCollection() {
    // Nothing to cancel once the collection operation has completed.
    if (gcOp.isDone()) {
        return failed(OP_NAME + " not running").toCompositeData();
    }
    // Run the cancellation as its own management operation so this JMX call
    // returns immediately instead of blocking until the collection winds down.
    executor.execute(newManagementOperation(OP_NAME, (Callable<Void>) () -> {
        gcOp.cancel(false);
        activeDeletedBlobCollector.cancelBlobCollection();
        return null;
    }));
    return initiated(gcOp, "Active lucene index blobs collection cancelled").toCompositeData();
}
@NotNull
@Override
public CompositeData cancelActiveCollection() {
    if (!gcOp.isDone()) {
        // The actual cancellation happens asynchronously on the management
        // executor so the caller is not blocked while the task shuts down.
        Callable<Void> cancelTask = () -> {
            gcOp.cancel(false);
            activeDeletedBlobCollector.cancelBlobCollection();
            return null;
        };
        executor.execute(newManagementOperation(OP_NAME, cancelTask));
        return initiated(gcOp, "Active lucene index blobs collection cancelled").toCompositeData();
    } else {
        // No collection in flight — report failure to the JMX client.
        return failed(OP_NAME + " not running").toCompositeData();
    }
}
@Test public void resumeCancelledPurge() throws Exception { BlobDeletionCallback bdc = adbc.getBlobDeletionCallback(); for (int i = 0; i < 10; i++) { String id = "Blob" + i; bdc.deleted(id, Collections.singleton(id)); } bdc.commitProgress(COMMIT_SUCCEDED); Semaphore purgeBlocker = new Semaphore(0); blobStore.callback = () -> purgeBlocker.acquireUninterruptibly(); Thread purgeThread = new Thread(() -> { try { adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore); } catch (InterruptedException e) { e.printStackTrace(); } }); purgeThread.setDaemon(true); purgeBlocker.release(10);//allow 5 deletes purgeThread.start(); waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 10); adbc.cancelBlobCollection(); purgeBlocker.release(22);//release all that's there... this is more than needed, btw. waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 12); waitFor(5000, () -> !purgeThread.isAlive()); adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore); // Resume can re-attempt to delete already deleted blobs. Hence, the need for for ">=" assertEquals("All blobs must get deleted", 20, blobStore.deletedChunkIds.size()); }
@Test public void cancellablePurge() throws Exception { BlobDeletionCallback bdc = adbc.getBlobDeletionCallback(); for (int i = 0; i < 10; i++) { String id = "Blob" + i; bdc.deleted(id, Collections.singleton(id)); } bdc.commitProgress(COMMIT_SUCCEDED); Semaphore purgeBlocker = new Semaphore(0); blobStore.callback = () -> purgeBlocker.acquireUninterruptibly(); Thread purgeThread = new Thread(() -> { try { adbc.purgeBlobsDeleted(clock.getTimeIncreasing(), blobStore); } catch (InterruptedException e) { e.printStackTrace(); } }); purgeThread.setDaemon(true); purgeBlocker.release(10);//allow 5 deletes purgeThread.start(); boolean deleted5 = waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 10); assertTrue("Deleted " + blobStore.deletedChunkIds.size() + " chunks", deleted5); adbc.cancelBlobCollection(); purgeBlocker.release(20);//release all that's there... this is more than needed, btw. boolean deleted6 = waitFor(5000, () -> blobStore.deletedChunkIds.size() >= 12); assertTrue("Haven't deleted another blob which was locked earlier.", deleted6); boolean cancelWorked = waitFor(5000, () -> !purgeThread.isAlive()); assertTrue("Cancel didn't let go of purge thread in 2 seconds", cancelWorked); assertTrue("Cancelling purge must return asap", blobStore.deletedChunkIds.size() == 12); }
@Override
public void cancelBlobCollection() {
    // Forwards to the shared NOOP collector, so cancellation has no effect here.
    // NOTE(review): presumably the enclosing class is itself a pass-through/no-op
    // implementation — confirm against the class declaration (not visible in this chunk).
    ActiveDeletedBlobCollectorFactory.NOOP.cancelBlobCollection();
}
@Override
public void cancelBlobCollection() {
    // Plain pass-through: cancellation semantics are entirely those of the delegate.
    delegate.cancelBlobCollection();
}