/**
 * Creates a tracker rooted at {@code path}/datastoreMeta and schedules a periodic
 * {@link SnapshotJob} on the supplied scheduler.
 *
 * @param path base directory under which the tracker metadata directory is created
 * @param repositoryId id appended to {@code fileNamePrefix} to namespace this repository's files
 * @param scheduler executor used for the recurring snapshot job (owned by the caller)
 * @param snapshotDelaySecs initial delay before the first snapshot, in seconds
 * @param snapshotIntervalSecs period between snapshots, in seconds
 * @param datastore shared data store the tracker reads from / writes to
 * @throws IOException if the metadata directory or backing stores cannot be initialized
 */
public BlobIdTracker(String path, String repositoryId,
        ScheduledExecutorService scheduler, long snapshotDelaySecs,
        long snapshotIntervalSecs, SharedDataStore datastore) throws IOException {
    this.rootDir = new File(concat(path, datastoreMeta));
    this.datastore = datastore;
    this.scheduler = scheduler;
    this.snapshotInterval = SECONDS.toMillis(snapshotIntervalSecs);
    try {
        forceMkdir(rootDir);
        prefix = fileNamePrefix + "-" + repositoryId;
        this.store = new BlobIdStore(rootDir, prefix);
        this.deleteTracker = new ActiveDeletionTracker(rootDir, prefix);
        // Schedule only after all state is initialized: previously the job was
        // scheduled before deleteTracker, so a failure in ActiveDeletionTracker's
        // constructor left a job referencing a partially-constructed tracker
        // already running on the caller's scheduler.
        scheduler.scheduleAtFixedRate(new SnapshotJob(),
            SECONDS.toMillis(snapshotDelaySecs),
            SECONDS.toMillis(snapshotIntervalSecs), MILLISECONDS);
    } catch (IOException e) {
        LOG.error("Error initializing blob tracker", e);
        close();
        throw e;
    }
}
@Override
public void run() {
    if (skip()) {
        // A snapshot already ran within the configured interval; wait for the next slot.
        LOG.info("Skipping scheduled snapshot as it last executed within {} seconds",
            MILLISECONDS.toSeconds(interval));
        return;
    }
    try {
        snapshot();
        LOG.info("Finished taking snapshot");
    } catch (Exception e) {
        // Never let an exception escape: an uncaught throwable would cancel
        // the recurring schedule on the executor.
        LOG.warn("Failure in taking snapshot", e);
    }
}
// Reconstructed: the original fragment had two dangling .schedule(...) calls with
// no receiver and used scheduledFuture2 before declaring it, so it could not compile.
// Each snapshot is forced immediately (0ms delay) and awaited before the next,
// alternating between the two cluster trackers.
cluster1.nodeStore.runBackgroundOperations();
ScheduledFuture<?> scheduledFuture1 = newSingleThreadScheduledExecutor()
    .schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture1.get();
ScheduledFuture<?> scheduledFuture2 = newSingleThreadScheduledExecutor()
    .schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture2.get();
scheduledFuture1 = newSingleThreadScheduledExecutor()
    .schedule(tracker1.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture1.get();
scheduledFuture2 = newSingleThreadScheduledExecutor()
    .schedule(tracker2.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture2.get();
// Reconstructed: the original fragment had a dangling .schedule(...) call with no
// receiver, so it could not compile. The second snapshot reuses the declared future
// variable and is awaited like the first.
DataStoreState state = init(cluster.nodeStore, 0);
ScheduledFuture<?> scheduledFuture = newSingleThreadScheduledExecutor()
    .schedule(tracker.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture.get();
scheduledFuture = newSingleThreadScheduledExecutor()
    .schedule(tracker.new SnapshotJob(), 0, MILLISECONDS);
scheduledFuture.get();
@Test public void snapshotRetrieveIgnored() throws Exception { LOG.info("In snapshotRetrieveIgnored"); System.setProperty("oak.datastore.skipTracker", "true"); // Close and open a new object to use the system property closer.close(); this.tracker = new BlobIdTracker(root.getAbsolutePath(), repoId, 100 * 60, dataStore); this.scheduler = newSingleThreadScheduledExecutor(); closer.register(tracker); closer.register(new ExecutorCloser(scheduler)); try { Set<String> initAdd = add(tracker, range(0, 10000)); ScheduledFuture<?> scheduledFuture = scheduler.schedule(tracker.new SnapshotJob(), 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); assertEquals("References file not empty", 0, tracker.store.getBlobRecordsFile().length()); Set<String> retrieved = retrieveFile(tracker, folder); assertTrue(retrieved.isEmpty()); retrieved = retrieve(tracker); assertTrue(retrieved.isEmpty()); } finally { //reset the skip tracker system prop System.clearProperty("oak.datastore.skipTracker"); } }
@Test public void gcColdStart() throws Exception { Cluster cluster = new Cluster("cluster1"); BlobStore s = cluster.blobStore; BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker(); DataStoreState state = init(cluster.nodeStore, 0); // No blobs should be found now as snapshot not done assertNotEquals(state.blobsAdded, retrieveTracked(tracker)); cluster.gc.collectGarbage(false); Set<String> existingAfterGC = iterate(s); // Check the state of the blob store after gc assertEquals(state.blobsPresent, existingAfterGC); // Tracked blobs should reflect deletions after gc assertEquals(state.blobsPresent, retrieveTracked(tracker)); // Create a snapshot ScheduledFuture<?> scheduledFuture = newSingleThreadScheduledExecutor() .schedule(tracker.new SnapshotJob(), 0, MILLISECONDS); scheduledFuture.get(); // Tracked blobs should reflect deletions after gc and the deleted should not get resurrected assertEquals(state.blobsPresent, retrieveTracked(tracker)); }
/**
 * Creates a tracker rooted at {@code path}/datastoreMeta and schedules a periodic
 * {@link SnapshotJob} on the supplied scheduler.
 *
 * @param path base directory under which the tracker metadata directory is created
 * @param repositoryId id appended to {@code fileNamePrefix} to namespace this repository's files
 * @param scheduler executor used for the recurring snapshot job (owned by the caller)
 * @param snapshotDelaySecs initial delay before the first snapshot, in seconds
 * @param snapshotIntervalSecs period between snapshots, in seconds
 * @param datastore shared data store the tracker reads from / writes to
 * @throws IOException if the metadata directory or backing stores cannot be initialized
 */
public BlobIdTracker(String path, String repositoryId,
        ScheduledExecutorService scheduler, long snapshotDelaySecs,
        long snapshotIntervalSecs, SharedDataStore datastore) throws IOException {
    this.rootDir = new File(concat(path, datastoreMeta));
    this.datastore = datastore;
    this.scheduler = scheduler;
    this.snapshotInterval = SECONDS.toMillis(snapshotIntervalSecs);
    try {
        forceMkdir(rootDir);
        prefix = fileNamePrefix + "-" + repositoryId;
        this.store = new BlobIdStore(rootDir, prefix);
        this.deleteTracker = new ActiveDeletionTracker(rootDir, prefix);
        // Schedule only after all state is initialized: previously the job was
        // scheduled before deleteTracker, so a failure in ActiveDeletionTracker's
        // constructor left a job referencing a partially-constructed tracker
        // already running on the caller's scheduler.
        scheduler.scheduleAtFixedRate(new SnapshotJob(),
            SECONDS.toMillis(snapshotDelaySecs),
            SECONDS.toMillis(snapshotIntervalSecs), MILLISECONDS);
    } catch (IOException e) {
        LOG.error("Error initializing blob tracker", e);
        close();
        throw e;
    }
}
@Test public void externalAddOffline() throws Exception { LOG.info("In externalAddOffline"); // Close and open a new object to use the system property closer.close(); root = folder.newFolder(); File blobIdRoot = new File(root, "blobids"); blobIdRoot.mkdirs(); //Add file offline File offline = new File(blobIdRoot, "blob-offline123456.gen"); List<String> offlineLoad = range(0, 1000); FileIOUtils.writeStrings(offlineLoad.iterator(), offline, false); this.tracker = new BlobIdTracker(root.getAbsolutePath(), repoId, 100 * 60, dataStore); this.scheduler = newSingleThreadScheduledExecutor(); closer.register(tracker); closer.register(new ExecutorCloser(scheduler)); Set<String> initAdd = add(tracker, range(1001, 1005)); ScheduledFuture<?> scheduledFuture = scheduler.schedule(tracker.new SnapshotJob(), 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); initAdd.addAll(offlineLoad); assertEquals(initAdd.size(), Iterators.size(tracker.get())); Set<String> retrieved = retrieve(tracker); assertEquals("Extra elements after add", initAdd, retrieved); assertTrue(read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType())).isEmpty()); }
@Test public void gcWithInlined() throws Exception { Cluster cluster = new Cluster("cluster1"); BlobStore s = cluster.blobStore; BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker(); DataStoreState state = init(cluster.nodeStore, 0); addInlined(cluster.nodeStore); ScheduledFuture<?> scheduledFuture = newSingleThreadScheduledExecutor() .schedule(tracker.new SnapshotJob(), 0, MILLISECONDS); scheduledFuture.get(); // All blobs added should be tracked now assertEquals(state.blobsAdded, retrieveTracked(tracker)); cluster.gc.collectGarbage(false); Set<String> existingAfterGC = iterate(s); // Check the state of the blob store after gc assertEquals(state.blobsPresent, existingAfterGC); // Tracked blobs should reflect deletions after gc assertEquals(state.blobsPresent, retrieveTracked(tracker)); }
@Test public void gc() throws Exception { Cluster cluster = new Cluster("cluster1"); BlobStore s = cluster.blobStore; BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker(); DataStoreState state = init(cluster.nodeStore, 0); ScheduledFuture<?> scheduledFuture = newSingleThreadScheduledExecutor() .schedule(tracker.new SnapshotJob(), 0, MILLISECONDS); scheduledFuture.get(); // All blobs added should be tracked now assertEquals(state.blobsAdded, retrieveTracked(tracker)); cluster.gc.collectGarbage(false); Set<String> existingAfterGC = iterate(s); // Check the state of the blob store after gc assertEquals(state.blobsPresent, existingAfterGC); // Tracked blobs should reflect deletions after gc assertEquals(state.blobsPresent, retrieveTracked(tracker)); }
/**
 * Creates a tracker rooted at {@code path}/datastoreMeta and schedules a periodic
 * {@link SnapshotJob} on the supplied scheduler.
 *
 * @param path base directory under which the tracker metadata directory is created
 * @param repositoryId id appended to {@code fileNamePrefix} to namespace this repository's files
 * @param scheduler executor used for the recurring snapshot job (owned by the caller)
 * @param snapshotDelaySecs initial delay before the first snapshot, in seconds
 * @param snapshotIntervalSecs period between snapshots, in seconds
 * @param datastore shared data store the tracker reads from / writes to
 * @throws IOException if the metadata directory or backing stores cannot be initialized
 */
public BlobIdTracker(String path, String repositoryId,
        ScheduledExecutorService scheduler, long snapshotDelaySecs,
        long snapshotIntervalSecs, SharedDataStore datastore) throws IOException {
    this.rootDir = new File(concat(path, datastoreMeta));
    this.datastore = datastore;
    this.scheduler = scheduler;
    this.snapshotInterval = SECONDS.toMillis(snapshotIntervalSecs);
    try {
        forceMkdir(rootDir);
        prefix = fileNamePrefix + "-" + repositoryId;
        this.store = new BlobIdStore(rootDir, prefix);
        this.deleteTracker = new ActiveDeletionTracker(rootDir, prefix);
        // Schedule only after all state is initialized: previously the job was
        // scheduled before deleteTracker, so a failure in ActiveDeletionTracker's
        // constructor left a job referencing a partially-constructed tracker
        // already running on the caller's scheduler.
        scheduler.scheduleAtFixedRate(new SnapshotJob(),
            SECONDS.toMillis(snapshotDelaySecs),
            SECONDS.toMillis(snapshotIntervalSecs), MILLISECONDS);
    } catch (IOException e) {
        LOG.error("Error initializing blob tracker", e);
        close();
        throw e;
    }
}
@Test public void snapshotExecuteAfterRemove() throws Exception { LOG.info("In snapshotExecuteAfterRemove"); Clock clock = Clock.ACCURATE; BlobIdTracker.SnapshotJob job = tracker.new SnapshotJob(100, clock); // Mimics a call to get after add and before remove similar to the calls in GC Set<String> present = snapshotRemove(job, false); clock.waitUntil(System.currentTimeMillis() + 100); // Since already retrieved the datastore should not be empty unless the snapshot is ignored ScheduledFuture<?> scheduledFuture = scheduler.schedule(job, 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); assertEquals("Elements not equal after snapshot after remove", present, read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType()))); }
@Override
public void run() {
    if (skip()) {
        // A snapshot already ran within the configured interval; wait for the next slot.
        LOG.info("Skipping scheduled snapshot as it last executed within {} seconds",
            MILLISECONDS.toSeconds(interval));
        return;
    }
    try {
        snapshot();
        LOG.info("Finished taking snapshot");
    } catch (Exception e) {
        // Never let an exception escape: an uncaught throwable would cancel
        // the recurring schedule on the executor.
        LOG.warn("Failure in taking snapshot", e);
    }
}
@Test public void snapshotBeforeRemove() throws Exception { LOG.info("In snapshotBeforeRemove"); Clock clock = Clock.ACCURATE; BlobIdTracker.SnapshotJob job = tracker.new SnapshotJob(100, clock); //Mimic an intervening snapshot between add and remove by skipping the retrieve call. Set<String> present = snapshotRemove(job, true); clock.waitUntil(System.currentTimeMillis() + 100); // Since already retrieved the datastore should not be empty unless the snapshot is ignored ScheduledFuture<?> scheduledFuture = scheduler.schedule(job, 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); assertEquals("Elements not equal after snapshot after remove", present, read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType()))); }
@Override
public void run() {
    if (skip()) {
        // A snapshot already ran within the configured interval; wait for the next slot.
        LOG.info("Skipping scheduled snapshot as it last executed within {} seconds",
            MILLISECONDS.toSeconds(interval));
        return;
    }
    try {
        snapshot();
        LOG.info("Finished taking snapshot");
    } catch (Exception e) {
        // Never let an exception escape: an uncaught throwable would cancel
        // the recurring schedule on the executor.
        LOG.warn("Failure in taking snapshot", e);
    }
}
@Test public void snapshotIgnoreAfterRemove() throws Exception { LOG.info("In snapshotIgnoreAfterRemove"); BlobIdTracker.SnapshotJob job = tracker.new SnapshotJob(); snapshotRemove(job, false); // Since already retrieved the datastore should be empty unless the snapshot has actually run ScheduledFuture<?> scheduledFuture = scheduler.schedule(job, 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); assertTrue("Snapshot not skipped", read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType())).isEmpty()); }
@Test
public void addSnapshot() throws Exception {
    LOG.info("In addSnapshot");
    Set<String> expected = add(tracker, range(0, 4));
    // Force an immediate snapshot, then verify the added ids round-trip.
    scheduler.schedule(tracker.new SnapshotJob(), 0, TimeUnit.MILLISECONDS).get();
    assertEquals("Extra elements after add", expected, retrieve(tracker));
    assertTrue(read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType())).isEmpty());
}
/** Runs a snapshot immediately and waits for it; failures are logged, not thrown. */
void forceSnapshot() {
    try {
        scheduler.schedule(tracker.new SnapshotJob(), 0, MILLISECONDS).get();
    } catch (Exception e) {
        log.error("Error in snapshot", e);
    }
}
@Test
public void addSnapshotRemove() throws Exception {
    LOG.info("In addSnapshotRemove");
    // Exercise the add -> snapshot -> remove sequence with a default job.
    BlobIdTracker.SnapshotJob job = tracker.new SnapshotJob();
    snapshotRemove(job, false);
}