/**
 * Applies a GC removal pass to the tracking state: drops the removed ids
 * from the tracker and reconciles the active-deletion tracker against the
 * set of references still marked as in use.
 */
@Override
void handleRemoves(GarbageCollectableBlobStore blobStore, File removedIds, File markedRefs) throws IOException {
    BlobIdTracker blobIdTracker = (BlobIdTracker) ((BlobTrackingStore) blobStore).getTracker();
    blobIdTracker.remove(removedIds);
    blobIdTracker.getDeleteTracker().reconcile(markedRefs);
}
/**
 * Removes the records listed in the given file from the backing store.
 * Pending generation files are merged first so the removal sees a complete
 * view; afterwards a snapshot is taken with the flag set to {@code true}.
 */
@Override
public void remove(File recs) throws IOException {
    // merge any outstanding generations before touching the store
    globalMerge();
    store.removeRecords(recs);
    snapshot(true);
}
/**
 * Builds a tracker over the test root with a five-minute snapshot interval.
 */
private BlobIdTracker initTracker() throws IOException {
    long snapshotIntervalSecs = 5 * 60;
    return new BlobIdTracker(root.getAbsolutePath(), repoId, snapshotIntervalSecs, dataStore);
}
/**
 * Verifies that a generation file written while no tracker was running
 * ("offline") is picked up when a new tracker is created over the same
 * root, and that its ids are included in subsequent get/retrieve results.
 */
@Test
public void externalAddOffline() throws Exception {
    LOG.info("In externalAddOffline");
    // Close and open a new object to use the system property
    closer.close();
    root = folder.newFolder();
    File blobIdRoot = new File(root, "blobids");
    blobIdRoot.mkdirs();

    //Add file offline
    // NOTE(review): the ".gen" suffix presumably marks a generation file the
    // tracker scans on startup — confirm against BlobIdStore's file naming.
    File offline = new File(blobIdRoot, "blob-offline123456.gen");
    List<String> offlineLoad = range(0, 1000);
    FileIOUtils.writeStrings(offlineLoad.iterator(), offline, false);

    this.tracker = new BlobIdTracker(root.getAbsolutePath(), repoId, 100 * 60, dataStore);
    this.scheduler = newSingleThreadScheduledExecutor();
    closer.register(tracker);
    closer.register(new ExecutorCloser(scheduler));

    Set<String> initAdd = add(tracker, range(1001, 1005));
    // run the snapshot job immediately and block until it completes
    ScheduledFuture<?> scheduledFuture =
        scheduler.schedule(tracker.new SnapshotJob(), 0, TimeUnit.MILLISECONDS);
    scheduledFuture.get();

    // the tracker must now report both the offline ids and the new adds
    initAdd.addAll(offlineLoad);
    assertEquals(initAdd.size(), Iterators.size(tracker.get()));

    Set<String> retrieved = retrieve(tracker);
    assertEquals("Extra elements after add", initAdd, retrieved);
    assertTrue(read(dataStore.getAllMetadataRecords(BLOBREFERENCES.getType())).isEmpty());
}
/**
 * Writes the given ids to the temp file, removes them through the tracker
 * and drops them from the expected set used by the assertions.
 */
private static void remove(BlobIdTracker tracker, File temp, Set<String> initAdd, List<String> ints) throws IOException {
    writeStrings(ints.iterator(), temp, false);
    // temp now holds the ids to delete; hand it to the tracker
    tracker.remove(temp);
    initAdd.removeAll(ints);
}
/**
 * Creates a tracker rooted under {@code path}/datastoreMeta and schedules the
 * periodic {@code SnapshotJob} on the supplied scheduler.
 *
 * @param path base directory under which the tracker's files are kept
 * @param repositoryId id of this repository, used to prefix tracker file names
 * @param scheduler executor that runs the periodic snapshot job
 * @param snapshotDelaySecs initial delay in seconds before the first snapshot
 * @param snapshotIntervalSecs interval in seconds between snapshots
 * @param datastore shared data store the tracked blob ids belong to
 * @throws IOException if the root directory or the backing stores cannot be created
 */
public BlobIdTracker(String path, String repositoryId, ScheduledExecutorService scheduler,
        long snapshotDelaySecs, long snapshotIntervalSecs, SharedDataStore datastore) throws IOException {
    String root = concat(path, datastoreMeta);
    this.rootDir = new File(root);
    this.datastore = datastore;
    this.scheduler = scheduler;
    this.snapshotInterval = SECONDS.toMillis(snapshotIntervalSecs);
    try {
        forceMkdir(rootDir);
        prefix = fileNamePrefix + "-" + repositoryId;
        this.store = new BlobIdStore(rootDir, prefix);
        // the snapshot job is scheduled before the delete tracker exists;
        // if anything below throws, close() releases what was initialized
        scheduler.scheduleAtFixedRate(new SnapshotJob(),
            SECONDS.toMillis(snapshotDelaySecs),
            SECONDS.toMillis(snapshotIntervalSecs), MILLISECONDS);
        this.deleteTracker = new ActiveDeletionTracker(rootDir, prefix);
    } catch (IOException e) {
        LOG.error("Error initializing blob tracker", e);
        close();
        throw e;
    }
}
/**
 * Retrieves the ids tracked for active deletion into a fresh temp file and
 * reads them back as a set.
 *
 * Fix: the FileInputStream was previously opened without a guaranteed close;
 * if readStringsAsSet threw before closing it, the descriptor leaked. The
 * try-with-resources makes the close unconditional (a second close by the
 * callee, if it performs one, is a harmless no-op).
 *
 * @param tracker tracker whose delete-tracker content is retrieved
 * @param folder temp folder providing the target file
 * @return the set of tracked ids read from the retrieved file
 * @throws IOException on failure to retrieve or read the file
 */
private static Set<String> retrieveActiveDeleteTracked(BlobIdTracker tracker, TemporaryFolder folder) throws IOException {
    File f = folder.newFile();
    try (FileInputStream in =
            new FileInputStream(tracker.getDeleteTracker().retrieve(f.getAbsolutePath()))) {
        return readStringsAsSet(in, false);
    }
}
/**
 * Returns a file at {@code path} containing the tracked records after a
 * global merge. When the tracker is skipped via {@code SKIP_TRACKER}, the
 * path is wrapped in a File and returned untouched.
 */
@Override
public File get(String path) throws IOException {
    // guard clause: tracking disabled, hand back the raw path
    if (SKIP_TRACKER) {
        return new File(path);
    }
    globalMerge();
    return store.getRecords(path);
}
// Convenience overload delegating to snapshot(false).
// NOTE(review): the boolean presumably means "forced" (remove() passes true) —
// confirm against snapshot(boolean) before relying on that reading.
private void snapshot() throws IOException { snapshot(false); }
touch(getSnapshotMarkerFile()); LOG.info("Updated snapshot marker"); } catch (IOException e) {
/**
 * Sets up one cluster member: its blob store, a tracker over {@code path}
 * with a 100-minute snapshot interval, and a single-thread scheduler.
 *
 * NOTE(review): dataStore is created from the outer field 'root', not from
 * the 'path'/'folder' parameters — confirm this sharing is intentional.
 */
Cluster(String repoId, String path, TemporaryFolder folder) throws Exception {
    this.dataStore = getBlobStore(root);
    this.tracker = new BlobIdTracker(path, repoId, 100 * 60, dataStore);
    this.scheduler = newSingleThreadScheduledExecutor();
    this.folder = folder;
}
/**
 * The consistency check must report only blobs genuinely missing from the
 * blob store and must not count ids that went through active deletion.
 */
@Test
public void consistencyCheckDeletedWithActiveDeletion() throws Exception {
    Cluster cluster = new Cluster("cluster1");
    BlobStore s = cluster.blobStore;
    BlobIdTracker tracker = (BlobIdTracker) ((BlobTrackingStore) s).getTracker();
    DataStoreState state = init(cluster.nodeStore, 0);

    // Directly delete from blobstore
    ArrayList<String> blobs = Lists.newArrayList(state.blobsPresent);
    String removedId = blobs.remove(0);
    ((DataStoreBlobStore) s).deleteChunks(Lists.newArrayList(removedId), 0);
    state.blobsPresent = Sets.newHashSet(blobs);

    // drop the directly-deleted id from the tracker as well
    File f = folder.newFile();
    writeStrings(Lists.newArrayList(removedId).iterator(), f, false);
    tracker.remove(f);

    // perform an active deletion; two of the returned ids remain present
    List<String> addlAdded = doActiveDelete(cluster.nodeStore,
        (DataStoreBlobStore) cluster.blobStore, tracker, folder,0, 2);
    List<String> addlPresent = Lists.newArrayList(addlAdded.get(2), addlAdded.get(3));
    state.blobsPresent.addAll(addlPresent);
    state.blobsAdded.addAll(addlPresent);

    // Only the missing blob should be reported and not the active deletions
    assertEquals(1, cluster.gc.checkConsistency());
}
/**
 * Creates a tracker rooted under {@code path}/datastoreMeta and schedules the
 * periodic {@code SnapshotJob} on the supplied scheduler.
 *
 * @param path base directory under which the tracker's files are kept
 * @param repositoryId id of this repository, used to prefix tracker file names
 * @param scheduler executor that runs the periodic snapshot job
 * @param snapshotDelaySecs initial delay in seconds before the first snapshot
 * @param snapshotIntervalSecs interval in seconds between snapshots
 * @param datastore shared data store the tracked blob ids belong to
 * @throws IOException if the root directory or the backing stores cannot be created
 */
public BlobIdTracker(String path, String repositoryId, ScheduledExecutorService scheduler,
        long snapshotDelaySecs, long snapshotIntervalSecs, SharedDataStore datastore) throws IOException {
    String root = concat(path, datastoreMeta);
    this.rootDir = new File(root);
    this.datastore = datastore;
    this.scheduler = scheduler;
    this.snapshotInterval = SECONDS.toMillis(snapshotIntervalSecs);
    try {
        forceMkdir(rootDir);
        prefix = fileNamePrefix + "-" + repositoryId;
        this.store = new BlobIdStore(rootDir, prefix);
        // the snapshot job is scheduled before the delete tracker exists;
        // if anything below throws, close() releases what was initialized
        scheduler.scheduleAtFixedRate(new SnapshotJob(),
            SECONDS.toMillis(snapshotDelaySecs),
            SECONDS.toMillis(snapshotIntervalSecs), MILLISECONDS);
        this.deleteTracker = new ActiveDeletionTracker(rootDir, prefix);
    } catch (IOException e) {
        LOG.error("Error initializing blob tracker", e);
        close();
        throw e;
    }
}
@Override public int filter(GarbageCollectableBlobStore blobStore, FileLineDifferenceIterator iter, GarbageCollectorFileState fs) throws IOException { // Write the original candidates FileIOUtils.writeStrings(iter, fs.getGcCandidates(), true); // Filter the ids actively deleted BlobTrackingStore store = (BlobTrackingStore) blobStore; BlobIdTracker tracker = (BlobIdTracker) store.getTracker(); // Move the candidates identified to a temp file File candTemp = createTempFile("candTemp", null); copyFile(fs.getGcCandidates(), candTemp); Iterator<String> filter = tracker.getDeleteTracker().filter(candTemp); try { return FileIOUtils.writeStrings(filter, fs.getGcCandidates(), true); } finally { if (filter != null && filter instanceof FileLineDifferenceIterator) { ((FileLineDifferenceIterator) filter).close(); } if (candTemp != null) { candTemp.delete(); } } } },
/**
 * Returns a file at {@code path} containing the tracked records after a
 * global merge. When the tracker is skipped via {@code SKIP_TRACKER}, the
 * path is wrapped in a File and returned untouched.
 */
@Override
public File get(String path) throws IOException {
    // guard clause: tracking disabled, hand back the raw path
    if (SKIP_TRACKER) {
        return new File(path);
    }
    globalMerge();
    return store.getRecords(path);
}
// Convenience overload delegating to snapshot(false).
// NOTE(review): the boolean presumably means "forced" (remove() passes true) —
// confirm against snapshot(boolean) before relying on that reading.
private void snapshot() throws IOException { snapshot(false); }
touch(getSnapshotMarkerFile()); LOG.info("Updated snapshot marker"); } catch (IOException e) {
/**
 * Per-test fixture: creates a fresh root folder and a tracker over it with a
 * 100-minute snapshot interval, then registers everything for cleanup.
 */
@Before
public void setup() throws Exception {
    this.root = folder.newFolder();
    // the data store is created once and reused across tests
    if (dataStore == null) {
        dataStore = getBlobStore(folder.newFolder());
    }
    this.repoId = randomUUID().toString();
    this.tracker = new BlobIdTracker(root.getAbsolutePath(), repoId, 100 * 60, dataStore);
    this.scheduler = newSingleThreadScheduledExecutor();
    // register in this order so the tracker closes before its executor
    closer.register(tracker);
    closer.register(new ExecutorCloser(scheduler));
}
/**
 * Applies a GC removal pass to the tracking state: drops the removed ids
 * from the tracker and reconciles the active-deletion tracker against the
 * set of references still marked as in use.
 */
@Override
void handleRemoves(GarbageCollectableBlobStore blobStore, File removedIds, File markedRefs) throws IOException {
    BlobIdTracker blobIdTracker = (BlobIdTracker) ((BlobTrackingStore) blobStore).getTracker();
    blobIdTracker.remove(removedIds);
    blobIdTracker.getDeleteTracker().reconcile(markedRefs);
}
/**
 * Removes the records listed in the given file from the backing store.
 * Pending generation files are merged first so the removal sees a complete
 * view; afterwards a snapshot is taken with the flag set to {@code true}.
 */
@Override
public void remove(File recs) throws IOException {
    // merge any outstanding generations before touching the store
    globalMerge();
    store.removeRecords(recs);
    snapshot(true);
}