/**
 * Adds every record to the store and collects the added ids into a set so
 * tests can compare them against what is later retrieved.
 *
 * @param store the blob id store receiving the records
 * @param ints the record ids to add
 * @return the set of ids that were added
 * @throws IOException if adding any record fails
 */
private static Set<String> add(BlobIdStore store, List<String> ints) throws IOException {
    Set<String> added = newHashSet();
    for (String id : ints) {
        store.addRecord(id);
        added.add(id);
    }
    return added;
}
/**
 * Adds a large batch of ids to the store once the start latch is released,
 * either one-by-one or in bulk, then counts down the completion latch.
 */
@Override
public void run() {
    try {
        // Large batch so the add overlaps with concurrently running operations
        List<String> adds = range(0, 100000);
        // Wait for the coordinating latch so all worker threads start together
        start.await();
        if (!bulk) {
            add(store, adds);
        } else {
            store.addRecords(adds.iterator());
        }
        done.countDown();
    } catch (IOException e) {
        log.info("Exception in add", e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers can still observe the interruption
        Thread.currentThread().interrupt();
        log.info("Interrupted in add", e);
    }
}
};
/**
 * Rolls over to a new generation: closes the current writer, then creates a
 * fresh in-process file and opens a writer over it.
 *
 * @throws IOException if the new file or its writer cannot be created
 */
private synchronized void nextGeneration() throws IOException {
    close();
    File nextFile = new File(rootDir, prefix + IN_PROCESS.getFileNameSuffix());
    processFile = nextFile;
    writer = newWriter(nextFile, UTF_8);
    LOG.info("Created new process file and writer over {} ", nextFile.getAbsolutePath());
}
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Fold all reference files retrieved from the DataStore into the local store,
// sorting the merged records (doSort = true) so later passes can rely on order
store.merge(refFiles, true);
LOG.info("Merged all retrieved blob id files in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
// NOTE(review): fragment — the try/finally opened here is closed beyond this excerpt.
// Flush pending ids, then sort both inputs so the line-difference iterator can
// stream them in a single ordered pass
snapshot();
sort(getBlobRecordsFile());
sort(recs);
LOG.trace("Sorted files");
FileLineDifferenceIterator iterator = null;
try {
    // Presumably yields lines of the records file that are absent from recs,
    // i.e. the ids remaining after removal — TODO confirm argument order
    iterator = new FileLineDifferenceIterator(recs, getBlobRecordsFile(), null);
    writeStrings(iterator, temp, false);
} finally {
    // Replace the records file with the filtered copy
    File blobRecs = getBlobRecordsFile();
    move(temp, blobRecs);
    LOG.trace("removed records");
// NOTE(review): fragment — the try/finally opened here is closed beyond this excerpt.
// Flush pending ids, then sort both inputs so the line-difference iterator can
// stream them in a single ordered pass
snapshot();
sort(getBlobRecordsFile());
sort(recs);
LOG.trace("Sorted files");
FileLineDifferenceIterator iterator = null;
try {
    // Presumably yields lines of the records file that are absent from recs,
    // i.e. the ids remaining after removal — TODO confirm argument order
    iterator = new FileLineDifferenceIterator(recs, getBlobRecordsFile(), null);
    writeStrings(iterator, temp, false);
} finally {
    // Replace the records file with the filtered copy
    File blobRecs = getBlobRecordsFile();
    move(temp, blobRecs);
    LOG.trace("removed records");
// NOTE(review): fragment — the try/finally opened here is closed beyond this excerpt.
// Flush pending ids, then sort both inputs so the line-difference iterator can
// stream them in a single ordered pass
snapshot();
sort(getBlobRecordsFile());
sort(recs);
LOG.trace("Sorted files");
FileLineDifferenceIterator iterator = null;
try {
    // Presumably yields lines of the records file that are absent from recs,
    // i.e. the ids remaining after removal — TODO confirm argument order
    iterator = new FileLineDifferenceIterator(recs, getBlobRecordsFile(), null);
    writeStrings(iterator, temp, false);
} finally {
    // Replace the records file with the filtered copy
    File blobRecs = getBlobRecordsFile();
    move(temp, blobRecs);
    LOG.trace("removed records");
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Fold all reference files retrieved from the DataStore into the local store,
// sorting the merged records (doSort = true) so later passes can rely on order
store.merge(refFiles, true);
LOG.info("Merged all retrieved blob id files in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Fold all reference files retrieved from the DataStore into the local store,
// sorting the merged records (doSort = true) so later passes can rely on order
store.merge(refFiles, true);
LOG.info("Merged all retrieved blob id files in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Persist pending ids into the consolidated records file
store.snapshot();
LOG.debug("Completed snapshot in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
File recs = store.getBlobRecordsFile();
// Publish this instance's records to the shared DataStore under a unique name
// (prefix + instance id + timestamp + suffix) so other instances can merge them
datastore.addMetadataRecord(recs, (prefix + instanceId + System.currentTimeMillis() + mergedFileSuffix));
LOG.info("Added blob id metadata record in DataStore in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Persist pending ids into the consolidated records file
store.snapshot();
LOG.debug("Completed snapshot in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
File recs = store.getBlobRecordsFile();
// Publish this instance's records to the shared DataStore under a unique name
// (prefix + instance id + timestamp + suffix) so other instances can merge them
datastore.addMetadataRecord(recs, (prefix + instanceId + System.currentTimeMillis() + mergedFileSuffix));
LOG.info("Added blob id metadata record in DataStore in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
// Persist pending ids into the consolidated records file
store.snapshot();
LOG.debug("Completed snapshot in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
File recs = store.getBlobRecordsFile();
// Publish this instance's records to the shared DataStore under a unique name
// (prefix + instance id + timestamp + suffix) so other instances can merge them
datastore.addMetadataRecord(recs, (prefix + instanceId + System.currentTimeMillis() + mergedFileSuffix));
LOG.info("Added blob id metadata record in DataStore in [{}]", watch.elapsed(TimeUnit.MILLISECONDS));
/** * Merges the given files with the references file and deletes the files. * * @param refFiles files to merge * @param doSort whether to sort while merging * @throws IOException */ protected void merge(List<File> refFiles, boolean doSort) throws IOException { refLock.lock(); try { if (refFiles != null && !refFiles.isEmpty()) { File merged = new File(rootDir, prefix + REFS.getFileNameSuffix()); append(refFiles, merged, true); LOG.debug("Merged files into references {}", refFiles); // Clear the references as not needed refFiles.clear(); } if (doSort) { sort(getBlobRecordsFile()); } } finally { refLock.unlock(); } }
/**
 * Retrieves all the reference files available in the DataStore, merges them
 * into the local store and returns an iterator over the merged records.
 * This makes the ids returned as recent as the snapshots taken on all
 * instances/repositories connected to the DataStore.
 * <p>
 * The iterator returned is a Closeable instance and should be closed by calling #close().
 *
 * @return iterator over all the blob ids available
 * @throws IOException on failure to merge or read the records
 */
@Override
public Iterator<String> get() throws IOException {
    if (SKIP_TRACKER) {
        return Collections.emptyIterator();
    }
    try {
        globalMerge();
        return store.getRecords();
    } catch (IOException e) {
        LOG.error("Error in retrieving blob records iterator", e);
        throw e;
    }
}
/**
 * Retrieves all the reference files available in the DataStore, merges them
 * into the local store and returns an iterator over the merged records.
 * This makes the ids returned as recent as the snapshots taken on all
 * instances/repositories connected to the DataStore.
 * <p>
 * The iterator returned is a Closeable instance and should be closed by calling #close().
 *
 * @return iterator over all the blob ids available
 * @throws IOException on failure to merge or read the records
 */
@Override
public Iterator<String> get() throws IOException {
    if (SKIP_TRACKER) {
        return Collections.emptyIterator();
    }
    try {
        globalMerge();
        return store.getRecords();
    } catch (IOException e) {
        LOG.error("Error in retrieving blob records iterator", e);
        throw e;
    }
}
/** * Merges the given files with the references file and deletes the files. * * @param refFiles files to merge * @param doSort whether to sort while merging * @throws IOException */ protected void merge(List<File> refFiles, boolean doSort) throws IOException { refLock.lock(); try { if (refFiles != null && !refFiles.isEmpty()) { File merged = new File(rootDir, prefix + REFS.getFileNameSuffix()); append(refFiles, merged, true); LOG.debug("Merged files into references {}", refFiles); // Clear the references as not needed refFiles.clear(); } if (doSort) { sort(getBlobRecordsFile()); } } finally { refLock.unlock(); } }
@Test public void snapshotRetrieveIgnored() throws Exception { LOG.info("In snapshotRetrieveIgnored"); System.setProperty("oak.datastore.skipTracker", "true"); // Close and open a new object to use the system property closer.close(); this.tracker = new BlobIdTracker(root.getAbsolutePath(), repoId, 100 * 60, dataStore); this.scheduler = newSingleThreadScheduledExecutor(); closer.register(tracker); closer.register(new ExecutorCloser(scheduler)); try { Set<String> initAdd = add(tracker, range(0, 10000)); ScheduledFuture<?> scheduledFuture = scheduler.schedule(tracker.new SnapshotJob(), 0, TimeUnit.MILLISECONDS); scheduledFuture.get(); assertEquals("References file not empty", 0, tracker.store.getBlobRecordsFile().length()); Set<String> retrieved = retrieveFile(tracker, folder); assertTrue(retrieved.isEmpty()); retrieved = retrieve(tracker); assertTrue(retrieved.isEmpty()); } finally { //reset the skip tracker system prop System.clearProperty("oak.datastore.skipTracker"); } }
/** * Merges the given files with the references file and deletes the files. * * @param refFiles files to merge * @param doSort whether to sort while merging * @throws IOException */ protected void merge(List<File> refFiles, boolean doSort) throws IOException { refLock.lock(); try { if (refFiles != null && !refFiles.isEmpty()) { File merged = new File(rootDir, prefix + REFS.getFileNameSuffix()); append(refFiles, merged, true); LOG.debug("Merged files into references {}", refFiles); // Clear the references as not needed refFiles.clear(); } if (doSort) { sort(getBlobRecordsFile()); } } finally { refLock.unlock(); } }
@Test public void addBulkAdd() throws IOException { final BlobIdStore store = tracker.store; final Set<String> initAdd = add(store, range(0, 4)); // Add new ids from a file File temp = folder.newFile(); List<String> newAdd = range(5, 9); initAdd.addAll(newAdd); writeStrings(newAdd.iterator(), temp, false); store.addRecords(temp); store.snapshot(); Set<String> retrieved = retrieve(store); assertEquals("Incorrect elements after bulk add from file", initAdd, retrieved); newAdd = range(10, 14); initAdd.addAll(newAdd); store.addRecords(newAdd.iterator()); store.snapshot(); retrieved = retrieve(store); assertEquals("Incorrect elements after bulk add from iterator", initAdd, retrieved); }
@Test public void addSnapshotConcurrentRetrieve() throws IOException, InterruptedException { final BlobIdStore store = tracker.store; final CountDownLatch start = new CountDownLatch(1); final CountDownLatch done = new CountDownLatch(2); Set<String> initAdd = add(store, range(0, 100000)); final Set<String> retrieves = newHashSet(); Thread retrieveThread = retrieveThread(store, retrieves, start, done); Thread snapshotThread = snapshotThread(store, start, done); snapshotThread.start(); retrieveThread.start(); start.countDown(); done.await(); if (retrieves.isEmpty()) { // take a snapshot to ensure that all adds accounted if snapshot finished last store.snapshot(); retrieves.addAll(retrieve(store)); } assertEquals("Incorrect elements after concurrent snapshot/retrieve", initAdd, retrieves); }