@Override public void run() { time = ACCURATE.getTime(); } }, FAST_CLOCK_INTERVAL, FAST_CLOCK_INTERVAL, TimeUnit.MILLISECONDS);
private void updateLastModified(String name) { try { // Update file timestamp manually to mimic last updated time updates without sleeping CLOCK.waitUntil(CLOCK.getTime() + TimeUnit.SECONDS.toMillis(2)); File f = new File(directory, name); f.setLastModified(CLOCK.getTimeIncreasing()); } catch (InterruptedException ie) { // ignored } }
public Counters(Clock clock) { long time = clock.getTime(); this.clock = clock; this.loginTime = time; this.accessTime = time; }
/**
 * Records the outcome of a cleanup run: the reclaimed byte count, the
 * resulting repository size, and the time at which it finished.
 */
@Override
public void cleaned(long reclaimed, long current) {
    lastReclaimedSize = reclaimed;
    lastRepositorySize = current;
    lastCleanup = clock.getTime();
}
/**
 * Serializes this record as pipe-separated fields: the blob id, the clock's
 * current time, then every id joined with {@code |}.
 */
@Override
public String toString() {
    final String joinedIds = Joiner.on("|").join(ids);
    return String.format("%s|%s|%s", blobId, clock.getTime(), joinedIds);
} }
/** Supplies the current time from the injected clock. */
@Override
protected long getTime() {
    final long now = clock.getTime();
    return now;
} }
/**
 * Creates a callback that records property-index updates under the given
 * index path, stamping them with the clock's current time.
 */
public PropertyIndexUpdateCallback(String indexPath, NodeBuilder builder, NodeState rootState, Clock clock) {
    this.indexPath = indexPath;
    this.builder = builder;
    this.updateTime = clock.getTime();
    this.uniquenessConstraintValidator =
            new UniquenessConstraintValidator(indexPath, builder, rootState);
}
/**
 * Same as {@link ClusterNodeInfoDocument#isRecoveryNeeded(long)}.
 *
 * @deprecated use {@link ClusterNodeInfoDocument#isRecoveryNeeded(long)}
 *             instead.
 */
@Deprecated // added to match the @deprecated javadoc so callers get a compiler warning
public boolean isRecoveryNeeded(@NotNull ClusterNodeInfoDocument nodeInfo) {
    return nodeInfo.isRecoveryNeeded(clock.getTime());
} }
/**
 * Creates a store whose simulated timeline starts at the given time.
 *
 * @param startTime epoch millis at which the simulated timeline begins
 */
public TimeLapsedBlobStore(long startTime) {
    // Bug fix: the startTime parameter was ignored and the field was set from
    // clock.getTime() instead, making the argument dead. Use the parameter.
    this.startTime = startTime;
    store = Maps.newHashMap();
    metadata = Maps.newHashMap();
}
/**
 * Creates a checkpoint valid for {@code lifetime} millis, tagging it with a
 * creation-date property taken from the clock.
 *
 * @return the checkpoint reference produced by the node store
 */
@Override
public String createCheckpoint(long lifetime) {
    Map<String, String> props = Maps.newHashMap();
    props.put(CREATION_DATE, String.valueOf(clock.getTime()));
    return nodeStore.checkpoint(lifetime, props);
}
/**
 * Refreshes the session root, optionally rebasing so transient changes
 * survive, and refreshes the permission provider if one is set.
 *
 * @param keepChanges when {@code true} and there are pending changes,
 *                    rebase instead of discarding them with a plain refresh
 */
public void refresh(boolean keepChanges) {
    sessionCounters.refreshTime = clock.getTime();
    sessionCounters.refreshCount++;
    if (!keepChanges || !hasPendingChanges()) {
        // nothing to preserve: plain refresh
        root.refresh();
    } else {
        // carry the transient changes over onto the latest state
        root.rebase();
    }
    if (permissionProvider != null) {
        permissionProvider.refresh();
    }
}
/** Moves the node store's (virtual) clock forward by the given amount. */
private void advanceClock(long time, TimeUnit unit) throws InterruptedException {
    final Clock nsClock = ns.getClock();
    final long target = nsClock.getTime() + unit.toMillis(time);
    nsClock.waitUntil(target);
}
/**
 * With a max age spanning the whole setup window, GC must delete nothing:
 * every blob added during setup is still present afterwards, and the stats
 * report only candidates, no deletions.
 */
@Test
public void noGc() throws Exception {
    log.info("Starting noGc()");
    long afterSetupTime = clock.getTime();
    log.info("after setup time {}", afterSetupTime);
    // Max age just beyond the setup window, so no blob is old enough to collect.
    long maxAge = afterSetupTime - cluster.startReferenceTime + 2;
    Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(maxAge), false);
    assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsAdded, existingAfterGC).isEmpty());
    int candidates = cluster.blobStoreState.blobsAdded.size() - cluster.blobStoreState.blobsPresent.size();
    assertStats(cluster.statsProvider, 1, 0, 0, candidates, NAME);
}
/**
 * Verifies that blobs referenced by a held checkpoint survive GC when the
 * max age is bounded by the oldest checkpoint's creation timestamp.
 */
@Test
public void gcCheckpointHeldNoAddition() throws Exception {
    // Fixed log-message typo: "Staring" -> "Starting".
    log.info("Starting gcCheckpointHeldNoAddition()");
    long afterSetupTime = clock.getTime();
    log.info("afterSetupTime {}", afterSetupTime);
    checkpointMBean.createCheckpoint(100);
    // Bound the GC max age by the oldest checkpoint so referenced blobs are retained.
    long maxGcAge = checkpointMBean.getOldestCheckpointCreationTimestamp() - afterSetupTime;
    Set<String> existingAfterGC = executeGarbageCollection(cluster, cluster.getCollector(maxGcAge), false);
    assertTrue(Sets.symmetricDifference(cluster.blobStoreState.blobsPresent, existingAfterGC).isEmpty());
}
/**
 * Runs a consistency check right after setup and expects zero missing blobs,
 * plus matching operation statistics on the collector.
 */
@Test
public void checkConsistency() throws Exception {
    log.info("Starting checkConsistency()");
    long afterSetupTime = clock.getTime();
    log.info("after setup time {}", afterSetupTime);
    MarkSweepGarbageCollector collector = cluster.getCollector(0);
    assertEquals(0, collector.checkConsistency());
    assertStats(cluster.statsProvider, 1, 0, 0, 0, CONSISTENCY_NAME);
    assertStatsBean(collector.getConsistencyOperationStats(), 1, 0, 0);
}
/**
 * After more than one lease-update interval has elapsed, renewing the cluster
 * id lease must succeed and be reported to the stats collector.
 */
@Test
public void leaseUpdate() throws Exception {
    long twoIntervals = ClusterNodeInfo.DEFAULT_LEASE_UPDATE_INTERVAL_MILLIS * 2;
    clock.waitUntil(clock.getTime() + twoIntervals);
    assertTrue(nodeStore.renewClusterIdLease());
    verify(statsCollector).doneLeaseUpdate(anyLong());
}
/**
 * Creates garbage, jumps the clock one hour ahead and runs version GC;
 * at least one delete message must appear, and each must stay within the
 * configured batch size.
 */
@Test
public void gc() throws Exception {
    createGarbage();
    clock.waitUntil(clock.getTime() + TimeUnit.HOURS.toMillis(1));
    VersionGarbageCollector gc = ns.getVersionGarbageCollector();
    gc.gc(30, TimeUnit.MINUTES);
    List<String> messages = getDeleteMessages();
    assertThat(messages.size(), greaterThan(0));
    for (String message : messages) {
        // Deletions are expected to be split into batches of at most BATCH_SIZE.
        assertThat(getNumDeleted(message), lessThan(BATCH_SIZE + 1));
    }
}
@Test public void disbaled() throws Exception{ handler.setClock(clock); handler.indexUpdateFailed("async", "/oak:index/foo", new Exception()); clock.waitUntil(clock.getTime() + handler.getCorruptIntervalMillis() + 1); assertTrue(handler.getCorruptIndexData("async").containsKey("/oak:index/foo")); handler.setCorruptInterval(0, TimeUnit.SECONDS); //With timeout set to zero no corrupt index should be reported assertFalse(handler.getCorruptIndexData("async").containsKey("/oak:index/foo")); }
private void expireLease(ClusterNodeInfo info) throws InterruptedException { // let lease expire clock.waitUntil(info.getLeaseEndTime() + ClusterNodeInfo.DEFAULT_LEASE_UPDATE_INTERVAL_MILLIS); // check if expired -> recovery is needed MissingLastRevSeeker util = new MissingLastRevSeeker(store, clock); String key = String.valueOf(info.getId()); ClusterNodeInfoDocument infoDoc = store.find(Collection.CLUSTER_NODES, key); assertNotNull(infoDoc); assertTrue(infoDoc.isRecoveryNeeded(clock.getTime())); }
@Test public void acquireRecoveryLockOnAlreadyLocked() throws Exception { ClusterNodeInfo.getInstance(store, NOOP, null, null, 1); // expire the lease clock.waitUntil(clock.getTime() + DEFAULT_LEASE_DURATION_MILLIS + 1); ClusterNodeInfo.getInstance(store, NOOP, null, null, 2); assertTrue(seeker.acquireRecoveryLock(1, 2)); assertFalse(seeker.acquireRecoveryLock(1, 3)); }