/**
 * Modify the configuration of the window cache.
 * <p>
 * The new configuration is applied immediately, and the existing cache is
 * cleared.
 *
 * @param cfg
 *            the new window cache configuration.
 * @throws IllegalArgumentException
 *             the cache configuration contains one or more invalid
 *             settings, usually too low of a limit.
 */
public static void reconfigure(DfsBlockCacheConfig cfg) {
	DfsBlockCache nc = new DfsBlockCache(cfg);
	DfsBlockCache oc = cache;
	cache = nc;
	if (oc != null) {
		// The old cache's contents are gone; zero each pack's cached-size
		// counter so per-pack accounting does not report bytes that no
		// longer exist in any cache.
		for (DfsPackFile pack : oc.getPackFiles())
			pack.key.cachedSize.set(0);
	}
}
// NOTE(review): fragment of DfsBlockCache.getOrLoad(); the actual load path
// and several closing braces were lost in extraction, so the code below is
// intentionally left token-for-token as found (unbalanced braces included).
// "requestedPosition" has no visible definition in this fragment.

// Find the hash-table slot for this (stream, position) and probe the
// published chain for an already-loaded block.
int slot = slot(key, position);
HashEntry e1 = table.get(slot);
DfsBlock v = scan(e1, key, position);
if (v != null && v.contains(key, requestedPosition)) {
	// Fast path: block is cached and covers the requested offset.
	ctx.stats.blockCacheHit++;
	getStat(statHit, key).incrementAndGet();
	return v;

// Miss: reserve capacity up front (may evict other entries), then
// serialize loads of the same region under a striped lock.
reserveSpace(blockSize, key);
ReentrantLock regionLock = lockFor(key, position);
regionLock.lock();
try {
	HashEntry e2 = table.get(slot);
	if (e2 != e1) {
		// The chain changed while we were unlocked; re-scan before
		// doing any I/O.
		v = scan(e2, key, position);
		if (v != null) {
			ctx.stats.blockCacheHit++;
			getStat(statHit, key).incrementAndGet();
			// Another thread loaded it: return the reserved bytes.
			creditSpace(blockSize, key);
			return v;

getStat(statMiss, key).incrementAndGet();
boolean credit = true;
try {
} finally {
	// Return reserved space if the load did not complete normally.
	if (credit)
		creditSpace(blockSize, key);
/**
 * Return the block covering {@code pos}, reading it from the channel on a
 * cache miss.
 *
 * @param pos
 *            byte offset within the pack stream.
 * @return the cached or freshly loaded block.
 * @throws IOException
 *             the stream ended before a full block could be read.
 */
private DfsBlock getOrLoadBlock(long pos) throws IOException {
	long start = toBlockStart(pos);
	DfsBlock cached = cache.get(packKey, start);
	if (cached != null) {
		return cached;
	}

	// Fill an entire block, looping because a single read may return
	// fewer bytes than requested.
	byte[] buf = new byte[blockSize];
	int filled = 0;
	while (filled < blockSize) {
		int n = out.read(start + filled,
				ByteBuffer.wrap(buf, filled, blockSize - filled));
		if (n <= 0) {
			throw new EOFException(JGitText.get().unexpectedEofInPack);
		}
		filled += n;
	}

	DfsBlock loaded = new DfsBlock(packKey, start, buf);
	cache.put(loaded);
	return loaded;
}
/**
 * Fragment of DfsBlockCache.put(): publish {@code v} into the cache unless
 * an equivalent entry already exists.
 *
 * NOTE(review): several closing braces and the construction of {@code ref}
 * on the insert path were lost in extraction; tokens below are kept exactly
 * as found (unbalanced braces included).
 */
<T> Ref<T> put(DfsStreamKey key, long pos, int size, T v) {
	int slot = slot(key, pos);
	HashEntry e1 = table.get(slot);
	Ref<T> ref = scanRef(e1, key, pos);
	if (ref != null)
		// Already present; reuse the existing entry.
		return ref;

	// Account for the new entry before taking the region lock.
	reserveSpace(size, key);
	ReentrantLock regionLock = lockFor(key, pos);
	regionLock.lock();
	try {
		HashEntry e2 = table.get(slot);
		if (e2 != e1) {
			// Chain changed while unlocked; check for a racing insert.
			ref = scanRef(e2, key, pos);
			if (ref != null) {
				// Lost the race: give back the reserved bytes.
				creditSpace(size, key);
				return ref;

	ref.hot = true;
	for (;;) {
		// CAS-publish a cleaned chain headed by the new entry; retry on
		// concurrent modification of the slot.
		HashEntry n = new HashEntry(clean(e2), ref);
		if (table.compareAndSet(slot, e2, n))
			break;
		e2 = table.get(slot);
	addToClock(ref, 0);
} finally {
	regionLock.unlock();
// NOTE(review): fragment of the clock-eviction path; the enclosing method
// signature, the do-loop header, and the advance of the clock hand (which
// would define "dead") were lost in extraction. Tokens are kept exactly as
// found — "dead" and "prev" have no visible definitions in this fragment.
clockLock.lock();
try {
	// Bytes currently accounted as live, plus the space being reserved.
	long live = LongStream.of(getCurrentSize()).sum() + reserve;
	if (maxBytes < live) {
		Ref prev = clockHand;
		// Evict the victim: drop its payload and update accounting.
		dead.value = null;
		live -= dead.size;
		getStat(liveBytes, dead.key).addAndGet(-dead.size);
		getStat(statEvict, dead.key).incrementAndGet();
	} while (maxBytes < live);
	clockHand = prev;
	// Record the reservation against the requesting stream.
	getStat(liveBytes, key).addAndGet(reserve);
} finally {
	clockLock.unlock();
/**
 * Round {@code pos} down to the start of its containing block.
 *
 * @param pos
 *            any byte offset within the stream.
 * @return the offset of the block containing {@code pos}.
 */
long alignToBlock(long pos) {
	int sz = blockSize;
	if (sz == 0) {
		// Block size not yet discovered; fall back to the cache's.
		sz = cache.getBlockSize();
	}
	return pos - (pos % sz);
}
/**
 * Construct a reader for an existing reftable.
 * <p>
 * Delegates to the full constructor using the process-wide default
 * {@code DfsBlockCache} instance.
 *
 * @param desc
 *            description of the reftable within the DFS.
 */
public DfsReftable(DfsPackDescription desc) {
	this(DfsBlockCache.getInstance(), desc);
}
/**
 * Select the striped load lock guarding the region around
 * {@code position} of the given stream.
 *
 * @param key
 *            the stream being accessed.
 * @param position
 *            offset within the stream.
 * @return the lock that serializes loads for this region.
 */
private ReentrantLock lockFor(DfsStreamKey key, long position) {
	// Shift out the sign bit so the array index is non-negative.
	int h = hash(key.hash, position) >>> 1;
	return loadLocks[h % loadLocks.length];
}
/**
 * Get the block containing {@code pos}, loading it on a cache miss.
 * <p>
 * Thin wrapper delegating to the block cache.
 * <p>
 * NOTE(review): the fourth argument is always {@code null} here —
 * presumably an optional loader/channel parameter; confirm against
 * {@code DfsBlockCache.getOrLoad}.
 *
 * @param pos
 *            byte offset within this pack.
 * @param ctx
 *            reader making the request.
 * @throws IOException
 *             the block could not be read from storage.
 */
DfsBlock getOrLoadBlock(long pos, DfsReader ctx) throws IOException {
	return cache.getOrLoad(this, pos, ctx, null);
}
/**
 * Rescan the DFS for the current pack list, reusing already-open pack
 * handles from the previous scan when the description is unchanged.
 *
 * @param old
 *            the previous pack list; packs still present are reused,
 *            packs no longer listed are closed.
 * @return the new pack list, or {@code old} itself when nothing new was
 *         found (after clearing its dirty flag).
 * @throws IOException
 *             the pack list could not be read from the DFS.
 */
private PackList scanPacksImpl(PackList old) throws IOException {
	DfsBlockCache cache = DfsBlockCache.getInstance();
	Map<DfsPackDescription, DfsPackFile> forReuse = reuseMap(old);
	List<DfsPackDescription> scanned = listPacks();
	Collections.sort(scanned);

	List<DfsPackFile> list = new ArrayList<>(scanned.size());
	boolean foundNew = false;
	for (DfsPackDescription dsc : scanned) {
		DfsPackFile oldPack = forReuse.remove(dsc);
		if (oldPack != null) {
			list.add(oldPack);
		} else {
			list.add(cache.getOrCreate(dsc, null));
			foundNew = true;
		}
	}

	// Whatever remains in forReuse vanished from the DFS; release it.
	for (DfsPackFile p : forReuse.values())
		p.close();
	if (list.isEmpty())
		return new PackListImpl(NO_PACKS.packs);
	if (!foundNew) {
		old.clearDirty();
		return old;
	}
	// Zero-length array form lets the JVM allocate the exact size.
	return new PackListImpl(list.toArray(new DfsPackFile[0]));
}
/**
 * Check whether the block containing {@code pos} is absent from the cache.
 *
 * @param pos
 *            byte offset within the pack file.
 * @return true if the containing block is not currently cached.
 */
private boolean notInCache(long pos) {
	long blockStart = file.alignToBlock(pos);
	DfsBlock b = cache.get(file.key, blockStart);
	return b == null;
}
/**
 * Modify the configuration of the window cache.
 * <p>
 * The new configuration is applied immediately, and the existing cache is
 * cleared.
 *
 * @param cfg
 *            the new window cache configuration.
 * @throws IllegalArgumentException
 *             the cache configuration contains one or more invalid
 *             settings, usually too low of a limit.
 */
public static void reconfigure(DfsBlockCacheConfig cfg) {
	DfsBlockCache newCache = new DfsBlockCache(cfg);
	DfsBlockCache oldCache = cache;
	cache = newCache;
	if (oldCache != null) {
		// The replaced cache's contents are abandoned; reset each pack's
		// cached-size counter so accounting matches the empty new cache.
		for (DfsPackFile pack : oldCache.getPackFiles()) {
			pack.key.cachedSize.set(0);
		}
	}
}
/**
 * Purge dead entries (refs whose {@code next} pointer is null) from a hash
 * chain, returning a chain containing only live entries.
 * <p>
 * Recursion rebuilds only the prefix that must change; an unchanged suffix
 * is shared with the input chain.
 *
 * @param top
 *            head of the chain to clean; may be null.
 * @return head of the cleaned chain, or null if no live entry remains.
 */
private static HashEntry clean(HashEntry top) {
	// Skip leading dead entries.
	HashEntry head = top;
	while (head != null && head.ref.next == null) {
		head = head.next;
	}
	if (head == null) {
		return null;
	}
	HashEntry tail = clean(head.next);
	if (tail == head.next) {
		// Nothing below changed; keep sharing the existing nodes.
		return head;
	}
	return new HashEntry(tail, head.ref);
}
/**
 * Get 0..100, defining how full the cache is.
 *
 * @return 0..100, defining how full the cache is.
 */
public long getFillPercentage() {
	// Sum live bytes across all streams, then scale against capacity.
	long used = 0;
	for (long bytes : getCurrentSize()) {
		used += bytes;
	}
	return used * 100 / maxBytes;
}
/**
 * Fragment of DfsBlockCache.put(): publish {@code v} into the cache unless
 * an equivalent entry already exists. (Duplicate of an earlier fragment in
 * this chunk.)
 *
 * NOTE(review): several closing braces and the assignment of {@code ref}
 * on the insert path were lost in extraction; tokens below are kept
 * exactly as found (unbalanced braces included).
 */
<T> Ref<T> put(DfsStreamKey key, long pos, int size, T v) {
	int slot = slot(key, pos);
	HashEntry e1 = table.get(slot);
	Ref<T> ref = scanRef(e1, key, pos);
	if (ref != null)
		// Already present; reuse the existing entry.
		return ref;

	// Account for the new entry before taking the region lock.
	reserveSpace(size, key);
	ReentrantLock regionLock = lockFor(key, pos);
	regionLock.lock();
	try {
		HashEntry e2 = table.get(slot);
		if (e2 != e1) {
			// Chain changed while unlocked; check for a racing insert.
			ref = scanRef(e2, key, pos);
			if (ref != null) {
				// Lost the race: give back the reserved bytes.
				creditSpace(size, key);
				return ref;

	ref.hot = true;
	for (;;) {
		// CAS-publish a cleaned chain headed by the new entry; retry on
		// concurrent modification of the slot.
		HashEntry n = new HashEntry(clean(e2), ref);
		if (table.compareAndSet(slot, e2, n))
			break;
		e2 = table.get(slot);
	addToClock(ref, 0);
} finally {
	regionLock.unlock();
int blockSize(ReadableChannel rc) { // If the block alignment is not yet known, discover it. Prefer the // larger size from either the cache or the file itself. int size = blockSize; if (size == 0) { size = rc.blockSize(); if (size <= 0) size = cache.getBlockSize(); else if (size < cache.getBlockSize()) size = (cache.getBlockSize() / size) * size; blockSize = size; } return size; }
/**
 * Rescan the DFS for the current pack and reftable lists.
 *
 * NOTE(review): fragment — the method body is truncated after the two map
 * lookups; the remainder ran past this chunk, so no further documentation
 * can be grounded here.
 */
private PackList scanPacksImpl(PackList old) throws IOException {
	DfsBlockCache cache = DfsBlockCache.getInstance();
	// Reuse already-open handles from the previous scan where possible.
	Map<DfsPackDescription, DfsPackFile> packs = packMap(old);
	Map<DfsPackDescription, DfsReftable> reftables = reftableMap(old);
/**
 * Map a (stream, position) pair onto a hash-table slot index.
 *
 * @param key
 *            the stream being accessed.
 * @param position
 *            offset within the stream.
 * @return index in {@code [0, tableSize)}.
 */
private int slot(DfsStreamKey key, long position) {
	// Shift out the sign bit so the modulo result is non-negative.
	int h = hash(key.hash, position) >>> 1;
	return h % tableSize;
}