/** @return debug description naming this chunk by its key. */
@Override
public String toString() {
	StringBuilder s = new StringBuilder();
	s.append("PackChunk[");
	s.append(getChunkKey());
	s.append("]");
	return s.toString();
}
/** @return debug description naming this chunk by its key. */
@Override
public String toString() {
	return new StringBuilder("PackChunk[").append(getChunkKey())
			.append(']').toString();
}
/**
 * Record a loaded chunk in the ready pool and wake blocked consumers.
 *
 * @param chunk the chunk that just finished loading.
 */
private void chunkIsReady(PackChunk chunk) {
	final ChunkKey key = chunk.getChunkKey();
	ready.put(key, chunk);
	bytesReady += chunk.getTotalSize();
	// Only consumers blocked in WAITING state need to be woken up.
	final Status prior = status.put(key, Status.READY);
	if (prior == Status.WAITING)
		notifyAll();
}
/**
 * Record a loaded chunk in the ready pool and wake blocked consumers.
 *
 * @param chunk the chunk that just finished loading.
 */
private void chunkIsReady(PackChunk chunk) {
	final ChunkKey key = chunk.getChunkKey();
	ready.put(key, chunk);
	bytesReady += chunk.getTotalSize();
	// notifyAll only when some thread is actually blocked waiting.
	if (status.put(key, Status.READY) == Status.WAITING)
		notifyAll();
}
/** Evict least-recently-used chunks until the cache fits in maxBytes. */
private void prune() {
	while (curBytes > maxBytes) {
		final Node victim = lruTail;
		if (victim == null)
			break; // cache is empty; nothing left to evict
		final PackChunk evicted = victim.chunk;
		curBytes -= evicted.getTotalSize();
		byKey.remove(evicted.getChunkKey());
		remove(victim);
	}
}
/** Drop chunks from the cold end of the LRU chain until under budget. */
private void prune() {
	for (Node tail = lruTail; maxBytes < curBytes; tail = lruTail) {
		if (tail == null)
			return; // nothing cached; cannot shrink further
		final PackChunk c = tail.chunk;
		curBytes -= c.getTotalSize();
		byKey.remove(c.getChunkKey());
		remove(tail);
	}
}
/**
 * Insert a chunk into the cache, or refresh it if this exact instance
 * is already cached.
 *
 * @param chunk the chunk to retain.
 */
void put(PackChunk chunk) {
	final ChunkKey key = chunk.getChunkKey();
	final Node existing = byKey.get(key);
	if (existing != null && existing.chunk == chunk) {
		// Identical instance already cached; just bump its LRU position.
		hit(existing);
		return;
	}
	curBytes += chunk.getTotalSize();
	prune();
	final Node node = new Node();
	node.chunk = chunk;
	byKey.put(key, node);
	first(node);
}
/**
 * Insert a chunk into the cache, or refresh it if this exact instance
 * is already cached.
 *
 * @param chunk the chunk to retain.
 */
void put(PackChunk chunk) {
	Node n = byKey.get(chunk.getChunkKey());
	if (n != null && n.chunk == chunk) {
		// Same object is already present; refresh recency and stop.
		hit(n);
		return;
	}

	// Charge the new chunk first so prune() can make room for it.
	curBytes += chunk.getTotalSize();
	prune();

	n = new Node();
	n.chunk = chunk;
	byKey.put(chunk.getChunkKey(), n);
	first(n);
}
/**
 * Scan the ready pool for a chunk containing the given object.
 *
 * @param repo repository the object must belong to.
 * @param objId object to locate.
 * @return the chunk and offset holding the object, or null if no ready
 *         chunk contains it.
 */
synchronized ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) {
	for (PackChunk candidate : ready.values()) {
		final int offset = candidate.findOffset(repo, objId);
		if (offset >= 0)
			return new ChunkAndOffset(
					useReadyChunk(candidate.getChunkKey()), offset);
	}
	return null;
}
/**
 * Copy bytes out of the current chunk, chaining to the next fragment
 * when the current chunk is exhausted.
 *
 * @param dst destination buffer.
 * @param pos offset within {@code dst} to begin writing at.
 * @param cnt maximum number of bytes to copy.
 * @return number of bytes copied; 0 when no further data is available.
 * @throws IOException the next fragment could not be loaded.
 */
@Override protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException {
	int n = dbChunk.read(dbPtr, dst, pos, cnt);
	if (0 < n) {
		dbPtr += n;
		return n;
	}

	// ChunkMeta for fragments is delayed writing, so it isn't available
	// on the chunk if the chunk was read-back from the database. Use
	// our copy of ChunkMeta instead of the PackChunk's copy.
	ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey());
	if (meta == null)
		return 0; // no metadata; treat the current chunk as final
	ChunkKey next = ChunkMetaUtil.getNextFragment(meta, dbChunk.getChunkKey());
	if (next == null)
		return 0; // current chunk is the last fragment in the chain
	seekChunk(next, false);
	n = dbChunk.read(0, dst, pos, cnt);
	dbPtr = n; // position now measured within the newly loaded fragment
	return n;
}
/**
 * Scan the ready pool for a chunk containing the given object.
 *
 * @param repo repository the object must belong to.
 * @param objId object to locate.
 * @return the chunk and offset holding the object, or null if no ready
 *         chunk contains it.
 */
synchronized ChunkAndOffset find(RepositoryKey repo, AnyObjectId objId) {
	for (PackChunk c : ready.values()) {
		int p = c.findOffset(repo, objId);
		if (p < 0)
			continue; // object not in this chunk; keep scanning
		return new ChunkAndOffset(useReadyChunk(c.getChunkKey()), p);
	}
	return null;
}
void copyEntireChunkAsIs(PackOutputStream out, DhtObjectToPack obj, boolean validate) throws IOException { if (validate && !isValid()) { if (obj != null) throw new CorruptObjectException(obj, MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); else throw new DhtException(MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); } // Do not copy the trailer onto the output stream. out.write(dataBuf, dataPtr, dataLen - TRAILER_SIZE); }
void copyEntireChunkAsIs(PackOutputStream out, DhtObjectToPack obj, boolean validate) throws IOException { if (validate && !isValid()) { if (obj != null) throw new CorruptObjectException(obj, MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); else throw new DhtException(MessageFormat.format( DhtText.get().corruptChunk, getChunkKey())); } // Do not copy the trailer onto the output stream. out.write(dataBuf, dataPtr, dataLen - TRAILER_SIZE); }
/**
 * Take a chunk out of the ready pool and mark it consumed.
 *
 * @param key key of the chunk to consume; assumed present in the ready
 *        pool — NOTE(review): a missing key would NPE here, callers
 *        appear to guarantee READY status first.
 * @return the consumed chunk.
 */
private PackChunk useReadyChunk(ChunkKey key) {
	final PackChunk taken = ready.remove(key);
	bytesReady -= taken.getTotalSize();
	status.put(taken.getChunkKey(), Status.DONE);
	if (automaticallyPushHints) {
		// Queue this chunk's prefetch hints and keep the pipeline busy.
		push(taken.getMeta());
		maybeStartGet();
	}
	return taken;
}
/**
 * Take a chunk out of the ready pool and mark it consumed.
 *
 * @param key key of the chunk to consume; assumed present in the ready
 *        pool — NOTE(review): a missing key would NPE here, callers
 *        appear to guarantee READY status first.
 * @return the consumed chunk.
 */
private PackChunk useReadyChunk(ChunkKey key) {
	PackChunk chunk = ready.remove(key);
	status.put(chunk.getChunkKey(), Status.DONE);
	bytesReady -= chunk.getTotalSize();

	if (automaticallyPushHints) {
		// Feed follow-on hints into the prefetcher before returning.
		push(chunk.getMeta());
		maybeStartGet();
	}
	return chunk;
}
/**
 * Copy bytes out of the current chunk, chaining to the next fragment
 * when the current chunk is exhausted.
 *
 * @param dst destination buffer.
 * @param pos offset within {@code dst} to begin writing at.
 * @param cnt maximum number of bytes to copy.
 * @return number of bytes copied; 0 when no further data is available.
 * @throws IOException the next fragment could not be loaded.
 */
@Override protected int readDatabase(byte[] dst, int pos, int cnt) throws IOException {
	int n = dbChunk.read(dbPtr, dst, pos, cnt);
	if (0 < n) {
		dbPtr += n;
		return n;
	}

	// ChunkMeta for fragments is delayed writing, so it isn't available
	// on the chunk if the chunk was read-back from the database. Use
	// our copy of ChunkMeta instead of the PackChunk's copy.
	ChunkMeta meta = chunkMeta.get(dbChunk.getChunkKey());
	if (meta == null)
		return 0; // no metadata; treat the current chunk as final
	ChunkKey next = ChunkMetaUtil.getNextFragment(meta, dbChunk.getChunkKey());
	if (next == null)
		return 0; // current chunk is the last fragment in the chain
	seekChunk(next, false);
	n = dbChunk.read(0, dst, pos, cnt);
	dbPtr = n; // position now measured within the newly loaded fragment
	return n;
}
static ObjectLoader read(PackChunk pc, int pos, final DhtReader ctx, final int typeHint) throws IOException { try { return read1(pc, pos, ctx, typeHint, true /* use recentChunks */); } catch (DeltaChainCycleException cycleFound) { // A cycle can occur if recentChunks cache was used by the reader // to satisfy an OBJ_REF_DELTA, but the chunk that was chosen has // a reverse delta back onto an object already being read during // this invocation. Its not as uncommon as it sounds, as the Git // wire protocol can sometimes copy an object the repository already // has when dealing with reverts or cherry-picks. // // Work around the cycle by disabling the recentChunks cache for // this resolution only. This will force the DhtReader to re-read // OBJECT_INDEX and consider only the oldest chunk for any given // object. There cannot be a cycle if the method only walks along // the oldest chunks. try { ctx.getStatistics().deltaChainCycles++; return read1(pc, pos, ctx, typeHint, false /* no recentChunks */); } catch (DeltaChainCycleException cannotRecover) { throw new DhtException(MessageFormat.format( DhtText.get().cycleInDeltaChain, pc.getChunkKey(), Integer.valueOf(pos))); } } }
void push(DhtReader ctx, Collection<RevCommit> roots) { // Approximate walk by using hints from the most recent commit. // Since the commits were recently parsed by the reader, we can // ask the reader for their chunk locations and most likely get // cache hits. int time = -1; PackChunk chunk = null; for (RevCommit cmit : roots) { if (time < cmit.getCommitTime()) { ChunkAndOffset p = ctx.getChunkGently(cmit); if (p != null && p.chunk.getMeta() != null) { time = cmit.getCommitTime(); chunk = p.chunk; } } } if (chunk != null) { synchronized (this) { status.put(chunk.getChunkKey(), Status.DONE); push(chunk.getMeta()); } } }
/**
 * Resolve the chunk that stores an object, trying the cheapest sources
 * first: an embedded hint, the ref database, the recent-chunks cache,
 * then finally the object index.
 *
 * @param objId object to locate.
 * @return key of a chunk holding the object, or null if unknown.
 * @throws DhtException the underlying database cannot be queried.
 */
ChunkKey findChunk(AnyObjectId objId) throws DhtException {
	if (objId instanceof RefDataUtil.IdWithChunk)
		return ((RefDataUtil.IdWithChunk) objId).getChunkKey();

	final ChunkKey hinted = repository.getRefDatabase().findChunk(objId);
	if (hinted != null)
		return hinted;

	final ChunkAndOffset cached = recentChunks.find(repo, objId);
	if (cached != null)
		return cached.chunk.getChunkKey();

	// Fall back to the object index; any one link suffices.
	for (ObjectInfo info : find(objId))
		return info.getChunkKey();
	return null;
}
/**
 * Resolve the chunk that stores an object, trying the cheapest sources
 * first: an embedded hint, the ref database, the recent-chunks cache,
 * then finally the object index.
 *
 * @param objId object to locate.
 * @return key of a chunk holding the object, or null if unknown.
 * @throws DhtException the underlying database cannot be queried.
 */
ChunkKey findChunk(AnyObjectId objId) throws DhtException {
	// The cheapest source is a hint carried on the id itself.
	if (objId instanceof RefDataUtil.IdWithChunk)
		return ((RefDataUtil.IdWithChunk) objId).getChunkKey();

	ChunkKey key = repository.getRefDatabase().findChunk(objId);
	if (key == null) {
		ChunkAndOffset r = recentChunks.find(repo, objId);
		if (r != null)
			return r.chunk.getChunkKey();
		// Last resort: consult the object index; first link wins.
		for (ObjectInfo link : find(objId))
			return link.getChunkKey();
	}
	return key;
}