/**
 * Lazily allocates the output pack description for compaction, sized by
 * {@link #estimatePackSize()}. Subsequent calls are no-ops.
 */
private void initOutDesc(DfsObjDatabase objdb) throws IOException {
	if (outDesc != null) {
		return;
	}
	outDesc = objdb.newPack(COMPACT, estimatePackSize());
}
/**
 * Writes a new reftable into a fresh GC pack when a reftable configuration
 * is present; does nothing otherwise.
 */
private void writeReftable() throws IOException {
	if (reftableConfig == null) {
		return;
	}
	DfsPackDescription pack = objdb.newPack(GC);
	newPackDesc.add(pack);
	// Keep the stats list index-aligned with newPackDesc.
	newPackStats.add(null);
	writeReftable(pack);
}
/**
 * Generate a new unique name for a pack file.
 *
 * <p>
 * Default implementation of this method would be equivalent to
 * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}. But the
 * clients can override this method to use the given
 * {@code estimatedPackSize} value more efficiently in the process of
 * creating a new
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
 *
 * @param source
 *            where the pack stream is created.
 * @param estimatedPackSize
 *            the estimated size of the pack.
 * @return a unique name for the pack file. Must not collide with any other
 *         pack file name in the same DFS.
 * @throws java.io.IOException
 *             a new unique pack description cannot be generated.
 */
protected DfsPackDescription newPack(PackSource source, long estimatedPackSize)
		throws IOException {
	DfsPackDescription pack = newPack(source);
	pack.setEstimatedPackSize(estimatedPackSize);
	return pack;
}
/** {@inheritDoc} */
@Override
protected void onPackHeader(long objectCount) throws IOException {
	if (objectCount == 0) {
		// No objects will follow; mark the pack empty and keep only a
		// small scratch buffer.
		isEmptyPack = true;
		currBuf = new byte[256];
		return;
	}
	packDsc = objdb.newPack(DfsObjDatabase.PackSource.RECEIVE);
	out = objdb.writeFile(packDsc, PACK);
	packKey = packDsc.getStreamKey(PACK);

	// Choose the buffer size from the output stream's preferred block
	// size. If the stream reports none, use the cache's block size; if
	// the stream block is smaller than the cache block, round the cache
	// block down to a multiple of the stream block so writes stay
	// aligned to both.
	int size = out.blockSize();
	if (size <= 0)
		size = blockCache.getBlockSize();
	else if (size < blockCache.getBlockSize())
		size = (blockCache.getBlockSize() / size) * size;
	blockSize = size;
	currBuf = new byte[blockSize];
}
/**
 * Applies the pending ref updates by writing a new reftable, optionally
 * folding it into the table at the top of the reftable stack, then
 * publishing the result to the object database.
 */
private void applyUpdates(RevWalk rw, List<ReceiveCommand> pending)
		throws IOException {
	List<Ref> newRefs = toNewRefs(rw, pending);
	long updateIndex = nextUpdateIndex();
	Set<DfsPackDescription> prune = Collections.emptySet();
	DfsPackDescription pack = odb.newPack(PackSource.INSERT);
	try (DfsOutputStream out = odb.writeFile(pack, REFTABLE)) {
		ReftableConfig cfg = DfsPackCompactor
				.configureReftable(reftableConfig, out);

		ReftableWriter.Stats stats;
		// Small updates (estimated via AVG_BYTES per ref) are merged with
		// the current top of the stack so the stack does not grow; the
		// update is first serialized to an in-memory buffer, then
		// compacted together with the existing top table.
		if (refdb.compactDuringCommit()
				&& newRefs.size() * AVG_BYTES <= cfg.getRefBlockSize()
				&& canCompactTopOfStack(cfg)) {
			ByteArrayOutputStream tmp = new ByteArrayOutputStream();
			write(tmp, cfg, updateIndex, newRefs, pending);
			stats = compactTopOfStack(out, cfg, tmp.toByteArray());
			prune = toPruneTopOfStack();
		} else {
			// Otherwise append the update as a new table.
			stats = write(out, cfg, updateIndex, newRefs, pending);
		}
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(stats);
	}

	// Publish first, then retire any compacted-away tables, then drop the
	// cached stack so readers see the new state.
	odb.commitPack(Collections.singleton(pack), prune);
	odb.addReftable(pack, prune);
	refdb.clearCache();
}
private void beginPack() throws IOException { objectList = new BlockList<>(); objectMap = new ObjectIdOwnerMap<>(); cache = DfsBlockCache.getInstance(); rollback = true; packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT); DfsOutputStream dfsOut = db.writeFile(packDsc, PACK); packDsc.setBlockSize(PACK, dfsOut.blockSize()); packOut = new PackStream(dfsOut); packKey = packDsc.getStreamKey(PACK); // Write the header as though it were a single object pack. byte[] buf = packOut.hdrBuf; System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4); NB.encodeInt32(buf, 4, 2); // Always use pack version 2. NB.encodeInt32(buf, 8, 1); // Always assume 1 object. packOut.write(buf, 0, 12); }
/**
 * Lazily allocates the COMPACT output pack description, sized by
 * {@link #estimatePackSize()}; no-op if it already exists.
 */
private void initOutDesc(DfsObjDatabase objdb) throws IOException {
	if (outDesc == null) {
		outDesc = objdb.newPack(COMPACT, estimatePackSize());
	}
}
private DfsPackDescription writePack(PackSource source, PackWriter pw, ProgressMonitor pm, long estimatedPackSize) throws IOException { DfsPackDescription pack = repo.getObjectDatabase().newPack(source, estimatedPackSize);
/**
 * Writes a new reftable into a fresh GC pack when a reftable configuration
 * is present; does nothing otherwise.
 */
private void writeReftable() throws IOException {
	if (reftableConfig != null) {
		DfsPackDescription pack = objdb.newPack(GC);
		newPackDesc.add(pack);
		// Placeholder keeps the stats list index-aligned with newPackDesc.
		newPackStats.add(null);
		writeReftable(pack);
	}
}
/**
 * Generate a new unique name for a pack file.
 *
 * <p>
 * Default implementation of this method would be equivalent to
 * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}. But the
 * clients can override this method to use the given
 * {@code estimatedPackSize} value more efficiently in the process of
 * creating a new
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
 *
 * @param source
 *            where the pack stream is created.
 * @param estimatedPackSize
 *            the estimated size of the pack.
 * @return a unique name for the pack file. Must not collide with any other
 *         pack file name in the same DFS.
 * @throws java.io.IOException
 *             a new unique pack description cannot be generated.
 */
protected DfsPackDescription newPack(PackSource source, long estimatedPackSize)
		throws IOException {
	DfsPackDescription pack = newPack(source);
	pack.setEstimatedPackSize(estimatedPackSize);
	return pack;
}
/**
 * Prepares a RECEIVE pack for the incoming stream, or flags an empty pack
 * when no objects will follow. Sizes the write buffer from the output
 * stream's preferred block size, keeping it aligned with the block cache.
 */
@Override
protected void onPackHeader(long objectCount) throws IOException {
	if (objectCount == 0) {
		isEmptyPack = true;
		currBuf = new byte[256];
		return;
	}

	packDsc = objdb.newPack(DfsObjDatabase.PackSource.RECEIVE);
	packKey = new DfsPackKey();
	out = objdb.writeFile(packDsc, PACK);

	int cacheSz = blockCache.getBlockSize();
	int streamSz = out.blockSize();
	int sz;
	if (streamSz <= 0) {
		// Stream has no preference; use the cache's block size.
		sz = cacheSz;
	} else if (streamSz < cacheSz) {
		// Round the cache block down to a multiple of the stream block.
		sz = (cacheSz / streamSz) * streamSz;
	} else {
		sz = streamSz;
	}
	blockSize = sz;
	currBuf = new byte[blockSize];
}
private void beginPack() throws IOException { objectList = new BlockList<PackedObjectInfo>(); objectMap = new ObjectIdOwnerMap<PackedObjectInfo>(); cache = DfsBlockCache.getInstance(); rollback = true; packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT); packOut = new PackStream(db.writeFile(packDsc, PACK)); packKey = new DfsPackKey(); // Write the header as though it were a single object pack. byte[] buf = packOut.hdrBuf; System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4); NB.encodeInt32(buf, 4, 2); // Always use pack version 2. NB.encodeInt32(buf, 8, 1); // Always assume 1 object. packOut.write(buf, 0, 12); }
/** {@inheritDoc} */
@Override
protected void onPackHeader(long objectCount) throws IOException {
	if (objectCount == 0) {
		// No objects will follow; keep only a small scratch buffer.
		isEmptyPack = true;
		currBuf = new byte[256];
		return;
	}

	packDsc = objdb.newPack(DfsObjDatabase.PackSource.RECEIVE);
	out = objdb.writeFile(packDsc, PACK);
	packKey = packDsc.getStreamKey(PACK);

	// Size the write buffer from the stream's preferred block size,
	// falling back to (or aligning with) the block cache's block size.
	int cacheSz = blockCache.getBlockSize();
	int streamSz = out.blockSize();
	int sz;
	if (streamSz <= 0) {
		sz = cacheSz;
	} else if (streamSz < cacheSz) {
		// Round the cache block down to a multiple of the stream block.
		sz = (cacheSz / streamSz) * streamSz;
	} else {
		sz = streamSz;
	}
	blockSize = sz;
	currBuf = new byte[blockSize];
}
/**
 * Applies the pending ref updates by writing a new reftable, optionally
 * folding it into the table at the top of the reftable stack, then
 * publishing the result to the object database.
 */
private void applyUpdates(RevWalk rw, List<ReceiveCommand> pending)
		throws IOException {
	List<Ref> newRefs = toNewRefs(rw, pending);
	long updateIndex = nextUpdateIndex();
	Set<DfsPackDescription> prune = Collections.emptySet();
	DfsPackDescription pack = odb.newPack(PackSource.INSERT);
	try (DfsOutputStream out = odb.writeFile(pack, REFTABLE)) {
		ReftableConfig cfg = DfsPackCompactor
				.configureReftable(reftableConfig, out);

		ReftableWriter.Stats stats;
		// Small updates (estimated via AVG_BYTES per ref) are merged with
		// the current top of the stack so the stack does not grow; the
		// update is first serialized to an in-memory buffer, then
		// compacted together with the existing top table.
		if (refdb.compactDuringCommit()
				&& newRefs.size() * AVG_BYTES <= cfg.getRefBlockSize()
				&& canCompactTopOfStack(cfg)) {
			ByteArrayOutputStream tmp = new ByteArrayOutputStream();
			write(tmp, cfg, updateIndex, newRefs, pending);
			stats = compactTopOfStack(out, cfg, tmp.toByteArray());
			prune = toPruneTopOfStack();
		} else {
			// Otherwise append the update as a new table.
			stats = write(out, cfg, updateIndex, newRefs, pending);
		}
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(stats);
	}

	// Publish first, then retire any compacted-away tables, then drop the
	// cached stack so readers see the new state.
	odb.commitPack(Collections.singleton(pack), prune);
	odb.addReftable(pack, prune);
	refdb.clearCache();
}
/**
 * Starts a new INSERT pack: resets in-memory object bookkeeping, opens the
 * pack stream, and writes the 12-byte pack header.
 */
private void beginPack() throws IOException {
	objectList = new BlockList<>();
	objectMap = new ObjectIdOwnerMap<>();
	cache = DfsBlockCache.getInstance();
	rollback = true;
	packDsc = db.newPack(DfsObjDatabase.PackSource.INSERT);
	DfsOutputStream dfsOut = db.writeFile(packDsc, PACK);
	packDsc.setBlockSize(PACK, dfsOut.blockSize());
	packOut = new PackStream(dfsOut);
	packKey = packDsc.getStreamKey(PACK);

	// Write the header as though it were a single object pack.
	byte[] buf = packOut.hdrBuf;
	System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
	NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
	NB.encodeInt32(buf, 8, 1); // Always assume 1 object.
	packOut.write(buf, 0, 12);
}
DfsPackDescription pack = objdb.newPack(COMPACT); try { writePack(objdb, pack, pw, pm);
private DfsPackDescription writePack(PackSource source, PackWriter pw, ProgressMonitor pm) throws IOException { DfsPackDescription pack = repo.getObjectDatabase().newPack(source); newPackDesc.add(pack);
private DfsPackDescription writePack(PackSource source, PackWriter pw, ProgressMonitor pm, long estimatedPackSize) throws IOException { DfsPackDescription pack = repo.getObjectDatabase().newPack(source, estimatedPackSize);