/** {@inheritDoc} */
@Override
public BatchRefUpdate newBatchUpdate() {
	DfsObjDatabase odb = getRepository().getObjectDatabase();
	return new ReftableBatchRefUpdate(this, odb);
}
/** {@inheritDoc} */
@Override
public void execute(RevWalk rw, ProgressMonitor pm, List<String> options) {
	List<ReceiveCommand> pending = getPending();
	if (pending.isEmpty()) {
		return;
	}
	if (options != null) {
		setPushOptions(options);
	}
	if (!checkObjectExistence(rw, pending)) {
		return;
	}
	if (!checkNonFastForwards(rw, pending)) {
		return;
	}
	try {
		Reftable table = refdb.reader();
		if (!checkExpected(table, pending)) {
			return;
		}
		if (!checkConflicting(pending)) {
			return;
		}
		if (!blockUntilTimestamps(MAX_WAIT)) {
			return;
		}
		applyUpdates(rw, pending);
		for (ReceiveCommand cmd : pending) {
			cmd.setResult(OK);
		}
	} catch (IOException e) {
		// Reading or writing the reftable stack failed; fail the batch.
		pending.get(0).setResult(LOCK_FAILURE, e.getMessage());
		ReceiveCommand.abort(pending);
	}
}
private void applyUpdates(RevWalk rw, List<ReceiveCommand> pending)
		throws IOException {
	List<Ref> newRefs = toNewRefs(rw, pending);
	long updateIndex = nextUpdateIndex();
	Set<DfsPackDescription> prune = Collections.emptySet();
	DfsPackDescription pack = odb.newPack(PackSource.INSERT);
	try (DfsOutputStream out = odb.writeFile(pack, REFTABLE)) {
		ReftableConfig cfg = DfsPackCompactor
				.configureReftable(reftableConfig, out);

		ReftableWriter.Stats stats;
		if (refdb.compactDuringCommit()
				&& newRefs.size() * AVG_BYTES <= cfg.getRefBlockSize()
				&& canCompactTopOfStack(cfg)) {
			// Small updates are merged with the current top of the reftable
			// stack, replacing it instead of growing the stack by one table.
			ByteArrayOutputStream tmp = new ByteArrayOutputStream();
			write(tmp, cfg, updateIndex, newRefs, pending);
			stats = compactTopOfStack(out, cfg, tmp.toByteArray());
			prune = toPruneTopOfStack();
		} else {
			stats = write(out, cfg, updateIndex, newRefs, pending);
		}
		pack.addFileExt(REFTABLE);
		pack.setReftableStats(stats);
	}

	odb.commitPack(Collections.singleton(pack), prune);
	odb.addReftable(pack, prune);
	refdb.clearCache();
}
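/*
 * Note on the size check in applyUpdates above: AVG_BYTES is an estimated
 * on-disk cost per ref, so the compact-during-commit path is only taken when
 * the whole update is expected to fit within a single ref block (4 KiB by
 * default in ReftableConfig). Larger batches are written as a new table on
 * top of the stack and left to background compaction.
 */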
private List<ReceiveCommand> getPending() {
	return ReceiveCommand.filter(getCommands(), NOT_ATTEMPTED);
}
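// Usage sketch (illustrative, not part of the class above): how a caller
// might drive newBatchUpdate()/execute() against a DFS-backed repository.
// The helper name, the branch name, and "commitId" (assumed to reference a
// commit already stored in the repository) are hypothetical.
static void createTopicBranch(DfsRepository repo, ObjectId commitId)
		throws IOException {
	BatchRefUpdate bru = repo.getRefDatabase().newBatchUpdate();
	bru.addCommand(new ReceiveCommand(ObjectId.zeroId(), commitId,
			"refs/heads/topic", ReceiveCommand.Type.CREATE));
	try (RevWalk rw = new RevWalk(repo)) {
		// Commands are validated and applied together in one new reftable.
		bru.execute(rw, NullProgressMonitor.INSTANCE);
	}
	for (ReceiveCommand cmd : bru.getCommands()) {
		if (cmd.getResult() != ReceiveCommand.Result.OK) {
			throw new IOException("ref update failed: " + cmd.getResult());
		}
	}
}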