/**
 * Create a consistency checker for a DFS repository.
 *
 * @param repository
 *            the dfs repository to check.
 */
public DfsFsck(DfsRepository repository) {
	repo = repository;
	objdb = repository.getObjectDatabase();
}
/** {@inheritDoc} */
@Override
public BatchRefUpdate newBatchUpdate() {
	// Ref updates are applied through the reftable stack held by the
	// repository's object database.
	return new ReftableBatchRefUpdate(this,
			getRepository().getObjectDatabase());
}
@Override
boolean exists() throws IOException {
	// The ref database exists once at least one reftable file is present.
	DfsReftable[] tables = getRepository().getObjectDatabase().getReftables();
	return tables.length > 0;
}
/**
 * Construct a garbage collector over the given repository.
 *
 * @param repository
 *            repository objects to be packed will be read from.
 */
public DfsGarbageCollector(DfsRepository repository) {
	repo = repository;
	refdb = repository.getRefDatabase();
	objdb = repository.getObjectDatabase();
	// A GC run emits only a few packs; a small initial capacity suffices.
	newPackDesc = new ArrayList<>(4);
	newPackStats = new ArrayList<>(4);
	newPackObj = new ArrayList<>(4);
	packConfig = new PackConfig(repo);
	packConfig.setIndexVersion(2);
}
/** {@inheritDoc} */
@Override
public void scanForRepoChanges() throws IOException {
	// Reload the ref list and drop cached object/pack state so the next
	// read observes changes made by other processes to the DFS store.
	getRefDatabase().refresh();
	getObjectDatabase().clearCache();
}
/**
 * Obtain a handle to the stack of reftables.
 *
 * @return (possibly cached) handle to the stack.
 * @throws java.io.IOException
 *             if tables cannot be opened.
 */
protected ReftableStack stack() throws IOException {
	// Lazily build the stack under the lock so concurrent callers share
	// one ReftableStack instance and one DfsReader.
	lock.lock();
	try {
		if (tableStack == null) {
			DfsObjDatabase odb = getRepository().getObjectDatabase();
			if (ctx == null) {
				// Reader is cached on this instance; presumably released
				// elsewhere (e.g. a close() method) — not visible in this
				// chunk, confirm before relying on it.
				ctx = odb.newReader();
			}
			tableStack = ReftableStack.open(ctx,
					Arrays.asList(odb.getReftables()));
		}
		return tableStack;
	} finally {
		lock.unlock();
	}
}
/**
 * Compact the pack files together.
 *
 * @param pm
 *            progress monitor to receive updates on as packing may take a
 *            while, depending on the size of the repository.
 * @throws java.io.IOException
 *             the packs cannot be compacted.
 */
public void compact(ProgressMonitor pm) throws IOException {
	if (pm == null) {
		pm = NullProgressMonitor.INSTANCE;
	}
	DfsObjDatabase objdb = repo.getObjectDatabase();
	try (DfsReader ctx = objdb.newReader()) {
		// Reftables are compacted before packs so that both outputs are
		// installed together by the single commitPack call below.
		if (reftableConfig != null && !srcReftables.isEmpty()) {
			compactReftables(ctx);
		}
		compactPacks(ctx, pm);
		List<DfsPackDescription> commit = getNewPacks();
		Collection<DfsPackDescription> remove = toPrune();
		if (!commit.isEmpty() || !remove.isEmpty()) {
			// One atomic switch: add the new packs, retire the sources.
			objdb.commitPack(commit, remove);
		}
	} finally {
		// Release the walk even when compaction failed.
		rw = null;
	}
}
// Merge the selected source reftables into one output table.
private void compactReftables(DfsReader ctx) throws IOException {
	DfsObjDatabase odb = repo.getObjectDatabase();
	// Put the tables into the database's stacking order before merging.
	Collections.sort(srcReftables, odb.reftableComparator());
	try (ReftableStack reftableStack = ReftableStack.open(ctx, srcReftables)) {
		initOutDesc(odb);
		ReftableCompactor compactor = new ReftableCompactor();
		compactor.addAll(reftableStack.readers());
		compactor.setIncludeDeletes(true);
		writeReftable(odb, outDesc, compactor);
	}
}
/**
 * Initialize DFS fsck.
 *
 * @param repository
 *            the dfs repository to check.
 */
public DfsFsck(DfsRepository repository) {
	repo = repository;
	// Cache the object database; fsck reads all packs through it.
	objdb = repo.getObjectDatabase();
}
/**
 * Automatically select pack and reftables to be included, and add them.
 * <p>
 * Packs are selected based on size, smaller packs get included while bigger
 * ones are omitted.
 *
 * @return {@code this}
 * @throws java.io.IOException
 *             existing packs cannot be read.
 */
public DfsPackCompactor autoAdd() throws IOException {
	DfsObjDatabase odb = repo.getObjectDatabase();
	for (DfsPackFile pack : odb.getPacks()) {
		DfsPackDescription desc = pack.getPackDescription();
		if (desc.getFileSize(PACK) < autoAddSize) {
			add(pack);
		} else {
			exclude(pack);
		}
	}
	if (reftableConfig != null) {
		// GC reftables are never auto-included, regardless of size.
		for (DfsReftable table : odb.getReftables()) {
			DfsPackDescription desc = table.getPackDescription();
			if (desc.getPackSource() != GC
					&& desc.getFileSize(REFTABLE) < autoAddSize) {
				add(table);
			}
		}
	}
	return this;
}
// NOTE(review): this method appears truncated in this chunk — the body
// continues beyond the visible text; only the setup statements are shown.
private void compactPacks(DfsReader ctx, ProgressMonitor pm)
		throws IOException, IncorrectObjectTypeException {
	DfsObjDatabase objdb = repo.getObjectDatabase();
	// Fresh PackConfig per compaction; index version 2 matches the value
	// used by the DfsGarbageCollector constructors in this file.
	PackConfig pc = new PackConfig(repo);
	pc.setIndexVersion(2);
// NOTE(review): this method appears truncated in this chunk — the remainder
// of the body is not visible here.
private DfsPackDescription writePack(PackSource source, PackWriter pw,
		ProgressMonitor pm, long estimatedPackSize) throws IOException {
	// Allocate a new pack description using the writer's size estimate.
	DfsPackDescription pack = repo.getObjectDatabase().newPack(source,
			estimatedPackSize);
@Override
boolean exists() throws IOException {
	// The reftable ref database exists once at least one reftable file is
	// present in the object database.
	DfsObjDatabase odb = getRepository().getObjectDatabase();
	return odb.getReftables().length > 0;
}
/**
 * Initialize a garbage collector.
 *
 * @param repository
 *            repository objects to be packed will be read from.
 */
public DfsGarbageCollector(DfsRepository repository) {
	repo = repository;
	refdb = repo.getRefDatabase();
	objdb = repo.getObjectDatabase();
	// Diamond operator (Java 7+): type arguments are inferred from the
	// field declarations; matches the other DfsGarbageCollector
	// constructor variants in this file.
	newPackDesc = new ArrayList<>(4);
	newPackStats = new ArrayList<>(4);
	newPackObj = new ArrayList<>(4);
	packConfig = new PackConfig(repo);
	packConfig.setIndexVersion(2);
}
/** {@inheritDoc} */
@Override
public BatchRefUpdate newBatchUpdate() {
	// Batch ref updates are executed against the reftable stack owned by
	// the object database.
	DfsObjDatabase odb = getRepository().getObjectDatabase();
	return new ReftableBatchRefUpdate(this, odb);
}
/**
 * Initialize a garbage collector.
 *
 * @param repository
 *            repository objects to be packed will be read from.
 */
public DfsGarbageCollector(DfsRepository repository) {
	repo = repository;
	refdb = repo.getRefDatabase();
	objdb = repo.getObjectDatabase();
	// Small initial capacity: a GC run produces only a handful of packs.
	newPackDesc = new ArrayList<>(4);
	newPackStats = new ArrayList<>(4);
	newPackObj = new ArrayList<>(4);
	packConfig = new PackConfig(repo);
	packConfig.setIndexVersion(2);
}
/** {@inheritDoc} */
@Override
public void scanForRepoChanges() throws IOException {
	// Pick up externally-made changes: reload the ref list, then drop
	// cached object/pack state.
	getRefDatabase().refresh();
	getObjectDatabase().clearCache();
}
/**
 * Obtain a handle to the stack of reftables.
 *
 * @return (possibly cached) handle to the stack.
 * @throws java.io.IOException
 *             if tables cannot be opened.
 */
protected ReftableStack stack() throws IOException {
	// Double-duty lock: guards lazy creation of both the reader and the
	// stack so every caller shares the same instances.
	lock.lock();
	try {
		if (tableStack == null) {
			DfsObjDatabase odb = getRepository().getObjectDatabase();
			if (ctx == null) {
				// NOTE(review): the cached reader's release point is not
				// visible in this chunk — verify it is closed elsewhere.
				ctx = odb.newReader();
			}
			tableStack = ReftableStack.open(ctx,
					Arrays.asList(odb.getReftables()));
		}
		return tableStack;
	} finally {
		lock.unlock();
	}
}
/** {@inheritDoc} */
@Override
public void scanForRepoChanges() throws IOException {
	// Refresh refs and clear cached objects so subsequent reads observe
	// changes made by other processes.
	getRefDatabase().refresh();
	getObjectDatabase().clearCache();
}
// Merge the selected source reftables into a single output table.
private void compactReftables(DfsReader ctx) throws IOException {
	DfsObjDatabase objdb = repo.getObjectDatabase();
	// Order the tables the way the database stacks them before compacting.
	Collections.sort(srcReftables, objdb.reftableComparator());
	try (ReftableStack stack = ReftableStack.open(ctx, srcReftables)) {
		initOutDesc(objdb);
		ReftableCompactor compact = new ReftableCompactor();
		compact.addAll(stack.readers());
		// Keep deletion records so refs shadowed by these tables remain
		// hidden after the merge.
		compact.setIncludeDeletes(true);
		writeReftable(objdb, outDesc, compact);
	}
}