/**
 * Get the description of the pack.
 *
 * @return the description of the pack.
 */
public DfsPackDescription getPackDescription() {
	// Simple delegate to the wrapped pack.
	DfsPackDescription desc = pack.getPackDescription();
	return desc;
}
// Sum of the object counts of every pack selected as compaction input.
private int objectsBefore() {
	int total = 0;
	for (DfsPackFile src : packsBefore) {
		total += src.getPackDescription().getObjectCount();
	}
	return total;
}
// Collect the descriptions of every pack (and, when reftables are in
// use, every reftable) that should be deleted after this pass, plus any
// garbage packs whose expiry time has passed.
private Set<DfsPackDescription> toPrune() {
	Set<DfsPackDescription> result = new HashSet<>();
	for (DfsPackFile p : packsBefore) {
		result.add(p.getPackDescription());
	}
	if (reftableConfig != null) {
		for (DfsReftable t : reftablesBefore) {
			result.add(t.getPackDescription());
		}
	}
	for (DfsPackFile p : expiredGarbagePacks) {
		result.add(p.getPackDescription());
	}
	return result;
}
private long estimatePackSize() { // Every pack file contains 12 bytes of header and 20 bytes of trailer. // Include the final pack file header and trailer size here and ignore // the same from individual pack files. long size = 32; for (DfsPackFile pack : srcPacks) { size += pack.getPackDescription().getFileSize(PACK) - 32; } return size; }
/** @return last modified time of all packs, in milliseconds. */
public long getLastModified() {
	// Lazily computed; a negative cached value means "not yet computed".
	if (lastModified < 0) {
		long newest = 0;
		for (DfsPackFile p : packs) {
			long t = p.getPackDescription().getLastModified();
			if (t > newest) {
				newest = t;
			}
		}
		lastModified = newest;
	}
	return lastModified;
}
/** {@inheritDoc} */
@Override
public boolean wasDeltaAttempted() {
	// Only packs produced by a garbage collection pass are treated as
	// having had delta compression attempted on their objects.
	switch (pack.getPackDescription().getPackSource()) {
	case GC:
	case GC_REST:
	case GC_TXN:
		return true;
	default:
		return false;
	}
}
}
/**
 * Get all of the source packs that fed into this compaction.
 *
 * @return all of the source packs that fed into this compaction.
 */
public Collection<DfsPackDescription> getSourcePacks() {
	Set<DfsPackDescription> result = new HashSet<>();
	for (DfsPackFile p : srcPacks) {
		result.add(p.getPackDescription());
	}
	for (DfsReftable t : srcReftables) {
		result.add(t.getPackDescription());
	}
	return result;
}
// List the non-garbage packs, ordered for representation selection.
private List<DfsPackFile> sortPacksForSelectRepresentation()
		throws IOException {
	DfsPackFile[] all = db.getPacks();
	List<DfsPackFile> reusable = new ArrayList<>(all.length);
	for (DfsPackFile p : all) {
		// Unreachable garbage packs are handled separately.
		if (UNREACHABLE_GARBAGE != p.getPackDescription().getPackSource()) {
			reusable.add(p);
		}
	}
	Collections.sort(reusable, PACK_SORT_FOR_REUSE);
	return reusable;
}
// List only the unreachable-garbage packs.
private List<DfsPackFile> garbagePacksForSelectRepresentation()
		throws IOException {
	DfsPackFile[] all = db.getPacks();
	List<DfsPackFile> garbage = new ArrayList<>(all.length);
	for (DfsPackFile p : all) {
		if (UNREACHABLE_GARBAGE == p.getPackDescription().getPackSource()) {
			garbage.add(p);
		}
	}
	return garbage;
}
// Determine which descriptions may be deleted after the compaction.
// A description is kept out of the prune set when only one of its two
// roles (pack vs. reftable) was actually consumed by this compaction.
private Collection<DfsPackDescription> toPrune() {
	Set<DfsPackDescription> packs = new HashSet<>();
	for (DfsPackFile pack : srcPacks) {
		packs.add(pack.getPackDescription());
	}
	Set<DfsPackDescription> reftables = new HashSet<>();
	for (DfsReftable table : srcReftables) {
		reftables.add(table.getPackDescription());
	}
	// Drop pack descriptions carrying a reftable that was not compacted.
	packs.removeIf(d -> d.hasFileExt(REFTABLE) && !reftables.contains(d));
	// Drop reftable descriptions carrying a pack that was not compacted.
	// (Checked against the already-filtered pack set, as before.)
	reftables.removeIf(d -> d.hasFileExt(PACK) && !packs.contains(d));
	Set<DfsPackDescription> toPrune = new HashSet<>(packs);
	toPrune.addAll(reftables);
	return toPrune;
}
// Partition the existing packs into compaction inputs and expired
// garbage. Non-garbage packs are always inputs; garbage packs are
// either expired (to be deleted) or coalesceable (folded back in).
private void readPacksBefore() throws IOException {
	DfsPackFile[] all = objdb.getPacks();
	packsBefore = new ArrayList<>(all.length);
	expiredGarbagePacks = new ArrayList<>(all.length);
	long now = SystemReader.getInstance().getCurrentTime();
	for (DfsPackFile p : all) {
		DfsPackDescription desc = p.getPackDescription();
		if (desc.getPackSource() != UNREACHABLE_GARBAGE) {
			packsBefore.add(p);
		} else if (packIsExpiredGarbage(desc, now)) {
			expiredGarbagePacks.add(p);
		} else if (packIsCoalesceableGarbage(desc, now)) {
			packsBefore.add(p);
		}
	}
}
/**
 * Verify each non-garbage pack in the object database, accumulating any
 * missing objects or corrupt indices into {@code errors}, then check the
 * .gitmodules entries.
 *
 * @param pm
 *            progress monitor for the verification.
 * @param errors
 *            receiver for problems found.
 * @throws IOException
 *             packs cannot be listed or opened.
 */
private void checkPacks(ProgressMonitor pm, FsckError errors)
		throws IOException {
	// FileNotFoundException was previously declared redundantly; it is a
	// subclass of IOException, so callers are unaffected by its removal.
	try (DfsReader ctx = objdb.newReader()) {
		for (DfsPackFile pack : objdb.getPacks()) {
			DfsPackDescription packDesc = pack.getPackDescription();
			// Unreachable garbage is not expected to be consistent;
			// skip it rather than report spurious errors.
			if (packDesc.getPackSource() == PackSource.UNREACHABLE_GARBAGE) {
				continue;
			}
			try (ReadableChannel rc = objdb.openFile(packDesc, PACK)) {
				verifyPack(pm, errors, ctx, pack, rc);
			} catch (MissingObjectException e) {
				errors.getMissingObjects().add(e.getObjectId());
			} catch (CorruptPackIndexException e) {
				// Reuse packDesc instead of re-fetching the description.
				errors.getCorruptIndices().add(new CorruptIndex(
						packDesc.getFileName(INDEX),
						e.getErrorType()));
			}
		}
	}
	checkGitModules(pm, errors);
}
// Parse and verify a single pack file's contents and index.
private void verifyPack(ProgressMonitor pm, FsckError errors, DfsReader ctx,
		DfsPackFile pack, ReadableChannel ch)
		throws IOException, CorruptPackIndexException {
	FsckPackParser parser = new FsckPackParser(objdb, ch);
	parser.setObjectChecker(objChecker);
	parser.overwriteObjectCount(pack.getPackDescription().getObjectCount());
	parser.parse(pm);
	errors.getCorruptObjects().addAll(parser.getCorruptObjects());
	parser.verifyIndex(pack.getPackIndex(ctx));
}
// Evict the given pack from the cache, keyed by its description.
void remove(DfsPackFile pack) {
	DfsPackDescription key = pack.getPackDescription();
	synchronized (packCache) {
		packCache.remove(key);
	}
}
/**
 * Automatically select pack and reftables to be included, and add them.
 * <p>
 * Packs are selected based on size, smaller packs get included while bigger
 * ones are omitted.
 *
 * @return {@code this}
 * @throws java.io.IOException
 *             existing packs cannot be read.
 */
public DfsPackCompactor autoAdd() throws IOException {
	DfsObjDatabase objdb = repo.getObjectDatabase();
	for (DfsPackFile pack : objdb.getPacks()) {
		if (pack.getPackDescription().getFileSize(PACK) < autoAddSize) {
			add(pack);
		} else {
			exclude(pack);
		}
	}
	if (reftableConfig != null) {
		for (DfsReftable table : objdb.getReftables()) {
			DfsPackDescription desc = table.getPackDescription();
			// GC reftables are excluded regardless of size.
			if (desc.getPackSource() != GC
					&& desc.getFileSize(REFTABLE) < autoAddSize) {
				add(table);
			}
		}
	}
	return this;
}
// Total object count across all input packs.
private int objectsBefore() {
	int count = 0;
	for (DfsPackFile before : packsBefore) {
		count += before.getPackDescription().getObjectCount();
	}
	return count;
}
private long estimatePackSize() { // Every pack file contains 12 bytes of header and 20 bytes of trailer. // Include the final pack file header and trailer size here and ignore // the same from individual pack files. long size = 32; for (DfsPackFile pack : srcPacks) { size += pack.getPackDescription().getFileSize(PACK) - 32; } return size; }
// Newest last-modified time among GC and GC_REST packs; 0 if none.
private static long mostRecentGC(DfsPackFile[] packs) {
	long latest = 0;
	for (DfsPackFile p : packs) {
		DfsPackDescription desc = p.getPackDescription();
		if (desc.getPackSource() == GC || desc.getPackSource() == GC_REST) {
			if (desc.getLastModified() > latest) {
				latest = desc.getLastModified();
			}
		}
	}
	return latest;
}
// NOTE(review): fragment of a larger method — the for-loop body continues
// beyond this excerpt, so only comments are added here.
PackIndex oldIdx = oldPack.getPackIndex(ctx);
PackReverseIndex oldRevIdx = oldPack.getReverseIdx(ctx);
// Highest usable offset: the file size minus the 20-byte pack trailer.
long maxOffset = oldPack.getPackDescription().getFileSize(PACK) - 20; // pack size - trailer size.
for (PackIndex.MutableEntry ent : oldIdx) {
// Run the fsck pack parser over one pack and record any corruption.
private void verifyPack(ProgressMonitor pm, FsckError errors, DfsReader ctx,
		DfsPackFile pack, ReadableChannel ch)
		throws IOException, CorruptPackIndexException {
	FsckPackParser fsck = new FsckPackParser(objdb, ch);
	fsck.setObjectChecker(objChecker);
	// Use the description's count so truncated packs are detected.
	fsck.overwriteObjectCount(pack.getPackDescription().getObjectCount());
	fsck.parse(pm);
	errors.getCorruptObjects().addAll(fsck.getCorruptObjects());
	fsck.verifyIndex(pack.getPackIndex(ctx));
}