/**
 * Whether this object is too large to obtain as a byte array.
 *
 * @return true if this object is too large to obtain as a byte array.
 *         Objects over a certain threshold should be accessed only by their
 *         {@link #openStream()} to prevent overflowing the JVM heap.
 */
public boolean isLarge() {
	// Probe by attempting to materialize the bytes; the size limit check
	// happens inside getCachedBytes().
	try {
		getCachedBytes();
	} catch (LargeObjectException overLimit) {
		return true;
	}
	return false;
}
/**
 * Obtain the cached bytes of an object, bounded at 5 MB.
 *
 * @param obj
 *            the object being read; recorded on the exception when the
 *            object is too large.
 * @param ldr
 *            loader supplying the object's content.
 * @return the object's complete content as a byte array.
 * @throws LargeObjectException
 *             the object exceeds the 5 MB in-memory limit; the exception
 *             carries {@code obj}'s id for diagnostics.
 * @throws MissingObjectException
 *             the object is not present in the repository.
 * @throws IOException
 *             the object store could not be read.
 */
byte[] getCachedBytes(RevObject obj, ObjectLoader ldr)
		throws LargeObjectException, MissingObjectException, IOException {
	try {
		return ldr.getCachedBytes(5 * MB);
	} catch (LargeObjectException overLimit) {
		// Attach the id so callers can report which object was too big.
		overLimit.setObjectId(obj);
		throw overLimit;
	}
}
/**
 * Create the stream from an existing loader's cached bytes.
 *
 * @param loader
 *            the loader to copy type and content from. NOTE(review):
 *            {@code getCachedBytes()} throws LargeObjectException for
 *            oversized objects, so this presumably expects a small
 *            (fully cached) loader — confirm with callers.
 */
public SmallStream(ObjectLoader loader) {
	this(loader.getType(), loader.getCachedBytes());
}
/**
 * {@inheritDoc}
 * <p>
 * Forwards directly to the wrapped loader returned by {@code delegate()}.
 */
@Override
public byte[] getCachedBytes() {
	return delegate().getCachedBytes();
}
/**
 * Read a blob's complete content into memory.
 *
 * @param or
 *            reader used to access the object store.
 * @param blobId
 *            id of the blob to read.
 * @return the blob's bytes, with no size limit applied.
 * @throws MissingObjectException
 *             the blob does not exist.
 * @throws IncorrectObjectTypeException
 *             the id names an object that is not a blob.
 * @throws IOException
 *             the object store could not be read.
 */
private static byte[] read(ObjectReader or, AnyObjectId blobId)
		throws MissingObjectException, IncorrectObjectTypeException,
		IOException {
	// Integer.MAX_VALUE effectively disables the in-memory size limit.
	return or.open(blobId, Constants.OBJ_BLOB)
			.getCachedBytes(Integer.MAX_VALUE);
}
/**
 * Obtain a copy of the bytes of this object.
 * <p>
 * Unlike {@link #getCachedBytes()} this method returns an array that might
 * be modified by the caller.
 *
 * @return a private copy of the bytes of this object.
 * @throws org.eclipse.jgit.errors.LargeObjectException
 *             if the object won't fit into a byte array, because
 *             {@link #isLarge()} returns true. Callers should use
 *             {@link #openStream()} instead to access the contents.
 */
public final byte[] getBytes() throws LargeObjectException {
	byte[] shared = getCachedBytes();
	// Clone so callers may freely mutate the result without corrupting
	// the cached copy handed out to other readers.
	return cloneArray(shared);
}
private void checkObjectCollision(AnyObjectId obj, int type, byte[] data) throws IOException { try { final ObjectLoader ldr = readCurs.open(obj, type); final byte[] existingData = ldr.getCachedBytes(data.length); if (!Arrays.equals(data, existingData)) { throw new IOException(MessageFormat.format( JGitText.get().collisionOn, obj.name())); } } catch (MissingObjectException notLocal) { // This is OK, we don't have a copy of the object locally // but the API throws when we try to read it as usually its // an error to read something that doesn't exist. } }
/**
 * Obtain a TreeVisit for the given tree object, reusing a pooled instance
 * from the free list when one is available.
 *
 * @param obj
 *            the tree object to visit.
 * @return a reset TreeVisit whose buffer holds the tree's raw content.
 * @throws LargeObjectException
 *             the tree is too large to load into memory.
 * @throws MissingObjectException
 *             the tree does not exist.
 * @throws IncorrectObjectTypeException
 *             the object is not a tree.
 * @throws IOException
 *             the object store could not be read.
 */
private TreeVisit newTreeVisit(RevObject obj) throws LargeObjectException,
		MissingObjectException, IncorrectObjectTypeException, IOException {
	TreeVisit visit = freeVisit;
	if (visit == null) {
		visit = new TreeVisit();
	} else {
		// Pop from the free list and clear the recycled state.
		freeVisit = visit.parent;
		visit.ptr = 0;
		visit.namePtr = 0;
		visit.nameEnd = 0;
		visit.pathLen = 0;
	}
	visit.obj = obj;
	visit.buf = reader.open(obj, OBJ_TREE).getCachedBytes();
	return visit;
}
/**
 * Reset this parser to walk through the given tree.
 *
 * @param reader
 *            reader to use during repository access.
 * @param id
 *            identity of the tree being parsed; used only in exception
 *            messages if data corruption is found.
 * @throws MissingObjectException
 *             the object supplied is not available from the repository.
 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
 *             the object supplied as an argument is not actually a tree and
 *             cannot be parsed as though it were a tree.
 * @throws java.io.IOException
 *             a loose object or pack file could not be read.
 */
public void reset(ObjectReader reader, AnyObjectId id)
		throws IncorrectObjectTypeException, IOException {
	ObjectLoader ldr = reader.open(id, OBJ_TREE);
	reset(ldr.getCachedBytes());
}
/**
 * Hash the content of an object into the table.
 *
 * @param obj
 *            loader for the object to hash.
 * @throws MissingObjectException
 *             the object does not exist.
 * @throws IOException
 *             the object content could not be read.
 * @throws TableFullException
 *             the hash table has no remaining capacity.
 */
void hash(ObjectLoader obj) throws MissingObjectException, IOException,
		TableFullException {
	if (!obj.isLarge()) {
		// Small object: the whole content fits in memory.
		byte[] content = obj.getCachedBytes();
		hash(content, 0, content.length);
	} else {
		// Too big for a byte array; stream it instead.
		hashLargeObject(obj);
	}
}
static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId) throws IOException { // PackWriter should have already pruned objects that // are above the big file threshold, so our chances of // the object being below it are very good. We really // shouldn't be here, unless the implementation is odd. return or.open(objId).getCachedBytes(config.getBigFileThreshold()); }
/**
 * Advance to the next object in the iteration.
 *
 * @return the next object, fully parsed; null when exhausted.
 * @throws MissingObjectException
 *             an object in the sequence does not exist.
 * @throws IncorrectObjectTypeException
 *             an object has an unexpected type.
 * @throws IOException
 *             the object store could not be read.
 */
@Override
public RevObject next() throws MissingObjectException,
		IncorrectObjectTypeException, IOException {
	// Drain any objects queued from a previous step first.
	if (objItr.hasNext()) {
		return objItr.next();
	}
	if (!lItr.next()) {
		return null;
	}

	ObjectId id = lItr.getObjectId();
	RevObject known = objects.get(id);
	ObjectLoader ldr = lItr.open();
	if (known == null) {
		// Never seen before; allocate and parse a fresh RevObject.
		return parseNew(id, ldr);
	}
	if (known instanceof RevCommit) {
		((RevCommit) known).parseCanonical(RevWalk.this,
				ldr.getCachedBytes());
	} else if (known instanceof RevTag) {
		((RevTag) known).parseCanonical(RevWalk.this,
				ldr.getCachedBytes());
	} else {
		// Trees and blobs carry no headers worth parsing here.
		known.flags |= PARSED;
	}
	return known;
}
// Indirect lookup: get(id) presumably maps the requested id to the id of
// the object actually holding the data — confirm against get()'s contract.
ObjectId dataId = get(id); if (dataId != null) return reader.open(dataId).getCachedBytes(sizeLimit); else return null;
/**
 * Read a tree's raw content, consulting and populating the tree cache.
 *
 * @param id
 *            id of the tree to read.
 * @return the tree's raw bytes; cached for subsequent calls.
 * @throws MissingObjectException
 *             the tree does not exist.
 * @throws IncorrectObjectTypeException
 *             the id names an object that is not a tree.
 * @throws IOException
 *             the object store could not be read.
 */
private byte[] readTree(AnyObjectId id) throws MissingObjectException,
		IncorrectObjectTypeException, IOException {
	TreeWithData cached = treeCache.get(id);
	if (cached != null) {
		return cached.buf;
	}
	// Cache miss: load without a size limit and remember for next time.
	byte[] raw = reader.open(id, OBJ_TREE).getCachedBytes(Integer.MAX_VALUE);
	treeCache.add(new TreeWithData(id, raw));
	return raw;
}
/**
 * Read a cache entry's blob content as a decoded, line-ending-normalized
 * string.
 *
 * @param entry
 *            index entry naming the blob to read.
 * @param reader
 *            reader used to access the blob.
 * @return the decoded, normalized content.
 * @throws MissingObjectException
 *             the blob does not exist.
 * @throws IOException
 *             the object store could not be read.
 */
private static String readContentAsNormalizedString(DirCacheEntry entry,
		ObjectReader reader) throws MissingObjectException, IOException {
	byte[] raw = reader.open(entry.getObjectId()).getCachedBytes();
	String decoded = RawParseUtils.decode(raw);
	return FS.detect().normalize(decoded);
}
/**
 * Load the source blob's text, applying the LFS smudge filter when the
 * path's diff attribute requests it.
 *
 * @param reader
 *            reader used to access the blob.
 * @throws IOException
 *             the blob is missing or could not be read.
 */
void loadText(ObjectReader reader) throws IOException {
	ObjectLoader blob = reader.open(sourceBlob, Constants.OBJ_BLOB);
	// Replace an LFS pointer with the real content when the path's diff
	// attribute enables it; otherwise the loader passes through unchanged.
	ObjectLoader ldr = LfsFactory.getInstance().applySmudgeFilter(
			sourceRepository, blob,
			LfsFactory.getAttributesForPath(sourceRepository,
					sourcePath.getPath(), sourceCommit)
					.get(Constants.ATTR_DIFF));
	sourceText = new RawText(ldr.getCachedBytes(Integer.MAX_VALUE));
}
private Ref toRef(ObjectReader reader, DirCacheEntry e, String name) throws IOException { int mode = e.getRawMode(); if (mode == TYPE_GITLINK) { ObjectId id = e.getObjectId(); return new ObjectIdRef.PeeledNonTag(PACKED, name, id); } if (mode == TYPE_SYMLINK) { ObjectId id = e.getObjectId(); String n = pendingBlobs != null ? pendingBlobs.get(id) : null; if (n == null) { byte[] bin = reader.open(id, OBJ_BLOB).getCachedBytes(); n = RawParseUtils.decode(bin); } Ref dst = new ObjectIdRef.Unpeeled(NEW, n, null); return new SymbolicRef(name, dst); } return null; // garbage file or something; not a reference. }
/**
 * Convert a tree entry into a Ref, when the entry represents one.
 *
 * @param reader
 *            reader used to load a symlink blob's target text.
 * @param mode
 *            raw file mode of the entry.
 * @param p
 *            tree parser positioned on the entry.
 * @return a peeled ref for a gitlink entry, a symbolic ref for a symlink
 *         entry, or null for anything else.
 * @throws IOException
 *             a symlink target blob could not be read.
 */
private static Ref toRef(ObjectReader reader, int mode,
		CanonicalTreeParser p) throws IOException {
	if (mode == TYPE_GITLINK) {
		return new ObjectIdRef.PeeledNonTag(PACKED, refName(p, false),
				p.getEntryObjectId());
	}
	if (mode != TYPE_SYMLINK) {
		return null;
	}
	// Symlink target is stored as blob content, capped at the maximum
	// symlink size.
	byte[] bin = reader.open(p.getEntryObjectId(), OBJ_BLOB)
			.getCachedBytes(MAX_SYMLINK_BYTES);
	Ref trg = new ObjectIdRef.Unpeeled(NEW, RawParseUtils.decode(bin),
			null);
	return new SymbolicRef(refName(p, false), trg);
}
// Materialize the full object content (no size cap) and record which
// object it came from; typeCode presumably drives a dispatch on the
// object's type below — confirm against the surrounding method.
visit.data = ldr.getCachedBytes(Integer.MAX_VALUE); visit.id = baseId; final int typeCode = ldr.getType();
@Override public RemoteFile readFileWithMode(String uri, String ref, String path) throws GitAPIException, IOException { File dir = FileUtils.createTempDir("jgit_", ".git", null); //$NON-NLS-1$ //$NON-NLS-2$ try (Git git = Git.cloneRepository().setBare(true).setDirectory(dir) .setURI(uri).call()) { Repository repo = git.getRepository(); ObjectId refCommitId = sha1(uri, ref); if (refCommitId == null) { throw new InvalidRefNameException(MessageFormat .format(JGitText.get().refNotResolved, ref)); } RevCommit commit = repo.parseCommit(refCommitId); TreeWalk tw = TreeWalk.forPath(repo, path, commit.getTree()); // TODO(ifrade): Cope better with big files (e.g. using // InputStream instead of byte[]) return new RemoteFile( tw.getObjectReader().open(tw.getObjectId(0)) .getCachedBytes(Integer.MAX_VALUE), tw.getFileMode(0)); } finally { FileUtils.delete(dir, FileUtils.RECURSIVE); } } }