/** {@inheritDoc} */
@Override
public ObjectInserter newInserter() {
	return new DfsInserter(this);
}
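// Usage sketch, not part of this class: a minimal insert-and-flush cycle
// through the public JGit API, assuming a DfsRepository instance `repo`
// is available.
//
//   try (ObjectInserter ins = repo.newObjectInserter()) {
//       ObjectId id = ins.insert(Constants.OBJ_BLOB,
//               Constants.encode("hello world"));
//       ins.flush(); // commits the pack; `id` is now readable
//   }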
/** {@inheritDoc} */
@Override
public ObjectId insert(int type, long len, InputStream in)
		throws IOException {
	byte[] buf = insertBuffer(len);
	if (len <= buf.length) {
		IO.readFully(in, buf, 0, (int) len);
		return insert(type, buf, 0, (int) len);
	}

	long offset = beginObject(type, len);
	SHA1 md = digest();
	md.update(Constants.encodedTypeString(type));
	md.update((byte) ' ');
	md.update(Constants.encodeASCII(len));
	md.update((byte) 0);

	while (0 < len) {
		int n = in.read(buf, 0, (int) Math.min(buf.length, len));
		if (n <= 0)
			throw new EOFException();
		md.update(buf, 0, n);
		packOut.compress.write(buf, 0, n);
		len -= n;
	}
	packOut.compress.finish();
	return endObject(md.toObjectId(), offset);
}
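// The digest above covers git's canonical object header, "<type> <len>\0".
// For example, an 11-byte blob hashes the bytes "blob 11\0" followed by
// the content, so this streaming path yields the same ObjectId that the
// buffered path computes via idFor().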
/**
 * Position the pack stream for a new object, starting the pack itself
 * if this is the first object written by this inserter.
 */
private long beginObject(int type, long len) throws IOException {
	if (packOut == null)
		beginPack();
	long offset = packOut.getCount();
	packOut.beginObject(type, len);
	return offset;
}
/** {@inheritDoc} */
@Override
public ObjectId insert(int type, byte[] data, int off, int len)
		throws IOException {
	ObjectId id = idFor(type, data, off, len);
	if (objectMap != null && objectMap.contains(id))
		return id;
	// Ignore unreachable (garbage) objects here.
	if (checkExisting && db.has(id, true))
		return id;

	long offset = beginObject(type, len);
	packOut.compress.write(data, off, len);
	packOut.compress.finish();
	return endObject(id, offset);
}
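// Dedup sketch, assuming an open DfsInserter `ins` as above: inserting
// identical bytes twice returns one ObjectId and writes one pack entry,
// because the second call is answered from objectMap before any write.
//
//   byte[] data = Constants.encode("hello");
//   ObjectId a = ins.insert(Constants.OBJ_BLOB, data);
//   ObjectId b = ins.insert(Constants.OBJ_BLOB, data); // no second write
//   assert a.equals(b);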
/** {@inheritDoc} */
@Override
public void flush() throws IOException {
	if (packDsc == null)
		return;

	if (packOut == null)
		throw new IOException(); // invalid state: packDsc exists but the pack stream is gone

	byte[] packHash = packOut.writePackFooter();
	packDsc.addFileExt(PACK);
	packDsc.setFileSize(PACK, packOut.getCount());
	packOut.close();
	packOut = null;

	sortObjectsById();
	PackIndex index = writePackIndex(packDsc, packHash, objectList);
	db.commitPack(Collections.singletonList(packDsc), null);
	rollback = false;

	DfsPackFile p = new DfsPackFile(cache, packDsc);
	if (index != null)
		p.setPackIndex(index);
	db.addPack(p);
	clear();
}
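// Note: flush() is the commit point. Objects live only in the in-progress
// pack stream until commitPack() and addPack() above complete; clear()
// then resets this inserter, so the next insert starts a fresh pack.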
PackIndex writePackIndex(DfsPackDescription pack, byte[] packHash,
		List<PackedObjectInfo> list) throws IOException {
	pack.setIndexVersion(INDEX_VERSION);
	pack.setObjectCount(list.size());

	// If there are fewer than 58,000 objects, the entire index fits in
	// under 2 MiB. Callers will probably need the index immediately, so
	// buffer the index in process and load from the buffer.
	PackIndex packIndex = null;
	try (TemporaryBuffer.Heap buf = maybeGetTemporaryBuffer(list);
			DfsOutputStream os = db.writeFile(pack, INDEX);
			CountingOutputStream cnt = new CountingOutputStream(os)) {
		if (buf != null) {
			index(buf, packHash, list);
			packIndex = PackIndex.read(buf.openInputStream());
			buf.writeTo(cnt, null);
		} else {
			index(cnt, packHash, list);
		}
		pack.addFileExt(INDEX);
		pack.setBlockSize(INDEX, os.blockSize());
		pack.setFileSize(INDEX, cnt.getCount());
	}
	return packIndex;
}
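// maybeGetTemporaryBuffer() is not defined in this fragment; the sketch
// below is reconstructed from an older inline variant of writePackIndex():
// buffer the index in process only when it is small enough (at most
// 58,000 objects, under 2 MiB), otherwise stream it straight to storage.
private static TemporaryBuffer.Heap maybeGetTemporaryBuffer(
		List<PackedObjectInfo> list) {
	if (list.size() <= 58000)
		return new TemporaryBuffer.Heap(2 << 20);
	return null;
}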
private byte[] insertBuffer(long len) {
	byte[] buf = buffer();
	if (len <= buf.length)
		return buf;
	if (len < db.getReaderOptions().getStreamFileThreshold()) {
		try {
			// Try to hold the whole object so the byte[] fast path applies.
			return new byte[(int) len];
		} catch (OutOfMemoryError noMem) {
			// Fall back to streaming through the reusable scratch buffer.
			return buf;
		}
	}
	return buf;
}
/** {@inheritDoc} */
@Override
public void close() {
	if (packOut != null) {
		try {
			packOut.close();
		} catch (IOException err) {
			// Ignore a close failure, the pack should be removed.
		} finally {
			packOut = null;
		}
	}
	if (rollback && packDsc != null) {
		try {
			db.rollbackPack(Collections.singletonList(packDsc));
		} finally {
			packDsc = null;
			rollback = false;
		}
	}
	clear();
}
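// Rollback sketch, assuming `repo` as above: objects inserted but never
// flushed are discarded; close() rolls back the temporary pack via
// rollbackPack() rather than committing it.
//
//   try (ObjectInserter ins = repo.newObjectInserter()) {
//       ins.insert(Constants.OBJ_BLOB, Constants.encode("scratch"));
//       // no flush(): the unfinished pack is dropped on close()
//   }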