/**
 * If the compressed size of the current entry is included in the entry header
 * and there are any outstanding bytes in the underlying stream, then
 * this returns true.
 *
 * @return true, if current entry is determined to have outstanding bytes, false otherwise
 */
private boolean currentEntryHasOutstandingBytes() {
    // An entry that uses a data descriptor did not announce its compressed
    // size up front, so we cannot tell how much is left to read.
    final boolean sizeKnownFromHeader = !current.hasDataDescriptor;
    final boolean notFullyConsumed =
        current.bytesReadFromStream <= current.entry.getCompressedSize();
    return notFullyConsumed && sizeKnownFromHeader;
}
/**
 * Tells whether the entry needs Zip64 because either its size or its
 * compressed size does not fit into a classic 32-bit ZIP field.
 *
 * <p>NOTE(review): the method name contains a typo ("Lage" instead of
 * "Large"); it is kept unchanged for compatibility with existing callers.</p>
 *
 * @param zipArchiveEntry the entry to inspect
 * @return true if either size is at or above the Zip64 threshold
 */
private boolean isTooLageForZip32(final ZipArchiveEntry zipArchiveEntry) {
    if (zipArchiveEntry.getSize() >= ZIP64_MAGIC) {
        return true;
    }
    return zipArchiveEntry.getCompressedSize() >= ZIP64_MAGIC;
}
/**
 * Reads and discards all remaining compressed data of the current entry
 * from the underlying stream.
 *
 * @throws EOFException if the stream ends before the entry's declared
 *         compressed size has been consumed (truncated archive)
 * @throws IOException on any other read error
 */
private void drainCurrentEntryData() throws IOException {
    long outstanding = current.entry.getCompressedSize() - current.bytesReadFromStream;
    while (outstanding > 0) {
        // Read in buffer-sized chunks; smaller on the final iteration.
        final int chunk = (int) Math.min(buf.capacity(), outstanding);
        final long read = in.read(buf.array(), 0, chunk);
        if (read < 0) {
            throw new EOFException("Truncated ZIP entry: "
                + ArchiveUtils.sanitize(current.entry.getName()));
        }
        count(read);
        outstanding -= read;
    }
}
/**
 * Expose the raw stream of the archive entry (compressed form).
 *
 * <p>This method does not relate to how/if we understand the payload in the
 * stream, since we really only intend to move it on to somewhere else.</p>
 *
 * @param ze The entry to get the stream for
 * @return The raw input stream containing (possibly) compressed data,
 *         or null if the entry does not belong to this archive or its
 *         data offset has not been determined.
 * @since 1.11
 */
public InputStream getRawInputStream(final ZipArchiveEntry ze) {
    if (!(ze instanceof Entry)) {
        return null;
    }
    final long start = ze.getDataOffset();
    if (start == -1) {
        // -1 == EntryStreamOffsets.OFFSET_UNKNOWN: the local file header
        // has not been parsed yet, so the data cannot be located; returning
        // a stream bounded at a bogus position would silently yield garbage
        // (see COMPRESS-462).
        return null;
    }
    return createBoundedInputStream(start, ze.getCompressedSize());
}
/**
 * Whether to add a Zip64 extended information extra field to the
 * local file header.
 *
 * <p>Returns true if</p>
 *
 * <ul>
 * <li>mode is Always</li>
 * <li>or we already know it is going to be needed</li>
 * <li>or the size is unknown and we can ensure it won't hurt
 * other implementations if we add it (i.e. we can erase its
 * usage)</li>
 * </ul>
 */
private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
    return mode == Zip64Mode.Always
        || entry.getSize() >= ZIP64_MAGIC
        || entry.getCompressedSize() >= ZIP64_MAGIC
        || (entry.getSize() == ArchiveEntry.SIZE_UNKNOWN
            && channel != null && mode != Zip64Mode.Never);
}
private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException { final EntryMetaData entryMetaData = metaData.get(ze); final boolean needsZip64Extra = hasZip64Extra(ze) || ze.getCompressedSize() >= ZIP64_MAGIC || ze.getSize() >= ZIP64_MAGIC || entryMetaData.offset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always; if (needsZip64Extra && zip64Mode == Zip64Mode.Never) { // must be the offset that is too big, otherwise an // exception would have been throw in putArchiveEntry or // closeArchiveEntry throw new Zip64RequiredException(Zip64RequiredException .ARCHIVE_TOO_BIG_MESSAGE); } handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra); return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra); }
/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
    // Known size: nothing else to check.
    if (entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) {
        return true;
    }
    // Deflate variants can find the end of the data on their own.
    if (entry.getMethod() == ZipEntry.DEFLATED
        || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode()) {
        return true;
    }
    // STORED with a data descriptor is only usable when explicitly allowed.
    return entry.getGeneralPurposeBit().usesDataDescriptor()
        && allowStoredEntriesWithDataDescriptor
        && entry.getMethod() == ZipEntry.STORED;
}
/** * If the entry needs Zip64 extra information inside the central * directory then configure its data. */ private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, final boolean needsZip64Extra) { if (needsZip64Extra) { final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze); if (ze.getCompressedSize() >= ZIP64_MAGIC || ze.getSize() >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize())); z64.setSize(new ZipEightByteInteger(ze.getSize())); } else { // reset value that may have been set for LFH z64.setCompressedSize(null); z64.setSize(null); } if (lfhOffset >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset)); } ze.setExtra(); } }
new BufferedInputStream(createBoundedInputStream(start, ze.getCompressedSize())); //NOSONAR switch (ZipMethod.getMethodByCode(ze.getMethod())) { case STORED:
&& getSize() == other.getSize() && getCrc() == other.getCrc() && getCompressedSize() == other.getCompressedSize() && Arrays.equals(getCentralDirectoryExtra(), other.getCentralDirectoryExtra())
if (ze.getCompressedSize() >= ZIP64_MAGIC || ze.getSize() >= ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET); } else { putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET); putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
/**
 * Writes the data descriptor entry.
 * @param ze the entry to write
 * @throws IOException on error
 */
protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException {
    if (!usesDataDescriptor(ze.getMethod(), false)) {
        return;
    }
    writeCounted(DD_SIG);
    writeCounted(ZipLong.getBytes(ze.getCrc()));
    if (hasZip64Extra(ze)) {
        // Zip64: sizes are written as eight-byte values
        writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize()));
        writeCounted(ZipEightByteInteger.getBytes(ze.getSize()));
    } else {
        writeCounted(ZipLong.getBytes(ze.getCompressedSize()));
        writeCounted(ZipLong.getBytes(ze.getSize()));
    }
}
/** * Adds an archive entry with a raw input stream. * * If crc, size and compressed size are supplied on the entry, these values will be used as-is. * Zip64 status is re-established based on the settings in this stream, and the supplied value * is ignored. * * The entry is put and closed immediately. * * @param entry The archive entry to add * @param rawStream The raw input stream of a different entry. May be compressed/encrypted. * @throws IOException If copying fails */ public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) throws IOException { final ZipArchiveEntry ae = new ZipArchiveEntry(entry); if (hasZip64Extra(ae)) { // Will be re-added as required. this may make the file generated with this method // somewhat smaller than standard mode, // since standard mode is unable to remove the zip 64 header. ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); } final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN; putArchiveEntry(ae, is2PhaseSource); copyFromZipInputStream(rawStream); closeCopiedEntry(is2PhaseSource); }
ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET); } else if (phased) { putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET); putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET); } else if (zipMethod == DEFLATED || channel != null) {
/** * Throws an exception if the size is unknown for a stored entry * that is written to a non-seekable output or the entry is too * big to be written without Zip64 extra but the mode has been set * to Never. */ private void validateSizeInformation(final Zip64Mode effectiveMode) throws ZipException { // Size/CRC not required if SeekableByteChannel is used if (entry.entry.getMethod() == STORED && channel == null) { if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) { throw new ZipException("uncompressed size is required for" + " STORED method when not writing to a" + " file"); } if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) { throw new ZipException("crc checksum is required for STORED" + " method when not writing to a file"); } entry.entry.setCompressedSize(entry.entry.getSize()); } if ((entry.entry.getSize() >= ZIP64_MAGIC || entry.entry.getCompressedSize() >= ZIP64_MAGIC) && effectiveMode == Zip64Mode.Never) { throw new Zip64RequiredException(Zip64RequiredException .getEntryTooBigMessage(entry.entry)); } }
if (z64 != null) { final boolean hasUncompressedSize = ze.getSize() == ZIP64_MAGIC; final boolean hasCompressedSize = ze.getCompressedSize() == ZIP64_MAGIC; final boolean hasRelativeHeaderOffset = ze.getLocalHeaderOffset() == ZIP64_MAGIC; ze.setCompressedSize(z64.getCompressedSize().getLongValue()); } else if (hasUncompressedSize) { z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
writeOut(ZipLong.getBytes(entry.entry.getCrc())); if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) { writeOut(ZipLong.getBytes(entry.entry.getCompressedSize())); writeOut(ZipLong.getBytes(entry.entry.getSize())); } else { writeOut(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()));
compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize()); } else if (entry.entry.getMethod() == STORED && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) { if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) { InputStream bis = new BoundedInputStream(in, current.entry.getCompressedSize()); switch (m) { case UNSHRINKING:
/**
 * Whether the compressed size for the entry is either known or
 * not required by the compression method being used.
 */
private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) {
    final boolean sizeIsKnown =
        entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
    // Deflate variants locate the end of the compressed data themselves.
    final boolean selfTerminatingMethod =
        entry.getMethod() == ZipEntry.DEFLATED
            || entry.getMethod() == ZipMethod.ENHANCED_DEFLATED.getCode();
    // STORED + data descriptor is only acceptable when explicitly allowed.
    final boolean storedWithDescriptor =
        entry.getGeneralPurposeBit().usesDataDescriptor()
            && allowStoredEntriesWithDataDescriptor
            && entry.getMethod() == ZipEntry.STORED;
    return sizeIsKnown || selfTerminatingMethod || storedWithDescriptor;
}