/**
 * Exposes this record's fields by name for the schema-based serializer.
 * <p>
 * Note the two offsets are distinct: {@code CONTENT_CLAIM_OFFSET} is the separately
 * tracked {@code contentClaimOffset} field, while {@code RESOURCE_CLAIM_OFFSET} is
 * {@code contentClaim.getOffset()} — the claim's offset within its resource claim.
 *
 * @param fieldName the schema field name to look up; must not be {@code null}
 * @return the field's value, or {@code null} if the name is not recognized
 */
@Override
public Object getFieldValue(final String fieldName) {
    if (fieldName.equals(ContentClaimSchema.RESOURCE_CLAIM)) {
        return resourceClaimFieldMap;
    }
    if (fieldName.equals(ContentClaimSchema.CONTENT_CLAIM_LENGTH)) {
        return contentClaim.getLength();
    }
    if (fieldName.equals(ContentClaimSchema.CONTENT_CLAIM_OFFSET)) {
        return contentClaimOffset;
    }
    if (fieldName.equals(ContentClaimSchema.RESOURCE_CLAIM_OFFSET)) {
        return contentClaim.getOffset();
    }
    return null;
}
/**
 * Writes a content claim to the stream, preceded by a one-byte presence marker.
 * <p>
 * Field order on the wire: marker, resource-claim id/container/section (via
 * {@code writeString}), claim offset, claim length, the given FlowFile content
 * offset, and the loss-tolerant flag. Readers must consume in the same order.
 *
 * @param claim  the claim to serialize, or {@code null} for "no content"
 * @param offset the FlowFile's offset into the claim's content
 * @param out    the destination stream
 * @throws IOException if writing to the stream fails
 */
private void serializeContentClaim(final ContentClaim claim, final long offset, final DataOutputStream out) throws IOException {
    if (claim == null) {
        // Marker 0: no claim fields follow.
        out.write(0);
        return;
    }

    // Marker 1: full claim record follows.
    out.write(1);

    final ResourceClaim resource = claim.getResourceClaim();
    writeString(resource.getId(), out);
    writeString(resource.getContainer(), out);
    writeString(resource.getSection(), out);
    out.writeLong(claim.getOffset());
    out.writeLong(claim.getLength());
    out.writeLong(offset);
    out.writeBoolean(resource.isLossTolerant());
}
// Claims are write-once from the caller's perspective: a positive length means
// content has already been committed to this claim, so reject further writes.
// NOTE(review): fragment — the enclosing method and closing braces are outside this view.
if (claim.getLength() > 0) { throw new IllegalArgumentException("Cannot write to " + claim + " because it has already been written to.");
@Override public InputStream read(final ContentClaim claim) throws IOException { if (claim == null) { return new ByteArrayInputStream(new byte[0]); } final Path path = getPath(claim, true); final FileInputStream fis = new FileInputStream(path.toFile()); if (claim.getOffset() > 0L) { try { StreamUtils.skip(fis, claim.getOffset()); } catch (IOException ioe) { IOUtils.closeQuietly(fis); throw ioe; } } // A claim length of -1 indicates that the claim is still being written to and we don't know // the length. In this case, we don't limit the Input Stream. If the Length has been populated, though, // it is possible that the Length could then be extended. However, we do want to avoid ever allowing the // stream to read past the end of the Content Claim. To accomplish this, we use a LimitedInputStream but // provide a LongSupplier for the length instead of a Long value. this allows us to continue reading until // we get to the end of the Claim, even if the Claim grows. This may happen, for instance, if we obtain an // InputStream for this claim, then read from it, write more to the claim, and then attempt to read again. In // such a case, since we have written to that same Claim, we should still be able to read those bytes. if (claim.getLength() >= 0) { return new LimitedInputStream(fis, claim::getLength); } else { return fis; } }
// Wire order: section, claim offset, claim length, the FlowFile's own offset into
// the claim, then the loss-tolerant flag. Any reader must consume in this order.
// NOTE(review): fragment — the enclosing serialization method is outside this view.
out.writeUTF(resourceClaim.getSection()); out.writeLong(claim.getOffset()); out.writeLong(claim.getLength()); out.writeLong(flowFile.getContentClaimOffset()); out.writeBoolean(resourceClaim.isLossTolerant());
/**
 * Closes this session-managed OutputStream: records the bytes written against the
 * session, unregisters the stream, flushes, releases the temporary claim, and
 * replaces the FlowFile record's working copy with one pointing at the new content.
 * Idempotent — a second close() is a no-op.
 */
@Override public void close() throws IOException {
    if (closed) { return; }
    closed = true;
    // Writing is finished for this FlowFile; allow re-entrant writes again.
    writeRecursionSet.remove(sourceFlowFile);
    // Tally this stream's byte count into the session-wide counter.
    final long bytesWritten = countingOut.getBytesWritten();
    StandardProcessSession.this.bytesWritten += bytesWritten;
    final OutputStream removed = openOutputStreams.remove(sourceFlowFile);
    if (removed == null) {
        // Bookkeeping invariant violated: the stream should have been registered on open.
        LOG.error("Closed Session's OutputStream but there was no entry for it in the map; sourceFlowFile={}; map={}", sourceFlowFile, openOutputStreams);
    }
    flush();
    removeTemporaryClaim(record);
    // The FlowFile's content is the LAST bytesWritten bytes of the (possibly shared,
    // append-only) claim, hence offset = claim length - bytesWritten, floored at 0.
    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
        .fromFlowFile(record.getCurrent())
        .contentClaim(updatedClaim)
        .contentClaimOffset(Math.max(0, updatedClaim.getLength() - bytesWritten))
        .size(bytesWritten)
        .build();
    record.setWorking(newFile);
} };
// Rebuild the FlowFile record so it points at the freshly written content: the
// FlowFile's bytes are the LAST writtenToFlowFile bytes of the (append-only) claim,
// hence offset = claim length - bytes written, floored at 0.
// Use the long literal 0L so the call is unambiguously Math.max(long, long),
// matching the identical builder chains elsewhere in this file.
.fromFlowFile(record.getCurrent()) .contentClaim(newClaim) .contentClaimOffset(Math.max(0L, newClaim.getLength() - writtenToFlowFile)) .size(writtenToFlowFile) .build();
// Rebuild the FlowFile record so it points at the freshly written content: the
// FlowFile's bytes are the LAST writtenToFlowFile bytes of the (append-only) claim,
// hence offset = claim length - bytes written, floored at 0L.
// NOTE(review): fragment — the Builder construction preceding this chain is outside this view.
.fromFlowFile(record.getCurrent()) .contentClaim(newClaim) .contentClaimOffset(Math.max(0L, newClaim.getLength() - writtenToFlowFile)) .size(writtenToFlowFile) .build();
/**
 * Exposes this record's fields by name for the schema-based serializer.
 * Returns {@code null} for unrecognized field names.
 * <p>
 * Note the two offsets are distinct: {@code CONTENT_CLAIM_OFFSET} is the separately
 * tracked {@code contentClaimOffset} field, while {@code RESOURCE_CLAIM_OFFSET} is
 * {@code contentClaim.getOffset()} — the claim's offset within its resource claim.
 */
@Override public Object getFieldValue(final String fieldName) { switch (fieldName) { case ContentClaimSchema.RESOURCE_CLAIM: return resourceClaimFieldMap; case ContentClaimSchema.CONTENT_CLAIM_LENGTH: return contentClaim.getLength(); case ContentClaimSchema.CONTENT_CLAIM_OFFSET: return contentClaimOffset; case ContentClaimSchema.RESOURCE_CLAIM_OFFSET: return contentClaim.getOffset(); default: return null; } }
/**
 * Writes a content claim to the stream, preceded by a one-byte presence marker
 * (0 = no claim, 1 = claim fields follow).
 * <p>
 * Wire order after the marker: resource-claim id/container/section (via
 * {@code writeString}), claim offset, claim length, the given FlowFile content
 * offset, and the loss-tolerant flag. Readers must consume in the same order.
 */
private void serializeContentClaim(final ContentClaim claim, final long offset, final DataOutputStream out) throws IOException { if (claim == null) { out.write(0); } else { out.write(1); final ResourceClaim resourceClaim = claim.getResourceClaim(); writeString(resourceClaim.getId(), out); writeString(resourceClaim.getContainer(), out); writeString(resourceClaim.getSection(), out); out.writeLong(claim.getOffset()); out.writeLong(claim.getLength()); out.writeLong(offset); out.writeBoolean(resourceClaim.isLossTolerant()); } }