/**
 * Two claims are equal when they share the same backing Resource Claim and the
 * same offset into it. Length is deliberately not part of the comparison.
 */
@Override
public boolean equals(final Object obj) {
    if (obj == this) {
        return true;
    }
    // instanceof is false for null, so no separate null check is needed.
    if (!(obj instanceof ContentClaim)) {
        return false;
    }
    final ContentClaim that = (ContentClaim) obj;
    return offset == that.getOffset() && resourceClaim.equals(that.getResourceClaim());
}
/**
 * Writes the given Content Claim to the stream, preceded by a single-byte presence
 * marker: 0 means no claim follows; 1 means the claim fields follow.
 *
 * The field order written here (id, container, section, claim offset, claim length,
 * record offset, loss tolerance) must match the corresponding deserializer.
 *
 * @param claim  the claim to serialize, or null if the record has no content
 * @param offset the FlowFile's offset into the claim
 * @param out    the stream to write to
 * @throws IOException if the underlying stream fails
 */
private void serializeContentClaim(final ContentClaim claim, final long offset, final DataOutputStream out) throws IOException {
    if (claim == null) {
        out.write(0);
        return;
    }

    out.write(1);

    final ResourceClaim resource = claim.getResourceClaim();
    writeString(resource.getId(), out);
    writeString(resource.getContainer(), out);
    writeString(resource.getSection(), out);

    out.writeLong(claim.getOffset());
    out.writeLong(claim.getLength());
    out.writeLong(offset);
    out.writeBoolean(resource.isLossTolerant());
}
/**
 * Resolves a schema field name to its value. Unknown field names yield null.
 * Like the original switch, a null fieldName results in a NullPointerException.
 */
@Override
public Object getFieldValue(final String fieldName) {
    if (fieldName.equals(ContentClaimSchema.RESOURCE_CLAIM)) {
        return resourceClaimFieldMap;
    }
    if (fieldName.equals(ContentClaimSchema.CONTENT_CLAIM_LENGTH)) {
        return contentClaim.getLength();
    }
    if (fieldName.equals(ContentClaimSchema.CONTENT_CLAIM_OFFSET)) {
        return contentClaimOffset;
    }
    if (fieldName.equals(ContentClaimSchema.RESOURCE_CLAIM_OFFSET)) {
        return contentClaim.getOffset();
    }
    return null;
}
/**
 * Returns the claimant count for the Resource Claim backing the given Content
 * Claim, or 0 when either the Content Claim or its Resource Claim is absent.
 */
private int getClaimantCount(final ContentClaim claim) {
    final ResourceClaim backing = (claim == null) ? null : claim.getResourceClaim();
    return (backing == null) ? 0 : claimManager.getClaimantCount(backing);
}
// NOTE(review): this snippet appears truncated — the builder chain starting at
// ".fromFlowFile(...)" is missing its receiver (presumably a
// "new StandardFlowFileRecord.Builder()" — confirm against the full source).
// Flushes cached data for the claim being replaced, then rebuilds the FlowFile
// record pointing at the new claim. The content-claim offset is clamped to >= 0;
// presumably the claim may already hold other content before this FlowFile's
// bytes — TODO confirm.
claimCache.flush(currClaim.getResourceClaim()); .fromFlowFile(record.getCurrent()) .contentClaim(newClaim) .contentClaimOffset(Math.max(0L, newClaim.getLength() - writtenToFlowFile)) .size(writtenToFlowFile) .build();
// Guard against writing to a claim that already has content: a positive length
// means bytes were already written to it.
// NOTE(review): snippet is truncated — the closing braces are not visible here.
if (claim.getLength() > 0) { throw new IllegalArgumentException("Cannot write to " + claim + " because it has already been written to.");
// NOTE(review): truncated tail of a comparator — the branches for claim1 == null
// and for equal claims are outside this view.
// When both claims are present, order primarily by the claims' own comparison.
return 1; } else if (claim1 != null && claim2 != null) { final int claimComparison = claim1.compareTo(claim2); if (claimComparison != 0) { return claimComparison;
/**
 * Flushes any data cached for the given Content Claim by delegating to the
 * Resource-Claim-level flush. A null claim is a no-op.
 *
 * @throws IOException if the underlying flush fails
 */
public void flush(final ContentClaim contentClaim) throws IOException {
    if (contentClaim != null) {
        flush(contentClaim.getResourceClaim());
    }
}
// Closes this session-managed OutputStream exactly once: records the bytes
// written, unregisters the stream, flushes, drops the temporary claim, and
// rewrites the FlowFile record to point at the updated claim.
@Override
public void close() throws IOException {
    // Idempotent: subsequent close() calls are no-ops.
    if (closed) {
        return;
    }
    closed = true;
    // The FlowFile is no longer being written to, so writes may recurse again.
    writeRecursionSet.remove(sourceFlowFile);
    // Accumulate this stream's byte count into the session-wide total.
    final long bytesWritten = countingOut.getBytesWritten();
    StandardProcessSession.this.bytesWritten += bytesWritten;
    final OutputStream removed = openOutputStreams.remove(sourceFlowFile);
    if (removed == null) {
        // Bookkeeping mismatch: the stream should always be in the map until closed.
        LOG.error("Closed Session's OutputStream but there was no entry for it in the map; sourceFlowFile={}; map={}", sourceFlowFile, openOutputStreams);
    }
    flush();
    removeTemporaryClaim(record);
    // Rebuild the record against the updated claim. The offset is clamped to >= 0;
    // presumably the claim may already contain earlier content — TODO confirm.
    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
        .fromFlowFile(record.getCurrent())
        .contentClaim(updatedClaim)
        .contentClaimOffset(Math.max(0, updatedClaim.getLength() - bytesWritten))
        .size(bytesWritten)
        .build();
    record.setWorking(newFile);
}
};
/**
 * Orders Content Claims first by their backing Resource Claim and then, within
 * the same Resource Claim, by offset.
 */
@Override
public int compareTo(final ContentClaim o) {
    final int byResource = resourceClaim.compareTo(o.getResourceClaim());
    return (byResource != 0) ? byResource : Long.compare(offset, o.getOffset());
}
/**
 * A claim may be destroyed only when it has a backing Resource Claim that is no
 * longer in use; a null Content Claim or Resource Claim is never destructable.
 */
private boolean isDestructable(final ContentClaim claim) {
    if (claim == null) {
        return false;
    }
    final ResourceClaim backing = claim.getResourceClaim();
    return backing != null && !backing.isInUse();
}
// NOTE(review): truncated else-branch of a serializer — the null-claim branch is
// outside this view. Writes a presence flag followed by the resource-claim
// identity, the claim's offset/length, the FlowFile's offset into the claim, and
// the loss-tolerance flag. Field order must match the deserializer.
} else { out.writeBoolean(true); final ResourceClaim resourceClaim = claim.getResourceClaim(); out.writeUTF(resourceClaim.getId()); out.writeUTF(resourceClaim.getContainer()); out.writeUTF(resourceClaim.getSection()); out.writeLong(claim.getOffset()); out.writeLong(claim.getLength()); out.writeLong(flowFile.getContentClaimOffset()); out.writeBoolean(resourceClaim.isLossTolerant());
@Override public InputStream read(final ContentClaim claim) throws IOException { if (claim == null) { return new ByteArrayInputStream(new byte[0]); } final Path path = getPath(claim, true); final FileInputStream fis = new FileInputStream(path.toFile()); if (claim.getOffset() > 0L) { try { StreamUtils.skip(fis, claim.getOffset()); } catch (IOException ioe) { IOUtils.closeQuietly(fis); throw ioe; } } // A claim length of -1 indicates that the claim is still being written to and we don't know // the length. In this case, we don't limit the Input Stream. If the Length has been populated, though, // it is possible that the Length could then be extended. However, we do want to avoid ever allowing the // stream to read past the end of the Content Claim. To accomplish this, we use a LimitedInputStream but // provide a LongSupplier for the length instead of a Long value. this allows us to continue reading until // we get to the end of the Claim, even if the Claim grows. This may happen, for instance, if we obtain an // InputStream for this claim, then read from it, write more to the claim, and then attempt to read again. In // such a case, since we have written to that same Claim, we should still be able to read those bytes. if (claim.getLength() >= 0) { return new LimitedInputStream(fis, claim::getLength); } else { return fis; } }
// NOTE(review): truncated builder chain — the receiver (presumably a
// "new StandardFlowFileRecord.Builder()") is outside this view; confirm against
// the full source. Rebuilds the record against the new claim, clamping the
// content-claim offset to >= 0.
.fromFlowFile(record.getCurrent()) .contentClaim(newClaim) .contentClaimOffset(Math.max(0, newClaim.getLength() - writtenToFlowFile)) .size(writtenToFlowFile) .build();
// Publishes the claim's identity and the FlowFile's absolute position within it
// to the provenance event builder.
// NOTE(review): the argument line "contentClaim.getOffset() + flowFile.getContentClaimOffset(),
// flowFile.getSize());" appears twice — this looks like a duplicated paste and
// would not compile as-is; verify against the full source.
final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); builder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize());
/**
 * Decrements the claimant count of the Resource Claim behind the given Content
 * Claim, returning the manager's result; a null claim yields 0.
 */
@Override
public int decrementClaimantCount(final ContentClaim claim) {
    return (claim == null) ? 0 : resourceClaimManager.decrementClaimantCount(claim.getResourceClaim());
}
/**
 * Serializes a Content Claim: a 1-byte marker (0 = absent, 1 = present) followed,
 * when present, by the resource claim's id/container/section, the claim's offset
 * and length, the supplied record offset, and the loss-tolerance flag. The write
 * order is part of the on-disk format and must match the deserializer.
 *
 * @param claim  claim to serialize; may be null
 * @param offset the record's offset into the claim
 * @param out    destination stream
 * @throws IOException on stream failure
 */
private void serializeContentClaim(final ContentClaim claim, final long offset, final DataOutputStream out) throws IOException {
    if (claim == null) {
        out.write(0);
        return;
    }

    out.write(1);
    final ResourceClaim rc = claim.getResourceClaim();

    writeString(rc.getId(), out);
    writeString(rc.getContainer(), out);
    writeString(rc.getSection(), out);
    out.writeLong(claim.getOffset());
    out.writeLong(claim.getLength());
    out.writeLong(offset);
    out.writeBoolean(rc.isLossTolerant());
}
/**
 * Maps a schema field name to the corresponding value; unknown names map to null.
 * A null fieldName throws NullPointerException, matching the original switch.
 */
@Override
public Object getFieldValue(final String fieldName) {
    if (ContentClaimSchema.RESOURCE_CLAIM.equals(requireNonNullName(fieldName))) {
        return resourceClaimFieldMap;
    } else if (ContentClaimSchema.CONTENT_CLAIM_LENGTH.equals(fieldName)) {
        return contentClaim.getLength();
    } else if (ContentClaimSchema.CONTENT_CLAIM_OFFSET.equals(fieldName)) {
        return contentClaimOffset;
    } else if (ContentClaimSchema.RESOURCE_CLAIM_OFFSET.equals(fieldName)) {
        return contentClaim.getOffset();
    }
    return null;
}

// Preserves the NPE the original switch statement threw for a null field name.
private static String requireNonNullName(final String fieldName) {
    if (fieldName == null) {
        throw new NullPointerException("fieldName");
    }
    return fieldName;
}
/**
 * Populates the provenance event builder's current and previous content-claim
 * fields from the record's original claim. Both fields are given identical
 * values; when there is no original claim, only an empty current claim is set.
 */
private void updateEventContentClaims(final ProvenanceEventBuilder builder, final FlowFile flowFile, final StandardRepositoryRecord repoRecord) {
    final ContentClaim originalClaim = repoRecord.getOriginalClaim();
    if (originalClaim == null) {
        builder.setCurrentContentClaim(null, null, null, null, 0L);
        return;
    }

    final ResourceClaim resource = originalClaim.getResourceClaim();
    // Hoist the expressions that were repeated verbatim for both builder calls.
    final long absoluteOffset = repoRecord.getOriginal().getContentClaimOffset() + originalClaim.getOffset();
    final long size = repoRecord.getOriginal().getSize();

    builder.setCurrentContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), absoluteOffset, size);
    builder.setPreviousContentClaim(resource.getContainer(), resource.getSection(), resource.getId(), absoluteOffset, size);
}