/**
 * Verifies that the Segment represented by the given handle has not been sealed.
 *
 * @param handle The handle to check.
 * @throws StreamSegmentSealedException If the handle reports the Segment as sealed.
 */
private void ensureNotSealed(RollingSegmentHandle handle) throws StreamSegmentSealedException {
    if (!handle.isSealed()) {
        return;
    }
    throw new StreamSegmentSealedException(handle.getSegmentName());
}
/**
 * Translates filesystem-level exceptions into their Pravega StreamSegment equivalents and throws them.
 * This method never returns normally; the generic return type exists only so callers can use it in
 * expression position (e.g. {@code return throwException(name, e);}).
 *
 * @param segmentName Name of the Segment the failed operation was acting on.
 * @param e           The exception to translate.
 * @param <T>         Return type expected by the caller (never actually produced).
 * @throws StreamSegmentException The translated exception.
 */
private <T> T throwException(String segmentName, Exception e) throws StreamSegmentException {
    if (e instanceof NoSuchFileException || e instanceof FileNotFoundException) {
        throw new StreamSegmentNotExistsException(segmentName);
    }
    if (e instanceof FileAlreadyExistsException) {
        throw new StreamSegmentExistsException(segmentName);
    }
    if (e instanceof IndexOutOfBoundsException) {
        // Fix: preserve the original exception as the cause (previously only the message was kept,
        // which discarded the stack trace of the underlying failure).
        throw new IllegalArgumentException(e.getMessage(), e);
    }
    if (e instanceof AccessControlException || e instanceof AccessDeniedException || e instanceof NonWritableChannelException) {
        // Permission/read-only failures are mapped to "sealed".
        // NOTE(review): presumably sealing marks the backing file read-only — confirm against the storage impl.
        throw new StreamSegmentSealedException(segmentName, e);
    }
    // Anything unrecognized is rethrown as-is (unchecked) without wrapping.
    throw Exceptions.sneakyThrow(e);
}
/**
 * Appends {@code length} bytes from {@code data} to this Segment, which must currently end exactly at
 * {@code startOffset}. Copies the stream contents into the internal buffer list ({@code this.data}) in
 * BUFFER_SIZE-bounded chunks, then advances {@code this.length}.
 * Must be invoked while holding {@code lock} (see {@code @GuardedBy}). IOExceptions from the stream are
 * rethrown unchecked via {@code @SneakyThrows}.
 *
 * @param startOffset Offset at which to begin writing; must equal the current Segment length.
 * @param data        Source of the bytes to write; must supply at least {@code length} bytes.
 * @param length      Number of bytes to write; must be non-negative.
 * @throws BadOffsetException           If {@code startOffset} does not match the current length.
 * @throws StreamSegmentSealedException If this Segment is sealed.
 */
@GuardedBy("lock")
@SneakyThrows(IOException.class)
private void writeInternal(long startOffset, InputStream data, int length) throws BadOffsetException, StreamSegmentSealedException {
    Exceptions.checkArgument(length >= 0, "length", "bad length");
    if (startOffset != this.length) {
        throw new BadOffsetException(this.name, this.length, startOffset);
    }
    if (this.sealed) {
        throw new StreamSegmentSealedException(this.name);
    }
    long offset = startOffset;
    // Make sure enough buffers exist to hold [offset, offset + length).
    ensureAllocated(offset, length);
    int writtenBytes = 0;
    while (writtenBytes < length) {
        // Copy at most up to the end of the current buffer; the next iteration continues in the next buffer.
        OffsetLocation ol = getOffsetLocation(offset);
        int readBytes = data.read(this.data.get(ol.bufferSequence), ol.bufferOffset, Math.min(length - writtenBytes, BUFFER_SIZE - ol.bufferOffset));
        if (readBytes < 0) {
            // The caller promised 'length' bytes but the stream ran dry.
            throw new IOException("reached end of stream while still expecting data");
        }
        writtenBytes += readBytes;
        offset += readBytes;
    }
    // Extend the Segment length; Math.max guards against any concurrent/earlier larger value.
    this.length = Math.max(this.length, startOffset + length);
}
/**
 * Translates S3-specific exceptions into their Pravega StreamSegment equivalents and throws them.
 * Never returns normally; the generic return type lets callers use this in expression position.
 *
 * @param segmentName Name of the Segment the failed operation was acting on.
 * @param e           The exception to translate.
 * @param <T>         Return type expected by the caller (never actually produced).
 * @throws StreamSegmentException The translated exception.
 */
private <T> T throwException(String segmentName, Exception e) throws StreamSegmentException {
    if (e instanceof S3Exception) {
        S3Exception s3e = (S3Exception) e;
        String code = Strings.nullToEmpty(s3e.getErrorCode());
        // Range/argument problems are detected either by error code or by the 416 HTTP status.
        boolean badRequest = code.equals("InvalidRange")
                || code.equals("InvalidArgument")
                || code.equals("MethodNotAllowed")
                || s3e.getHttpCode() == HttpStatus.SC_REQUESTED_RANGE_NOT_SATISFIABLE;
        if (code.equals("NoSuchKey")) {
            throw new StreamSegmentNotExistsException(segmentName);
        }
        if (code.equals("PreconditionFailed")) {
            throw new StreamSegmentExistsException(segmentName);
        }
        if (badRequest) {
            throw new IllegalArgumentException(segmentName, e);
        }
        if (code.equals("AccessDenied")) {
            throw new StreamSegmentSealedException(segmentName, e);
        }
    }
    if (e instanceof IndexOutOfBoundsException) {
        throw new ArrayIndexOutOfBoundsException(e.getMessage());
    }
    // Unrecognized exceptions are rethrown unchecked, unwrapped.
    throw Exceptions.sneakyThrow(e);
}
/** * Pre-processes a StreamSegmentSealOperation. * After this method returns, the operation will have its SegmentLength property set to the current length of the Segment. * * @param operation The Operation. * @throws StreamSegmentSealedException If the Segment is already sealed. * @throws StreamSegmentMergedException If the Segment is merged into another. * @throws IllegalArgumentException If the operation is for a different Segment. */ void preProcessOperation(StreamSegmentSealOperation operation) throws StreamSegmentSealedException, StreamSegmentMergedException { ensureSegmentId(operation); if (this.merged) { // We do not allow any operation after merging (since after merging the Stream disappears). throw new StreamSegmentMergedException(this.name); } if (this.sealed) { // We do not allow re-sealing an already sealed stream. throw new StreamSegmentSealedException(this.name); } if (!this.recoveryMode) { // Assign entry StreamSegment Length. operation.setStreamSegmentOffset(this.length); } }
/**
 * Translates HDFS-specific Exceptions to Pravega-equivalent Exceptions.
 * RemoteExceptions are unwrapped first so the underlying cause drives the mapping.
 *
 * @param segmentName Name of the stream segment on which the exception occurs.
 * @param e           The exception to be translated.
 * @return The Pravega-equivalent exception to be thrown; unmapped exceptions are rethrown unchecked instead.
 */
static <T> StreamSegmentException convertException(String segmentName, Throwable e) {
    Throwable cause = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
    if (cause instanceof PathNotFoundException || cause instanceof FileNotFoundException) {
        return new StreamSegmentNotExistsException(segmentName, cause);
    }
    if (cause instanceof FileAlreadyExistsException || cause instanceof AlreadyBeingCreatedException) {
        return new StreamSegmentExistsException(segmentName, cause);
    }
    if (cause instanceof AclException) {
        return new StreamSegmentSealedException(segmentName, cause);
    }
    // No known mapping: rethrow unchecked rather than returning.
    throw Exceptions.sneakyThrow(cause);
}
/** * Pre-processes a UpdateAttributesOperation. * After this method returns, the given operation will have its AttributeUpdates set to the current values of * those attributes. * * @param operation The operation to pre-process. * @throws StreamSegmentSealedException If the Segment is sealed. * @throws StreamSegmentMergedException If the Segment is merged into another. * @throws IllegalArgumentException If the operation is for a different Segment. * @throws BadAttributeUpdateException If at least one of the AttributeUpdates is invalid given the current attribute * values of the segment. */ void preProcessOperation(UpdateAttributesOperation operation) throws StreamSegmentSealedException, StreamSegmentMergedException, BadAttributeUpdateException { ensureSegmentId(operation); if (this.merged) { // We do not allow any operation after merging (since after merging the Segment disappears). throw new StreamSegmentMergedException(this.name); } if (this.sealed) { throw new StreamSegmentSealedException(this.name); } if (!this.recoveryMode) { preProcessAttributes(operation.getAttributeUpdates()); } }
throw new CompletionException(new StreamSegmentSealedException("attributes_" + streamSegmentId, ex));
throw new StreamSegmentSealedException(handle.getSegmentName());
@Override public void concat(SegmentHandle target, long offset, String sourceSegment) throws StreamSegmentException { ensureInitializedAndNotClosed(); long traceId = LoggerHelpers.traceEnter(log, "concat", target, offset, sourceSegment); target = asWritableHandle(target); // Check for target offset and whether it is sealed. FileStatus fileStatus = null; try { fileStatus = findStatusForSegment(target.getSegmentName(), true); if (isSealed(fileStatus.getPath())) { throw new StreamSegmentSealedException(target.getSegmentName()); } else if (getEpoch(fileStatus) > this.epoch) { throw new StorageNotPrimaryException(target.getSegmentName()); } else if (fileStatus.getLen() != offset) { throw new BadOffsetException(target.getSegmentName(), fileStatus.getLen(), offset); } FileStatus sourceFile = findStatusForSegment(sourceSegment, true); Preconditions.checkState(isSealed(sourceFile.getPath()), "Cannot concat segment '%s' into '%s' because it is not sealed.", sourceSegment, target.getSegmentName()); // Concat source file into target. this.fileSystem.concat(fileStatus.getPath(), new Path[]{sourceFile.getPath()}); } catch (IOException ex) { throw HDFSExceptionHelpers.convertException(sourceSegment, ex); } LoggerHelpers.traceLeave(log, "concat", traceId, target, offset, sourceSegment); }
/**
 * Triggers all the Future Reads in the given collection, completing each one either with a freshly
 * resolved ReadResultEntry's content or with a failure (sealed-at-end-of-segment, or whatever error
 * the resolved entry's content future produces).
 *
 * @param futureReads The Future Reads to trigger.
 */
private void triggerFutureReads(Collection<FutureReadResultEntry> futureReads) {
    for (FutureReadResultEntry r : futureReads) {
        // Re-resolve the read now that new data/metadata is available.
        ReadResultEntry entry = getSingleReadResultEntry(r.getStreamSegmentOffset(), r.getRequestedReadLength());
        assert entry != null : "Serving a StorageReadResultEntry with a null result";
        assert !(entry instanceof FutureReadResultEntry) : "Serving a FutureReadResultEntry with another FutureReadResultEntry.";

        log.trace("{}: triggerFutureReads (Offset = {}, Type = {}).", this.traceObjectId, r.getStreamSegmentOffset(), entry.getType());
        if (entry.getType() == ReadResultEntryType.EndOfStreamSegment) {
            // We have attempted to read beyond the end of the stream. Fail the read request with the appropriate message.
            r.fail(new StreamSegmentSealedException(String.format("StreamSegment has been sealed at offset %d. There can be no more reads beyond this offset.", this.metadata.getLength())));
        } else {
            if (!entry.getContent().isDone()) {
                // Normally, all Future Reads are served from Cache, since they reflect data that has just been appended.
                // However, it's possible that after recovery, we get a read for some data that we do not have in the
                // cache (but it's not a tail read) - this data exists in Storage but our StorageLength has not yet been
                // updated. As such, the only solution we have is to return a FutureRead which will be satisfied when
                // the Writer updates the StorageLength (and trigger future reads). In that scenario, entry we get
                // will likely not be auto-fetched, so we need to request the content.
                entry.requestContent(this.config.getStorageReadDefaultTimeout());
            }

            // Forward the resolved entry's outcome (success or failure) to the original Future Read.
            CompletableFuture<ReadResultEntryContents> entryContent = entry.getContent();
            entryContent.thenAccept(r::complete);
            Futures.exceptionListener(entryContent, r::fail);
        }
    }
}
/**
 * Executes a read from Storage using the current, given state of the Segment.
 * If there is nothing new to read: fails with StreamSegmentSealedException when the Segment is sealed,
 * otherwise returns a delayed future so the caller retries later. If new data exists, reads it from
 * Storage starting at the end of the local read buffer, processes it, and attempts a truncation.
 *
 * @param segmentInfo Latest known properties (length, sealed status) of the Segment.
 * @return A future that completes when this read pass (or the retry delay) finishes.
 */
private CompletableFuture<Void> performRead(SegmentProperties segmentInfo) {
    // Calculate the last offset we read up to. Snapshot under the readBuffer lock so offset+length are consistent.
    long lastReadOffset;
    synchronized (this.readBuffer) {
        lastReadOffset = this.readBufferOffset + this.readBuffer.getLength();
    }

    long diff = segmentInfo.getLength() - lastReadOffset;
    if (diff <= 0) {
        if (segmentInfo.isSealed()) {
            // Segment has been sealed; no point in looping anymore.
            return Futures.failedFuture(new StreamSegmentSealedException(this.segmentName));
        } else {
            // No change in the segment.
            return Futures.delayedFuture(this.waitDuration, SegmentStoreReader.this.executor);
        }
    } else {
        // Read everything available in one shot, capped at Integer.MAX_VALUE (array size limit).
        byte[] buffer = new byte[(int) Math.min(Integer.MAX_VALUE, diff)];
        return SegmentStoreReader.this.storage
                .openRead(segmentName)
                .thenCompose(handle -> SegmentStoreReader.this.storage.read(
                        handle, lastReadOffset, buffer, 0, buffer.length, SegmentStoreReader.this.testConfig.getTimeout()))
                .thenComposeAsync(bytesRead -> {
                    // Fold the newly read bytes into the read buffer, then truncate what we no longer need.
                    processRead(buffer, bytesRead);
                    return truncateIfPossible(segmentInfo.getLength());
                }, SegmentStoreReader.this.executor);
    }
}
throw new StreamSegmentSealedException(this.name);
status = findStatusForSegment(handle.getSegmentName(), true); if (isSealed(status.getPath())) { throw new StreamSegmentSealedException(handle.getSegmentName());
throw new StreamSegmentSealedException(this.name);
/**
 * Writes {@code length} bytes from {@code data} to the Segment identified by the given handle,
 * starting at {@code offset}. The Segment must not be sealed and must currently end exactly at
 * {@code offset}.
 *
 * @param handle Handle to the Segment; must not be read-only.
 * @param offset Offset at which to write; must equal the Segment's current length.
 * @param data   Source of the bytes to write.
 * @param length Number of bytes to write.
 * @return null (to satisfy the Void return type).
 * @throws StreamSegmentException If the Segment is sealed, the offset is wrong, or the store call fails.
 */
private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws StreamSegmentException {
    Preconditions.checkArgument(!handle.isReadOnly(), "handle must not be read-only.");
    String segmentName = handle.getSegmentName();
    long traceId = LoggerHelpers.traceEnter(log, "write", segmentName, offset, length);

    // Validate against the current state of the Segment: sealed first, then offset match.
    SegmentProperties info = doGetStreamSegmentInfo(segmentName);
    if (info.isSealed()) {
        throw new StreamSegmentSealedException(segmentName);
    }
    if (info.getLength() != offset) {
        throw new BadOffsetException(segmentName, info.getLength(), offset);
    }

    // Append by writing the byte range [offset, offset + length) to the backing object.
    client.putObject(this.config.getBucket(), this.config.getRoot() + segmentName, Range.fromOffsetLength(offset, length), data);
    LoggerHelpers.traceLeave(log, "write", traceId);
    return null;
}