/**
 * Determines whether the outstanding read request has completed successfully and the local
 * buffer still has room to absorb the returned data.
 */
private boolean dataWaitingToGoInBuffer() {
    boolean requestCompletedOk = outstandingRequest != null && Futures.isSuccessful(outstandingRequest);
    boolean bufferHasRoom = buffer.capacityAvailable() > 0;
    return requestCompletedOk && bufferHasRoom;
}
/** * Waits for the provided future to be complete, and returns true if it was successful, false if it failed * or did not complete. * * @param timeout The maximum number of milliseconds to block * @param f The future to wait for. * @param <T> The Type of the future's result. * @return True if the given CompletableFuture is completed and successful within the given timeout. */ public static <T> boolean await(CompletableFuture<T> f, long timeout) { Exceptions.handleInterrupted(() -> { try { f.get(timeout, TimeUnit.MILLISECONDS); } catch (TimeoutException | ExecutionException e) { // Not handled here. } }); return isSuccessful(f); }
/**
 * Tears down the current connection (if one is established) and fails all in-flight requests
 * with the supplied exception.
 *
 * @param exceptionToInflightRequests The exception with which to fail outstanding requests.
 */
private void closeConnection(Exception exceptionToInflightRequests) {
    if (!closed.get()) {
        log.info("Closing connection to segment {} with exception: {}", segmentId, exceptionToInflightRequests);
    } else {
        log.info("Closing connection to segment: {}", segmentId);
    }
    CompletableFuture<ClientConnection> oldConnection;
    synchronized (lock) {
        oldConnection = connection;
        connection = null;
    }
    if (oldConnection != null && Futures.isSuccessful(oldConnection)) {
        try {
            oldConnection.getNow(null).close();
        } catch (Exception e) {
            log.warn("Exception tearing down connection: ", e);
        }
    }
    failAllInflight(exceptionToInflightRequests);
}
private void closeIndices(Collection<Long> segmentIds, boolean cleanCache) { synchronized (this.attributeIndices) { if (segmentIds == null) { segmentIds = new ArrayList<>(this.attributeIndices.keySet()); } for (long streamSegmentId : segmentIds) { CompletableFuture<AttributeIndex> indexFuture = this.attributeIndices.remove(streamSegmentId); if (indexFuture == null) { continue; } if (Futures.isSuccessful(indexFuture)) { // Already initialized. We should try as much as we can to clean up synchronously to prevent concurrent // calls from creating new indices which could be affected by us cleaning the cache at the same time. closeIndex((SegmentAttributeBTreeIndex) indexFuture.join(), cleanCache); } else { // Close it when we're done initializing. indexFuture.thenAcceptAsync(index -> closeIndex((SegmentAttributeBTreeIndex) index, cleanCache), this.executor); } } } }
/**
 * Creates a new instance of the RedirectedReadResultEntry class.
 *
 * @param entry The CompletableReadResultEntry to wrap.
 * @param offsetAdjustment The amount to adjust the offset by. This value, added to the entry's SegmentOffset, should
 *                         equal the offset in the Parent Segment where the read is thought to be at.
 * @param retryGetEntry A BiFunction to invoke when needing to retry an entry. First argument: offset, Second: length.
 * @param redirectedSegmentId The Id of the redirected Segment's read index.
 */
RedirectedReadResultEntry(CompletableReadResultEntry entry, long offsetAdjustment, GetEntry retryGetEntry, long redirectedSegmentId) {
    this.firstEntry = Preconditions.checkNotNull(entry, "entry");
    // Translate the wrapped entry's offset into the Parent Segment's coordinate space.
    this.adjustedOffset = entry.getStreamSegmentOffset() + offsetAdjustment;
    Preconditions.checkArgument(this.adjustedOffset >= 0, "Given offset adjustment would result in a negative offset.");
    this.retryGetEntry = Preconditions.checkNotNull(retryGetEntry, "retryGetEntry");
    this.redirectedSegmentId = redirectedSegmentId;
    if (Futures.isSuccessful(entry.getContent())) {
        // Wrapped entry's content is already available; expose it directly.
        this.result = entry.getContent();
    } else {
        // Content not yet available (or failed); expose our own future and tie it to the
        // wrapped entry's eventual outcome.
        this.result = new CompletableFuture<>();
        linkFirstEntryToResult();
    }
}
/**
 * Reports how many bytes are currently available, including data held by a successfully
 * completed outstanding request. Returns -1 when no data remains and the end of the segment
 * (or a truncation/failure) has been reached.
 */
@Override
@Synchronized
public int bytesInBuffer() {
    int available = buffer.dataAvailable();
    boolean endReached = receivedEndOfSegment
            || receivedTruncated
            || (outstandingRequest != null && outstandingRequest.isCompletedExceptionally());
    if (outstandingRequest != null && Futures.isSuccessful(outstandingRequest)) {
        SegmentRead completedRead = outstandingRequest.join();
        available += completedRead.getData().remaining();
        endReached = endReached || completedRead.isEndOfSegment();
    }
    if (available <= 0 && endReached) {
        available = -1;
    }
    log.trace("bytesInBuffer {} on segment {} status is {}", available, getSegmentId(), this);
    return available;
}
@Test public void completeWhenRegisteredDelayedMultiple() throws Exception { CompletableFuture<Void> testFuture = new CompletableFuture<>(); handler.completeWhenRegistered(testFuture); CompletableFuture<Void> testFuture1 = new CompletableFuture<>(); handler.completeWhenRegistered(testFuture1); handler.channelRegistered(ctx); Assert.assertTrue(Futures.isSuccessful(testFuture)); testFuture1.get(); //wait until additional future is complete. Assert.assertTrue(Futures.isSuccessful(testFuture1)); } }
/** * Verifies that CancellationToken.NONE has no effect. */ @Test public void testNonCancellableToken() { final int futureCount = 10; CancellationToken token = CancellationToken.NONE; ArrayList<CompletableFuture<Void>> futures = new ArrayList<>(); Predicate<Integer> isAlreadyCompleted = i -> i % 2 == 0; // Every other future is already completed. for (int i = 0; i < futureCount; i++) { val f = new CompletableFuture<Void>(); if (isAlreadyCompleted.test(i)) { f.complete(null); } futures.add(f); token.register(f); } token.requestCancellation(); for (int i = 0; i < futures.size(); i++) { if (isAlreadyCompleted.test(i)) { Assert.assertTrue("Already completed future was cancelled.", Futures.isSuccessful(futures.get(i))); } else { Assert.assertFalse("Non-completed future was completed.", futures.get(i).isDone()); } } } }
/**
 * Verifies that a future registered before channel registration completes once the channel
 * becomes registered.
 */
@Test
public void completeWhenRegisteredDelayed() throws Exception {
    CompletableFuture<Void> pendingFuture = new CompletableFuture<>();
    handler.completeWhenRegistered(pendingFuture);
    handler.channelRegistered(ctx);
    Assert.assertTrue(Futures.isSuccessful(pendingFuture));
}
/**
 * Verifies that a future registered after the channel is already registered completes
 * immediately.
 */
@Test
public void completeWhenRegisteredNormal() throws Exception {
    handler.channelRegistered(ctx);
    CompletableFuture<Void> registeredFuture = new CompletableFuture<>();
    handler.completeWhenRegistered(registeredFuture);
    Assert.assertTrue(Futures.isSuccessful(registeredFuture));
}
/** * Tests that RequestCancellation cancels futures. */ @Test public void testRequestCancellation() { final int futureCount = 10; CancellationToken token = new CancellationToken(); ArrayList<CompletableFuture<Void>> futures = new ArrayList<>(); Predicate<Integer> isAlreadyCompleted = i -> i % 2 == 0; // Every other future is already completed. for (int i = 0; i < futureCount; i++) { val f = new CompletableFuture<Void>(); if (isAlreadyCompleted.test(i)) { f.complete(null); } futures.add(f); token.register(f); } token.requestCancellation(); for (int i = 0; i < futures.size(); i++) { if (isAlreadyCompleted.test(i)) { Assert.assertTrue("Already completed future was cancelled.", Futures.isSuccessful(futures.get(i))); } else { AssertExtensions.assertThrows( "Future was not cancelled.", futures.get(i)::join, ex -> ex instanceof CancellationException); } } }
/**
 * Validates a single ReadResultEntry against the expected entries list: its content must
 * already be available and match the next expected payload. Returns false (after recording
 * the error) if any check fails.
 */
@Override
public boolean processEntry(ReadResultEntry e) {
    try {
        Assert.assertTrue("Received Entry that is not ready to serve data yet.", Futures.isSuccessful(e.getContent()));
        ReadResultEntryContents contents = e.getContent().join();
        byte[] actualData = new byte[contents.getLength()];
        StreamHelpers.readAll(contents.getData(), actualData, 0, actualData.length);
        int entryIndex = readEntryCount.getAndIncrement();
        AssertExtensions.assertLessThan("Read too many entries.", entries.size(), entryIndex);
        byte[] expectedData = entries.get(entryIndex);
        Assert.assertArrayEquals(String.format("Unexpected read contents after reading %d entries.", entryIndex + 1), expectedData, actualData);
        readCount.incrementAndGet();
        return true;
    } catch (Exception ex) {
        processError(ex);
        return false;
    }
}
/**
 * Tests the basic ability to dequeue items using take() as they are added, one at a time.
 */
@Test
public void testAddSingleTake() throws Exception {
    @Cleanup
    BlockingDrainingQueue<Integer> queue = new BlockingDrainingQueue<>();
    for (int itemIndex = 0; itemIndex < ITEM_COUNT; itemIndex++) {
        queue.add(itemIndex);
        val takeResult = queue.take(MAX_READ_COUNT);
        Assert.assertTrue("take() returned an incomplete Future when data is available.", Futures.isSuccessful(takeResult));
        val polled = takeResult.join();
        Assert.assertEquals("Unexpected number of items polled.", 1, polled.size());
        Assert.assertEquals("Unexpected value polled from queue.", itemIndex, (int) polled.peek());
    }
    // With the queue drained, a further take() must return an incomplete future.
    val pendingTake = queue.take(1);
    Assert.assertFalse("take() did not return an incomplete future when queue was empty.", pendingTake.isDone());
}
/**
 * Tests the basic ability to dequeue items as a batch using take().
 */
@Test
public void testAddMultiTake() throws Exception {
    @Cleanup
    BlockingDrainingQueue<Integer> queue = new BlockingDrainingQueue<>();
    populate(queue);
    for (int batchStart = 0; batchStart < ITEM_COUNT; batchStart += MAX_READ_COUNT) {
        val takeResult = queue.take(MAX_READ_COUNT);
        Assert.assertTrue("take() returned an incomplete Future when data is available.", Futures.isSuccessful(takeResult));
        val batch = takeResult.join();
        // The final batch may be smaller than MAX_READ_COUNT.
        int expectedCount = Math.min(MAX_READ_COUNT, ITEM_COUNT - batchStart);
        Assert.assertEquals("Unexpected number of items polled.", expectedCount, batch.size());
        int expectedValue = batchStart;
        for (int actualValue : batch) {
            Assert.assertEquals("Unexpected value polled from queue.", expectedValue, actualValue);
            expectedValue++;
        }
    }
}
/**
 * Tests that a SegmentRead reply carrying the wrong offset is discarded: the read only
 * completes when a reply with the requested offset arrives, and only that reply's data
 * is returned.
 */
@Test(timeout = 10000)
public void testWrongOffsetReturned() throws ConnectionFailedException {
    Segment segment = new Segment("scope", "testWrongOffsetReturned", 0);
    byte[] good = new byte[] { 0, 1, 2, 3, 4 };
    byte[] bad = new byte[] { 9, 8, 7, 6 };
    PravegaNodeUri endpoint = new PravegaNodeUri("localhost", SERVICE_PORT);
    MockConnectionFactoryImpl connectionFactory = new MockConnectionFactoryImpl();
    MockController controller = new MockController(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory);
    @Cleanup
    AsyncSegmentInputStreamImpl in = new AsyncSegmentInputStreamImpl(controller, connectionFactory, segment, "");
    ClientConnection c = mock(ClientConnection.class);
    connectionFactory.provideConnection(endpoint, c);
    CompletableFuture<SegmentRead> readFuture = in.read(1234, 5678);
    // The read must stay blocked past the mismatched-offset reply (1235) and only unblock
    // when the correctly-offset reply (1234) arrives.
    AssertExtensions.assertBlocks(() -> readFuture.get(), () -> {
        ReplyProcessor processor = connectionFactory.getProcessor(endpoint);
        processor.segmentRead(new WireCommands.SegmentRead(segment.getScopedName(), 1235, false, false, ByteBuffer.wrap(bad)));
        processor.segmentRead(new WireCommands.SegmentRead(segment.getScopedName(), 1234, false, false, ByteBuffer.wrap(good)));
    });
    verify(c).sendAsync(Mockito.eq(new WireCommands.ReadSegment(segment.getScopedName(), 1234, 5678, "")),
            Mockito.any(ClientConnection.CompletedCallback.class));
    // Only the payload from the correctly-offset reply is surfaced.
    assertTrue(Futures.isSuccessful(readFuture));
    assertEquals(ByteBuffer.wrap(good), readFuture.join().getData());
    verifyNoMoreInteractions(c);
}
/**
 * Tests the basic read path: a read() call issues a ReadSegment wire command, blocks until
 * the matching SegmentRead reply arrives, and then completes with that reply.
 */
@Test(timeout = 10000)
public void testRead() throws ConnectionFailedException {
    Segment segment = new Segment("scope", "testRead", 1);
    PravegaNodeUri endpoint = new PravegaNodeUri("localhost", SERVICE_PORT);
    MockConnectionFactoryImpl connectionFactory = new MockConnectionFactoryImpl();
    MockController controller = new MockController(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory);
    @Cleanup
    AsyncSegmentInputStreamImpl in = new AsyncSegmentInputStreamImpl(controller, connectionFactory, segment, "");
    ClientConnection c = mock(ClientConnection.class);
    connectionFactory.provideConnection(endpoint, c);
    WireCommands.SegmentRead segmentRead = new WireCommands.SegmentRead(segment.getScopedName(), 1234, false, false, ByteBuffer.allocate(0));
    CompletableFuture<SegmentRead> readFuture = in.read(1234, 5678);
    // The read must block until the reply is delivered through the ReplyProcessor.
    AssertExtensions.assertBlocks(() -> readFuture.get(), () -> {
        ReplyProcessor processor = connectionFactory.getProcessor(endpoint);
        processor.segmentRead(segmentRead);
    });
    verify(c).sendAsync(Mockito.eq(new WireCommands.ReadSegment(segment.getScopedName(), 1234, 5678, "")),
            Mockito.any(ClientConnection.CompletedCallback.class));
    assertTrue(Futures.isSuccessful(readFuture));
    assertEquals(segmentRead, readFuture.join());
    verifyNoMoreInteractions(c);
}
// NOTE(review): fragment of a larger test method (enclosing definition not fully visible here).
// Adding a value must have unblocked the pending take(), yielding exactly one item.
Assert.assertTrue("Queue did not unblock after adding a value.", Futures.isSuccessful(takeResult));
Queue<Integer> result = takeResult.join();
Assert.assertEquals("Unexpected number of items polled.", 1, result.size());
// NOTE(review): fragment of a larger test method (enclosing definition not fully visible here).
// First read (offset 1234) must have been sent on the wire and then failed due to truncation.
verify(c).sendAsync(Mockito.eq(new WireCommands.ReadSegment(segment.getScopedName(), 1234, 5678, "")),
        Mockito.any(ClientConnection.CompletedCallback.class));
assertTrue(!Futures.isSuccessful(readFuture)); // verify read future completedExceptionally
assertThrows(SegmentTruncatedException.class, () -> readFuture.get());
verifyNoMoreInteractions(c);
// Second read (offset 5656, past the truncation point) must succeed with the expected reply.
verify(c).sendAsync(Mockito.eq(new WireCommands.ReadSegment(segment.getScopedName(), 5656, 5678, "")),
        Mockito.any(ClientConnection.CompletedCallback.class));
assertTrue(Futures.isSuccessful(readFuture2));
assertEquals(segmentRead, readFuture2.join());
verifyNoMoreInteractions(c);
/** * Tests the ability to timeout tail reads. This does not actually test the functionality of tail reads - it just * tests that they will time out appropriately. */ @Test public void testTailReadsTimeout() { final long segmentId = 1; final String segmentName = Long.toString(segmentId); // Setup a DurableLog and start it. @Cleanup ContainerSetup setup = new ContainerSetup(executorService()); @Cleanup DurableLog durableLog = setup.createDurableLog(); durableLog.startAsync().awaitRunning(); // Create a segment, which will be used for testing later. UpdateableSegmentMetadata segmentMetadata = setup.metadata.mapStreamSegmentId(segmentName, segmentId); segmentMetadata.setLength(0); Duration shortTimeout = Duration.ofMillis(30); // Setup a read operation, and make sure it is blocked (since there is no data). CompletableFuture<Iterator<Operation>> readFuture = durableLog.read(1, 1, shortTimeout); Assert.assertFalse("read() returned a completed future when there is no data available.", Futures.isSuccessful(readFuture)); CompletableFuture<Void> controlFuture = Futures.delayedFuture(Duration.ofMillis(2000), setup.executorService); AssertExtensions.assertSuppliedFutureThrows( "Future from read() operation did not fail with a TimeoutException after the timeout expired.", () -> CompletableFuture.anyOf(controlFuture, readFuture), ex -> ex instanceof TimeoutException); }
// NOTE(review): fragment of a larger test method (enclosing definition not fully visible here).
Assert.assertTrue("BaseEntry.getContent() was not completed when requestContent was invoked.",
        Futures.isSuccessful(baseEntry.getContent()));