@Test
public void shouldWaitForOngoingForceToCompleteBeforeForcingAgain() throws Throwable
{
    // Pre-fill one slot of the command queue so that the first force cannot drain it immediately.
    channelCommandQueue.put( ChannelCommand.dummy );
    // The 'emptyBuffer...' command will be put into the queue, and then it'll block on 'force' because the queue
    // will be at capacity.
    final BatchingTransactionAppender appender = life.add( createTransactionAppender() );
    life.start();
    Runnable runnable = createForceAfterAppendRunnable( appender );
    // First forcer: submitted on the executor; it will append and then block mid-force on the full queue.
    Future<?> future = executor.submit( runnable );
    // Wait until the first forcer has actually reached the force path before starting the second one.
    forceSemaphore.acquire();
    // Second forcer: forked as a raw thread so we can observe its thread state while it waits
    // for the ongoing force to complete.
    Thread otherThread = fork( runnable );
    awaitThreadState( otherThread, MILLISECONDS_TO_WAIT, Thread.State.TIMED_WAITING );
    // Drain the queue and verify the exact command sequence: the pre-filled dummy, then one
    // empty+force pair per forcer — i.e. the second force waited rather than interleaving.
    assertThat( channelCommandQueue.take(), is( ChannelCommand.dummy ) );
    assertThat( channelCommandQueue.take(), is( ChannelCommand.emptyBufferIntoChannelAndClearIt ) );
    assertThat( channelCommandQueue.take(), is( ChannelCommand.force ) );
    assertThat( channelCommandQueue.take(), is( ChannelCommand.emptyBufferIntoChannelAndClearIt ) );
    assertThat( channelCommandQueue.take(), is( ChannelCommand.force ) );
    // Both forcers must finish cleanly (get() rethrows any failure from the executor task).
    future.get();
    otherThread.join();
    // No extra commands may have been issued beyond the five consumed above.
    assertTrue( channelCommandQueue.isEmpty() );
}
/**
 * Executes the given {@link Callable} on a newly forked thread and hands back a
 * {@link Future} that completes with the callable's result or failure.
 *
 * @param callable the work to run on the forked thread
 * @param <T> the result type produced by the callable
 * @return a future tied to the forked execution
 */
public static <T> Future<T> forkFuture( Callable<T> callable )
{
    // FutureTask is both the Runnable we fork and the Future we return.
    FutureTask<T> future = new FutureTask<>( callable );
    fork( future );
    return future;
}
Thread thread = runInSeparateThread( () -> outer.force( IOLimiter.UNLIMITED ) ); ThreadTestUtils.awaitThreadState( actionThread, TEST_TIMEOUT, Thread.State.TIMED_WAITING ); latch.countDown();
} ).when( storageEngine ).flushAndForce( limiter ); Future<Object> forceCheckPointer = forkFuture( () ->
@Test
void writeLockedPageMustBlockFileUnmapping()
{
    assertTimeout( ofMillis( SHORT_TIMEOUT_MILLIS ), () ->
    {
        configureStandardPageCache();
        // Grab a write lock on page 0 of a freshly mapped file.
        PagedFile mappedFile = map( file( "a" ), filePageSize );
        PageCursor writeCursor = mappedFile.io( 0, PF_SHARED_WRITE_LOCK );
        assertTrue( writeCursor.next() );
        // Start unmapping concurrently; it must not finish while the write lock is held.
        // The timed join gives the unmap thread a chance to demonstrate that it is blocked.
        Thread unmapThread = fork( closePageFile( mappedFile ) );
        unmapThread.join( 100 );
        // Releasing the lock must let the unmap complete.
        writeCursor.close();
        unmapThread.join();
    } );
}
ThreadTestUtils.awaitThreadState( actionThread, TEST_TIMEOUT, Thread.State.TIMED_WAITING ); latch.countDown(); updaterThread.join();
ThreadTestUtils.forkFuture( () ->
@Test
void writerCloseWaitForMergesInMergeQueue()
{
    assertTimeout( Duration.ofSeconds( 10 ), () -> {
        indexWriter = mock( IndexWriter.class );
        SegmentCommitInfo segmentCommitInfo = getSegmentCommitInfo();
        // First call hands out one pending merge, second call signals "no more merges".
        Mockito.when( indexWriter.getNextMerge() ).thenReturn( new TestOneMerge( segmentCommitInfo ) ).thenReturn( null );
        // Enqueue the merge work; exactly one writer task should now be outstanding.
        mergeScheduler.merge( indexWriter, MergeTrigger.EXPLICIT, false );
        assertEquals( 1, mergeScheduler.getWriterTaskCount() );
        // Close on a separate thread: close() must block until the queued merge has run.
        Thread closeSchedulerThread = ThreadTestUtils.fork( () -> mergeScheduler.close() );
        // Wait until the closing thread is parked (i.e. actually waiting on the merge).
        ThreadTestUtils.awaitThreadState( closeSchedulerThread, TimeUnit.SECONDS.toMillis( 5 ), Thread.State.TIMED_WAITING );
        // Release the merge task; close() may now finish.
        mergeScheduler.getExecutionLatch().countDown();
        closeSchedulerThread.join();
        // All writer tasks must have drained before close() returned.
        assertEquals( 0, mergeScheduler.getWriterTaskCount() );
    } );
}
@Test void optimisticReadLockedPageMustNotBlockFileUnmapping() { assertTimeout( ofMillis( SHORT_TIMEOUT_MILLIS ), () -> { generateFileWithRecords( file( "a" ), 1, recordSize ); configureStandardPageCache(); PagedFile pagedFile = map( file( "a" ), filePageSize ); PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ); assertTrue( cursor.next() ); // Got a read lock fork( closePageFile( pagedFile ) ).join(); cursor.close(); } ); }
ThreadTestUtils.awaitThreadState( thread, TimeUnit.SECONDS.toMillis( 5 ), Thread.State.TIMED_WAITING, Thread.State.WAITING );
ThreadTestUtils.forkFuture( () ->
for ( int i = 0; i < otherThreads.length; i++ ) otherThreads[i] = fork( runnable ); awaitThreadState( otherThread, MILLISECONDS_TO_WAIT, Thread.State.TIMED_WAITING );
Thread serverStoppingThread = ThreadTestUtils.fork( stopServerAfterStartingHasStarted( server, failure ) ); server.start();
@Test void advancingPessimisticReadLockingCursorAfterUnmappingMustThrow() { assertTimeout( ofMillis( SHORT_TIMEOUT_MILLIS ), () -> { configureStandardPageCache(); generateFileWithRecords( file( "a" ), recordsPerFilePage * 2, recordSize ); PagedFile pagedFile = map( file( "a" ), filePageSize ); PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ); assertTrue( cursor.next() ); // Got a pessimistic read lock fork( closePageFile( pagedFile ) ).join(); assertThrows( FileIsNotMappedException.class, cursor::next ); } ); }
@Test void advancingOptimisticReadLockingCursorAfterUnmappingMustThrow() { assertTimeout( ofMillis( SHORT_TIMEOUT_MILLIS ), () -> { configureStandardPageCache(); generateFileWithRecords( file( "a" ), recordsPerFilePage * 2, recordSize ); PagedFile pagedFile = map( file( "a" ), filePageSize ); PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ); assertTrue( cursor.next() ); // fault assertTrue( cursor.next() ); // fault + unpin page 0 assertTrue( cursor.next( 0 ) ); // potentially optimistic read lock page 0 fork( closePageFile( pagedFile ) ).join(); assertThrows( FileIsNotMappedException.class, cursor::next ); } ); }
@Test
void readingAndRetryingOnPageWithOptimisticReadLockingAfterUnmappingMustNotThrow()
{
    assertTimeout( ofMillis( SHORT_TIMEOUT_MILLIS ), () -> {
        configureStandardPageCache();
        generateFileWithRecords( file( "a" ), recordsPerFilePage * 2, recordSize );
        PagedFile pagedFile = map( file( "a" ), filePageSize );
        PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK );
        // Fault in two pages, then reposition on page 0 so the cursor holds a
        // potentially optimistic read lock on an already-resident page.
        assertTrue( cursor.next() ); // fault
        assertTrue( cursor.next() ); // fault + unpin page 0
        assertTrue( cursor.next( 0 ) ); // potentially optimistic read lock page 0
        // Unmap the file, then tear down the whole page cache, leaving the cursor dangling.
        fork( closePageFile( pagedFile ) ).join();
        pageCache.close();
        pageCache = null; // prevent the test fixture from closing the cache again
        // Reads and shouldRetry() on the dangling optimistic cursor must NOT throw —
        // the failure must only surface on the next advance.
        cursor.getByte();
        cursor.shouldRetry();
        assertThrows( FileIsNotMappedException.class, cursor::next );
    } );
}
fork( fillPagedFileB ).join();