.name("computation") .bufferSize(BACKLOG) .build(); final Flux<String> computationStream = computationEmitterProcessor .map(i -> Integer.toString(i)); .name("persistence") .bufferSize(BACKLOG) .build(); final Flux<String> persistenceStream = persistenceEmitterProcessor .map(i -> "done " + i);
@Test public void testCustomRequestTaskThreadShare() { String expectedName = "topicProcessorRequestTaskShare"; //NOTE: the below single executor should not be used usually as requestTask assumes it immediately gets executed ExecutorService customTaskExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, expectedName)); TopicProcessor<Object> processor = TopicProcessor.builder().share(true) .executor(Executors.newCachedThreadPool()) .requestTaskExecutor(customTaskExecutor) .bufferSize(8) .waitStrategy(WaitStrategy.liteBlocking()) .autoCancel(true) .build(); processor.requestTask(Operators.cancelledSubscription()); Thread[] threads = new Thread[Thread.activeCount()]; Thread.enumerate(threads); //cleanup to avoid visibility in other tests customTaskExecutor.shutdownNow(); processor.forceShutdown(); Condition<Thread> customRequestTaskThread = new Condition<>( thread -> thread != null && expectedName.equals(thread.getName()), "a thread named \"%s\"", expectedName); Assertions.assertThat(threads) .haveExactly(1, customRequestTaskThread); }
private void setupFakeProtocolListener() { processor = TopicProcessor.<ByteBuf>builder().autoCancel(false).build(); workProcessor = WorkQueueProcessor.<ByteBuf>builder().autoCancel(false).build(); Flux<ByteBuf> bufferStream = Flux.from(processor)
/**
 * Creates a fresh <em>shared</em> {@link TopicProcessor} with the given backlog
 * size, a blocking wait strategy and auto-cancellation.
 * <p>
 * A shared processor accepts concurrent {@code onNext} calls, making it suitable
 * for multi-threaded publishers that fan in data. Worker threads come from an
 * implicitly created cached executor whose threads carry the supplied name.
 *
 * @param name the name assigned to the threads of the implicit cached executor
 * @param bufferSize the backlog size used to absorb slow subscribers
 * @param <E> the type of processed signals
 * @return a fresh shared processor
 */
public static <E> TopicProcessor<E> share(String name, int bufferSize) {
	final Builder<E> builder = TopicProcessor.<E>builder();
	return builder.share(true)
	              .name(name)
	              .bufferSize(bufferSize)
	              .build();
}
/**
 * Creates a fresh (non-shared) {@link TopicProcessor} with the given backlog
 * size, a blocking wait strategy and auto-cancellation.
 * <p>
 * Worker threads come from an implicitly created cached executor whose threads
 * carry the supplied name.
 *
 * @param name the name assigned to the threads of the implicit cached executor
 * @param bufferSize the backlog size used to absorb slow subscribers
 * @param <E> the type of processed signals
 * @return the fresh TopicProcessor instance
 */
public static <E> TopicProcessor<E> create(String name, int bufferSize) {
	final Builder<E> builder = TopicProcessor.<E>builder();
	return builder.name(name)
	              .bufferSize(bufferSize)
	              .build();
}
/**
 * Entry point for fluent configuration: returns a {@link Builder} pre-loaded
 * with the default {@link TopicProcessor} properties.
 *
 * @param <E> the type of signals the eventual processor will handle
 * @return a new TopicProcessor builder
 */
public static <E> Builder<E> builder() {
	final Builder<E> freshBuilder = new Builder<>();
	return freshBuilder;
}
/**
 * Creates a fresh {@link TopicProcessor} with all defaults: a backlog of
 * {@link Queues#SMALL_BUFFER_SIZE}, a blocking wait strategy, auto-cancellation,
 * and an implicitly created cached thread pool.
 *
 * @param <E> the type of processed signals
 * @return a fresh processor
 */
public static <E> TopicProcessor<E> create() {
	final Builder<E> defaults = TopicProcessor.builder();
	return defaults.build();
}
/**
 * Monte-Carlo pi approximation flowing through a "diamond" topology: one point
 * source (teed through a TopicProcessor) feeds two filtered branches
 * (inner/outer samples) that are merged back and scanned into a running
 * simulation state. Ignored: interactive test that blocks on System.in.
 */
@Test
@Ignore
public void testDiamond() throws InterruptedException, IOException {
	Flux<Point> points = Flux.<Double, Random>generate(Random::new, (r, sub) -> {
		sub.next(r.nextDouble());
		return r;
	}).log("points")
	  .buffer(2)
	  .map(pairs -> new Point(pairs.get(0), pairs.get(1)))
	  // tee: multicast the point stream to both branches below
	  .subscribeWith(TopicProcessor.<Point>builder().name("tee").bufferSize(32).build());

	Flux<InnerSample> innerSamples = points.log("inner-1")
	                                       .filter(Point::isInner)
	                                       .map(InnerSample::new)
	                                       .log("inner-2");

	Flux<OuterSample> outerSamples = points.log("outer-1")
	                                       .filter(p -> !p.isInner())
	                                       .map(OuterSample::new)
	                                       .log("outer-2");

	Flux.merge(innerSamples, outerSamples)
	    .publishOn(asyncGroup)
	    // fixed: use uppercase 'L' long-literal suffix (lowercase 'l' reads as digit 1)
	    .scan(new SimulationState(0L, 0L), SimulationState::withNextSample)
	    .log("result")
	    .map(s -> System.out.printf("After %8d samples π is approximated as %.5f",
			    s.totalSamples, s.pi()))
	    .take(10000)
	    .subscribe();

	System.in.read();
}
@Override
public Processor<Long, Long> createIdentityProcessor(int bufferSize) {
	// side stream zipped in below; combinator keeps only the Long side, so the
	// overall pipeline remains an identity over Long values
	Flux<String> otherStream = Flux.just("test", "test2", "test3");
	// System.out.println("Providing new downstream");

	// upstream fan-out stage backed by a work queue
	FluxProcessor<Long, Long> p =
			WorkQueueProcessor.<Long>builder().name("fluxion-raw-fork").bufferSize(bufferSize).build();

	// counters reset per created processor; incremented as elements pass through
	cumulated.set(0);
	cumulatedJoin.set(0);
	BiFunction<Long, String, Long> combinator = (t1, t2) -> t1;

	// wrap: 'p' is the subscriber side, the transformed pipeline is the publisher side.
	// The double negation (map -i, filter <= 0, map -i) is an identity round-trip that
	// exercises operator fusion without altering values.
	return FluxProcessor.wrap(p,
			p.groupBy(k -> k % 2 == 0)
			 .flatMap(stream -> stream.scan((prev, next) -> next)
			                          .map(integer -> -integer)
			                          .filter(integer -> integer <= 0)
			                          .map(integer -> -integer)
			                          .bufferTimeout(1024, Duration.ofMillis(50))
			                          .flatMap(Flux::fromIterable)
			                          .doOnNext(array -> cumulated.getAndIncrement())
			                          .flatMap(i -> Flux.zip(Flux.just(i), otherStream, combinator)))
			 .doOnNext(array -> cumulatedJoin.getAndIncrement())
			 // fan back in through a topic processor before handing to subscribers
			 .subscribeWith(TopicProcessor.<Long>builder().name("fluxion-raw-join").bufferSize(bufferSize).build())
			 .doOnError(Throwable::printStackTrace));
}
// Test fixture: a named TopicProcessor with a 1024-slot ring buffer.
TopicProcessor<String> ring = TopicProcessor.<String>builder().name("test").bufferSize(1024).build();
@Test public void testCustomRequestTaskThreadName() { String expectedName = "topicProcessorRequestTaskCreate"; //NOTE: the below single executor should not be used usually as requestTask assumes it immediately gets executed ExecutorService customTaskExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, expectedName)); TopicProcessor<Object> processor = TopicProcessor.builder() .executor(Executors.newCachedThreadPool()) .requestTaskExecutor(customTaskExecutor) .bufferSize(8) .waitStrategy(WaitStrategy.liteBlocking()) .autoCancel(true) .build(); processor.requestTask(Operators.cancelledSubscription()); Thread[] threads = new Thread[Thread.activeCount()]; Thread.enumerate(threads); //cleanup to avoid visibility in other tests customTaskExecutor.shutdownNow(); processor.forceShutdown(); Condition<Thread> customRequestTaskThread = new Condition<>( thread -> thread != null && expectedName.equals(thread.getName()), "a thread named \"%s\"", expectedName); Assertions.assertThat(threads) .haveExactly(1, customRequestTaskThread); }
/**
 * 3000 concurrent subscribers on a shared, bufferSize-1 processor must all
 * observe the single published element (replayed to late subscribers via
 * {@code cache(1)}).
 */
@Test(timeout = 5_000)
public void testBufferSize1Shared() throws Exception {
	int simultaneousSubscribers = 3000;
	CountDownLatch latch = new CountDownLatch(simultaneousSubscribers);

	TopicProcessor<String> topic = TopicProcessor.<String>builder()
	                                             .name("share-name")
	                                             .bufferSize(1)
	                                             .autoCancel(true)
	                                             .share(true)
	                                             .build();
	Scheduler scheduler = Schedulers.single();
	FluxSink<String> sink = topic.sink();
	Flux<String> replayed = topic.filter(Objects::nonNull)
	                             .publishOn(scheduler)
	                             .cache(1);

	for (int sub = 0; sub < simultaneousSubscribers; sub++) {
		replayed.subscribe(value -> latch.countDown());
	}
	sink.next("data");

	assertThat(latch.await(4, TimeUnit.SECONDS))
			.overridingErrorMessage("Data not received")
			.isTrue();
}
// Verifies that onRequest() on a shared processor's sink yields a SerializedSink,
// and that next() called from within the request callback also returns one.
@Test
public void serializedSinkMultiProducerWithOnRequest() throws Exception {
	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
			.share(true)
			.build();
	FluxSink<Integer> sink = processor.sink();
	// the request callback emits two elements; next() must keep returning a serialized sink
	FluxSink<Integer> serializedSink = sink.onRequest(n -> {
		FluxSink<Integer> s = sink.next(1);
		assertThat(s).isInstanceOf(SerializedSink.class);
		s.next(2);
	});
	assertThat(serializedSink).isInstanceOf(SerializedSink.class);

	// requesting triggers the callback above, so both elements must arrive in order
	StepVerifier.create(processor)
	            .thenRequest(5)
	            .expectNext(1, 2)
	            .thenCancel()
	            .verify();
}
}
/**
 * Same scenario as the shared variant but with a non-shared processor: 3000
 * concurrent subscribers on a bufferSize-1 processor must all observe the
 * single published element (replayed via {@code cache(1)}).
 */
@Test(timeout = 5_000)
public void testBufferSize1Created() throws Exception {
	int simultaneousSubscribers = 3000;
	CountDownLatch latch = new CountDownLatch(simultaneousSubscribers);

	TopicProcessor<String> topic = TopicProcessor.<String>builder()
	                                             .name("share-name")
	                                             .bufferSize(1)
	                                             .autoCancel(true)
	                                             .build();
	Scheduler scheduler = Schedulers.single();
	FluxSink<String> sink = topic.sink();
	Flux<String> replayed = topic.filter(Objects::nonNull)
	                             .publishOn(scheduler)
	                             .cache(1);

	for (int sub = 0; sub < simultaneousSubscribers; sub++) {
		replayed.subscribe(value -> latch.countDown());
	}
	sink.next("data");

	assertThat(latch.await(4, TimeUnit.SECONDS))
			.overridingErrorMessage("Data not received")
			.isTrue();
}
/**
 * Returns a new <em>shared</em> {@link TopicProcessor} configured with the
 * supplied backlog size, a blocking wait strategy and auto-cancellation.
 * <p>
 * Shared processors tolerate concurrent {@code onNext} calls, which suits
 * multi-threaded publishers fanning in data. An implicitly created cached
 * executor names its threads after {@code name}.
 *
 * @param name the thread name for the implicitly created cached executor
 * @param bufferSize the backlog size used to absorb slow subscribers
 * @param <E> the type of processed signals
 * @return a fresh processor
 */
public static <E> TopicProcessor<E> share(String name, int bufferSize) {
	final Builder<E> shared = TopicProcessor.<E>builder();
	return shared.share(true)
	             .name(name)
	             .bufferSize(bufferSize)
	             .build();
}
/**
 * Pipes 100 mapped elements from a Flux into a TopicProcessor backed by a
 * fixed 2-thread pool and verifies the downstream subscriber sees them all.
 */
@Test
public void chainedTopicProcessor() throws Exception {
	ExecutorService pool = Executors.newFixedThreadPool(2);
	try {
		int elems = 100;
		CountDownLatch latch = new CountDownLatch(elems);

		TopicProcessor<String> chained = TopicProcessor.<String>builder()
		                                               .executor(pool)
		                                               .bufferSize(16)
		                                               .build();
		chained.subscribe(sub("spec1", latch));

		Flux.range(0, elems)
		    .map(s -> "hello " + s)
		    .subscribe(chained);

		assertTrue(latch.await(5000, TimeUnit.MILLISECONDS));
	}
	finally {
		pool.shutdown();
	}
}
@Test public void testDefaultRequestTaskThreadName() { String mainName = "topicProcessorRequestTask"; String expectedName = mainName + "[request-task]"; TopicProcessor<Object> processor = TopicProcessor.builder().name(mainName).bufferSize(8).build(); processor.requestTask(Operators.cancelledSubscription()); Thread[] threads = new Thread[Thread.activeCount()]; Thread.enumerate(threads); //cleanup to avoid visibility in other tests processor.forceShutdown(); Condition<Thread> defaultRequestTaskThread = new Condition<>( thread -> thread != null && expectedName.equals(thread.getName()), "a thread named \"%s\"", expectedName); Assertions.assertThat(threads) .haveExactly(1, defaultRequestTaskThread); }
/**
 * Builder round-trip for a shared processor: every configurable property is
 * overridden, then asserted to land on the built instance unchanged.
 */
@Test
public void shareOverrideAll() {
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	ExecutorService requestExecutor = Executors.newSingleThreadExecutor();
	WaitStrategy strategy = WaitStrategy.busySpin();
	int backlog = 1024;
	boolean cancelWhenNoSubscribers = false;

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
	                                                  .share(true)
	                                                  .executor(mainExecutor)
	                                                  .requestTaskExecutor(requestExecutor)
	                                                  .bufferSize(backlog)
	                                                  .waitStrategy(strategy)
	                                                  .autoCancel(cancelWhenNoSubscribers)
	                                                  .build();

	// name is null because an explicit executor was supplied
	assertProcessor(processor, true, null, backlog, strategy, cancelWhenNoSubscribers,
			mainExecutor, requestExecutor);
}
/**
 * Builder round-trip for a non-shared processor: every configurable property
 * is overridden, then asserted to land on the built instance unchanged.
 */
@Test
public void createOverrideAll() {
	ExecutorService mainExecutor = Executors.newSingleThreadExecutor();
	ExecutorService requestExecutor = Executors.newSingleThreadExecutor();
	WaitStrategy strategy = WaitStrategy.busySpin();
	int backlog = 1024;
	boolean cancelWhenNoSubscribers = false;

	TopicProcessor<Integer> processor = TopicProcessor.<Integer>builder()
	                                                  .executor(mainExecutor)
	                                                  .requestTaskExecutor(requestExecutor)
	                                                  .bufferSize(backlog)
	                                                  .waitStrategy(strategy)
	                                                  .autoCancel(cancelWhenNoSubscribers)
	                                                  .build();

	// name is null because an explicit executor was supplied
	assertProcessor(processor, false, null, backlog, strategy, cancelWhenNoSubscribers,
			mainExecutor, requestExecutor);
}