@Before
public void setUp() {
  objectMapper = JsonMapper.INSTANCE.mapper;
  ehCapture = newCapture();
  drainCapture = newCapture();
  limitHandlerCapture = newCapture();

  // Schema with a single optional string column, handed back for every
  // getResultSchema() call recorded below.
  final Schema rowSchema = SchemaBuilder.struct()
      .field("col1", Schema.OPTIONAL_STRING_SCHEMA)
      .build();

  // Nice mock: unexpected calls return defaults instead of failing.
  // NOTE: the record-phase order below is intentional — each expectLastCall()
  // binds to the void call immediately preceding it.
  final KafkaStreams streamsMock = niceMock(KafkaStreams.class);
  streamsMock.setStateListener(anyObject());
  expectLastCall();
  expect(streamsMock.state()).andReturn(State.RUNNING);

  expect(queryMetadata.getRowQueue()).andReturn(rowQueue).anyTimes();
  expect(queryMetadata.getResultSchema()).andReturn(rowSchema).anyTimes();
  queryMetadata.setLimitHandler(capture(limitHandlerCapture));
  expectLastCall().once();
  queryMetadata.setUncaughtExceptionHandler(capture(ehCapture));
  expectLastCall();

  // Only the streams mock is switched to replay here; queryMetadata is
  // presumably replayed by the individual tests — confirm against the class.
  replay(streamsMock);
}
/**
 * Returns a customizer that installs the shared {@code STATE_LISTENER}
 * on whatever {@code KafkaStreams} instance it is handed.
 */
private KafkaStreamsCustomizer customizer() {
  return streams -> streams.setStateListener(STATE_LISTENER);
}
// Build the streams instance from the supplied topology/config/client supplier,
// then register all callbacks up front (listener registration must happen
// before start() per the KafkaStreams API).
this.kafkaStreams = new KafkaStreams(topology, this.streamsConfig, this.clientSupplier); // Observes lifecycle transitions (e.g. REBALANCING -> RUNNING).
this.kafkaStreams.setStateListener(this.stateListener); // Receives restore-progress callbacks for all state stores.
this.kafkaStreams.setGlobalStateRestoreListener(this.stateRestoreListener); // Invoked when a stream thread dies with an unhandled exception.
this.kafkaStreams.setUncaughtExceptionHandler(this.uncaughtExceptionHandler);
/**
 * Starting the REST proxy with an unresolvable host must fail.
 * The streams job itself starts normally; the failure comes from
 * {@code startRestProxy} attempting to bind {@code someInvalidHost}.
 */
@Test(expected = Exception.class)
public void shouldThrowExceptionForInvalidHost() throws Exception {
  final int port = randomFreeLocalPort();
  final String host = "someInvalidHost";
  kafkaStreams = WordCountInteractiveQueriesExample.createStreams(
      createStreamConfig(CLUSTER.bootstrapServers(), port, "one", host));

  // Released once the instance transitions REBALANCING -> RUNNING.
  final CountDownLatch startupLatch = new CountDownLatch(1);
  kafkaStreams.setStateListener((newState, oldState) -> {
    if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
      startupLatch.countDown();
    }
  });
  kafkaStreams.start();

  // Fix: the latch was created and counted down but never awaited, so the
  // proxy could previously be started before streams reached RUNNING.
  // Bounded, best-effort wait — if it times out we proceed anyway so the
  // expected failure below still comes from the invalid host.
  startupLatch.await(30, java.util.concurrent.TimeUnit.SECONDS);

  // Expected to throw: the invalid host cannot be bound by the REST proxy.
  proxy = WordCountInteractiveQueriesExample.startRestProxy(kafkaStreams, port, host);
}
// NOTE(review): fragment is truncated here — the closing braces of the lambda
// are outside this view. Counts down startupLatch on REBALANCING -> RUNNING.
kafkaStreams.setStateListener((newState, oldState) -> { if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) { startupLatch.countDown();
// NOTE(review): duplicate of the truncated fragment above — closing braces are
// outside this view. Counts down startupLatch on REBALANCING -> RUNNING.
kafkaStreams.setStateListener((newState, oldState) -> { if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) { startupLatch.countDown();
@Test public void shouldThrowBindExceptionForUnavailablePort() throws Exception { final int port = randomFreeLocalPort(); final String host = "localhost"; kafkaStreams = WordCountInteractiveQueriesExample.createStreams( createStreamConfig(CLUSTER.bootstrapServers(), port, "one", host)); final CountDownLatch startupLatch = new CountDownLatch(1); kafkaStreams.setStateListener((newState, oldState) -> { if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) { startupLatch.countDown(); } }); kafkaStreams.start(); proxy = WordCountInteractiveQueriesExample.startRestProxy(kafkaStreams, port, host); expectedEx.expect(Exception.class); expectedEx.expectMessage("java.net.BindException: Address already in use"); // Binding to same port again will raise BindException. WordCountInteractiveQueriesExample.startRestProxy(kafkaStreams, port, host); }
public ManagedKStreams(Properties streamProperties, TopicsConfig topicsConfig, KStreamsProcessorListener testListener) { this.streamProperties = streamProperties; this.topicsConfig = topicsConfig; stateStoreName = topicsConfig.getStateStoreName(); KStreamBuilder kStreamBuilder= new KStreamBuilder(); kStreamBuilder.globalTable(topicsConfig.getProducerTopic(), stateStoreName); streams = new KafkaStreams(kStreamBuilder, streamProperties); // [ #132 ] - Improve build times by notifying test listener that we are running streams.setStateListener((newState, oldState) -> { if (!isRunning && newState == KafkaStreams.State.RUNNING) { isRunning = true; if( testListener != null) { testListener.stateStoreInitialized(); } } }); streams.setUncaughtExceptionHandler((t, e) -> log.error("KafkaStreams job failed", e)); }