@Override
public void start() throws Exception {
    // Kafka setup for the example
    File dataDir = Testing.Files.createTestingDirectory("cluster");
    dataDir.deleteOnExit();
    kafkaCluster = new KafkaCluster()
            .usingDirectory(dataDir)
            .withPorts(2181, 9092)
            .addBrokers(1)
            .deleteDataPriorToStartup(true)
            .startup();

    // Deploy the dashboard
    JsonObject consumerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getConsumerProperties("the_group", "the_client", OffsetResetStrategy.LATEST));
    vertx.deployVerticle(DashboardVerticle.class.getName(),
            new DeploymentOptions().setConfig(consumerConfig));

    // Deploy the metrics collector: 3 instances
    JsonObject producerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getProducerProperties("the_producer"));
    vertx.deployVerticle(MetricsVerticle.class.getName(),
            new DeploymentOptions().setConfig(producerConfig).setInstances(3));
}
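// Hypothetical sketch of a metrics-collector verticle such as the one deployed above.
// The example's actual MetricsVerticle is not shown; this sketch assumes the Vert.x
// Kafka client API (KafkaProducer, KafkaProducerRecord), and the topic name
// "the_topic" is an assumption. It reuses the producer config passed in via
// DeploymentOptions.setConfig(...).
import io.vertx.core.AbstractVerticle;
import io.vertx.kafka.client.producer.KafkaProducer;
import io.vertx.kafka.client.producer.KafkaProducerRecord;

import java.util.HashMap;
import java.util.Map;

public class MetricsVerticle extends AbstractVerticle {

    private KafkaProducer<String, String> producer;

    @Override
    public void start() {
        // Rebuild the producer config handed over through the deployment options
        Map<String, String> producerConfig = new HashMap<>();
        config().forEach(entry -> producerConfig.put(entry.getKey(), entry.getValue().toString()));
        producer = KafkaProducer.create(vertx, producerConfig);

        // Publish a simple JVM metric once per second
        vertx.setPeriodic(1000, id -> {
            long usedMemory = Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory();
            producer.write(KafkaProducerRecord.create("the_topic", "used_memory", Long.toString(usedMemory)));
        });
    }
}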
@Test
@SkipLongRunning
public void shouldStartClusterWithOneBrokerAndLeaveData() throws Exception {
    cluster.deleteDataUponShutdown(false).addBrokers(1).startup();
    cluster.onEachDirectory(this::assertValidDataDirectory);
    cluster.shutdown();
    cluster.onEachDirectory(this::assertValidDataDirectory);
}
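// Hypothetical sketch of the assertValidDataDirectory helper referenced above; the
// actual implementation is not shown in this snippet. A minimal version only needs
// to verify that each broker/ZooKeeper data directory survived the shutdown intact.
protected void assertValidDataDirectory(File dir) {
    assertThat(dir.exists()).isTrue();
    assertThat(dir.isDirectory()).isTrue();
    assertThat(dir.canRead()).isTrue();
}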
@Test
public void shouldIgnoreUnparseableMessages() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);

    // The original snippet is truncated here: the withDefault(...) calls belong to a
    // producer-configuration builder (assumed to be Debezium's Configuration.create())
    // used to write raw, unparseable records into the history topic.
    Configuration intruderConfig = Configuration.create()
            .withDefault(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.brokerList())
            .withDefault(ProducerConfig.CLIENT_ID_CONFIG, "intruder")
            .withDefault(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class)
            .build();
    // ... (remainder of the test is truncated in the original snippet)
}
@Before
public void beforeEach() {
    dataDir = Testing.Files.createTestingDirectory("cluster");
    cluster = new KafkaCluster().usingDirectory(dataDir)
                                .deleteDataPriorToStartup(true)
                                .deleteDataUponShutdown(true);
}
@Test
public void testPartitionsFor(TestContext ctx) throws Exception {
    String topicName = "testPartitionsFor";
    String consumerId = topicName;
    kafkaCluster.createTopic(topicName, 2, 1);
    Properties config = kafkaCluster.useTo().getConsumerProperties(consumerId, consumerId, OffsetResetStrategy.EARLIEST);
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    Context context = vertx.getOrCreateContext();
    consumer = createConsumer(context, config);

    Async done = ctx.async();
    consumer.partitionsFor(topicName, ar -> {
        if (ar.succeeded()) {
            List<PartitionInfo> partitionInfo = ar.result();
            ctx.assertEquals(2, partitionInfo.size());
        } else {
            ctx.fail(ar.cause());
        }
        done.complete();
    });
}
private Properties setupConsumeWithHeaders(TestContext ctx, int numMessages, String topicName) {
    Async batch = ctx.async();
    AtomicInteger index = new AtomicInteger();
    kafkaCluster.useTo().produceStrings(numMessages, batch::complete,
            () -> new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.get(),
                    Collections.singletonList(new RecordHeader("header_key" + index.get(),
                            ("header_value" + index.getAndIncrement()).getBytes()))));
    batch.awaitSuccess(20000);
    Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    return config;
}
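// Hypothetical companion to setupConsumeWithHeaders(...): a test body that consumes
// the produced records and checks their headers. The real caller is not shown; this
// sketch assumes the Vert.x Kafka client's KafkaConsumer API, and the assertion
// mirrors the one-header-per-record pattern used by the producer above.
@Test
public void testConsumeWithHeaders(TestContext ctx) {
    int numMessages = 10;
    String topicName = "testConsumeWithHeaders";
    Properties config = setupConsumeWithHeaders(ctx, numMessages, topicName);

    Async done = ctx.async(numMessages);
    KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);
    consumer.handler(record -> {
        // Every record was produced with exactly one header: "header_key<i>" -> "header_value<i>"
        ctx.assertEquals(1, record.headers().size());
        done.countDown();
    });
    consumer.subscribe(Collections.singleton(topicName));
}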
@Before
public void beforeEach() throws Exception {
    source = Collect.hashMapOf("server", "my-server");
    setLogPosition(0);
    topicName = "schema-changes-topic";

    File dataDir = Testing.Files.createTestingDirectory("history_cluster");
    Testing.Files.delete(dataDir);

    // Configure the extra properties to disable automatic topic creation
    kafka = new KafkaCluster().usingDirectory(dataDir)
                              .deleteDataPriorToStartup(true)
                              .deleteDataUponShutdown(true)
                              .addBrokers(1)
                              .withKafkaConfiguration(Collect.propertiesOf("auto.create.topics.enable", "false"))
                              .startup();
    history = new KafkaDatabaseHistory();
}
@Test
@SkipLongRunning
public void shouldStartClusterAndAllowProducersAndConsumersToUseIt() throws Exception {
    Testing.Debug.enable();
    final String topicName = "topicA";
    final CountDownLatch completion = new CountDownLatch(2);
    final int numMessages = 100;
    final AtomicLong messagesRead = new AtomicLong(0);

    // Start a cluster and create a topic ...
    cluster.addBrokers(1).startup();
    cluster.createTopics(topicName);

    // Consume messages asynchronously ...
    Stopwatch sw = Stopwatch.reusable().start();
    cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
        messagesRead.incrementAndGet();
        return true;
    });

    // Produce some messages asynchronously ...
    cluster.useTo().produceIntegers(topicName, numMessages, 1, completion::countDown);

    // Wait for both to complete ...
    if (completion.await(10, TimeUnit.SECONDS)) {
        sw.stop();
        Testing.debug("Both consumer and producer completed normally in " + sw.durations());
    } else {
        Testing.debug("Consumer and/or producer did not complete normally");
    }

    assertThat(messagesRead.get()).isEqualTo(numMessages);
}
@BeforeClass
public static void setUp() throws IOException {
    kafkaCluster = kafkaCluster().deleteDataPriorToStartup(true).addBrokers(1).startup();
}
protected static KafkaCluster kafkaCluster() {
    if (kafkaCluster != null) {
        throw new IllegalStateException();
    }
    dataDir = Testing.Files.createTestingDirectory("cluster");
    kafkaCluster = new KafkaCluster().usingDirectory(dataDir).withPorts(2181, 9092);
    return kafkaCluster;
}
@Test
public void shouldSetServerConfigProperty() throws Exception {
    Properties config = new Properties();
    config.put("foo", "bar");
    KafkaCluster kafkaCluster = new KafkaCluster().withKafkaConfiguration(config).addBrokers(1);

    // Use reflection to inspect the per-broker configuration derived from the cluster config
    Field kafkaServersField = KafkaCluster.class.getDeclaredField("kafkaServers");
    kafkaServersField.setAccessible(true);
    ConcurrentMap<Integer, KafkaServer> kafkaServers =
            (ConcurrentMap<Integer, KafkaServer>) kafkaServersField.get(kafkaCluster);
    Properties serverConfig = kafkaServers.values().iterator().next().config();
    assertThat(serverConfig.get("foo")).isEqualTo("bar");
}
@Override
public void stop() throws Exception {
    kafkaCluster.shutdown();
}
@Test
public void shouldSetClusterConfigProperty() throws Exception {
    Properties config = new Properties();
    config.put("foo", "bar");
    KafkaCluster kafkaCluster = new KafkaCluster().withKafkaConfiguration(config);

    Field kafkaConfigField = KafkaCluster.class.getDeclaredField("kafkaConfig");
    kafkaConfigField.setAccessible(true);
    Properties kafkaConfig = (Properties) kafkaConfigField.get(kafkaCluster);
    assertThat(kafkaConfig).hasSize(1);
}
@Test
public void shouldStartWithEmptyTopicAndStoreDataAndRecoverAllState() throws Exception {
    // Create the empty topic ...
    kafka.createTopic(topicName, 1, 1);
    testHistoryTopicContent(false);
}
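// Hypothetical sketch of the testHistoryTopicContent(...) helper invoked above; the
// real helper is not shown in this snippet. The assumed shape: point the
// KafkaDatabaseHistory at the test cluster, record DDL, then recover and compare.
private void testHistoryTopicContent(boolean skipUnparseableMessages) {
    // Configure the history to talk to the test cluster's broker and topic
    Configuration config = Configuration.create()
            .with(KafkaDatabaseHistory.BOOTSTRAP_SERVERS, kafka.brokerList())
            .with(KafkaDatabaseHistory.TOPIC, topicName)
            // skipUnparseableMessages would toggle the history's skip-unparseable-DDL setting
            .build();
    history.configure(config, null);
    history.start();
    // ... store DDL statements, recover them into a fresh history instance,
    // and assert the recovered state matches what was written.
}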
cluster.addBrokers(1).startup();
cluster.createTopics(topicName);
cluster.useTo().consumeIntegers(topicName, numMessages, 10, TimeUnit.SECONDS, completion::countDown, (key, value) -> {
    messagesRead.incrementAndGet();
    return true;
});
cluster.useTo().produce("manual", new StringSerializer(), new IntegerSerializer(), producer -> {
    producer.write(topicName, "key1", 1);
    producer.write(topicName, "key2", 2);
});
@After
public void afterEach() {
    cluster.shutdown();
    Testing.Files.delete(dataDir);
}