protected ConsumerConnector createConsumerConnector() {
  return Consumer.createJavaConsumerConnector(this.consumerConfig);
}
@Override
public boolean hasMore() {
  return nextIterator.hasNext() || iter.hasNext();
}
public ConsumerConfig getKafkaConsumerConfig() {
  Properties props = new Properties();
  // Start from the defaults, then let consumer-specific properties override them.
  for (String key : defaultProps.keySet()) {
    props.put(key, defaultProps.get(key));
  }
  for (String key : _kafkaConsumerProperties.keySet()) {
    props.put(key, _kafkaConsumerProperties.get(key));
  }
  props.put("group.id", _groupId);
  props.put("zookeeper.connect", _zkBrokerUrl);
  return new ConsumerConfig(props);
}
public SimpleKafkaConsumer(Properties props, KafkaCheckpoint checkpoint) {
  Config config = ConfigFactory.parseProperties(props);
  topic = config.getString("topic");
  String zkConnect = config.getString("zookeeper.connect");
  schemaRegistry = KafkaSchemaRegistryFactory.getSchemaRegistry(props);
  deserializer = new LiAvroDeserializer(schemaRegistry);
  /* TODO: Make Confluent schema registry integration configurable
   * HashMap<String, String> avroSerDeConfig = new HashMap<>();
   * avroSerDeConfig.put("schema.registry.url", "http://localhost:8081");
   * deserializer = new io.confluent.kafka.serializers.KafkaAvroDeserializer();
   * deserializer.configure(avroSerDeConfig, false);
   */

  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnect);
  consumeProps.put("group.id", "gobblin-tool-" + System.nanoTime());
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("auto.offset.reset", "smallest");
  consumeProps.put("auto.commit.enable", "false");
  //consumeProps.put("consumer.timeout.ms", "10000");

  consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
      consumer.createMessageStreams(ImmutableMap.of(topic, 1));
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
  stream = streams.get(0);
  iterator = stream.iterator();
}
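For context, a caller might drain and decode one message from this consumer along the following lines. This is a minimal sketch against the fields initialized above; nextRecord() is a hypothetical helper, not part of the original class.

// Hypothetical helper, sketched against the fields initialized above.
// With consumer.timeout.ms left commented out, hasNext()/next() block until a
// message arrives rather than throwing ConsumerTimeoutException.
public GenericRecord nextRecord() {
  MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
  return deserializer.deserialize(topic, messageAndMetadata.message());
}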
@Override
public void init(SecorConfig config) throws UnknownHostException {
  this.mConfig = config;
  mConsumerConnector = Consumer.createJavaConsumerConnector(createConsumerConfig());

  if (!mConfig.getKafkaTopicBlacklist().isEmpty() && !mConfig.getKafkaTopicFilter().isEmpty()) {
    throw new RuntimeException("Topic filter and blacklist cannot both be specified.");
  }
  TopicFilter topicFilter = !mConfig.getKafkaTopicBlacklist().isEmpty()
      ? new Blacklist(mConfig.getKafkaTopicBlacklist())
      : new Whitelist(mConfig.getKafkaTopicFilter());
  LOG.debug("Use TopicFilter {}({})", topicFilter.getClass(), topicFilter);

  List<KafkaStream<byte[], byte[]>> streams =
      mConsumerConnector.createMessageStreamsByFilter(topicFilter);
  KafkaStream<byte[], byte[]> stream = streams.get(0);
  mIterator = stream.iterator();
  mKafkaMessageTimestampFactory = new KafkaMessageTimestampFactory(mConfig.getKafkaMessageTimestampClass());
}
@Test
public void test() throws IOException {
  KafkaPusher pusher = new KafkaPusher("localhost:" + kafkaPort, TOPIC);
  String msg1 = "msg1";
  String msg2 = "msg2";
  pusher.pushMessages(Lists.newArrayList(msg1.getBytes(), msg2.getBytes()));
  // Give the broker a moment to make the messages visible to the consumer.
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ex) {
    Thread.currentThread().interrupt();
  }
  // Use Assert.assertTrue rather than the assert keyword, which is a no-op
  // unless the JVM runs with -ea. Expected value goes first in assertEquals.
  Assert.assertTrue(iterator.hasNext());
  Assert.assertEquals(msg1, new String(iterator.next().message()));
  Assert.assertTrue(iterator.hasNext());
  Assert.assertEquals(msg2, new String(iterator.next().message()));
  pusher.close();
}
@Nullable
@Override
public InputRow nextRow() {
  try {
    if (!nextIterator.hasNext()) {
      final byte[] message = iter.next().message();
      if (message == null) {
        return null;
      }
      nextIterator = theParser.parseBatch(ByteBuffer.wrap(message)).iterator();
    }
    return nextIterator.next();
  } catch (InvalidMessageException e) {
    /* If the CRC error originates in the wire transfer, skipping is not the best
       way to handle it. It would probably be better to shut down the firehose
       without committing and start it again. */
    log.error(e, "Message failed its checksum and it is corrupt, will skip it");
    return null;
  }
}
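Taken together with the hasMore() check earlier, a caller's read loop might look like the following sketch. The firehose variable and the process() handler are assumptions for illustration, not part of the original code.

// Illustrative read loop over the hasMore()/nextRow() pair above;
// a null row signals a message that was skipped as corrupt or empty.
while (firehose.hasMore()) {
  InputRow row = firehose.nextRow();
  if (row == null) {
    continue; // corrupt message was skipped
  }
  process(row); // hypothetical downstream handler
}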
@SuppressWarnings("unchecked") private static KafkaStream<byte[], byte[]> createMockStream(BlockingQueue<FetchedDataChunk> queue) { KafkaStream<byte[], byte[]> stream = (KafkaStream<byte[], byte[]>) Mockito.mock(KafkaStream.class); ConsumerIterator<byte[], byte[]> it = new ConsumerIterator<>(queue, -1, new DefaultDecoder(new VerifiableProperties()), new DefaultDecoder(new VerifiableProperties()), "clientId"); Mockito.when(stream.iterator()).thenReturn(it); return stream; }
/**
 * @return Tags to be applied to the {@link MetricContext} in this object. Called once in
 *         {@link #startUp()}. Subclasses should override this method to add additional tags.
 */
protected List<Tag<?>> getTagsForMetrics() {
  List<Tag<?>> tags = Lists.newArrayList();
  tags.add(new Tag<>(RuntimeMetrics.TOPIC, this.topic));
  tags.add(new Tag<>(RuntimeMetrics.GROUP_ID, this.consumerConfig.groupId()));
  return tags;
}
public void pushToStream(String message) {
  // Round-robin across the mock streams.
  int streamNo = (int) this.nextStream.incrementAndGet() % this.queues.size();
  AtomicLong offset = this.offsets.get(streamNo);
  BlockingQueue<FetchedDataChunk> queue = this.queues.get(streamNo);

  AtomicLong thisOffset = new AtomicLong(offset.incrementAndGet());
  List<Message> seq = Lists.newArrayList();
  seq.add(new Message(message.getBytes(Charsets.UTF_8)));
  ByteBufferMessageSet messageSet =
      new ByteBufferMessageSet(NoCompressionCodec$.MODULE$, offset, JavaConversions.asScalaBuffer(seq));
  FetchedDataChunk chunk = new FetchedDataChunk(messageSet,
      new PartitionTopicInfo("topic", streamNo, queue, thisOffset, thisOffset, new AtomicInteger(1), "clientId"),
      thisOffset.get());
  queue.add(chunk);
}
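A sketch of how the mock-stream pieces above might be wired together in a test: one in-memory queue backs one mocked KafkaStream. The single-queue setup and the assumption that this instance owns that queue are illustrative, not from the original code.

// Illustrative wiring of createMockStream(...) and pushToStream(...).
BlockingQueue<FetchedDataChunk> queue = new LinkedBlockingQueue<>();
KafkaStream<byte[], byte[]> stream = createMockStream(queue);
ConsumerIterator<byte[], byte[]> it = stream.iterator();

pushToStream("hello"); // assumes this instance routes to the queue above
String received = new String(it.next().message(), Charsets.UTF_8);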
public void shutdown() {
  // Wake up every blocked iterator with the connector's shutdown sentinel chunk.
  for (BlockingQueue<FetchedDataChunk> queue : this.queues) {
    queue.add(ZookeeperConsumerConnector.shutdownCommand());
  }
}
KafkaConsumerSuite(String zkConnectString, String topic) {
  _topic = topic;
  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnectString);
  consumeProps.put("group.id", _topic + "-" + System.nanoTime());
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  // The leading underscore keeps this key from being recognized, effectively
  // leaving consumer.timeout.ms unset so that hasNext() blocks indefinitely.
  consumeProps.put("_consumer.timeout.ms", "10000");

  _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
      _consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
  _stream = streams.get(0);
  _iterator = _stream.iterator();
}
private void testRecordsWritten(int totalSuccessful, String topic) throws UnsupportedEncodingException {
  final ConsumerIterator<byte[], byte[]> iterator = kafkaTestHelper.getIteratorForTopic(topic);
  for (int i = 0; i < totalSuccessful; ++i) {
    String message = new String(iterator.next().message(), "UTF-8");
    log.debug(String.format("%d of %d: Message consumed: %s", (i + 1), totalSuccessful, message));
  }
}
protected ConsumerConfig createConsumerConfig(Config config) {
  Properties props = ConfigUtils.configToProperties(config);
  // Fall back to the default group id if the caller did not set one.
  if (!props.containsKey(GROUP_ID_KEY)) {
    props.setProperty(GROUP_ID_KEY, DEFAULT_GROUP_ID);
  }
  return new ConsumerConfig(props);
}
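For illustration, a caller might build the Config from raw properties as below, relying on the group.id fallback above. The property values are assumptions.

// Hypothetical caller: group.id is absent, so DEFAULT_GROUP_ID is applied.
Properties raw = new Properties();
raw.setProperty("zookeeper.connect", "localhost:2181");
Config config = ConfigFactory.parseProperties(raw);
ConsumerConfig consumerConfig = createConsumerConfig(config);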
@Override
public boolean hasNext() {
  try {
    return mIterator.hasNext();
  } catch (ConsumerTimeoutException e) {
    throw new LegacyConsumerTimeoutException(e);
  }
}
@Override
public Message next() {
  MessageAndMetadata<byte[], byte[]> kafkaMessage;
  try {
    kafkaMessage = mIterator.next();
  } catch (ConsumerTimeoutException e) {
    throw new LegacyConsumerTimeoutException(e);
  }
  long timestamp = 0L;
  if (mConfig.useKafkaTimestamp()) {
    timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
  }
  return new Message(kafkaMessage.topic(), kafkaMessage.partition(), kafkaMessage.offset(),
      kafkaMessage.key(), kafkaMessage.message(), timestamp);
}
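A sketch of how the hasNext()/next() pair above might be driven, assuming a reader object exposing them and a handler for each message; both names are illustrative.

// Illustrative polling loop over the wrapped legacy consumer: a timeout,
// surfaced as LegacyConsumerTimeoutException, simply ends the current batch.
try {
  while (reader.hasNext()) {
    Message message = reader.next();
    handle(message); // hypothetical downstream handler
  }
} catch (LegacyConsumerTimeoutException e) {
  // No message arrived within consumer.timeout.ms; stop this batch.
}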
final ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
// The original fragment references an undeclared `stream`; presumably it is
// obtained from the connector as in the other snippets, e.g.:
final KafkaStream<byte[], byte[]> stream =
    connector.createMessageStreams(ImmutableMap.of(topic, 1)).get(topic).get(0);
final ConsumerIterator<byte[], byte[]> iter = stream.iterator();
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {
  startServer();
  this.topic = topic;

  AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());
  List<KafkaServer> servers = new ArrayList<>();
  servers.add(kafkaServer);
  TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnect);
  consumeProps.put("group.id", "testConsumer");
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("consumer.timeout.ms", "10000");

  consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(this.topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
  stream = streams.get(0);
  iterator = stream.iterator();
}
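A minimal test built on this base class might look like the following sketch, using the old producer API to write one message and the iterator initialized above to read it back. The broker port field and producer settings are assumptions, not part of the original.

// Illustrative round-trip test on top of KafkaTestBase.
Properties producerProps = new Properties();
producerProps.put("metadata.broker.list", "localhost:" + kafkaPort); // assumed port field
producerProps.put("serializer.class", "kafka.serializer.DefaultEncoder");
Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(producerProps));
producer.send(new KeyedMessage<byte[], byte[]>(topic, "hello".getBytes(Charsets.UTF_8)));
producer.close();

Assert.assertEquals("hello", new String(iterator.next().message(), Charsets.UTF_8));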