Refine search
/**
 * Builds the Kafka {@link ConsumerConfig} for this consumer.
 *
 * <p>Starts from {@code defaultProps}, overlays {@code _kafkaConsumerProperties}
 * (consumer-specific settings win on key collisions because they are applied
 * second), then forces the group id and ZooKeeper connect string from this
 * object's configured fields.
 *
 * @return a new {@link ConsumerConfig} assembled from defaults, overrides, and
 *         the configured group id / ZooKeeper broker URL
 */
public ConsumerConfig getKafkaConsumerConfig() {
    Properties props = new Properties();
    // putAll replaces the original hand-written key-by-key copy loops;
    // defaults first, then per-consumer overrides so the latter take precedence.
    props.putAll(defaultProps);
    props.putAll(_kafkaConsumerProperties);
    // These two always come from this object's own configuration.
    props.put("group.id", _groupId);
    props.put("zookeeper.connect", _zkBrokerUrl);
    return new ConsumerConfig(props);
}
/**
 * @return Tags to be applied to the {@link MetricContext} in this object. Called once in
 *     {@link #startUp()}. Subclasses should override this method to add additional tags.
 */
protected List<Tag<?>> getTagsForMetrics() {
    // Build each tag separately, then hand back a mutable list so overriding
    // subclasses can append their own tags.
    Tag<?> topicTag = new Tag<>(RuntimeMetrics.TOPIC, this.topic);
    Tag<?> groupIdTag = new Tag<>(RuntimeMetrics.GROUP_ID, this.consumerConfig.groupId());
    return Lists.newArrayList(topicTag, groupIdTag);
}
// NOTE(review): this snippet appears truncated by extraction -- the two dangling
// context.getProperty(...) expressions below look like the tail arguments of a log
// statement whose beginning was cut off, and the method's closing brace is missing.
// Recover the full method from the original file before relying on this fragment.
public void createConsumers(final ProcessContext context) {
    // Topic may contain NiFi expression language; evaluate before use.
    final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions().getValue();
    final Properties props = new Properties();
    props.setProperty("zookeeper.connect", context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue());
    props.setProperty("group.id", context.getProperty(GROUP_ID).evaluateAttributeExpressions().getValue());
    props.setProperty("client.id", context.getProperty(CLIENT_NAME).getValue());
    // Commit delay is configured as a NiFi time period and converted to milliseconds here.
    props.setProperty("auto.commit.interval.ms", String.valueOf(context.getProperty(ZOOKEEPER_COMMIT_DELAY).asTimePeriod(TimeUnit.MILLISECONDS)));
    // Dangling fragment -- presumably arguments of a truncated logging call:
    context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue(),
    context.getProperty(TOPIC).evaluateAttributeExpressions().getValue());
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumer = Consumer.createJavaConsumerConnector(consumerConfig);
@Override
public void startStream() {
    // Only the string encoding is set explicitly; all other consumer settings
    // fall back to ConsumerConfig defaults.
    Properties props = new Properties();
    props.setProperty("serializer.encoding", "UTF8");
    consumerConnector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

    // Decode both keys and values as strings using the same property set.
    VerifiableProperties vprops = new VerifiableProperties(props);
    StringDecoder keyDecoder = new StringDecoder(vprops);
    StringDecoder valueDecoder = new StringDecoder(vprops);

    // Subscribe via whitelist filter to the configured topic, one stream.
    Whitelist topics = new Whitelist(config.getTopic());
    inStreams = consumerConnector.createMessageStreamsByFilter(topics, 1, keyDecoder, valueDecoder);

    // Hand each stream to its own reader task.
    for (final KafkaStream stream : inStreams) {
        executor.submit(new KafkaPersistReaderTask(this, stream));
    }
}
// Fragment: builds one consumer connector per cluster. Shared settings come from
// consumerConfig; only "zookeeper.connect" is cluster-specific.
Properties config = new Properties();
config.putAll(consumerConfig);
// NOTE(review): takes an arbitrary (first) ZooKeeper address for the cluster --
// assumes any single address suffices to connect; NPE if the cluster is absent
// from zookeeperMap. Confirm against the enclosing method.
config.setProperty("zookeeper.connect", zookeeperMap.get(cluster).iterator().next());
standardConsumer.put(cluster, kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(config)));
/**
 * Builds the high-level consumer configuration for this client.
 *
 * <p>Connects to ZooKeeper at {@code zkaddress} as consumer group {@code group1},
 * and starts from the earliest available offset ("smallest") when no committed
 * offset exists for the group.
 *
 * @return a new {@link ConsumerConfig} suitable for
 *         {@code Consumer.createJavaConsumerConnector}
 */
private ConsumerConfig createConsumerConfig() {
    Properties props = new Properties();
    // Consistently use setProperty for all string-valued settings
    // (the original mixed put and setProperty).
    props.setProperty("zookeeper.connect", zkaddress);
    props.setProperty("group.id", "group1");
    props.setProperty("auto.offset.reset", "smallest");
    return new ConsumerConfig(props);
}
// Fragment: test setup -- create a single-partition topic, wait for metadata to
// propagate, then build an old (ZooKeeper-based) high-level consumer.
TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
zkClient.close();
TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);
Properties consumerProps = new Properties();
consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
// NOTE(review): GROUP_ID_CONFIG / AUTO_OFFSET_RESET_CONFIG are NEW-consumer
// ConsumerConfig constants, yet the connector below uses the OLD
// kafka.consumer.ConsumerConfig. The key strings happen to match ("group.id",
// "auto.offset.reset") and "smallest" is an old-consumer value, so this likely
// works by coincidence -- verify which API is intended.
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
ConsumerConnector connector = kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
Map<String, Integer> topicCountMap = new HashMap<>();
topicCountMap.put(topic, 1);
private void loadConfigAndInit() throws Exception { Properties kafkaProps = new Properties(); InputStream inputStream = InjectorKafkaConfig.class.getClassLoader().getResourceAsStream(CONFIG_FILE_NAME); if (inputStream != null) { kafkaProps.load(inputStream); LOGGER.info("loading injector kafka config file {}", CONFIG_FILE_NAME); ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProps); // consumer this.consumer = kafka.consumer.Consumer.createJavaConsumerConnector(consumerConfig); } else { LOGGER.warn("cannot find config file {}", CONFIG_FILE_NAME); } } }
public boolean init(StepMetaInterface smi, StepDataInterface sdi) { super.init(smi, sdi); KafkaConsumerMeta meta = (KafkaConsumerMeta) smi; KafkaConsumerData data = (KafkaConsumerData) sdi; Properties properties = meta.getKafkaProperties(); Properties substProperties = new Properties(); for (Entry<Object, Object> e : properties.entrySet()) { substProperties.put(e.getKey(), environmentSubstitute(e.getValue().toString())); } if (meta.isStopOnEmptyTopic()) { // If there isn't already a provided value, set a default of 1s if (!substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) { substProperties.put(CONSUMER_TIMEOUT_KEY, "1000"); } } else { if (substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) { logError(Messages.getString("KafkaConsumer.WarnConsumerTimeout")); } } ConsumerConfig consumerConfig = new ConsumerConfig(substProperties); logBasic(Messages.getString("KafkaConsumer.CreateKafkaConsumer.Message", consumerConfig.zkConnect())); data.consumer = Consumer.createJavaConsumerConnector(consumerConfig); Map<String, Integer> topicCountMap = new HashMap<String, Integer>(); String topic = environmentSubstitute(meta.getTopic()); topicCountMap.put(topic, 1); Map<String, List<KafkaStream<byte[], byte[]>>> streamsMap = data.consumer.createMessageStreams(topicCountMap); logDebug("Received streams map: " + streamsMap); data.streamIterator = streamsMap.get(topic).get(0).iterator(); return true; }
/**
 * Creates and starts a consumer bound to the given session.
 *
 * <p>When {@code groupId} is empty, a unique id is derived from the session id
 * and current time; if a "group.id" is configured, it is used as a prefix.
 */
public KafkaConsumer getConsumer(String groupId, final List<String> topics, final Session session) {
    if (groupId.isEmpty()) {
        // Unique per-session group id: "<sessionId>-<millis>".
        groupId = session.getId() + "-" + System.currentTimeMillis();
        if (configProps.containsKey("group.id")) {
            // Prefix with the configured base group id when one exists.
            groupId = configProps.getProperty("group.id") + "-" + groupId;
        }
    }
    // Clone so the shared config is never mutated by per-session overrides.
    Properties sessionProps = (Properties) configProps.clone();
    sessionProps.setProperty("group.id", groupId);
    KafkaConsumer consumer =
            new KafkaConsumer(new ConsumerConfig(sessionProps), executorService, outputTransform, topics, session);
    consumer.start();
    return consumer;
}
}
// Fragment: builds a minimal consumer config with only the auto-commit interval set.
// NOTE(review): the old consumer's ConsumerConfig requires "zookeeper.connect" and
// "group.id"; unless they are injected elsewhere in the enclosing (not visible)
// method, this constructor will fail at runtime -- verify against the full source.
Properties props = new Properties();
props.put("auto.commit.interval.ms", "1000");
return new ConsumerConfig(props);
/**
 * Ensures an initialized kafka {@link ConsumerConnector} is present.
 *
 * @param config The storm configuration passed to {@link #open(Map, TopologyContext, SpoutOutputCollector)}.
 * @throws IllegalArgumentException When a required configuration parameter is missing or a sanity check fails.
 */
protected void createConsumer(final Map<String, Object> config) {
    final Properties kafkaProps = createKafkaConfig(config);
    // Pull the two identifying settings out for the connect log line.
    final String zkConnect = kafkaProps.getProperty("zookeeper.connect");
    final String groupId = kafkaProps.getProperty("group.id");
    LOG.info("connecting kafka client to zookeeper at {} as client group {}", zkConnect, groupId);
    _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(kafkaProps));
}
/**
 * Translates the given Config into a Kafka {@link ConsumerConfig}, supplying
 * the default consumer group id when none was configured.
 */
protected ConsumerConfig createConsumerConfig(Config config) {
    Properties props = ConfigUtils.configToProperties(config);
    boolean groupIdConfigured = props.containsKey(GROUP_ID_KEY);
    // Fall back to the default group id only when the config omits one.
    if (!groupIdConfigured) {
        props.setProperty(GROUP_ID_KEY, DEFAULT_GROUP_ID);
    }
    return new ConsumerConfig(props);
}
/**
 * Builds a consumer config by cloning the supplied properties and overlaying
 * the connection coordinates.
 *
 * @param zkConnect  ZooKeeper connect string to use
 * @param groupId    consumer group id to use
 * @param properties base properties; left unmodified (a clone is used)
 */
private ConsumerConfig createConsumerConfig(final String zkConnect, final String groupId, final Properties properties) {
    // Work on a copy so the caller's Properties object is never mutated.
    final Properties merged = (Properties) properties.clone();
    merged.setProperty("zookeeper.connect", zkConnect);
    merged.setProperty("group.id", groupId);
    return new ConsumerConfig(merged);
}
private void configure(String zkUrl, String groupId) { kafkaProps.put("zookeeper.connect", zkUrl); kafkaProps.put("group.id",groupId); kafkaProps.put("auto.commit.interval.ms","1000"); kafkaProps.put("auto.offset.reset","largest"); // un-comment this if you want to commit offsets manually //kafkaProps.put("auto.commit.enable","false"); // un-comment this if you don't want to wait for data indefinitely kafkaProps.put("consumer.timeout.ms",waitTime); config = new ConsumerConfig(kafkaProps); }
// Fragment: creates the old high-level (ZooKeeper-based) consumer from the prepared
// properties. NOTE(review): ConsumerConfig requires "zookeeper.connect" and "group.id"
// in consumerProps and throws otherwise -- confirm they are set in the enclosing scope.
final ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
/**
 * Initializes the Kafka consumer connector, string decoders, message streams,
 * and the thread pool that will drain those streams.
 *
 * <p>Reads {@code properties}, {@code topic}, and {@code streamNum} from this
 * object; records whether offsets are auto-committed in
 * {@code isAutoCommitOffset}.
 *
 * @throws RuntimeException         if no handler has been configured
 * @throws IllegalArgumentException if Kafka returns no streams for the topic
 */
protected void initKafka() {
    if (handler == null) {
        // Fixed typo "Exectuor" -> "Executor" in the original message.
        // NOTE(review): the null check is on `handler` but the message names the
        // executor -- confirm which component the message should refer to.
        log.error("Executor can't be null!");
        throw new RuntimeException("Executor can't be null!");
    }
    log.info("Consumer properties:" + properties);
    ConsumerConfig config = new ConsumerConfig(properties);
    // Remember the commit mode so consumers know whether to commit manually.
    isAutoCommitOffset = config.autoCommitEnable();
    log.info("Auto commit: " + isAutoCommitOffset);
    consumerConnector = Consumer.createJavaConsumerConnector(config);

    // Request streamNum parallel streams for the topic, decoded as strings.
    Map<String, Integer> topics = new HashMap<String, Integer>();
    topics.put(topic, streamNum);
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
    Map<String, List<KafkaStream<String, String>>> streamsMap =
            consumerConnector.createMessageStreams(topics, keyDecoder, valueDecoder);
    streams = streamsMap.get(topic);
    log.info("Streams:" + streams);
    if (streams == null || streams.isEmpty()) {
        log.error("Streams are empty.");
        throw new IllegalArgumentException("Streams are empty.");
    }
    // One worker thread per stream.
    streamThreadPool = Executors.newFixedThreadPool(streamNum);
}
// NOTE(review): this snippet is truncated by extraction -- the getOffset(...) call is
// cut off mid-argument-list and a fetch-request builder chain (.minBytes/.addFetch/.build)
// has lost its receiver. Do not compile as-is; recover the full method from the original
// file before editing.
public static boolean canFetchData(String host, int port, String topic, int partition) {
    LOG.info("Fetching data from host {}, topic {}, partition {}", host, topic, partition);
    // SimpleConsumer: low-level consumer addressed directly at a single broker.
    SimpleConsumer consumer = new SimpleConsumer(host, port, FETCH_SOCKET_TIMEOUT, ConsumerConfig.SocketBufferSize(), FETCH_CLIENT_NAME);
    try {
        long earlyOffset = getOffset(consumer, topic, partition,
        // ...text truncated here by extraction...
            .minBytes(ConsumerConfig.MinFetchBytes())
            .addFetch(topic, partition, readOffset, FETCH_BUFFER_SIZE)
            .build();
// NOTE(review): this snippet appears truncated by extraction -- the two dangling
// context.getProperty(...) expressions below look like the tail arguments of a log
// statement whose beginning was cut off, and the method's closing brace is missing.
// Unlike the sibling variant elsewhere in this result set, property values here are
// read without expression-language evaluation.
public void createConsumers(final ProcessContext context) {
    final String topic = context.getProperty(TOPIC).getValue();
    final Properties props = new Properties();
    props.setProperty("zookeeper.connect", context.getProperty(ZOOKEEPER_CONNECTION_STRING).getValue());
    props.setProperty("group.id", context.getProperty(GROUP_ID).getValue());
    props.setProperty("client.id", context.getProperty(CLIENT_NAME).getValue());
    // Commit delay is configured as a NiFi time period and converted to milliseconds.
    props.setProperty("auto.commit.interval.ms", String.valueOf(context.getProperty(ZOOKEEPER_COMMIT_DELAY).asTimePeriod(TimeUnit.MILLISECONDS)));
    // Dangling fragment -- presumably arguments of a truncated logging call:
    context.getProperty(ZOOKEEPER_CONNECTION_STRING).getValue(),
    context.getProperty(TOPIC).getValue());
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumer = Consumer.createJavaConsumerConnector(consumerConfig);
/**
 * This method creates Kafka streams for a topic so that messages can be streamed to the local buffer. If the streams for the given topic have
 * already been initialized the returns. Information about a particular topic is stored in a HashMap. This method uses double-checked locking to
 * make sure only one client thread can initialize streams for a topic. Moreover, it also helps subsequent calls, to check if the topic has been
 * initialized, be not synchronized and hence return faster.
 *
 * @param topic The topic to initialize.
 */
public void initializeTopic(String topic) {
    // First (unsynchronized) check: fast path for already-initialized topics.
    // NOTE(review): double-checked locking is only safe if _topics provides the
    // necessary visibility guarantees (e.g. a ConcurrentHashMap) -- its
    // declaration is not visible in this snippet; confirm before refactoring.
    if (_topics.get(topic) == null) {
        synchronized (this) {
            // Second check under the lock: another thread may have initialized
            // the topic between the first check and lock acquisition.
            if (_topics.get(topic) == null) {
                _logger.info("Initializing streams for topic: {}", topic);
                Properties props = new Properties();
                // Connection and group settings come from configuration, each with a default.
                props.setProperty("zookeeper.connect", _configuration.getValue(Property.ZOOKEEPER_CONNECT.getName(), Property.ZOOKEEPER_CONNECT.getDefaultValue()));
                props.setProperty("group.id", _configuration.getValue(Property.KAFKA_CONSUMER_GROUPID.getName(), Property.KAFKA_CONSUMER_GROUPID.getDefaultValue()));
                props.setProperty("auto.offset.reset", _configuration.getValue(Property.KAFKA_CONSUMER_OFFSET_RESET.getName(), Property.KAFKA_CONSUMER_OFFSET_RESET.getDefaultValue()));
                // Hard-coded: commit offsets every 60 s, allow messages up to ~2 MB.
                props.setProperty("auto.commit.interval.ms", "60000");
                props.setProperty("fetch.message.max.bytes", "2000000");
                ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
                List<KafkaStream<byte[], byte[]>> streams = _createStreams(consumer, topic);
                // Record the topic before starting the background streaming threads.
                Topic t = new Topic(topic, consumer, streams.size());
                _topics.put(topic, t);
                _startStreamingMessages(topic, streams);
            }
        }
    }
}