/**
 * Resolves the kafka bootstrap servers: an explicitly configured
 * bootstrap.servers entry wins; otherwise the broker list is discovered
 * from zookeeper at the given quorum.
 *
 * @param zkQuorum   zookeeper quorum used for broker discovery.
 * @param kafkaProps kafka properties that may already carry bootstrap.servers.
 * @return a comma-separated broker list.
 * @throws IllegalStateException if zookeeper discovery fails.
 */
private static String getBootstrapServers(String zkQuorum, Map<String, Object> kafkaProps) {
  String configuredBrokers = (String) kafkaProps.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG);
  if (configuredBrokers != null) {
    return configuredBrokers;
  }
  try {
    List<String> discovered = KafkaUtils.INSTANCE.getBrokersFromZookeeper(zkQuorum);
    return Joiner.on(",").join(discovered);
  } catch (Exception e) {
    throw new IllegalStateException("Unable to find the bootstrap servers: " + e.getMessage(), e);
  }
}
/**
 * Canonicalizes the security protocol entry of a config map, in place.
 * Maps without a security protocol entry are returned unchanged.
 *
 * @param configs the config map to normalize (mutated in place).
 * @return the same map instance, for chaining.
 */
public Map<String, Object> normalizeProtocol(Map<String, Object> configs) {
  if (configs.containsKey(SECURITY_PROTOCOL)) {
    String normalized = normalizeProtocol((String) configs.get(SECURITY_PROTOCOL));
    configs.put(SECURITY_PROTOCOL, normalized);
  }
  return configs;
}
/**
 * Reads the registered kafka brokers from zookeeper's /brokers/ids nodes.
 * Supports both the legacy registration format (top-level host/port fields)
 * and the newer format (a list of endpoint URLs).
 *
 * @param client a started curator client pointed at the kafka zookeeper.
 * @return the broker list as host:port strings.
 * @throws Exception on zookeeper or JSON parsing failures.
 */
public List<String> getBrokersFromZookeeper(CuratorFramework client) throws Exception {
  List<String> ret = new ArrayList<>();
  for (String id : client.getChildren().forPath("/brokers/ids")) {
    byte[] data = client.getData().forPath("/brokers/ids/" + id);
    // Fix: decode the registration JSON explicitly as UTF-8 instead of the
    // platform-default charset (new String(byte[]) is platform-dependent).
    String brokerInfoStr = new String(data, java.nio.charset.StandardCharsets.UTF_8);
    Map<String, Object> brokerInfo = JSONUtils.INSTANCE.load(brokerInfoStr, JSONUtils.MAP_SUPPLIER);
    String host = (String) brokerInfo.get("host");
    if (host != null) {
      // Legacy format: host and port at the top level.
      ret.add(host + ":" + brokerInfo.get("port"));
    } else {
      // Newer format: a list of endpoint URLs. instanceof is false for null,
      // so the previous separate null check was redundant.
      Object endpoints = brokerInfo.get("endpoints");
      if (endpoints instanceof List) {
        @SuppressWarnings("unchecked")
        List<String> eps = (List<String>) endpoints;
        for (String url : eps) {
          ret.addAll(fromEndpoint(url));
        }
      }
    }
  }
  return ret;
}
/**
 * Connects a temporary curator client to the given zookeeper quorum and
 * returns the registered kafka brokers; the client is always closed.
 *
 * @param zkQuorum zookeeper quorum connection string.
 * @return the broker list as host:port strings.
 * @throws Exception on connection or lookup failure.
 */
public List<String> getBrokersFromZookeeper(String zkQuorum) throws Exception {
  // Bounded exponential backoff: 1s base sleep, at most 3 retries.
  RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
  try (CuratorFramework client = CuratorFrameworkFactory.newClient(zkQuorum, retryPolicy)) {
    client.start();
    return getBrokersFromZookeeper(client);
  }
}
/**
 * Builds the kafka producer configuration from this writer's fields, layering
 * any user-supplied producer configs over the defaults and normalizing the
 * security protocol.
 *
 * @return the producer configuration map.
 */
public Map<String, Object> createProducerConfigs() {
  Map<String, Object> config = new HashMap<>();
  config.put("bootstrap.servers", brokerUrl);
  config.put("key.serializer", keySerializer);
  config.put("value.serializer", valueSerializer);
  config.put("request.required.acks", requiredAcks);
  config.put(ProducerConfig.BATCH_SIZE_CONFIG, DEFAULT_BATCH_SIZE);
  // User-supplied properties override the defaults above.
  if (producerConfigs != null) {
    config.putAll(producerConfigs);
  }
  return KafkaUtils.INSTANCE.normalizeProtocol(config);
}
/**
 * Verifies that fromEndpoint parses {@code endpoint} into {@code expected}
 * (first element of the returned list).
 * NOTE(review): {@code expected} and {@code endpoint} are fields declared
 * outside this view — presumably this is a parameterized test; confirm.
 */
@Test public void testEndpointParsing() throws URISyntaxException { assertEquals(expected, KafkaUtils.INSTANCE.fromEndpoint(endpoint).get(0)); } }
/**
 * Initializes the writer: if no broker URL was configured but a zookeeper
 * quorum was, the broker list is discovered from zookeeper; then the kafka
 * producer is created from the assembled configs.
 */
@Override
public void init(Map stormConf, TopologyContext topologyContext, WriterConfiguration config) throws Exception {
  boolean needBrokerLookup = this.zkQuorum != null && this.brokerUrl == null;
  if (needBrokerLookup) {
    try {
      List<String> brokers = KafkaUtils.INSTANCE.getBrokersFromZookeeper(this.zkQuorum);
      this.brokerUrl = Joiner.on(",").join(brokers);
    } catch (Exception e) {
      throw new IllegalStateException(
          "Cannot read kafka brokers from zookeeper and you didn't specify them, giving up!", e);
    }
  }
  this.kafkaProducer = new KafkaProducer<>(createProducerConfigs());
}
/**
 * Builds the producer-side kafka properties for the REST layer from the
 * Spring environment; the security protocol is only set (and normalized)
 * when kerberos is enabled.
 *
 * @return the producer configuration map.
 */
@Bean
public Map<String, Object> producerProperties() {
  Map<String, Object> props = new HashMap<>();
  props.put("bootstrap.servers", environment.getProperty(MetronRestConstants.KAFKA_BROKER_URL_SPRING_PROPERTY));
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("request.required.acks", 1);
  boolean kerberosEnabled =
      environment.getProperty(MetronRestConstants.KERBEROS_ENABLED_SPRING_PROPERTY, Boolean.class, false);
  if (kerberosEnabled) {
    String protocol = environment.getProperty(MetronRestConstants.KAFKA_SECURITY_PROTOCOL_SPRING_PROPERTY);
    props.put("security.protocol", KafkaUtils.INSTANCE.normalizeProtocol(protocol));
  }
  return props;
}
/**
 * A single broker registered under /brokers/ids using the legacy top-level
 * host/port format should be returned as a one-element host:port list.
 */
@Test
public void testGetEndpointsFromZookeeperHostPort() throws Exception {
  List<String> brokerIds = new ArrayList<>();
  brokerIds.add("1");
  when(client.getChildren()).thenReturn(childrenBuilder);
  when(childrenBuilder.forPath("/brokers/ids")).thenReturn(brokerIds);
  when(client.getData()).thenReturn(dataBuilder);
  when(dataBuilder.forPath("/brokers/ids/1")).thenReturn(brokerWithHostPort.getBytes());
  List<String> expected = new ArrayList<>();
  expected.add("192.168.1.148:9092");
  assertEquals(expected, KafkaUtils.INSTANCE.getBrokersFromZookeeper(client));
}
/**
 * Builds the consumer-side kafka properties used by
 * {@link org.apache.metron.rest.config.KafkaConfig#createConsumerFactory()}.
 *
 * @return configurations used by {@link org.apache.metron.rest.config.KafkaConfig#createConsumerFactory()}.
 */
@Bean
public Map<String, Object> consumerProperties() {
  final Map<String, Object> props = new HashMap<>();
  props.put("bootstrap.servers", environment.getProperty(MetronRestConstants.KAFKA_BROKER_URL_SPRING_PROPERTY));
  props.put("group.id", "metron-rest");
  // Auto-commit is disabled; the interval/timeout settings below still apply.
  props.put("enable.auto.commit", "false");
  props.put("auto.commit.interval.ms", "1000");
  props.put("session.timeout.ms", "30000");
  props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
  boolean kerberosEnabled =
      environment.getProperty(MetronRestConstants.KERBEROS_ENABLED_SPRING_PROPERTY, Boolean.class, false);
  if (kerberosEnabled) {
    String protocol = environment.getProperty(MetronRestConstants.KAFKA_SECURITY_PROTOCOL_SPRING_PROPERTY);
    props.put("security.protocol", KafkaUtils.INSTANCE.normalizeProtocol(protocol));
  }
  return props;
}
/**
 * When a broker registration carries both the legacy host/port fields and an
 * endpoints list, only the host/port entry is returned (host wins — see
 * getBrokersFromZookeeper, which checks "host" before "endpoints").
 */
@Test public void testGetEndpointsFromZookeeperHostPortAndEndpoints() throws Exception { ArrayList<String> brokerIds = new ArrayList<>(); brokerIds.add("1"); when(client.getChildren()).thenReturn(childrenBuilder); when(childrenBuilder.forPath("/brokers/ids")).thenReturn(brokerIds); when(client.getData()).thenReturn(dataBuilder); when(dataBuilder.forPath("/brokers/ids/1")) .thenReturn(brokerWithHostPortAndEndpoints.getBytes()); ArrayList<String> expected = new ArrayList<>(); expected.add("192.168.1.148:9092"); assertEquals(expected, (KafkaUtils.INSTANCE.getBrokersFromZookeeper(client))); } }
protected String[] getParserStartCommand(String names) { List<String> command = new ArrayList<>(); command.add( environment.getProperty(MetronRestConstants.PARSER_SCRIPT_PATH_SPRING_PROPERTY)); // sensor type command.add( "-s"); command.add( names); // zookeeper command.add( "-z"); command.add( environment.getProperty(MetronRestConstants.ZK_URL_SPRING_PROPERTY)); // kafka broker command.add( "-k"); command.add( environment.getProperty(MetronRestConstants.KAFKA_BROKER_URL_SPRING_PROPERTY)); // kafka security protocol command.add( "-ksp"); command.add(KafkaUtils.INSTANCE.normalizeProtocol(environment.getProperty(MetronRestConstants.KAFKA_SECURITY_PROTOCOL_SPRING_PROPERTY))); // extra topology options boolean kerberosEnabled = environment.getProperty(MetronRestConstants.KERBEROS_ENABLED_SPRING_PROPERTY, Boolean.class, false); boolean topologyOptionsDefined = StringUtils.isNotBlank(environment.getProperty(MetronRestConstants.PARSER_TOPOLOGY_OPTIONS_SPRING_PROPERTY)); if (kerberosEnabled && topologyOptionsDefined) { command.add("-e"); command.add(environment.getProperty(MetronRestConstants.PARSER_TOPOLOGY_OPTIONS_SPRING_PROPERTY)); } return command.toArray(new String[0]); }
/**
 * A broker registration that only carries an endpoints list should surface
 * every endpoint as a host:port entry, in order.
 */
@Test
public void testGetEndpointsFromZookeeperEndpoints() throws Exception {
  ArrayList<String> brokerIds = new ArrayList<>();
  brokerIds.add("1");
  when(client.getChildren()).thenReturn(childrenBuilder);
  when(childrenBuilder.forPath("/brokers/ids")).thenReturn(brokerIds);
  when(client.getData()).thenReturn(dataBuilder);
  when(dataBuilder.forPath("/brokers/ids/1")).thenReturn(brokerWithEndpoints.getBytes());
  ArrayList<String> expected = new ArrayList<>();
  for (String port : new String[] {"9092", "9093", "9094", "9095"}) {
    expected.add("host1:" + port);
  }
  assertEquals(expected, KafkaUtils.INSTANCE.getBrokersFromZookeeper(client));
}
); if(securityProtocol.isPresent()) { kafkaSpoutConfigOptions.putIfAbsent("security.protocol", KafkaUtils.INSTANCE.normalizeProtocol(securityProtocol.get()));
// Resolve the broker list by querying zookeeper at the user-supplied quorum,
// then wire it in as the consumer's bootstrap.servers entry.
// NOTE(review): this span is the interior of a method whose start/end are
// outside this view.
String zkQuorum = (String) evaluatedArgs.get(LoadOptions.ZK).get(); kafkaConfig.put( ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG , Joiner.on(",").join(KafkaUtils.INSTANCE.getBrokersFromZookeeper(zkQuorum)) );
/**
 * Create an object with the specified properties and exposing the specified fields.
 *
 * @param kafkaProps The special kafka properties; may carry an explicit
 *                   bootstrap.servers entry and key/value deserializer classes.
 * @param subscription The subscription to the kafka topic(s).
 * @param zkQuorum The zookeeper quorum. Used to discover brokers when
 *                 bootstrap.servers is not configured (see getBootstrapServers).
 * @param fieldsConfiguration The fields to expose in the storm tuple emitted.
 */
public SimpleStormKafkaBuilder( Map<String, Object> kafkaProps
                              , Subscription subscription
                              , String zkQuorum
                              , List<String> fieldsConfiguration
                              )
{
  // Deserializers fall back to DEFAULT_DESERIALIZER when not configured.
  super( getBootstrapServers(zkQuorum, kafkaProps)
       , createDeserializer(Optional.ofNullable((String)kafkaProps.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG)), DEFAULT_DESERIALIZER)
       , createDeserializer(Optional.ofNullable((String)kafkaProps.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG)), DEFAULT_DESERIALIZER)
       , subscription
       );
  // Canonicalize security.protocol (mutates/returns the same map) before
  // applying the props to the spout config.
  kafkaProps = KafkaUtils.INSTANCE.normalizeProtocol(kafkaProps);
  setProp(kafkaProps);
  setRecordTranslator(new SpoutRecordTranslator<>(FieldsConfiguration.toList(fieldsConfiguration)));
}