private MultiScheme createMultiScheme(Map conf, String topic, String schemeClsName) throws Exception {
    Object scheme = SchemeBuilder.buildFromClsName(schemeClsName, topic, conf);
    if (scheme instanceof MultiScheme) {
        return (MultiScheme) scheme;
    } else if (scheme instanceof Scheme) {
        // Wrap a single-tuple Scheme so it satisfies the MultiScheme interface.
        return new SchemeAsMultiScheme((Scheme) scheme);
    } else {
        LOG.error("Failed to create spout scheme from class " + schemeClsName);
        throw new IllegalArgumentException("Failed to create spout scheme from class " + schemeClsName);
    }
}
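For context, a minimal sketch of what the SchemeBuilder.buildFromClsName helper could look like; the real builder is not shown here, so the reflective no-arg construction is an assumption:

import java.util.Map;

// Hypothetical helper: reflectively instantiates a scheme class by name.
// The real SchemeBuilder may instead pass topic/conf to a constructor.
public class SchemeBuilder {
    public static Object buildFromClsName(String clsName, String topic, Map conf) throws Exception {
        // Assumes the scheme class has a public no-arg constructor.
        return Class.forName(clsName).newInstance();
    }
}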
public SchemeAsMultiScheme getStreamScheme(String deserClsName, Config context) {
    return new SchemeAsMultiScheme(new KafkaSourcedSpoutScheme(deserClsName, context));
}
private boolean initializeKafkaSpout(String name) {
    try {
        BrokerHosts zk = new ZkHosts(config.getString("kafka.zk"));
        String inputTopic = config.getString("spout.kafka.topic");
        SpoutConfig kafkaConfig = new SpoutConfig(zk, inputTopic, "", inputTopic);
        kafkaConfig.scheme = new SchemeAsMultiScheme(new RawScheme());
        kafkaConfig.forceFromStart = true;
        // -1 is kafka.api.OffsetRequest.LatestTime(); with forceFromStart the
        // spout ignores committed offsets and starts from the latest message.
        kafkaConfig.startOffsetTime = -1;
        builder.setSpout(name, new KafkaSpout(kafkaConfig), config.getInt("spout.kafka.parallelism.hint"))
               .setNumTasks(config.getInt("spout.kafka.num.tasks"));
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1); // exit with a non-zero status on failure
    }
    return true;
}
spoutConfig.scheme = new SchemeAsMultiScheme(scheme);
KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);
return kafkaSpout;
try {
    Scheme s = (Scheme) Class.forName(context.getString("schemeCls")).newInstance();
    spoutConfig.scheme = new SchemeAsMultiScheme(s);
} catch (Exception ex) {
    LOG.error("Error instantiating scheme object", ex);
}
public static TransactionalTridentKafkaSpout testTweetSpout(BrokerHosts hosts) {
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(hosts, "test", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    return new TransactionalTridentKafkaSpout(kafkaConfig);
}
public static KafkaSpout getKafkaSpout(String topic, String zkHost, String zkPort, Boolean rewind) {
    String zkRoot = "/" + topic;
    String zkSpoutId = topic;
    BrokerHosts hosts = new ZkHosts(zkHost + ":" + zkPort);
    SpoutConfig spoutCfg = new SpoutConfig(hosts, topic, zkRoot, zkSpoutId);
    if (rewind) {
        // Ignore offsets committed in ZooKeeper; the spout then starts from
        // startOffsetTime, which already defaults to EarliestTime().
        spoutCfg.ignoreZkOffsets = true;
        //spoutCfg.startOffsetTime = kafka.api.OffsetRequest.EarliestTime();
    }
    spoutCfg.scheme = new SchemeAsMultiScheme(new StringScheme());
    return new KafkaSpout(spoutCfg);
}
try {
    Scheme s = config.getSchemaClass().newInstance();
    spoutConfig.scheme = new SchemeAsMultiScheme(s);
} catch (Exception ex) {
    LOG.error("Error instantiating scheme object", ex);
}
kafkaConfig.scheme = new SchemeAsMultiScheme(new JsonScheme() {
    @Override
    public Fields getOutputFields() {
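The fragment above subclasses a scheme inline to rename its output fields; a complete version of the pattern might look like the following, where the field name "json" is an assumption rather than taken from the source:

// Overriding getOutputFields changes the names of the tuple fields the spout
// emits, while keeping JsonScheme's deserialization logic.
kafkaConfig.scheme = new SchemeAsMultiScheme(new JsonScheme() {
    @Override
    public Fields getOutputFields() {
        return new Fields("json"); // assumed field name
    }
});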
public StormTopology buildTopology() {
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, "storm-sentence", "", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new KafkaSpout(kafkaConfig), 10);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping("words");
    return builder.createTopology();
}
@Override
protected void initialize() {
    String parserClass = config.getString(getConfigKey(BaseConf.SPOUT_PARSER));
    String host = config.getString(getConfigKey(BaseConf.KAFKA_HOST));
    String topic = config.getString(getConfigKey(BaseConf.KAFKA_SPOUT_TOPIC));
    String consumerId = config.getString(getConfigKey(BaseConf.KAFKA_CONSUMER_ID));
    String path = config.getString(getConfigKey(BaseConf.KAFKA_ZOOKEEPER_PATH));
    Parser parser = (Parser) ClassLoaderUtils.newInstance(parserClass, "parser", LOG);
    parser.initialize(config);
    Fields defaultFields = fields.get(Utils.DEFAULT_STREAM_ID);
    if (defaultFields == null) {
        throw new RuntimeException("A KafkaSpout must have a default stream");
    }
    brokerHosts = new ZkHosts(host);
    SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, path, consumerId);
    spoutConfig.scheme = new SchemeAsMultiScheme(new ParserScheme(parser, defaultFields));
    spout = new storm.kafka.KafkaSpout(spoutConfig);
    spout.open(config, context, collector);
}
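ParserScheme itself is not shown in the source; a plausible sketch is an adapter that delegates deserialization to the configured Parser, assuming the pre-1.0 backtype.storm API used elsewhere in these snippets. The Parser.parse signature is an assumption:

import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import java.util.List;

// Hypothetical adapter exposing a Parser as a storm-kafka Scheme.
public class ParserScheme implements Scheme {
    private final Parser parser;
    private final Fields fields;

    public ParserScheme(Parser parser, Fields fields) {
        this.parser = parser;
        this.fields = fields;
    }

    @Override
    public List<Object> deserialize(byte[] ser) {
        // Assumes Parser.parse(byte[]) returns values in declared field order.
        return parser.parse(ser);
    }

    @Override
    public Fields getOutputFields() {
        return fields;
    }
}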
public static void main(String[] args) throws Exception {
    GlobalPartitionInformation hostsAndPartitions = new GlobalPartitionInformation();
    hostsAndPartitions.addPartition(0, new Broker("localhost", 9092));
    BrokerHosts brokerHosts = new StaticHosts(hostsAndPartitions);
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, "storm-sentence", "", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("words", new KafkaSpout(kafkaConfig), 10);
    builder.setBolt("print", new PrinterBolt()).shuffleGrouping("words");
    LocalCluster cluster = new LocalCluster();
    Config config = new Config();
    cluster.submitTopology("kafka-test", config, builder.createTopology());
    // Keep the in-process cluster alive for ten minutes so the topology can run.
    Thread.sleep(600000);
}
public StormTopology buildTopology(String topic) {
    SpoutConfig kafkaConfig = new SpoutConfig(brokerHosts, topic, "", "xlog_" + topic);
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("KafkaSpout", new KafkaSpout(kafkaConfig), 2).setNumTasks(8);
    builder.setBolt("SplitBolt", new SplitSentence(), 1).setNumTasks(2).shuffleGrouping("KafkaSpout");
    builder.setBolt("XlogBolt", new XlogBolt(), 4).setNumTasks(8).fieldsGrouping("SplitBolt", new Fields("ip"));
    return builder.createTopology();
}
kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
BrokerHosts brokerHosts = new ZkHosts(zks);
SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, topic, zkRoot, spoutId);
spoutConfig.scheme = new SchemeAsMultiScheme(new EventScheme());
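EventScheme is another custom scheme whose definition is not shown. A minimal sketch of such a Scheme, assuming UTF-8 string payloads, the pre-1.0 backtype.storm API, and an assumed output field name, could be:

import backtype.storm.spout.Scheme;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.nio.charset.StandardCharsets;
import java.util.List;

// Hypothetical minimal Scheme: decodes each Kafka message as a UTF-8 string.
public class EventScheme implements Scheme {
    @Override
    public List<Object> deserialize(byte[] ser) {
        return new Values(new String(ser, StandardCharsets.UTF_8));
    }

    @Override
    public Fields getOutputFields() {
        return new Fields("event"); // assumed field name
    }
}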
public StormTopology buildTopology(LocalDRPC drpc) {
    TridentKafkaConfig kafkaConfig = new TridentKafkaConfig(brokerHosts, "storm-sentence", "storm");
    kafkaConfig.scheme = new SchemeAsMultiScheme(new StringScheme());
    TransactionalTridentKafkaSpout kafkaSpout = new TransactionalTridentKafkaSpout(kafkaConfig);
    TridentTopology topology = new TridentTopology();
    TridentState wordCounts = topology.newStream("kafka", kafkaSpout).shuffle()
            .each(new Fields("str"), new WordSplit(), new Fields("word"))
            .groupBy(new Fields("word"))
            .persistentAggregate(new HazelCastStateFactory(), new Count(), new Fields("aggregates_words"))
            .parallelismHint(2);
    topology.newDRPCStream("words", drpc)
            .each(new Fields("args"), new Split(), new Fields("word"))
            .groupBy(new Fields("word"))
            .stateQuery(wordCounts, new Fields("word"), new MapGet(), new Fields("count"))
            .each(new Fields("count"), new FilterNull())
            .aggregate(new Fields("count"), new Sum(), new Fields("sum"));
    return topology.build();
}
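To exercise the DRPC stream above in-process, a driver along these lines should work; the topology name and query string are arbitrary:

// Submit the topology to an in-process cluster and issue a DRPC query.
LocalDRPC drpc = new LocalDRPC();
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("wordCounter", new Config(), buildTopology(drpc));
Thread.sleep(5000); // give the spout time to consume from Kafka
// "words" matches the DRPC function name registered by newDRPCStream.
System.out.println(drpc.execute("words", "the quick brown fox"));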