// Test wiring: spout "1" (TestWordSpout) feeds bolt "2" (TestGlobalCount) via shuffle grouping;
// the bolt runs 1 executor spread over 10 tasks, then the topology is materialized.
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
builder.setSpout("1", new TestWordSpout(true), 1); builder.setBolt("2", new TestGlobalCount(), 1) .setNumTasks(10) .shuffleGrouping("1"); StormTopology stormTopology = builder.createTopology();
// Copy the task count from the bolt definition onto the Storm declarer for this component.
declarer.setNumTasks(boltDef.getNumTasks());
// Wires "errorMessageWriter" to consume the parser bolt's error stream with local-or-shuffle grouping.
// NOTE(review): this fragment is syntactically broken as shown — stray ");" after the first
// statement; verify against the full source file before relying on it.
.setNumTasks(parserNumTasks); ); builder.setBolt("errorMessageWriter", errorBolt, errorWriterParallelism) .setNumTasks(errorWriterNumTasks) .localOrShuffleGrouping("parserBolt", Constants.ERROR_STREAM);
private static TopologyBuilder buildTopology() throws Exception { TopologyBuilder builder = new TopologyBuilder(); String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic"); String groupName = Configuration.getConfig().getString("rtc.mq.spout.group"); BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts")); SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName); spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime(); spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(",")); spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort"); spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme()); KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig); builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task")); builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task")).shuffleGrouping("MQSpout"); builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task")).fieldsGrouping("ExtractBolt", new Fields(new String[]{"hashKeys"})); // builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields(new String[]{"EventName"})); return builder; }
private static TopologyBuilder buildTopology() throws Exception { TopologyBuilder builder = new TopologyBuilder(); String topicName = Configuration.getConfig().getString("rtc.mq.spout.topic"); String groupName = Configuration.getConfig().getString("rtc.mq.spout.group"); BrokerHosts hosts = new ZkHosts(Configuration.getConfig().getString("rtc.zk.hosts")); SpoutConfig spoutConfig = new SpoutConfig(hosts, topicName, "/consumers", groupName); spoutConfig.startOffsetTime = kafka.api.OffsetRequest.LatestTime(); spoutConfig.zkServers = Arrays.asList(Configuration.getConfig().getString("rtc.storm.zkServers").split(",")); spoutConfig.zkPort = Configuration.getConfig().getInt("rtc.storm.zkPort"); spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme()); KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig); builder.setSpout("MQSpout", kafkaSpout, Configuration.getConfig().getInt("rtc.storm.spout.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.spout.task")); builder.setBolt("ExtractBolt", new ExtractBolt(), Configuration.getConfig().getInt("rtc.storm.extract.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.extract.bolt.task")).shuffleGrouping("MQSpout"); builder.setBolt("Statistic", new StatisticBolt(), Configuration.getConfig().getInt("rtc.storm.statistic.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.statistic.bolt.task")).fieldsGrouping("ExtractBolt", new Fields(new String[]{"hashKeys"})); // builder.setBolt("Alarm", new AlarmBolt(), Configuration.getConfig().getInt("rtc.storm.alarm.bolt.parallelismHint")).setNumTasks(Configuration.getConfig().getInt("rtc.storm.alarm.bolt.task")).fieldsGrouping("Statistic", new Fields(new String[]{"EventName"})); return builder; }
// Single-executor, single-task InsertBolt shuffle-consuming the Kafka spout; a fresh Config follows.
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
builder.setBolt(Constants.INSERT_BOLT, new InsertBolt(), 1).setNumTasks(1).shuffleGrouping(Constants.KAFKA_SPOUT); Config conf = new Config();
// TestBolt shuffle-consumes the test spout; Test2Bolt fields-groups TestBolt's output on "count"
// so identical counts land on the same task. A config entry "test" -> "test" is then added.
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
builder.setBolt(test_bolt, new TestBolt(),1).setNumTasks(1).shuffleGrouping(test_spout); builder.setBolt(test2_bolt, new Test2Bolt(),1).setNumTasks(1).fieldsGrouping(test_bolt, new Fields("count")); Config conf = new Config(); conf.put("test", "test");
// Duplicate of the TestBolt/Test2Bolt wiring: shuffle from the test spout, then fields-group on "count".
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
builder.setBolt(test_bolt, new TestBolt(),1).setNumTasks(1).shuffleGrouping(test_spout); builder.setBolt(test2_bolt, new Test2Bolt(),1).setNumTasks(1).fieldsGrouping(test_bolt, new Fields("count")); Config conf = new Config(); conf.put("test", "test");
// Duplicate of the InsertBolt wiring: one executor/one task, shuffle grouping from the Kafka spout.
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
builder.setBolt(Constants.INSERT_BOLT, new InsertBolt(), 1).setNumTasks(1).shuffleGrouping(Constants.KAFKA_SPOUT); Config conf = new Config();
// Crawler wiring: a component takes 2 tasks from "fetch"; "feed" (FeedParserBolt, 4 tasks)
// consumes "sitemap"; a later component merges the status streams of "feed" and "ssb"
// across numShards tasks.
// NOTE(review): fragment is syntactically broken as shown — the chained .localOrShuffleGrouping
// calls are detached from their receiver; verify against the full source file.
.setNumTasks(2).localOrShuffleGrouping("fetch"); builder.setBolt("feed", new FeedParserBolt(), numWorkers).setNumTasks(4) .localOrShuffleGrouping("sitemap"); .localOrShuffleGrouping("feed", Constants.StatusStreamName) .localOrShuffleGrouping("ssb", Constants.StatusStreamName) .setNumTasks(numShards);
// Copy the task count from the bolt definition onto the Storm declarer for this component.
declarer.setNumTasks(boltDef.getNumTasks());
// Merges the status streams of "parse" and "ssb" into one component sized to numShards tasks.
// NOTE(review): fragment — the receiver of this method chain is outside the visible source.
.localOrShuffleGrouping("parse", Constants.StatusStreamName) .localOrShuffleGrouping("ssb", Constants.StatusStreamName) .setNumTasks(numShards);
// NOTE(review): fragment — the receiver of this chained call is outside the visible source.
.setNumTasks(tasks)//sets the total number of tasks
// Log the sizing decision, then wire the coordinator bolt: `executors` executors over `tasks`
// tasks, fields-grouped on the job id so all tuples for one job reach the same task.
// NOTE(review): fragment of a larger method — enclosing definition not visible here.
log.info("Setting bolt for "+getAdapterName()+". Workers:"+workers+" Executors:" + executors + " Tasks:" + tasks); builder.setBolt(LGConstants.LEMONGRENADE_COORDINATOR, new CoordinatorBolt(), executors) .setNumTasks(tasks) .fieldsGrouping("input", new Fields(LGConstants.LG_JOB_ID));