Refine search
/**
 * Builds and submits the TMUdfTopology: one spout feeding one bolt, with a
 * user-defined topology-master stream handler registered through JStorm's
 * ConfigExtension.
 *
 * @param args command-line arguments (unused)
 * @throws AlreadyAliveException    if a topology named "TMUdfTopology" is already running
 * @throws InvalidTopologyException if the built topology is rejected as invalid
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    // Use the typed Config instead of a raw Map: Config extends HashMap and
    // exposes type-checked setters, avoiding the raw-type warning.
    Config config = new Config();
    config.put(ConfigExtension.TOPOLOGY_MASTER_USER_DEFINED_STREAM_CLASS,
            "com.alipay.dw.jstorm.example.tm.TMUdfHandler");
    config.setNumWorkers(2); // same effect as put(Config.TOPOLOGY_WORKERS, 2)

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("TMUdfSpout", new TMUdfSpout(), 2);
    builder.setBolt("TMUdfBolt", new TMUdfBolt(), 4);

    StormTopology topology = builder.createTopology();
    StormSubmitter.submitTopology("TMUdfTopology", config, topology);
}
}
/**
 * Uploads the jar referenced by the "storm.jar" system property to the master,
 * unless a jar was already submitted during this JVM's lifetime.
 *
 * @param conf the topology configuration used for the upload
 */
private static void submitJar(Map conf) {
    if (submittedJar != null) {
        LOG.info("Jar already uploaded to master. Not submitting jar.");
        return;
    }
    LOG.info("Jar not uploaded to master yet. Submitting jar...");
    String localJar = System.getProperty("storm.jar");
    submittedJar = submitJar(conf, localJar);
}
/**
 * Submits a topology to the cluster, rendering a progress bar while the jar
 * uploads. The topology keeps running until it is explicitly killed.
 *
 * @param name      the name under which the topology is registered
 * @param stormConf the topology-specific configuration; see {@link Config}
 * @param topology  the processing graph to execute
 * @throws AlreadyAliveException    if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 */
public static void submitTopologyWithProgressBar(String name, Map stormConf, StormTopology topology)
        throws AlreadyAliveException, InvalidTopologyException {
    // Delegate to the four-argument overload with no submit options.
    submitTopologyWithProgressBar(name, stormConf, topology, null);
}
userTotalConf.putAll(TopologyBuilder.getStormConf()); // add the configuration generated during topology building userTotalConf.putAll(stormConf); userTotalConf.putAll(Utils.readCommandLineOpts()); putUserInfo(conf, stormConf); boolean dynamicUpdate = enableDeploy || isUpgrade; if (topologyNameExists(client, conf, name) != dynamicUpdate) { if (dynamicUpdate) { throw new RuntimeException("Topology with name `" + name + "` does not exist on cluster"); submitJar(client, conf); LOG.info("Submitting topology " + name + " in distributed mode with conf " + serConf); if (opts != null) {
/**
 * Word-count demo wiring: reader spout -> splitter bolt -> counter bolt.
 * Submits to the real cluster when invoked with the argument "cluster",
 * otherwise runs inside an in-process LocalCluster.
 */
public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("word-reader", new WordReaderSpout());
    builder.setBolt("word-spilter", new WordSpliterBolt()).shuffleGrouping("word-reader");
    builder.setBolt("word-counter", new WordCounterBolt()).shuffleGrouping("word-spilter");

    Config conf = new Config();
    conf.setDebug(true);

    boolean clusterMode = args.length > 0 && "cluster".equals(args[0]);
    if (clusterMode) {
        StormSubmitter.submitTopology("Cluster-Storm-Demo", conf, builder.createTopology());
    } else {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("Local-Storm-Demo", conf, builder.createTopology());
    }
}
}
public static void allInOneRace(boolean local){ Config conf = new Config(); conf.put("is.stat.enable",false); TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("all_in_one_spout", new AllInOneSpout(), 1); //写入tair BoltDeclarer flushBolt = builder.setBolt("flush_tair_bolt", new FlushTairBolt(),1); flushBolt.shuffleGrouping("all_in_one_spout"); String topologyName = RaceConfig.JstormTopologyName; try { if(local){ submitLocal(topologyName,conf,builder.createTopology()); }else { StormSubmitter.submitTopology(topologyName, conf, builder.createTopology()); } } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); } }
TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("kafka-reader", new KafkaSpout(spoutConfig), 2) .setNumTasks(2); builder.setBolt("stock-filter", new StockFilterBolt(), 2)//设置2个并行度(executor) .setNumTasks(2)//设置关联task个数 .shuffleGrouping("kafka-reader"); .shuffleGrouping("stock-stategy-3"); Config config = new Config(); config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 10000); config.setDebug(false); config.setNumWorkers(2); config.put(Config.NIMBUS_HOST, args[0]); StormSubmitter.submitTopologyWithProgressBar(name, config, builder.createTopology()); } else { LocalCluster cluster = new LocalCluster(); cluster.submitTopology("test", config, builder.createTopology());
inputDelay, debug); Config stormConfig = new Config(); stormConfig.addSerialization(TrainingExample.Serialization.class); stormConfig.addSerialization(BlockPair.Serialization.class); stormConfig.addSerialization(MatrixSerialization.class); stormConfig.setNumWorkers(config.getNumProcesses()); stormConfig.setNumAckers(config.getNumWorkers()); // our notion of a worker is different from Storm's TopologyBuilder builder = new TopologyBuilder(); builder.setSpout(1, new RatingsSource(config)); builder.setBolt(2, new Master(config)) .globalGrouping(1) .globalGrouping(3, Worker.TO_MASTER_STREAM_ID) LocalCluster cluster = new LocalCluster(); cluster.submitTopology("StreamingDSGD", stormConfig, builder.createTopology()); } else { StormSubmitter.submitTopology("StreamingDSGD", stormConfig, builder.createTopology());
/**
 * Line-count topology: a file-reader spout feeds a counter bolt, and a
 * forwarder/tracker bolt pair listens on both of their streams.
 * Usage: local|production fileName
 */
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println("######## Wrong number of arguments");
        System.err.println("######## required args: local|production fileName");
        return;
    }

    Config config = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(1, new FileReaderSpout(args[1]));
    builder.setBolt(2, new CounterBolt()).shuffleGrouping(1);
    builder.setBolt(3, new MsgForwarder(1, 2)).shuffleGrouping(1).shuffleGrouping(2);
    builder.setBolt(4, new MsgTracker(1, 2)).shuffleGrouping(3, 1).shuffleGrouping(3, 2);

    System.out.println("######## LineCountTopology.main: submitting topology");
    boolean localMode = "local".equals(args[0]);
    if (localMode) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("line-count", config, builder.createTopology());
        System.out.println("######## LineCountTopology.main: sleeping for 10 secs");
        Utils.sleep(10000);
        cluster.shutdown();
    } else {
        StormSubmitter.submitTopology("line-count", config, builder.createTopology());
    }
}
}
public static void SetRemoteTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException { String streamName = (String) conf.get(Config.TOPOLOGY_NAME); if (streamName == null) { String[] className = Thread.currentThread().getStackTrace()[1].getClassName().split("\\."); streamName = className[className.length - 1]; } TopologyBuilder builder = new TopologyBuilder(); int spout_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_SPOUT_PARALLELISM_HINT), 1); int bolt_Parallelism_hint = JStormUtils.parseInt(conf.get(TOPOLOGY_BOLT_PARALLELISM_HINT), 2); builder.setSpout("spout", new TestSpout(), spout_Parallelism_hint); BoltDeclarer boltDeclarer = builder.setBolt("bolt", new TestBolt(), bolt_Parallelism_hint); // localFirstGrouping is only for jstorm // boltDeclarer.localFirstGrouping(SequenceTopologyDef.SEQUENCE_SPOUT_NAME); boltDeclarer.shuffleGrouping("spout"); // .addConfiguration(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 60); conf.put(Config.STORM_CLUSTER_MODE, "distributed"); StormSubmitter.submitTopology(streamName, conf, builder.createTopology()); }
/**
 * Builds and submits the Kafka spout test topology.
 * Usage: &lt;kafkaZk&gt; [&lt;topologyName&gt; &lt;dockerIp&gt;] — with three or more
 * arguments the topology is submitted to a remote Nimbus at dockerIp,
 * otherwise it runs in an in-process local cluster.
 */
public static void main(String[] args) throws Exception {
    String kafkaZk = args[0];
    KafkaSpoutTestTopology kafkaSpoutTestTopology = new KafkaSpoutTestTopology(kafkaZk);

    Config config = new Config();
    config.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 2000);
    StormTopology stormTopology = kafkaSpoutTestTopology.buildTopology();

    // BUG FIX: the remote branch reads args[2], so it requires at least three
    // arguments. The previous `args.length > 1` check threw
    // ArrayIndexOutOfBoundsException when exactly two were supplied.
    // (The old `args != null` guard was dead code: args is never null in main.)
    if (args.length > 2) {
        String name = args[1];
        String dockerIp = args[2];
        config.setNumWorkers(2);
        config.setMaxTaskParallelism(5);
        config.put(Config.NIMBUS_HOST, dockerIp);
        config.put(Config.NIMBUS_THRIFT_PORT, 6627);
        config.put(Config.STORM_ZOOKEEPER_PORT, 2181);
        config.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(dockerIp));
        StormSubmitter.submitTopology(name, config, stormTopology);
    } else {
        config.setNumWorkers(2);
        config.setMaxTaskParallelism(2);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("kafka", config, stormTopology);
    }
}
}
public static void main(String[] args) throws Exception TopologyBuilder builder = new TopologyBuilder(); Config conf = new Config(); conf.setDebug(true); conf.setNumWorkers(3); StormSubmitter.submitTopology(args[0], conf, builder.createTopology()); LocalCluster cluster = new LocalCluster(); cluster.submitTopology("tweet-word-count", conf, builder.createTopology()); cluster.killTopology("tweet-word-count");
builder = new TopologyBuilder(); conf = new Config(); conf.registerSerialization(JSONObject.class, MapSerializer.class); conf.setDebug(debug); conf.setNumWorkers(config.getInt("num.workers")); conf.setMaxTaskParallelism(1); LocalCluster cluster = new LocalCluster(); cluster.submitTopology(topology_name, conf, builder.createTopology()); } else { conf.setNumWorkers(config.getInt("num.workers")); conf.setNumAckers(config.getInt("num.ackers")); StormSubmitter.submitTopology(topology_name, conf, builder.createTopology());
/**
 * Submits the built topology under the configured "topology.name". In local
 * mode an in-process cluster is started, left running for 200 seconds, then
 * shut down; otherwise the topology is submitted to the real cluster.
 *
 * @param builder the fully wired topology builder
 */
private static void submitTopology(TopologyBuilder builder) {
    try {
        if (local_mode(conf)) {
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
            Thread.sleep(200000);
            localCluster.shutdown();
        } else {
            StormSubmitter.submitTopology(String.valueOf(conf.get("topology.name")), conf,
                    builder.createTopology());
        }
    } catch (Exception e) {
        // Best-effort: report the failure and return.
        e.printStackTrace();
    }
}
Config conf = new Config(); conf.setNumWorkers(6); if (args.length != 0) { Map yamlConf = LoadConf.LoadYaml(args[0]); if (yamlConf != null) { conf.putAll(yamlConf); StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology()); } else { conf.setMaxTaskParallelism(3); LocalDRPC drpc = new LocalDRPC(); LocalCluster cluster = new LocalCluster(); cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createLocalTopology(drpc)); cluster.shutdown(); drpc.shutdown();
boolean localMode) { backtype.storm.Config stormConfig = givenStormConfig == null ? new backtype.storm.Config() : givenStormConfig; stormConfig.setMessageTimeoutSecs(messageTimeoutSecs); stormConfig.registerMetricsConsumer(StormMetricTaggedConsumer.class, config.root().render(ConfigRenderOptions.concise()), 1); stormConfig.setNumWorkers(numOfTotalWorkers); StormTopology topology = buildTopology(topologyId, numOfSpoutTasks, numOfRouterBolts, numOfAlertBolts, numOfPublishExecutors, numOfPublishTasks, config).createTopology(); LocalCluster cluster = new LocalCluster(); cluster.submitTopology(topologyId, stormConfig, topology); Utils.sleep(Long.MAX_VALUE); } else { LOG.info("Submitting as cluster mode"); try { StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology); } catch (Exception ex) { LOG.error("fail submitting topology {}", topology, ex);
/**
 * Decodes a base64-encoded XML topology definition from the single
 * command-line argument, builds the corresponding stream topology, and
 * submits it under the name "test".
 */
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        System.err.println("Missing XML definition (base64 encoded)!");
        return;
    }

    Document doc = DocumentEncoder.decodeDocument(args[0]);

    Config conf = new Config();
    conf.setNumWorkers(20);

    StreamTopology streamTop = build(doc, new TopologyBuilder());
    StormTopology topology = streamTop.getTopologyBuilder().createTopology();
    StormSubmitter.submitTopology("test", conf, topology);
}
}
public static void main(String[] args) throws Exception { Config conf = new Config(); if (args.length == 2) { // Ready & submit the topology String name = args[0]; BrokerHosts hosts = new ZkHosts(args[1]); TransactionalTridentKafkaSpout kafkaSpout = TestUtils.testTweetSpout(hosts); StormSubmitter.submitTopology(name, conf, buildTopology(kafkaSpout)); }else{ System.err.println("<topologyName> <zookeeperHost>"); } }
/**
 * Submits the sequence-test topology to the remote cluster. The topology name
 * is taken from Config.TOPOLOGY_NAME, defaulting to "SequenceTest" when unset.
 */
public static void SetRemoteTopology() throws AlreadyAliveException, InvalidTopologyException, TopologyAssignException {
    String topologyName = (String) conf.get(Config.TOPOLOGY_NAME);
    if (topologyName == null) {
        topologyName = "SequenceTest";
    }

    TopologyBuilder builder = new TopologyBuilder();
    SetBuilder(builder, conf);

    conf.put(Config.STORM_CLUSTER_MODE, "distributed");
    StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
public static void submit(StormTopology topology, Config config){ backtype.storm.Config stormConfig = new backtype.storm.Config(); int messageTimeoutSecs = config.hasPath(MESSAGE_TIMEOUT_SECS)?config.getInt(MESSAGE_TIMEOUT_SECS) : DEFAULT_MESSAGE_TIMEOUT_SECS; LOG.info("Set topology.message.timeout.secs as {}",messageTimeoutSecs); stormConfig.setMessageTimeoutSecs(messageTimeoutSecs); props.put("serializer.class", config.getString("dataSinkConfig.serializerClass")); props.put("key.serializer.class", config.getString("dataSinkConfig.keySerializerClass")); stormConfig.put(KafkaBolt.KAFKA_BROKER_PROPERTIES, props); if(localMode) { LOG.info("Submitting as local mode"); LocalCluster cluster = new LocalCluster(); cluster.submitTopology(topologyId, stormConfig, topology); Utils.sleep(Long.MAX_VALUE); }else{ LOG.info("Submitting as cluster mode"); try { StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology); } catch(Exception ex) { LOG.error("fail submitting topology {}", topology, ex);