public static void testDrpc() {
    TopologyBuilder builder = new TopologyBuilder();
    LocalDRPC drpc = new LocalDRPC();
    DRPCSpout spout = new DRPCSpout("exclamation", drpc);
    builder.setSpout("drpc", spout);
    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");

    LocalCluster cluster = new LocalCluster();
    Config conf = new Config();
    cluster.submitTopology("exclaim", conf, builder.createTopology());
    JStormUtils.sleepMs(30 * 1000);

    try {
        System.out.println(drpc.execute("exclamation", "aaa"));
        System.out.println(drpc.execute("exclamation", "bbb"));
    } catch (Exception e) {
        Assert.fail("Failed to test drpc");
    }

    drpc.shutdown();
    cluster.shutdown();
}
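The ExclamationBolt above is not part of this result. A minimal sketch consistent with the DRPCSpout/ReturnResults wiring might look like the following (an assumption, not the original bolt): DRPCSpout emits tuples of ("args", "return-info") and ReturnResults expects ("result", "return-info"), so the return-info value must be passed through unchanged.

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public static class ExclamationBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        // Append "!" to the request args and forward the DRPC return-info untouched.
        collector.emit(new Values(tuple.getString(0) + "!", tuple.getValue(1)));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("result", "return-info"));
    }
}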
public static void SetLocalTopology() throws Exception {
    TopologyBuilder builder = new TopologyBuilder();
    Config conf = new Config(); // conf was used without being declared in the snippet
    conf.put(TOPOLOGY_BOLT_PARALLELISM_HINT, 1);
    SetBuilder(builder, conf);
    LOG.debug("test");
    LOG.info("Submit log");

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("SplitMerge", conf, builder.createTopology());
    Thread.sleep(60000);
    cluster.killTopology("SplitMerge");
    cluster.shutdown();
}
public static void main(String[] args) throws Exception {
    LocalCluster localCluster = null;
    try {
        localCluster = new LocalCluster();
    } finally {
        if (localCluster != null) {
            localCluster.shutdown();
        }
    }
}
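In Storm 2.x, LocalCluster implements AutoCloseable, so the null-check/finally pattern above can be written with try-with-resources; a sketch:

try (LocalCluster localCluster = new LocalCluster()) {
    // submit and exercise topologies here; shutdown happens automatically
}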
public void SetLocalTopology() throws Exception {
    Config conf = getConf();
    StormTopology topology = buildTopology();

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("SplitMerge", conf, topology);
    Thread.sleep(60000);
    cluster.shutdown();
}
public static void main(String[] args) {
    final Config conf = new Config();
    conf.setDebug(false);
    conf.setNumWorkers(2);
    conf.setMaxSpoutPending(1);
    conf.setFallBackOnJavaSerialization(false);
    conf.setSkipMissingKryoRegistrations(false);

    final LocalCluster cluster = new LocalCluster();
    final TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("randomSpout1", new RandomFieldSpout(2, 0, 0, 1));  // (nfields, seed, min, max)
    builder.setSpout("randomSpout2", new RandomFieldSpout(2, 10, 0, 1)); // (nfields, seed, min, max)
    JoinBolt.connectNewBolt(builder);

    final StormTopology topology = builder.createTopology();
    cluster.submitTopology("playTopology", conf, topology);
    Utils.sleep(10000);
    cluster.killTopology("playTopology");
    cluster.shutdown();
}
TopologyBuilder builder = new TopologyBuilder(); builder.setSpout("kafka-reader", new KafkaSpout(spoutConfig), 2) .setNumTasks(2); builder.setBolt("stock-filter", new StockFilterBolt(), 2)//设置2个并行度(executor) .setNumTasks(2)//设置关联task个数 .shuffleGrouping("kafka-reader"); .shuffleGrouping("stock-stategy-3"); Config config = new Config(); config.put(Config.TOPOLOGY_MAX_SPOUT_PENDING, 10000); config.setDebug(false); config.setNumWorkers(2); config.put(Config.NIMBUS_HOST, args[0]); StormSubmitter.submitTopologyWithProgressBar(name, config, builder.createTopology()); } else { LocalCluster cluster = new LocalCluster(); cluster.submitTopology("test", config, builder.createTopology());
public static void testDrpc() {
    LinearDRPCTopologyBuilder builder = new LinearDRPCTopologyBuilder("exclamation");
    builder.addBolt(new ExclaimBolt(), 3);

    Config conf = new Config();
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("drpc-demo", conf, builder.createLocalTopology(drpc));

    try {
        Thread.sleep(30000);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
    }

    try {
        for (String word : new String[] { "hello", "goodbye" }) {
            System.out.println("Result for \"" + word + "\": " + drpc.execute("exclamation", word));
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Failed to run DRPC Test");
    }

    drpc.shutdown();
    cluster.shutdown();
}
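The ExclaimBolt is likewise not shown. With LinearDRPCTopologyBuilder, the first bolt receives tuples of (request-id, args) and must emit (id, result); a minimal sketch under that contract (the class body is an assumption):

import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

public static class ExclaimBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        // Field 0 is the DRPC request id, field 1 the request arguments.
        Object id = tuple.getValue(0);
        String input = tuple.getString(1);
        collector.emit(new Values(id, input + "!"));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("id", "result"));
    }
}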
public static void main(String[] args) throws Exception {
    String kafkaZk = args[0];
    KafkaSpoutTestTopology kafkaSpoutTestTopology = new KafkaSpoutTestTopology(kafkaZk);
    Config config = new Config();
    config.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 2000);

    StormTopology stormTopology = kafkaSpoutTestTopology.buildTopology();
    if (args != null && args.length > 1) {
        String name = args[1];
        String dockerIp = args[2];
        config.setNumWorkers(2);
        config.setMaxTaskParallelism(5);
        config.put(Config.NIMBUS_HOST, dockerIp);
        config.put(Config.NIMBUS_THRIFT_PORT, 6627);
        config.put(Config.STORM_ZOOKEEPER_PORT, 2181);
        config.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(dockerIp));
        StormSubmitter.submitTopology(name, config, stormTopology);
    } else {
        config.setNumWorkers(2);
        config.setMaxTaskParallelism(2);
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("kafka", config, stormTopology);
    }
}
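buildTopology() is not shown in this result. With the storm-kafka module (classes from the storm.kafka package), a spout is typically configured roughly as follows; the topic, ZK root, and spout id here are placeholders, not values from the snippet:

BrokerHosts hosts = new ZkHosts(kafkaZk);
SpoutConfig spoutConfig = new SpoutConfig(hosts, "test-topic", "/kafka-spout", "kafka-spout-id");
spoutConfig.scheme = new SchemeAsMultiScheme(new StringScheme()); // deserialize messages as strings

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafka", new KafkaSpout(spoutConfig), 1);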
public static void main(String[] args) throws Exception {
    if (args.length < 2) {
        System.err.println("######## Wrong number of arguments");
        System.err.println("######## required args: local|production fileName");
        return;
    }

    Config config = new Config();
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout(1, new FileReaderSpout(args[1]));
    builder.setBolt(2, new CounterBolt()).shuffleGrouping(1);
    builder.setBolt(3, new MsgForwarder(1, 2)).shuffleGrouping(1).shuffleGrouping(2);
    builder.setBolt(4, new MsgTracker(1, 2)).shuffleGrouping(3, 1).shuffleGrouping(3, 2);

    System.out.println("######## LineCountTopology.main: submitting topology");
    if ("local".equals(args[0])) {
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("line-count", config, builder.createTopology());
        System.out.println("######## LineCountTopology.main: sleeping for 10 secs");
        Utils.sleep(10000);
        cluster.shutdown();
    } else {
        StormSubmitter.submitTopology("line-count", config, builder.createTopology());
    }
}
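FileReaderSpout is referenced but not defined in this result. A hypothetical version that emits one tuple per line of the input file could look like this; the field name and error handling are assumptions:

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.Map;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;

public class FileReaderSpout extends BaseRichSpout {
    private final String fileName;
    private SpoutOutputCollector collector;
    private BufferedReader reader;

    public FileReaderSpout(String fileName) {
        this.fileName = fileName;
    }

    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
        try {
            reader = new BufferedReader(new FileReader(fileName));
        } catch (FileNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void nextTuple() {
        try {
            String line = reader.readLine();
            if (line != null) {
                collector.emit(new Values(line)); // one tuple per line
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("line"));
    }
}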
public static void main(String[] args) throws Exception { // brace was missing in the snippet
    TopologyBuilder builder = new TopologyBuilder();
    Config conf = new Config();
    conf.setDebug(true);
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology(args[0], conf, builder.createTopology());

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("tweet-word-count", conf, builder.createTopology());
    Utils.sleep(300000);
    cluster.killTopology("tweet-word-count");
    cluster.shutdown();
}
        inputDelay, debug);

Config stormConfig = new Config();
stormConfig.addSerialization(TrainingExample.Serialization.class);
stormConfig.addSerialization(BlockPair.Serialization.class);
stormConfig.addSerialization(MatrixSerialization.class);
stormConfig.setNumWorkers(config.getNumProcesses());
stormConfig.setNumAckers(config.getNumWorkers()); // our notion of a worker is different from Storm's

TopologyBuilder builder = new TopologyBuilder();
builder.setSpout(1, new RatingsSource(config));
builder.setBolt(2, new Master(config))
       .globalGrouping(1)
       .globalGrouping(3, Worker.TO_MASTER_STREAM_ID); // semicolon was missing in the snippet

LocalCluster cluster = new LocalCluster();
cluster.submitTopology("StreamingDSGD", stormConfig, builder.createTopology());
} else {
    StormSubmitter.submitTopology("StreamingDSGD", stormConfig, builder.createTopology());
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("hackaton", conf, buildTopology(drpc));
}
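Note that this main() returns immediately after submission without ever querying the DRPC server or shutting anything down. A more complete local run would end along these lines; the function name passed to execute is whatever buildTopology registers, assumed here to be "hackaton":

System.out.println(drpc.execute("hackaton", "some-args"));
drpc.shutdown();
cluster.shutdown();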
CreateTopicUtils.ensureTopicReady(System.getProperty("withMetadataChangeNotifyService.zkConfig.zkQuorum"), topicName2);

TopologyBuilder topoBuilder = new TopologyBuilder();
CorrelationSpout spout = new CorrelationSpout(config, topoId,
        new MockMetadataChangeNotifyService(topoId, spoutId), numBolts);
SpoutDeclarer declarer = topoBuilder.setSpout(spoutId, spout);
declarer.setNumTasks(2);

for (int i = 0; i < numBolts; i++) {
    TestBolt bolt = new TestBolt();
    BoltDeclarer boltDecl = topoBuilder.setBolt("engineBolt" + i, bolt);
    boltDecl.fieldsGrouping(spoutId,
            StreamIdConversion.generateStreamIdBetween(AlertConstants.DEFAULT_SPOUT_NAME,
                    AlertConstants.DEFAULT_ROUTERBOLT_NAME + i),
            new Fields());
} // loop brace was missing in the snippet

LocalCluster cluster = new LocalCluster();
StormTopology topology = topoBuilder.createTopology();
cluster.submitTopology(topoName, new HashMap(), topology);
Utils.sleep(Long.MAX_VALUE);
builder = new TopologyBuilder();
conf = new Config();
conf.registerSerialization(JSONObject.class, MapSerializer.class);
conf.setDebug(debug);
conf.setNumWorkers(config.getInt("num.workers"));
conf.setMaxTaskParallelism(1);

LocalCluster cluster = new LocalCluster();
cluster.submitTopology(topology_name, conf, builder.createTopology());
} else {
    conf.setNumWorkers(config.getInt("num.workers"));
    conf.setNumAckers(config.getInt("num.ackers"));
    StormSubmitter.submitTopology(topology_name, conf, builder.createTopology());
Config conf = new Config();
conf.setNumWorkers(6);
if (args.length != 0) {
    Map yamlConf = LoadConf.LoadYaml(args[0]);
    if (yamlConf != null) {
        conf.putAll(yamlConf);
    }
    StormSubmitter.submitTopology(TOPOLOGY_NAME, conf, builder.createRemoteTopology());
} else {
    conf.setMaxTaskParallelism(3);
    LocalDRPC drpc = new LocalDRPC();
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(TOPOLOGY_NAME, conf, builder.createLocalTopology(drpc));
    cluster.shutdown();
    drpc.shutdown();
private static void submitTopology(TopologyBuilder builder) {
    try {
        if (local_mode(conf)) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(String.valueOf(conf.get("topology.name")), conf, builder.createTopology());
            Thread.sleep(200000);
            cluster.shutdown();
        } else {
            StormSubmitter.submitTopology(String.valueOf(conf.get("topology.name")), conf, builder.createTopology());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
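local_mode(conf) is not defined in this result. A plausible helper consistent with the call site would read a mode flag out of the same config map; the key name here is an assumption:

private static boolean local_mode(Map conf) {
    // Treat anything other than an explicit "cluster" setting as local mode.
    return !"cluster".equalsIgnoreCase(String.valueOf(conf.get("storm.run.mode")));
}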
public void run() {
    StormTopology topology = new FlowmixBuilder()
            .setFlowLoader(new SimpleFlowLoaderSpout(provider.getFlows(), 60000))
            .setEventsLoader(new MockEventGeneratorSpout(getMockEvents(), 10))
            .setOutputBolt(new PrinterBolt())
            .setParallelismHint(6)
            .create()
            .createTopology();

    Config conf = new Config();
    conf.setNumWorkers(20);
    conf.setMaxSpoutPending(5000);
    conf.setDebug(false);
    conf.registerSerialization(BaseEvent.class, EventSerializer.class);
    conf.setSkipMissingKryoRegistrations(false);

    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology("example-topology", conf, topology);
}
        boolean localMode) {
    backtype.storm.Config stormConfig = givenStormConfig == null ? new backtype.storm.Config() : givenStormConfig;
    stormConfig.setMessageTimeoutSecs(messageTimeoutSecs);
    stormConfig.registerMetricsConsumer(StormMetricTaggedConsumer.class,
            config.root().render(ConfigRenderOptions.concise()), 1);
    stormConfig.setNumWorkers(numOfTotalWorkers);

    StormTopology topology = buildTopology(topologyId, numOfSpoutTasks, numOfRouterBolts,
            numOfAlertBolts, numOfPublishExecutors, numOfPublishTasks, config).createTopology();
    if (localMode) { // restored from context: the else branch below implies this branch
        LocalCluster cluster = new LocalCluster();
        cluster.submitTopology(topologyId, stormConfig, topology);
        Utils.sleep(Long.MAX_VALUE);
    } else {
        LOG.info("Submitting as cluster mode");
        try {
            StormSubmitter.submitTopologyWithProgressBar(topologyId, stormConfig, topology);
        } catch (Exception ex) {
            LOG.error("fail submitting topology {}", topology, ex);
public static void runTopologyLocally(StormTopology topology, String topologyName,
        Config conf, int runtimeInSeconds, Callback callback) throws Exception {
    LocalCluster cluster = new LocalCluster();
    cluster.submitTopology(topologyName, conf, topology);

    // Run for at least 120 seconds, regardless of the requested runtime.
    if (runtimeInSeconds < 120) {
        JStormUtils.sleepMs(120 * 1000);
    } else {
        JStormUtils.sleepMs(runtimeInSeconds * 1000);
    }

    if (callback != null) {
        callback.execute(topologyName);
    }

    cluster.killTopology(topologyName);
    cluster.shutdown();
}
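The Callback parameter lets callers run assertions or cleanup against the topology just before it is killed. A minimal interface matching this usage (an assumed shape; the real JStorm helper may differ):

public interface Callback {
    void execute(String topologyName);
}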
    return;

Config conf = new Config();
conf.setDebug(false);

LocalCluster cluster = new LocalCluster();
cluster.submitTopology("test", conf, storm);
log.info("########################################################################");
Utils.sleep(10000000);
cluster.killTopology("test");
cluster.shutdown();