/**
 * Submits the given topology to the remote Storm cluster under the supplied name.
 *
 * @param arg      the name to register the topology under
 * @param topology the processing topology to execute
 * @param config   the topology-specific configuration
 * @throws Exception if the submission to the cluster fails
 */
protected void submitTopologyRemoteCluster(String arg, StormTopology topology, Config config) throws Exception {
    // Delegate directly to the standard submitter; note the (name, config, topology) argument order.
    StormSubmitter.submitTopology(arg, config, topology);
}
/**
 * Submits a topology to run on the cluster, displaying a progress bar during the jar
 * upload. A topology runs forever or until explicitly killed.
 *
 * @param name the name of the storm.
 * @param topoConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws AuthorizationException if authorization is failed
 */
public static void submitTopologyWithProgressBar(String name, Map<String, Object> topoConf, StormTopology topology)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // No submit options requested: forward with a null SubmitOptions.
    submitTopologyWithProgressBar(name, topoConf, topology, null);
}
/**
 * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
 *
 * @param name the name of the storm.
 * @param topoConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @param opts to manipulate the starting of the topology
 * @param progressListener to track the progress of the jar upload process
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws AuthorizationException if authorization is failed
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
@SuppressWarnings("unchecked")
public static void submitTopology(String name, Map<String, Object> topoConf, StormTopology topology,
                                  SubmitOptions opts, ProgressListener progressListener)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // Submit as the current user (null asUser).
    submitTopologyAs(name, topoConf, topology, opts, progressListener, null);
}
public static Map<String, Object> prepareZookeeperAuthentication(Map<String, Object> conf) { Map<String, Object> toRet = new HashMap<>(); String secretPayload = (String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD); // Is the topology ZooKeeper authentication configuration unset? if (!conf.containsKey(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) || conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) == null || !validateZKDigestPayload((String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD))) { secretPayload = generateZookeeperDigestSecretPayload(); LOG.info("Generated ZooKeeper secret payload for MD5-digest: " + secretPayload); } toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD, secretPayload); // This should always be set to digest. toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME, "digest"); return toRet; }
@SuppressWarnings("unchecked") public static Map prepareZookeeperAuthentication(Map conf) { Map toRet = new HashMap(); String secretPayload = (String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD); // Is the topology ZooKeeper authentication configuration unset? if (! conf.containsKey(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) || conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD) == null || ! validateZKDigestPayload((String) conf.get(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD))) { secretPayload = generateZookeeperDigestSecretPayload(); LOG.info("Generated ZooKeeper secret payload for MD5-digest: " + secretPayload); } toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_PAYLOAD, secretPayload); // This should always be set to digest. toRet.put(Config.STORM_ZOOKEEPER_TOPOLOGY_AUTH_SCHEME, "digest"); return toRet; }
/**
 * Submits the given topology to the remote Storm cluster, using this instance's
 * {@code tplgyName} field as the topology name.
 *
 * @param topology the processing topology to execute
 * @param config   the topology-specific configuration
 * @throws Exception if the submission to the cluster fails
 */
private void submitTopologyRemoteCluster(StormTopology topology, Config config) throws Exception {
    // Name comes from instance state; note the (name, config, topology) argument order.
    StormSubmitter.submitTopology(tplgyName, config, topology);
}
public static void runOnClusterAndPrintMetrics(int durationSec, String topoName, Map<String, Object> topoConf, StormTopology topology) throws Exception { // submit topology StormSubmitter.submitTopologyWithProgressBar(topoName, topoConf, topology); setupShutdownHook(topoName); // handle Ctrl-C // poll metrics every minute, then kill topology after specified duration Integer pollIntervalSec = 60; collectMetricsAndKill(topoName, pollIntervalSec, durationSec); } }
System.setProperty("storm.jar", jarPath.toString()); packageTopology(jarPath, processor); StormSubmitter.submitTopologyAs(name, topoConf, topo, opts, progressListener, asUser); } finally { if (jarPath != null) {
/**
 * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
 *
 * @param name the name of the storm.
 * @param topoConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws AuthorizationException if authorization is failed
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
public static void submitTopology(String name, Map<String, Object> topoConf, StormTopology topology)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // No submit options and no progress listener requested.
    submitTopology(name, topoConf, topology, null, null);
}
public static void main(String[] args) throws Exception { Config conf = new Config(); conf.setMaxSpoutPending(20); conf.put(Config.TOPOLOGY_TRIDENT_WINDOWING_INMEMORY_CACHE_LIMIT, 100); // window-state table should already be created with cf:tuples column HBaseWindowsStoreFactory windowStoreFactory = new HBaseWindowsStoreFactory(new HashMap<String, Object>(), "window-state", "cf".getBytes("UTF-8"), "tuples".getBytes("UTF-8")); String topoName = "wordCounterWithWindowing"; if (args.length > 0) { topoName = args[0]; } conf.setNumWorkers(3); StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology(windowStoreFactory)); }
/**
 * Submits a topology to run on the cluster. A topology runs forever or until
 * explicitly killed.
 *
 * <p>NOTE(review): the raw {@code Map} parameter is kept for backward compatibility;
 * prefer the {@code Map<String, Object>} overload in new code.
 *
 * @param name the name of the storm.
 * @param stormConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @param opts to manipulate the starting of the topology
 * @param progressListener to track the progress of the jar upload process
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws AuthorizationException if authorization is failed
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
@SuppressWarnings("unchecked")
public static void submitTopology(String name, Map stormConf, StormTopology topology,
                                  SubmitOptions opts, ProgressListener progressListener)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // Submit as the current user (null asUser).
    submitTopologyAs(name, stormConf, topology, opts, progressListener, null);
}
/**
 * Submits a topology to run on the cluster. A topology runs forever or until explicitly killed.
 *
 * @param name the name of the storm.
 * @param topoConf the topology-specific configuration. See {@link Config}.
 * @param topology the processing to execute.
 * @param opts to manipulate the starting of the topology.
 * @throws AlreadyAliveException if a topology with this name is already running
 * @throws InvalidTopologyException if an invalid topology was submitted
 * @throws AuthorizationException if authorization is failed
 * @throws SubmitterHookException if any Exception occurs during initialization or invocation of registered {@link ISubmitterHook}
 */
public static void submitTopology(String name, Map<String, Object> topoConf, StormTopology topology, SubmitOptions opts)
        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException {
    // No progress listener requested.
    submitTopology(name, topoConf, topology, opts, null);
}
/**
 * Entry point: builds the vehicles topology and submits it to the cluster
 * under the fixed name {@code "vehicles-topology"}.
 *
 * @param args unused
 * @throws Exception if topology construction or submission fails
 */
public static void main(String[] args) throws Exception {
    StormTopology topology = buildVehiclesTopology();
    Config clusterConf = new Config();
    clusterConf.setMaxSpoutPending(20);
    clusterConf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar("vehicles-topology", clusterConf, topology);
}
AuthorizationException { submitTopology(name, topoConf, topology, opts, new StormSubmitter.ProgressListener() { @Override public void onStart(String srcFile, String targetFile, long totalBytes) {
/**
 * Entry point: builds the topology and submits it to the cluster. An optional first
 * CLI argument overrides the default topology name ({@code "test"}).
 *
 * @param args optional; {@code args[0]} is the topology name
 * @throws Exception if topology construction or submission fails
 */
public static void main(String[] args) throws Exception {
    StormTopology topology = getStormTopology();
    Config conf = new Config();
    conf.setDebug(true);
    // Default name, overridable from the command line.
    String topoName = args.length > 0 ? args[0] : "test";
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, topology);
}
/**
 * Submits the topology under a specific name.
 *
 * @param name    name to submit the topology under
 * @param conf    topology configuration
 * @param builder builder whose topology is created and submitted
 * @return 0 on success, -1 if submission threw any exception
 */
protected int submit(String name, Config conf, TopologyBuilder builder) {
    try {
        StormSubmitter.submitTopology(name, conf, builder.createTopology());
        return 0;
    } catch (Exception e) {
        // CLI-style error reporting: dump the stack and signal failure via the return code.
        e.printStackTrace();
        return -1;
    }
}
/**
 * Entry point: builds the devices topology and submits it to the cluster
 * under the fixed name {@code "devices-topology"}.
 *
 * @param args unused
 * @throws Exception if topology construction or submission fails
 */
public static void main(String[] args) throws Exception {
    StormTopology topology = buildDevicesTopology();
    Config clusterConf = new Config();
    clusterConf.setMaxSpoutPending(20);
    clusterConf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar("devices-topology", clusterConf, topology);
}
protected void runMain(String[] args) throws Exception { final String brokerUrl = args.length > 0 ? args[0] : KAFKA_LOCAL_BROKER; System.out.println("Running with broker url: " + brokerUrl); Config tpConf = getConfig(); // Producers. This is just to get some data in Kafka, normally you would be getting this data from elsewhere StormSubmitter.submitTopology(TOPIC_0 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_0)); StormSubmitter.submitTopology(TOPIC_1 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_1)); StormSubmitter.submitTopology(TOPIC_2 + "-producer", tpConf, KafkaProducerTopology.newTopology(brokerUrl, TOPIC_2)); //Consumer. Sets up a topology that reads the given Kafka spouts and logs the received messages StormSubmitter.submitTopology("storm-kafka-client-spout-test", tpConf, getTopologyKafkaSpout(getKafkaSpoutConfig(brokerUrl))); }
/**
 * Entry point: submits the word-count topology, then issues ten DRPC queries against
 * the {@code "words"} function, printing each result one second apart. An optional
 * first CLI argument overrides the default topology name ({@code "wordCounter"}).
 *
 * @param args optional; {@code args[0]} is the topology name
 * @throws Exception if submission or a DRPC call fails
 */
public static void main(String[] args) throws Exception {
    Config conf = new Config();
    conf.setMaxSpoutPending(20);
    // Default name, overridable from the command line.
    String topoName = args.length > 0 ? args[0] : "wordCounter";
    conf.setNumWorkers(3);
    StormSubmitter.submitTopologyWithProgressBar(topoName, conf, buildTopology());
    // Query the running topology via DRPC; the client is closed automatically.
    try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
        for (int attempt = 0; attempt < 10; attempt++) {
            System.out.println("DRPC RESULT: " + drpc.execute("words", "cat the dog jumped"));
            Thread.sleep(1000);
        }
    }
}
/**
 * Entry point: submits the Redis-backed word-count topology. Requires exactly two
 * CLI arguments: the Redis host and port; exits with status 1 otherwise.
 *
 * @param args {@code args[0]} = redis host, {@code args[1]} = redis port
 * @throws Exception if topology construction or submission fails
 */
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.out.println("Usage: WordCountTrident redis-host redis-port");
        System.exit(1);
    }
    String redisHost = args[0];
    // parseInt yields a primitive directly; valueOf would box needlessly.
    int redisPort = Integer.parseInt(args[1]);
    Config conf = new Config();
    conf.setMaxSpoutPending(5);
    conf.setNumWorkers(3);
    StormSubmitter.submitTopology("test_wordCounter_for_redis", conf, buildTopology(redisHost, redisPort));
}