/**
 * Builds a configuration map from the stock defaults with the given
 * entries layered on top (the overrides win on key collisions).
 *
 * @param extra entries to overlay onto the default config
 * @return the merged configuration map
 */
private Map<String, Object> mkConf(Map<String, Object> extra) {
    Map<String, Object> merged = Utils.readDefaultConfig();
    merged.putAll(extra);
    return merged;
}
/**
 * Reads the effective storm configuration: built-in defaults, overlaid with
 * storm.yaml (or the file named by the {@code storm.conf.file} system
 * property when set), overlaid with any command-line overrides.
 *
 * @return the merged configuration map
 */
public static Map<String, Object> readStormConfig() {
    Map<String, Object> conf = readDefaultConfig();
    String confFile = System.getProperty("storm.conf.file");
    // An explicitly named file must exist; the default storm.yaml is optional.
    boolean useDefault = (confFile == null || confFile.isEmpty());
    Map<String, Object> fileConf = useDefault
            ? findAndReadConfigFile("storm.yaml", false)
            : findAndReadConfigFile(confFile, true);
    conf.putAll(fileConf);
    conf.putAll(readCommandLineOpts());
    return conf;
}
/**
 * Builds a fake config pointing at the test nimbus host and thrift port,
 * on top of the stock defaults.
 *
 * @return conf
 */
private static Config initializedConfig() {
    Config conf = new Config();
    conf.putAll(Utils.readDefaultConfig());
    // NIMBUS_SEEDS expects a list of host names, even for a single seed.
    ArrayList<String> seeds = new ArrayList<>();
    seeds.add(NIMBUS_HOST);
    conf.put(Config.NIMBUS_SEEDS, seeds);
    conf.put(Config.NIMBUS_THRIFT_PORT, NIMBUS_PORT);
    return conf;
}
@Before
public void setup() {
    // Start every test from the stock defaults, and disable nimbus client
    // retries so failures surface immediately instead of being retried.
    conf = Utils.readDefaultConfig();
    conf.put(Config.STORM_NIMBUS_RETRY_TIMES, 0);
}
// Pointing the tuple serializer at a class that does not exist must fail
// fast with a RuntimeException rather than silently falling back.
@Test(expected = RuntimeException.class)
public void test_throws_runtimeexception_when_no_such_class() {
    Map<String, Object> topoConf = Utils.readDefaultConfig();
    topoConf.put(Config.TOPOLOGY_TUPLE_SERIALIZER, "null.this.class.does.not.exist");
    SerializationFactory.getKryo(topoConf);
}
// Configuring a real serializer class name must register that class as the
// serializer Kryo uses for ListDelegate.
@Test
public void test_registers_when_valid_class_name() {
    Class serializerClass = BlowfishTupleSerializer.class;
    String blowfishKey = "0123456789abcdef";

    Map<String, Object> topoConf = Utils.readDefaultConfig();
    topoConf.put(Config.TOPOLOGY_TUPLE_SERIALIZER, serializerClass.getName());
    topoConf.put(BlowfishTupleSerializer.SECRET_KEY, blowfishKey);

    Kryo kryo = SerializationFactory.getKryo(topoConf);
    Assert.assertEquals(serializerClass, kryo.getSerializer(ListDelegate.class).getClass());
}
// With no override, the serializer class named in the default config must be
// the one Kryo ends up using for ListDelegate.
@Test
public void test_registers_default_when_not_in_conf() throws ClassNotFoundException {
    Map<String, Object> topoConf = Utils.readDefaultConfig();
    Class defaultSerializer =
            Class.forName((String) topoConf.get(Config.TOPOLOGY_TUPLE_SERIALIZER));

    Kryo kryo = SerializationFactory.getKryo(topoConf);

    Assert.assertEquals(defaultSerializer, kryo.getSerializer(ListDelegate.class).getClass());
}
/**
 * Builds a cluster config for resource-aware-scheduler tests: defaults plus
 * the test DNS-to-switch mapping, the default priority and scheduling
 * strategies, and the given per-component resource numbers.
 *
 * @param compPcore   per-component CPU (pcore percent)
 * @param compOnHeap  per-component on-heap memory (MB)
 * @param compOffHeap per-component off-heap memory (MB)
 * @param pools       optional per-user resource pools; skipped when null
 * @return the assembled Config
 */
public static Config createClusterConfig(double compPcore, double compOnHeap, double compOffHeap,
                                         Map<String, Map<String, Number>> pools) {
    Config clusterConf = new Config();
    clusterConf.putAll(Utils.readDefaultConfig());

    // Plumbing: topology plugin and the two default strategies.
    clusterConf.put(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN,
            GenSupervisorsDnsToSwitchMapping.class.getName());
    clusterConf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_PRIORITY_STRATEGY,
            DefaultSchedulingPriorityStrategy.class.getName());
    clusterConf.put(Config.TOPOLOGY_SCHEDULER_STRATEGY,
            DefaultResourceAwareStrategy.class.getName());

    // Per-component resource requests.
    clusterConf.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, compPcore);
    clusterConf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, compOffHeap);
    clusterConf.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, compOnHeap);

    if (pools != null) {
        clusterConf.put(DaemonConfig.RESOURCE_AWARE_SCHEDULER_USER_POOLS, pools);
    }
    return clusterConf;
}
/**
 * Verifies that newCurator wires the configured ZK retry interval, retry
 * count, and interval ceiling into a StormBoundedExponentialBackoffRetry
 * policy.
 *
 * <p>Fix: JUnit's {@code Assert.assertEquals(expected, actual)} takes the
 * expected value FIRST; the original calls had the arguments reversed, which
 * produces misleading "expected X but was Y" messages on failure.
 */
@Test
public void newCuratorUsesExponentialBackoffTest() throws InterruptedException {
    final int expectedInterval = 2400;
    final int expectedRetries = 10;
    final int expectedCeiling = 3000;

    Map<String, Object> config = Utils.readDefaultConfig();
    config.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, expectedInterval);
    config.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, expectedRetries);
    config.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL_CEILING, expectedCeiling);

    // The server list is bogus on purpose: we only inspect the retry policy,
    // never actually connect.
    CuratorFramework curator = CuratorUtils.newCurator(config, Arrays.asList("bogus_server"), 42,
            "", DaemonType.WORKER.getDefaultZkAcls(config));
    StormBoundedExponentialBackoffRetry policy =
            (StormBoundedExponentialBackoffRetry) curator.getZookeeperClient().getRetryPolicy();

    Assert.assertEquals(expectedInterval, policy.getBaseSleepTimeMs());
    Assert.assertEquals(expectedRetries, policy.getN());
    Assert.assertEquals(expectedCeiling, policy.getSleepTimeMs(10, 0));
}
// Fragment (enclosing method not visible in this chunk): sets up a blacklist
// scheduler config over the stock defaults with a short tolerance window so
// the blacklist behavior triggers quickly in the test.
INimbus iNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
Config config = new Config();
config.putAll(Utils.readDefaultConfig());
// NOTE(review): units of TOLERANCE_TIME are not visible here — presumably
// seconds; confirm against DaemonConfig documentation.
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
@Test public void testSubmitTopologyToLocalNimbus() throws Exception { int port = Utils.getAvailablePort(); try (ILocalCluster localCluster = new LocalCluster.Builder() .withNimbusDaemon(true) .withDaemonConf(Config.NIMBUS_THRIFT_PORT, port) .build()) { Config topoConf = new Config(); topoConf.putAll(Utils.readDefaultConfig()); topoConf.setDebug(true); topoConf.put("storm.cluster.mode", "local"); // default is aways "distributed" but here local cluster is being used. topoConf.put(Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN, InmemoryTopologySubmitterHook.class.getName()); topoConf.put(Config.NIMBUS_THRIFT_PORT, port); List<TopologyDetails> topologyNames = new ArrayList<>(); for (int i = 0; i < 4; i++) { final String topologyName = "word-count-" + UUID.randomUUID().toString(); final StormTopology stormTopology = createTestTopology(); topologyNames.add(new TopologyDetails(topologyName, stormTopology)); localCluster.submitTopology(topologyName, topoConf, stormTopology); } Assert.assertEquals(InmemoryTopologySubmitterHook.submittedTopologies, topologyNames); } }
// Fragment (enclosing method not visible in this chunk): overlay defaults and
// shorten the blacklist tolerance so the scheduler blacklists quickly in tests.
config.putAll(Utils.readDefaultConfig());
// NOTE(review): TOLERANCE_TIME units not visible here — confirm against
// DaemonConfig documentation.
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME,200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT,2);
// NOTE(review): this method is truncated in this chunk — only the config
// setup prefix is visible; the body (and closing brace) continue elsewhere.
public void testSetupAndTearDown() throws IOException {
    Config config = new Config();
    config.putAll(Utils.readDefaultConfig());
// A slot that repeatedly disappears (sup-0 port 0 removed twice within the
// tolerance window) must get its host blacklisted, and the blacklist must
// persist once the slot comes back.
@Test
public void TestBadSlot() {
    INimbus fakeNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
    Map<String, SupervisorDetails> supervisors = TestUtilsForBlacklistScheduler.genSupervisors(3, 4);

    Config schedulerConf = new Config();
    schedulerConf.putAll(Utils.readDefaultConfig());
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME, 300);

    TopologyDetails topology = TestUtilsForBlacklistScheduler.getTopology(
            "topo-1", schedulerConf, 5, 15, 1, 1, currentTime - 2, true);
    Map<String, TopologyDetails> topologyMap = new HashMap<String, TopologyDetails>();
    topologyMap.put(topology.getId(), topology);
    Topologies topologies = new Topologies(topologyMap);

    StormMetricsRegistry registry = new StormMetricsRegistry();
    ResourceMetrics resources = new ResourceMetrics(registry);
    Cluster cluster = new Cluster(fakeNimbus, resources, supervisors,
            new HashMap<String, SchedulerAssignmentImpl>(), topologies, schedulerConf);

    BlacklistScheduler blacklistScheduler = new BlacklistScheduler(new DefaultScheduler(), registry);
    blacklistScheduler.prepare(schedulerConf);

    // Round 1: healthy cluster.
    blacklistScheduler.schedule(topologies, cluster);

    // Rounds 2 and 3: slot 0 on sup-0 is missing — two consecutive failures.
    cluster = new Cluster(fakeNimbus, resources,
            TestUtilsForBlacklistScheduler.removePortFromSupervisors(supervisors, "sup-0", 0),
            TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()),
            topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);
    cluster = new Cluster(fakeNimbus, resources,
            TestUtilsForBlacklistScheduler.removePortFromSupervisors(supervisors, "sup-0", 0),
            TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()),
            topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);

    // Round 4: the slot is back, but the host should already be blacklisted.
    cluster = new Cluster(fakeNimbus, resources, supervisors,
            new HashMap<String, SchedulerAssignmentImpl>(), topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);

    Assert.assertEquals("blacklist", Collections.singleton("host-0"), cluster.getBlacklistedHosts());
}
// Fragment (enclosing method not visible in this chunk): overlay defaults and
// shorten the blacklist tolerance so the scheduler blacklists quickly in tests.
config.putAll(Utils.readDefaultConfig());
// NOTE(review): TOLERANCE_TIME units not visible here — confirm against
// DaemonConfig documentation.
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
/**
 * Reads the storm configuration — built-in defaults overlaid with storm.yaml
 * (or the file named by the {@code storm.conf.file} system property) — but
 * WITHOUT applying command-line overrides.
 *
 * <p>Fix: the original used raw {@code Map} types (unchecked warnings,
 * inconsistent with the parameterized {@code readStormConfig}); this is a
 * private method, so tightening to {@code Map<String, Object>} is safe and
 * source-compatible for raw-typed callers.
 *
 * @return the merged configuration map
 */
private Map<String, Object> readStormConfigWithoutCLI() {
    Map<String, Object> ret = Utils.readDefaultConfig();
    String confFile = System.getProperty("storm.conf.file");
    Map<String, Object> storm;
    if (confFile == null || confFile.equals("")) {
        // No explicit file configured: the default storm.yaml is optional.
        storm = Utils.findAndReadConfigFile("storm.yaml", false);
    } else {
        // An explicitly named config file must exist.
        storm = Utils.findAndReadConfigFile(confFile, true);
    }
    ret.putAll(storm);
    return ret;
}
// Fragment (enclosing method not visible in this chunk): overlay defaults and
// shorten the blacklist tolerance so the scheduler blacklists quickly in tests.
config.putAll(Utils.readDefaultConfig());
// NOTE(review): TOLERANCE_TIME units not visible here — confirm against
// DaemonConfig documentation.
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
config.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
// A supervisor that disappears twice within the tolerance window (sup-0
// removed entirely) must get its host blacklisted, and the blacklist must
// persist after the supervisor returns.
@Test
public void TestBadSupervisor() {
    INimbus fakeNimbus = new TestUtilsForBlacklistScheduler.INimbusTest();
    Map<String, SupervisorDetails> supervisors = TestUtilsForBlacklistScheduler.genSupervisors(3, 4);

    Config schedulerConf = new Config();
    schedulerConf.putAll(Utils.readDefaultConfig());
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_TIME, 200);
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_TOLERANCE_COUNT, 2);
    schedulerConf.put(DaemonConfig.BLACKLIST_SCHEDULER_RESUME_TIME, 300);

    TopologyDetails topology = TestUtilsForBlacklistScheduler.getTopology(
            "topo-1", schedulerConf, 5, 15, 1, 1, currentTime - 2, true);
    Map<String, TopologyDetails> topologyMap = new HashMap<String, TopologyDetails>();
    topologyMap.put(topology.getId(), topology);
    Topologies topologies = new Topologies(topologyMap);

    StormMetricsRegistry registry = new StormMetricsRegistry();
    ResourceMetrics resources = new ResourceMetrics(registry);
    Cluster cluster = new Cluster(fakeNimbus, resources, supervisors,
            new HashMap<>(), topologies, schedulerConf);

    BlacklistScheduler blacklistScheduler = new BlacklistScheduler(new DefaultScheduler(), registry);
    blacklistScheduler.prepare(schedulerConf);

    // Round 1: healthy cluster.
    blacklistScheduler.schedule(topologies, cluster);

    // Rounds 2 and 3: sup-0 is gone — two consecutive failures.
    cluster = new Cluster(fakeNimbus, resources,
            TestUtilsForBlacklistScheduler.removeSupervisorFromSupervisors(supervisors, "sup-0"),
            TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()),
            topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);
    cluster = new Cluster(fakeNimbus, resources,
            TestUtilsForBlacklistScheduler.removeSupervisorFromSupervisors(supervisors, "sup-0"),
            TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()),
            topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);

    // Round 4: the supervisor is back, but its host should stay blacklisted.
    cluster = new Cluster(fakeNimbus, resources, supervisors,
            TestUtilsForBlacklistScheduler.assignmentMapToImpl(cluster.getAssignments()),
            topologies, schedulerConf);
    blacklistScheduler.schedule(topologies, cluster);

    Assert.assertEquals("blacklist", Collections.singleton("host-0"), cluster.getBlacklistedHosts());
}
/**
 * Assembles the effective storm config in increasing priority order:
 * defaults, then the config file, then command-line overrides.
 *
 * <p>The file is storm.yaml unless the {@code storm.conf.file} system
 * property names one; a named file is required to exist, storm.yaml is not.
 *
 * @return the merged configuration map
 */
public static Map<String, Object> readStormConfig() {
    Map<String, Object> merged = readDefaultConfig();
    String explicitFile = System.getProperty("storm.conf.file");
    Map<String, Object> fromFile;
    if (explicitFile != null && !explicitFile.equals("")) {
        fromFile = findAndReadConfigFile(explicitFile, true);
    } else {
        fromFile = findAndReadConfigFile("storm.yaml", false);
    }
    merged.putAll(fromFile);
    merged.putAll(readCommandLineOpts());
    return merged;
}
@Test public void testSerialization() throws IOException { Map conf = Utils.readDefaultConfig(); Config.registerSerialization(conf, Metadata.class); KryoValuesSerializer kvs = new KryoValuesSerializer(conf); Metadata md = new Metadata(); byte[] content = kvs.serializeObject(md); KryoValuesDeserializer kvd = new KryoValuesDeserializer(conf); Metadata md2 = (Metadata) kvd.deserializeObject(content); // TODO compare md1 and md2 } }