/**
 * Converts the given object to a {@code Double}, failing fast when the
 * conversion yields null (e.g. the input itself was null).
 *
 * @param o the value to convert.
 * @return the converted value, never null.
 * @throws IllegalArgumentException if the value cannot be converted to a non-null double.
 */
public static Double getDouble(Object o) {
    final Double converted = getDouble(o, null);
    if (converted == null) {
        throw new IllegalArgumentException("Don't know how to convert null to double");
    }
    return converted;
}
/**
 * Derives the sampling interval from the configured topology stats sample rate.
 * For example a rate of 0.05 yields an interval of 20 (sample 1 in every 20).
 *
 * @param conf the topology configuration.
 * @return {@code (int) (1 / rate)} for the configured rate.
 * @throws IllegalArgumentException if the configured rate is zero or negative.
 */
public static int samplingRate(Map<String, Object> conf) {
    double rate = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_STATS_SAMPLE_RATE));
    // Require a strictly positive rate: the previous `rate != 0` guard let a
    // negative rate through and produced a nonsensical negative interval.
    if (rate > 0) {
        return (int) (1 / rate);
    }
    throw new IllegalArgumentException("Illegal topology.stats.sample.rate in conf: " + rate);
}
/**
 * Determines whether a supervisor's leftover resources are too small to be
 * useful: below the minimum footprint of one component executor plus one acker
 * (using the configured on-heap memory and CPU defaults).
 *
 * @param supervisorResources the supervisor's currently available resources.
 * @return true if the available memory or CPU is under that minimum footprint.
 */
private boolean isFragmented(SupervisorResources supervisorResources) {
    final double componentMem =
        ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), 256.0);
    final double ackerMem =
        ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB), 128.0);
    final double componentCpu =
        ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), 50.0);
    final double ackerCpu =
        ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT), 50.0);
    if (componentMem + ackerMem > supervisorResources.getAvailableMem()) {
        return true;
    }
    return componentCpu + ackerCpu > supervisorResources.getAvailableCpu();
}
/**
 * Estimates the number of workers a RAS topology will need by dividing the
 * topology's total estimated heap requirement by the per-worker max heap size
 * and rounding up.
 *
 * @param topoConf the topology configuration.
 * @param topology the topology structure.
 * @return the estimated worker count (ceiling of totalHeap / perWorkerMaxHeap).
 * @throws InvalidTopologyException if the topology is invalid.
 */
public static int getEstimatedWorkerCountForRASTopo(Map<String, Object> topoConf, StormTopology topology)
    throws InvalidTopologyException {
    // Fall back to the generic worker heap setting when the topology-specific
    // max heap is not configured.
    double workerDefaultHeap = ObjectReader.getDouble(topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768d);
    double workerMaxHeap =
        ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), workerDefaultHeap);
    double totalHeap = getEstimatedTotalHeapMemoryRequiredByTopo(topoConf, topology);
    return (int) Math.ceil(totalHeap / workerMaxHeap);
}
/**
 * Read the stats from a config.
 * @param conf the config (may be null, treated as empty).
 * @param def the default mean.
 * @return the corresponding stats.
 */
public static NormalDistStats fromConf(Map<String, Object> conf, Double def) {
    // Use a local instead of reassigning the parameter.
    final Map<String, Object> stats = (conf == null) ? Collections.emptyMap() : conf;
    final double mean = ObjectReader.getDouble(stats.get("mean"), def);
    // Default spread is a quarter of the mean when not configured explicitly.
    final double stddev = ObjectReader.getDouble(stats.get("stddev"), mean / 4);
    final double min = ObjectReader.getDouble(stats.get("min"), 0.0);
    final double max = ObjectReader.getDouble(stats.get("max"), Double.MAX_VALUE);
    return new NormalDistStats(mean, stddev, min, max);
}
/**
 * Initializes member variables from the topology configuration.
 * Both settings are expected to be present; this is verified with
 * assertions (enforced only when assertions are enabled).
 */
private void initConfigs() {
    topologyWorkerMaxHeapSize =
        ObjectReader.getDouble(topologyConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), null);
    topologyPriority =
        ObjectReader.getInt(topologyConf.get(Config.TOPOLOGY_PRIORITY), null);
    assert topologyWorkerMaxHeapSize != null;
    assert topologyPriority != null;
}
private Map<String, Double> mkSupervisorCapacities(Map<String, Object> conf) { Map<String, Double> ret = new HashMap<String, Double>(); // Put in legacy values Double mem = ObjectReader.getDouble(conf.get(Config.SUPERVISOR_MEMORY_CAPACITY_MB), 4096.0); ret.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, mem); Double cpu = ObjectReader.getDouble(conf.get(Config.SUPERVISOR_CPU_CAPACITY), 400.0); ret.put(Config.SUPERVISOR_CPU_CAPACITY, cpu); // If configs are present in Generic map and legacy - the legacy values will be overwritten Map<String, Number> rawResourcesMap = (Map<String, Number>) conf.getOrDefault( Config.SUPERVISOR_RESOURCES_MAP, Collections.emptyMap() ); for (Map.Entry<String, Number> stringNumberEntry : rawResourcesMap.entrySet()) { ret.put(stringNumberEntry.getKey(), stringNumberEntry.getValue().doubleValue()); } return NormalizedResources.RESOURCE_NAME_NORMALIZER.normalizedResourceMap(ret); }
/**
 * Computes the memory limit (MB) for a worker. Without resource isolation the
 * limit is just the on-heap memory; with isolation the off-heap memory and the
 * configured tolerance margin are added on top.
 *
 * @param resources the worker's assigned resources.
 * @param memOnHeap the worker's on-heap memory in MB.
 * @return the total memory limit in MB.
 */
private long calculateMemoryLimit(final WorkerResources resources, final int memOnHeap) {
    if (_resourceIsolationManager == null) {
        return memOnHeap;
    }
    final int memOffHeap = (int) Math.ceil(resources.get_mem_off_heap());
    // Extra headroom before the limit is enforced; defaults to no margin.
    final int toleranceMargin = (int) Math.ceil(
        ObjectReader.getDouble(
            _conf.get(DaemonConfig.STORM_SUPERVISOR_MEMORY_LIMIT_TOLERANCE_MARGIN_MB), 0.0));
    return (long) memOnHeap + memOffHeap + toleranceMargin;
}
/**
 * Creates a SlowExecutorPattern from a Map config.
 * @param conf the conf to parse.
 * @return the corresponding SlowExecutorPattern.
 */
public static SlowExecutorPattern fromConf(Map<String, Object> conf) {
    // Defaults: no added slowness, a single executor.
    final double slownessMs = ObjectReader.getDouble(conf.get("slownessMs"), 0.0);
    final int executorCount = ObjectReader.getInt(conf.get("count"), 1);
    return new SlowExecutorPattern(slownessMs, executorCount);
}
/**
 * Validates that the configured worker max heap size is large enough to fit
 * the most memory-hungry executor in the topology; otherwise the topology
 * could never be scheduled.
 *
 * @param stormConf the topology configuration.
 * @param topology the topology structure.
 * @param defaultWorkerMaxHeapSizeMB fallback max heap when not configured.
 * @throws IllegalArgumentException if the max heap is below the largest
 *     single-executor memory requirement.
 */
@VisibleForTesting
static void validateTopologyWorkerMaxHeapSizeConfigs(
    Map<String, Object> stormConf, StormTopology topology, double defaultWorkerMaxHeapSizeMB) {
    final double largestMemReq = getMaxExecutorMemoryUsageForTopo(topology, stormConf);
    final double workerMaxHeap = ObjectReader.getDouble(
        stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeapSizeMB);
    if (workerMaxHeap < largestMemReq) {
        throw new IllegalArgumentException(
            "Topology will not be able to be successfully scheduled: Config "
                + "TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB=" + workerMaxHeap + " < "
                + largestMemReq + " (Largest memory requirement of a component in the topology)."
                + " Perhaps set TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB to a larger amount");
    }
}
if (topoConf.containsKey(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB)) { ret.put(Constants.COMMON_ONHEAP_MEMORY_RESOURCE_NAME, ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB))); ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_ACKER_RESOURCES_OFFHEAP_MEMORY_MB))); ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT))); ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_METRICS_CONSUMER_RESOURCES_ONHEAP_MEMORY_MB))); ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_METRICS_CONSUMER_RESOURCES_OFFHEAP_MEMORY_MB))); ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_METRICS_CONSUMER_CPU_PCORE_PERCENT)));
if (jsonObject.containsKey(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB)) { Double topoMemOnHeap = ObjectReader .getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, topoMemOnHeap); .getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, topoMemOffHeap); Double topoCpu = ObjectReader.getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, topoCpu);
/**
 * Parse the LoadCompConf from a config Map.
 * @param conf the map holding the config for a LoadCompConf.
 * @return the parsed object.
 */
public static LoadCompConf fromConf(Map<String, Object> conf) {
    final String id = (String) conf.get("id");
    final int parallelism = ObjectReader.getInt(conf.get("parallelism"), 1);
    final List<OutputStream> streams = new ArrayList<>();
    @SuppressWarnings("unchecked")
    final List<Map<String, Object>> rawStreams = (List<Map<String, Object>>) conf.get("streams");
    if (rawStreams != null) {
        for (Map<String, Object> streamConf : rawStreams) {
            streams.add(OutputStream.fromConf(streamConf));
        }
    }
    // Resource defaults: no memory load, no CPU load.
    final double memoryMb = ObjectReader.getDouble(conf.get("memoryLoad"), 0.0);
    final double cpuPercent = ObjectReader.getDouble(conf.get("cpuLoad"), 0.0);
    SlowExecutorPattern slowPattern = null;
    if (conf.containsKey("slowExecutorPattern")) {
        slowPattern = SlowExecutorPattern.fromConf((Map<String, Object>) conf.get("slowExecutorPattern"));
    }
    return new LoadCompConf(id, parallelism, streams, memoryMb, cpuPercent, slowPattern);
}
.getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB, topoMemOnHeap); .getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_RESOURCES_OFFHEAP_MEMORY_MB, topoMemOffHeap); Double topoCpu = ObjectReader.getDouble(jsonObject.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), null); topologyResources.put(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT, topoCpu);
/**
 * One-time setup for this grouping instance: records the identity of the
 * worker it runs in, reads the locality-aware bounds from the topology conf,
 * and pre-allocates the choice rings used when routing to target tasks.
 * NOTE(review): presumably a locality-aware shuffle-style grouping — confirm
 * against the enclosing class and updateRing().
 */
@Override
public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
    random = new Random();
    // Identity of this worker: host plus its (single) port.
    sourceNodeInfo = new NodeInfo(context.getThisWorkerHost(), Sets.newHashSet((long) context.getThisWorkerPort()));
    taskToNodePort = context.getTaskToNodePort();
    this.targetTasks = targetTasks;
    // Size of the pre-computed choice ring: a single target needs only 1 slot;
    // otherwise use at least 1000 (or 5x the target count) for resolution.
    capacity = targetTasks.size() == 1 ? 1 : Math.max(1000, targetTasks.size() * 5);
    conf = context.getConf();
    // Plugin class name comes from config; instantiated reflectively.
    dnsToSwitchMapping = ReflectionUtils.newInstance((String) conf.get(Config.STORM_NETWORK_TOPOGRAPHY_PLUGIN));
    localityGroup = new HashMap<>();
    // Start at the most local scope.
    currentScope = Scope.WORKER_LOCAL;
    higherBound = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_LOCALITYAWARE_HIGHER_BOUND));
    lowerBound = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_LOCALITYAWARE_LOWER_BOUND));
    // One singleton task-list per target, plus an index/weight record per target.
    rets = (List<Integer>[]) new List<?>[targetTasks.size()];
    int i = 0;
    for (int target : targetTasks) {
        rets[i] = Arrays.asList(target);
        orig.put(target, new IndexAndWeights(i));
        i++;
    }
    // can't leave choices to be empty, so initiate it similar as ShuffleGrouping
    choices = new int[capacity];
    current = new AtomicInteger(0);
    // allocate another array to be switched
    prepareChoices = new int[capacity];
    // Must run last: fills the rings from the fields initialized above.
    updateRing(null);
}
(int) (Math.ceil( ObjectReader.getDouble( this.conf.get(DaemonConfig.STORM_CGROUP_MEMORY_LIMIT_TOLERANCE_MARGIN_MB), 0.0)));
ObjectReader.getDouble(conf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB))); Utils.validateTopologyBlobStoreMap(topoConf, blobStore); long uniqueNum = submittedCount.incrementAndGet();