/**
 * Get how long this topology has been executing.
 */
public int getUpTime() {
    return Time.currentTimeSecs() - launchTime;
}
private void updateLastReturned(Map ret) {
    lastReturnedTime = Time.currentTimeSecs();
    lastReturnedValue = ret;
}
public UptimeComputer() {
    startTime = Time.currentTimeSecs();
}
public static int deltaSecs(int timeInSeconds) {
    return Time.currentTimeSecs() - timeInSeconds;
}
public ExecutorCache(Map<String, Object> newBeat) {
    if (newBeat != null) {
        executorReportedTimeSecs = (Integer) newBeat.getOrDefault(ClientStatsUtil.TIME_SECS, 0);
    } else {
        executorReportedTimeSecs = 0;
    }
    nimbusTimeSecs = Time.currentTimeSecs();
    isTimedOut = false;
}
/**
 * Create a new worker heartbeat for ZooKeeper.
 * @param topoId the topology id
 * @param executorStats the stats for the executors
 * @param uptime the uptime for the worker
 * @return the heartbeat map
 */
public static Map<String, Object> mkZkWorkerHb(String topoId, Map<List<Integer>, ExecutorStats> executorStats, Integer uptime) {
    Map<String, Object> ret = new HashMap<>();
    ret.put("storm-id", topoId);
    ret.put(EXECUTOR_STATS, executorStats);
    ret.put(UPTIME, uptime);
    ret.put(TIME_SECS, Time.currentTimeSecs());
    return ret;
}
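// Illustrative usage sketch (not part of the source): builds a heartbeat map with an
// empty stats map and a 120-second uptime. The enclosing class is assumed to be
// ClientStatsUtil, based on the ClientStatsUtil.TIME_SECS references elsewhere in this
// section; ExecutorStats is the Thrift-generated stats type and is assumed to be imported.
public static Map<String, Object> exampleHeartbeat() {
    // A real caller would populate `stats` per executor id pair; an empty map is
    // enough to show the shape of the returned heartbeat.
    Map<List<Integer>, ExecutorStats> stats = new HashMap<>();
    return mkZkWorkerHb("topo-1", stats, 120);
}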
public synchronized void updateFromHb(Integer timeout, Map<String, Object> newBeat) {
    if (newBeat != null) {
        Integer newReportedTime = (Integer) newBeat.getOrDefault(ClientStatsUtil.TIME_SECS, 0);
        if (!newReportedTime.equals(executorReportedTimeSecs)) {
            nimbusTimeSecs = Time.currentTimeSecs();
        }
        executorReportedTimeSecs = newReportedTime;
    }
    updateTimeout(timeout);
}
/**
 * Deletes topology history entries older than {@code mins} minutes.
 *
 * @param mins the age threshold, in minutes
 */
private void cleanTopologyHistory(int mins) {
    int cutoffAgeSecs = Time.currentTimeSecs() - (mins * 60);
    synchronized (topologyHistoryLock) {
        LocalState state = topologyHistoryState;
        state.filterOldTopologies(cutoffAgeSecs);
    }
}
private static IClusterMetricsConsumer.ClusterInfo mkClusterInfo() {
    return new IClusterMetricsConsumer.ClusterInfo(Time.currentTimeSecs());
}
public ReportError(Map<String, Object> topoConf, IStormClusterState stormClusterState, String stormId, String componentId,
                   WorkerTopologyContext workerTopologyContext) {
    this.topoConf = topoConf;
    this.stormClusterState = stormClusterState;
    this.stormId = stormId;
    this.componentId = componentId;
    this.workerTopologyContext = workerTopologyContext;
    this.errorIntervalSecs = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_ERROR_THROTTLE_INTERVAL_SECS));
    this.maxPerInterval = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_MAX_ERROR_REPORT_PER_INTERVAL));
    this.intervalStartTime = new AtomicInteger(Time.currentTimeSecs());
    this.intervalErrors = new AtomicInteger(0);
}
/**
 * Used for local testing.
 */
public static SupervisorWorkerHeartbeat thriftifyRpcWorkerHb(String stormId, List<Long> executorId) {
    SupervisorWorkerHeartbeat supervisorWorkerHeartbeat = new SupervisorWorkerHeartbeat();
    supervisorWorkerHeartbeat.set_storm_id(stormId);
    supervisorWorkerHeartbeat
        .set_executors(Collections.singletonList(new ExecutorInfo(executorId.get(0).intValue(), executorId.get(1).intValue())));
    supervisorWorkerHeartbeat.set_time_secs(Time.currentTimeSecs());
    return supervisorWorkerHeartbeat;
}
/**
 * Return a sorted set of java.io.Files that were written by workers that are now active.
 */
public SortedSet<File> getAliveWorkerDirs() {
    Set<String> aliveIds = getAliveIds(Time.currentTimeSecs());
    Set<File> logDirs = getAllWorkerDirs();
    return getLogDirs(logDirs, (wid) -> aliveIds.contains(wid));
}
public void doHeartBeat() throws IOException {
    LocalState state = ConfigUtils.workerState(workerState.conf, workerState.workerId);
    LSWorkerHeartbeat lsWorkerHeartbeat = new LSWorkerHeartbeat(Time.currentTimeSecs(),
        workerState.topologyId,
        workerState.localExecutors.stream()
            .map(executor -> new ExecutorInfo(executor.get(0).intValue(), executor.get(1).intValue()))
            .collect(Collectors.toList()),
        workerState.port);
    state.setWorkerHeartBeat(lsWorkerHeartbeat);
    // This is just in case the supervisor is down, so the disk doesn't fill up.
    // It shouldn't take the supervisor 120 seconds between listing the dir and reading it.
    state.cleanup(60);
    heartbeatToMasterIfLocalbeatFail(lsWorkerHeartbeat);
}
@Override
public void report(Throwable error) {
    LOG.error("Error", error);
    if (Time.deltaSecs(intervalStartTime.get()) > errorIntervalSecs) {
        intervalErrors.set(0);
        intervalStartTime.set(Time.currentTimeSecs());
    }
    if (intervalErrors.incrementAndGet() <= maxPerInterval) {
        try {
            stormClusterState.reportError(stormId, componentId, Utils.hostname(),
                workerTopologyContext.getThisWorkerPort().longValue(), error);
        } catch (UnknownHostException e) {
            throw Utils.wrapInRuntime(e);
        }
    }
}
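// Standalone sketch (not from the source) of the same throttling pattern shown in
// report() above, with made-up numbers: at most maxPerInterval errors are forwarded
// per errorIntervalSecs window, and the counter resets once a window has elapsed.
// Uses only the JDK so it compiles on its own.
import java.util.concurrent.atomic.AtomicInteger;

class ErrorThrottleSketch {
    private final int errorIntervalSecs = 10; // assumed window length, seconds
    private final int maxPerInterval = 5;     // assumed cap per window
    private final AtomicInteger intervalStartTime =
        new AtomicInteger((int) (System.currentTimeMillis() / 1000));
    private final AtomicInteger intervalErrors = new AtomicInteger(0);

    boolean shouldReport() {
        int nowSecs = (int) (System.currentTimeMillis() / 1000);
        if (nowSecs - intervalStartTime.get() > errorIntervalSecs) {
            // Window expired: start a new window and reset the error count.
            intervalErrors.set(0);
            intervalStartTime.set(nowSecs);
        }
        // Report only while the count for the current window is within the cap.
        return intervalErrors.incrementAndGet() <= maxPerInterval;
    }
}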
private void addTopoToHistoryLog(String topoId, Map<String, Object> topoConf) {
    LOG.info("Adding topo to history log: {}", topoId);
    LocalState state = topologyHistoryState;
    List<String> users = ServerConfigUtils.getTopoLogsUsers(topoConf);
    List<String> groups = ServerConfigUtils.getTopoLogsGroups(topoConf);
    synchronized (topologyHistoryLock) {
        state.addTopologyHistory(new LSTopoHistory(topoId, Time.currentTimeSecs(), users, groups));
    }
}
private static Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> extractSupervisorMetrics(ClusterSummary summ) {
    Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> ret = new HashMap<>();
    for (SupervisorSummary sup : summ.get_supervisors()) {
        IClusterMetricsConsumer.SupervisorInfo info =
            new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
        List<DataPoint> metrics = new ArrayList<>();
        metrics.add(new DataPoint("slotsTotal", sup.get_num_workers()));
        metrics.add(new DataPoint("slotsUsed", sup.get_num_used_workers()));
        metrics.add(new DataPoint("totalMem", sup.get_total_resources().get(Constants.COMMON_TOTAL_MEMORY_RESOURCE_NAME)));
        metrics.add(new DataPoint("totalCpu", sup.get_total_resources().get(Constants.COMMON_CPU_RESOURCE_NAME)));
        metrics.add(new DataPoint("usedMem", sup.get_used_mem()));
        metrics.add(new DataPoint("usedCpu", sup.get_used_cpu()));
        ret.put(info, metrics);
    }
    return ret;
}
@Override
public void execute(Tuple tuple) {
    LOG.info("GOT {} at time {}", tuple, Time.currentTimeMillis());
    if (!receivedAnyTuple.get() && Time.currentTimeSecs() > TICK_INTERVAL_SECS) {
        throw new RuntimeException("Simulated time was higher than " + TICK_INTERVAL_SECS + " at start of test."
            + " Increase the interval until this no longer occurs, but keep an eye on Storm's timeouts for e.g. worker heartbeat.");
    }
    receivedAnyTuple.set(true);
    if (tickTupleCount.get() > 3) {
        throw new RuntimeException("Unexpectedly many tick tuples");
    }
    if (TupleUtils.isTick(tuple)) {
        tickTupleCount.incrementAndGet();
        collector.ack(tuple);
    } else {
        if (tuple.getValues().size() == 1 && "val".equals(tuple.getValue(0))) {
            collector.ack(tuple);
        } else {
            nonTickTuple.set(tuple);
        }
    }
}
@Test
public void deltaSecsConvertsToSecondsTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        int current = Time.currentTimeSecs();
        Time.advanceTime(1000);
        Assert.assertEquals(1, Time.deltaSecs(current));
    }
}
@Test
public void deltaSecsTruncatesFractionalSecondsTest() {
    try (SimulatedTime t = new SimulatedTime()) {
        int current = Time.currentTimeSecs();
        Time.advanceTime(1500);
        Assert.assertEquals(1, Time.deltaSecs(current));
    }
}
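// Illustrative sketch (not from the source) of the same SimulatedTime pattern used by
// the two tests above: a hypothetical 30-second timeout is checked deterministically by
// advancing the simulated clock instead of sleeping.
@Test
public void exampleTimeoutWithSimulatedTime() {
    try (SimulatedTime t = new SimulatedTime()) {
        int start = Time.currentTimeSecs();
        Time.advanceTime(31_000); // 31 simulated seconds
        Assert.assertTrue("expected the 30s timeout to have elapsed",
            Time.deltaSecs(start) > 30);
    }
}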
@Test
public void testResourcePoolUtilization() {
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 100, 1000);
    Double cpuGuarantee = 400.0;
    Double memoryGuarantee = 1000.0;
    Map<String, Map<String, Number>> resourceUserPool = userResourcePool(
        userRes("user1", cpuGuarantee, memoryGuarantee));
    Config config = createClusterConfig(100, 200, 200, resourceUserPool);
    TopologyDetails topo1 = genTopology("topo-1", config, 1, 1, 2, 1, Time.currentTimeSecs() - 24, 9, "user1");
    Topologies topologies = new Topologies(topo1);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
    User user1 = new User("user1", toDouble(resourceUserPool.get("user1")));
    WorkerSlot slot = cluster.getAvailableSlots().get(0);
    cluster.assign(slot, topo1.getId(), topo1.getExecutors());

    Assert.assertEquals("check cpu resource guarantee", cpuGuarantee, user1.getCpuResourceGuaranteed(), 0.001);
    Assert.assertEquals("check memory resource guarantee", memoryGuarantee, user1.getMemoryResourceGuaranteed(), 0.001);
    Assert.assertEquals("check cpu resource pool utilization", ((100.0 * 3.0) / cpuGuarantee),
        user1.getCpuResourcePoolUtilization(cluster), 0.001);
    Assert.assertEquals("check memory resource pool utilization", ((200.0 + 200.0) * 3.0) / memoryGuarantee,
        user1.getMemoryResourcePoolUtilization(cluster), 0.001);
}