/**
 * Builds an {@link IStormClusterState} from either an existing {@link IStateStorage}
 * instance or a raw configuration map.
 *
 * @param stateStorage either an {@link IStateStorage} to wrap directly, or a
 *                     {@code Map<String, Object>} config used to create one
 * @param backend      local assignments backend the cluster state delegates to
 * @param context      cluster state context (daemon type, security settings, ...)
 * @return the cluster state facade; the final boolean flags whether the storage was
 *         created here (see the "solo" flag in the corresponding constructor usage) —
 *         TODO confirm exact semantics against StormClusterStateImpl
 * @throws Exception if the underlying state storage cannot be created
 */
public IStormClusterState mkStormClusterStateImpl(Object stateStorage, ILocalAssignmentsBackend backend, ClusterStateContext context) throws Exception {
    if (stateStorage instanceof IStateStorage) {
        // Caller supplied a ready-made storage; wrap it as-is.
        return new StormClusterStateImpl((IStateStorage) stateStorage, backend, context, false);
    } else {
        // stateStorage is a config map; build the storage here.
        // NOTE(review): the same map is passed as both conf and auth-conf — confirm intended.
        IStateStorage storage = _instance.mkStateStorageImpl((Map<String, Object>) stateStorage, (Map<String, Object>) stateStorage, context);
        return new StormClusterStateImpl(storage, backend, context, true);
    }
}
/**
 * Returns the profile requests of the given topology that target the worker
 * identified by {@code nodeInfo}.
 *
 * @param stormId  topology id
 * @param nodeInfo node + port identifying the worker
 * @return the matching requests (possibly empty, never null)
 */
@Override
public List<ProfileRequest> getWorkerProfileRequests(String stormId, NodeInfo nodeInfo) {
    List<ProfileRequest> matching = new ArrayList<>();
    for (ProfileRequest request : getTopologyProfileRequests(stormId)) {
        // Keep only requests addressed to this exact worker.
        if (request.get_nodeInfo().equals(nodeInfo)) {
            matching.add(request);
        }
    }
    return matching;
}
/**
 * Need to take executor->node+port in explicitly so that we don't run into a situation
 * where a long dead worker with a skewed clock overrides all the timestamps. By only
 * checking heartbeats with an assigned node+port, and only reading executors from that
 * heartbeat that are actually assigned, we avoid situations like that.
 *
 * @param stormId          topology id
 * @param executorNodePort executor id -> node + port
 * @return mapping of executorInfo -> executor beat
 */
@Override
public Map<ExecutorInfo, ExecutorBeat> executorBeats(String stormId, Map<List<Long>, NodeInfo> executorNodePort) {
    Map<ExecutorInfo, ExecutorBeat> beats = new HashMap<>();
    // Group the assigned executors by worker (node + port) so each heartbeat is read once.
    Map<NodeInfo, List<List<Long>>> executorsByWorker = Utils.reverseMap(executorNodePort);
    for (Map.Entry<NodeInfo, List<List<Long>>> workerEntry : executorsByWorker.entrySet()) {
        NodeInfo worker = workerEntry.getKey();
        ClusterWorkerHeartbeat heartbeat =
            getWorkerHeartbeat(stormId, worker.get_node(), worker.get_port_iterator().next());
        List<ExecutorInfo> assignedExecutors = new ArrayList<>();
        for (List<Long> executorId : workerEntry.getValue()) {
            // An executor id is a task range; only the first and last entries are used.
            assignedExecutors.add(new ExecutorInfo(executorId.get(0).intValue(),
                                                   executorId.get(executorId.size() - 1).intValue()));
        }
        // A missing heartbeat (dead or not-yet-started worker) contributes no beats.
        if (heartbeat != null) {
            beats.putAll(ClusterUtils.convertExecutorBeats(assignedExecutors, heartbeat));
        }
    }
    return beats;
}
if (size == 1) { issueCallback(assignmentsCallback); } else { issueMapCallback(assignmentInfoCallback, toks.get(1)); issueMapCallback(assignmentVersionCallback, toks.get(1)); issueMapCallback(assignmentInfoWithVersionCallback, toks.get(1)); issueCallback(supervisorsCallback); } else if (root.equals(ClusterUtils.BLOBSTORE_ROOT)) { issueCallback(blobstoreCallback); } else if (root.equals(ClusterUtils.STORMS_ROOT) && size > 1) { issueMapCallback(stormBaseCallback, toks.get(1)); } else if (root.equals(ClusterUtils.CREDENTIALS_ROOT) && size > 1) { issueMapCallback(credentialsCallback, toks.get(1)); } else if (root.equals(ClusterUtils.LOGCONFIG_ROOT) && size > 1) { issueMapCallback(logConfigCallback, toks.get(1)); } else if (root.equals(ClusterUtils.BACKPRESSURE_ROOT) && size > 1) { issueMapCallback(backPressureCallback, toks.get(1)); } else if (root.equals(ClusterUtils.LEADERINFO_ROOT)) { issueCallback(leaderInfoCallback); } else { LOG.error("{} Unknown callback for subtree {}", new RuntimeException("Unknown callback for this path"), path);
/**
 * Synchronizes the id -> name mapping held by the assignments backend.
 *
 * @param remote the mapping to push; when {@code null}, the mapping is rebuilt from the
 *               currently active storms instead
 */
@Override
public void syncRemoteIds(Map<String, String> remote) {
    if (null != remote) {
        this.assignmentsBackend.syncRemoteIds(remote);
    } else {
        // No snapshot supplied: rebuild id -> name from the active storms.
        Map<String, String> tmp = new HashMap<>();
        List<String> activeStorms = activeStorms();
        for (String stormId : activeStorms) {
            StormBase base = stormBase(stormId, null);
            // A storm can be removed between activeStorms() and stormBase(); skip the
            // stale id instead of throwing an NPE on a null base.
            if (base != null) {
                tmp.put(stormId, base.get_name());
            }
        }
        this.assignmentsBackend.syncRemoteIds(tmp);
    }
}
/**
 * Removes all state for the given topology from the cluster state store and the
 * local assignments backend.
 *
 * @param stormId topology id to remove
 */
@Override
public void removeStorm(String stormId) {
    // Assignment is deleted first, then the backend's cached state for this storm is
    // cleared — NOTE(review): presumably so no consumer re-reads a stale assignment;
    // confirm ordering requirement before reordering.
    stateStorage.delete_node(ClusterUtils.assignmentPath(stormId));
    this.assignmentsBackend.clearStateForStorm(stormId);
    // Remaining per-storm nodes: credentials, log config, profiler requests.
    stateStorage.delete_node(ClusterUtils.credentialsPath(stormId));
    stateStorage.delete_node(ClusterUtils.logConfigPath(stormId));
    stateStorage.delete_node(ClusterUtils.profilerConfigPath(stormId));
    // Storm base is removed last.
    removeStormBase(stormId);
}
StormBase stormBase = stormBase(stormId, null); if (stormBase.get_component_executors() != null) {
if (size == 1) { issueCallback(assignmentsCallback); } else { issueMapCallback(assignmentInfoCallback, toks.get(1)); issueMapCallback(assignmentVersionCallback, toks.get(1)); issueMapCallback(assignmentInfoWithVersionCallback, toks.get(1)); issueCallback(supervisorsCallback); } else if (root.equals(ClusterUtils.BLOBSTORE_ROOT)) { issueCallback(blobstoreCallback); } else if (root.equals(ClusterUtils.STORMS_ROOT) && size > 1) { issueMapCallback(stormBaseCallback, toks.get(1)); } else if (root.equals(ClusterUtils.CREDENTIALS_ROOT) && size > 1) { issueMapCallback(credentialsCallback, toks.get(1)); } else if (root.equals(ClusterUtils.LOGCONFIG_ROOT) && size > 1) { issueMapCallback(logConfigCallback, toks.get(1)); } else if (root.equals(ClusterUtils.BACKPRESSURE_ROOT) && size > 1) { issueMapCallback(backPressureCallback, toks.get(1)); } else { LOG.error("{} Unknown callback for subtree {}", new RuntimeException("Unknown callback for this path"), path);
/**
 * Removes all state for the given topology from the cluster state store.
 *
 * @param stormId topology id to remove
 */
@Override
public void removeStorm(String stormId) {
    // Per-storm nodes: assignment, credentials, log config, profiler requests.
    // NOTE(review): unlike the variant that also calls
    // assignmentsBackend.clearStateForStorm(stormId), this version leaves any local
    // assignments cache untouched — confirm which version is current.
    stateStorage.delete_node(ClusterUtils.assignmentPath(stormId));
    stateStorage.delete_node(ClusterUtils.credentialsPath(stormId));
    stateStorage.delete_node(ClusterUtils.logConfigPath(stormId));
    stateStorage.delete_node(ClusterUtils.profilerConfigPath(stormId));
    // Storm base is removed last.
    removeStormBase(stormId);
}
public void updateStorm(String stormId, StormBase newElems) { StormBase stormBase = stormBase(stormId, null); if (stormBase.get_component_executors() != null) {
/** Creates a StormClusterStateImpl over a mocked IStateStorage before each test. */
@Before
public void init() throws Exception {
    context = new ClusterStateContext();
    storage = Mockito.mock(IStateStorage.class);
    state = new StormClusterStateImpl(storage,
                                      LocalAssignmentsBackendFactory.getDefault(),
                                      context,
                                      false /* solo */);
}
/**
 * Returns the profile requests of the given topology that target the worker
 * identified by {@code nodeInfo}.
 *
 * @param stormId  topology id
 * @param nodeInfo node + port identifying the worker
 * @return the matching requests (possibly empty, never null)
 */
@Override
public List<ProfileRequest> getWorkerProfileRequests(String stormId, NodeInfo nodeInfo) {
    List<ProfileRequest> requests = new ArrayList<>();
    List<ProfileRequest> profileRequests = getTopologyProfileRequests(stormId);
    for (ProfileRequest profileRequest : profileRequests) {
        NodeInfo nodeInfo1 = profileRequest.get_nodeInfo();
        // Braces added: single-statement if bodies are always braced per convention.
        if (nodeInfo1.equals(nodeInfo)) {
            requests.add(profileRequest);
        }
    }
    return requests;
}
/**
 * Need to take executor->node+port in explicitly so that we don't run into a situation
 * where a long dead worker with a skewed clock overrides all the timestamps. By only
 * checking heartbeats with an assigned node+port, and only reading executors from that
 * heartbeat that are actually assigned, we avoid situations like that.
 *
 * @param stormId          topology id
 * @param executorNodePort executor id (task range) -> assigned node + port
 * @return mapping of executorInfo -> executor beat, for assigned executors only
 */
@Override
public Map<ExecutorInfo, ExecutorBeat> executorBeats(String stormId, Map<List<Long>, NodeInfo> executorNodePort) {
    Map<ExecutorInfo, ExecutorBeat> executorWhbs = new HashMap<>();
    Map<NodeInfo, List<List<Long>>> nodePortExecutors = Utils.reverseMap(executorNodePort);
    for (Map.Entry<NodeInfo, List<List<Long>>> entry : nodePortExecutors.entrySet()) {
        String node = entry.getKey().get_node();
        Long port = entry.getKey().get_port_iterator().next();
        ClusterWorkerHeartbeat whb = getWorkerHeartbeat(stormId, node, port);
        List<ExecutorInfo> executorInfoList = new ArrayList<>();
        for (List<Long> list : entry.getValue()) {
            // An executor id is a task range; only the first and last entries are used.
            executorInfoList.add(new ExecutorInfo(list.get(0).intValue(), list.get(list.size() - 1).intValue()));
        }
        // Braces added: single-statement if bodies are always braced per convention.
        if (whb != null) {
            executorWhbs.putAll(ClusterUtils.convertExecutorBeats(executorInfoList, whb));
        }
    }
    return executorWhbs;
}
/**
 * Builds an {@link IStormClusterState} from either an existing {@link IStateStorage}
 * instance or a raw configuration map.
 *
 * @param stateStorage either an {@link IStateStorage} to wrap directly, or a
 *                     {@code Map<String, Object>} config used to create one
 * @param acls         ACLs applied by the state storage
 * @param context      cluster state context
 * @return the cluster state facade
 * @throws Exception if the underlying state storage cannot be created
 */
@SuppressWarnings("unchecked")
public IStormClusterState mkStormClusterStateImpl(Object stateStorage, List<ACL> acls, ClusterStateContext context) throws Exception {
    if (stateStorage instanceof IStateStorage) {
        // Caller supplied a ready-made storage; wrap it as-is.
        return new StormClusterStateImpl((IStateStorage) stateStorage, acls, context, false);
    } else {
        // stateStorage is a config map; parameterized casts replace the raw Map casts.
        // NOTE(review): the same map is passed as both conf and auth-conf — confirm intended.
        IStateStorage storage = _instance.mkStateStorageImpl((Map<String, Object>) stateStorage, (Map<String, Object>) stateStorage, acls, context);
        return new StormClusterStateImpl(storage, acls, context, true);
    }
}