/**
 * Collect the topology ids that are candidates for cleanup: every id still known to
 * any subsystem (heartbeats, errors, blob store, backpressure, private worker keys)
 * that has expired and is not in the set of currently active storms.
 */
@VisibleForTesting
public static Set<String> topoIdsToClean(IStormClusterState state, BlobStore store, Map<String, Object> conf) {
    Set<String> candidates = new HashSet<>();
    // Union of every place a topology id can linger after the topology dies.
    candidates.addAll(Utils.OR(state.heartbeatStorms(), EMPTY_STRING_LIST));
    candidates.addAll(Utils.OR(state.errorTopologies(), EMPTY_STRING_LIST));
    candidates.addAll(Utils.OR(store.storedTopoIds(), EMPTY_STRING_SET));
    candidates.addAll(Utils.OR(state.backpressureTopologies(), EMPTY_STRING_LIST));
    candidates.addAll(Utils.OR(state.idsOfTopologiesWithPrivateWorkerKeys(), EMPTY_STRING_SET));
    // Keep only ids that have been gone long enough to count as expired.
    Set<String> expired = getExpiredTopologyIds(candidates, conf);
    // Never clean anything that is still active.
    expired.removeAll(Utils.OR(state.activeStorms(), EMPTY_STRING_LIST));
    return expired;
}
/**
 * Get all of the supervisors with the ID as the key.
 *
 * @return a map from supervisor id to its {@link SupervisorInfo}
 */
default Map<String, SupervisorInfo> allSupervisorInfo() {
    // Delegate to the one-arg overload; null selects its default behavior
    // (presumably "no callback/watch" — confirm against the overload's contract).
    Map<String, SupervisorInfo> all = allSupervisorInfo(null);
    return all;
}
@Override public void run(String[] args, Map<String, Object> conf, String command) throws Exception { // We are pretending to be nimbus here. IStormClusterState state = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf)); for (String topologyId: args) { System.out.println(topologyId + ":"); Credentials creds = state.credentials(topologyId, null); if (creds != null) { for (String key : creds.get_creds().keySet()) { System.out.println("\t" + key); } } } }
default Map<String, StormBase> topologyBases() { Map<String, StormBase> stormBases = new HashMap<>(); for (String topologyId : activeStorms()) { StormBase base = stormBase(topologyId, null); if (base != null) { //race condition with delete stormBases.put(topologyId, base); } } return stormBases; } }
/**
 * Pretty-print assignments from cluster state: all of them when no args are
 * given, otherwise only the topology ids named on the command line.
 */
@Override
public void run(String[] args, Map<String, Object> conf, String command) throws Exception {
    IStormClusterState stormClusterState =
        ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
    // Pull remote state down and mark the local backend in sync before reading.
    stormClusterState.syncRemoteAssignments(null);
    stormClusterState.syncRemoteIds(null);
    stormClusterState.setAssignmentsBackendSynchronized();
    Map<String, Assignment> infos = stormClusterState.assignmentsInfo();
    if (args.length > 0) {
        for (String arg : args) {
            System.out.println(arg + ":");
            System.out.println(prettyPrint(infos.get(arg)));
        }
    } else {
        for (Map.Entry<String, Assignment> entry : infos.entrySet()) {
            System.out.println(entry.getKey() + ":");
            System.out.println(prettyPrint(entry.getValue()));
        }
    }
}
// NOTE(review): incomplete test fragment — the code that produces `wt` (and the
// declarations of conf, mockState, type, topoId, versionNumber, privateKey) is
// not visible in this view.
WorkerTokenManager wtm = new WorkerTokenManager(conf, mockState);
// Stub the version counter the manager asks for before minting a key.
when(mockState.getNextPrivateWorkerKeyVersion(type, topoId)).thenReturn(versionNumber);
// Swallow the key-storage call so no real state is touched by the test.
doAnswer((invocation) -> { }).when(mockState).addPrivateWorkerKey(eq(type), eq(topoId), eq(versionNumber), any(PrivateWorkerKey.class));
// Serve back whatever key was captured into privateKey.
when(mockState.getPrivateWorkerKey(type, topoId, versionNumber)).thenAnswer((invocation) -> privateKey.get());
// The manager must have stored exactly the key version it was told to use.
verify(mockState).addPrivateWorkerKey(eq(type), eq(topoId), eq(versionNumber), any(PrivateWorkerKey.class));
assertTrue(wt.is_set_serviceType());
assertEquals(type, wt.get_serviceType());
/**
 * Shut down the supervisor, releasing the timers, event manager, local state,
 * localizer, cluster-state connection, and thrift server.
 *
 * <p>Each resource is closed independently so that a failure closing one does
 * not skip the remaining resources: the original single try/catch aborted the
 * whole sequence at the first exception, leaking everything after it.
 */
@Override
public void close() {
    LOG.info("Shutting down supervisor {}", getId());
    this.active = false;
    closeQuietly("heartbeat timer", () -> heartbeatTimer.close());
    closeQuietly("worker heartbeat timer", () -> workerHeartbeatTimer.close());
    closeQuietly("event timer", () -> eventTimer.close());
    if (eventManager != null) {
        closeQuietly("event manager", () -> eventManager.close());
    }
    if (readState != null) {
        closeQuietly("local state", () -> readState.close());
    }
    closeQuietly("async localizer", () -> asyncLocalizer.close());
    closeQuietly("cluster state", () -> getStormClusterState().disconnect());
    if (thriftServer != null) {
        closeQuietly("thrift server", () -> thriftServer.stop());
    }
}

/** Run one shutdown step, logging (but never propagating) any failure. */
private void closeQuietly(String what, AutoCloseable step) {
    try {
        step.close();
    } catch (Exception e) {
        LOG.error("Error Shutting down {}", what, e);
    }
}
// NOTE(review): incomplete fragment — the method body is cut off below, and
// `topoId` is not declared in the visible span (it must come from unseen code).
private ClusterSummary getClusterInfoImpl() throws Exception {
    IStormClusterState state = stormClusterState;
    // One snapshot of every supervisor, keyed by supervisor id.
    Map<String, SupervisorInfo> infos = state.allSupervisorInfo();
    List<SupervisorSummary> summaries = new ArrayList<>(infos.size());
    for (Entry<String, SupervisorInfo> entry : infos.entrySet()) {
        // NOTE(review): these cluster-state reads sit inside the per-supervisor
        // loop and look loop-invariant — confirm against the full method before hoisting.
        Map<String, StormBase> bases = state.topologyBases();
        List<NimbusSummary> nimbuses = state.nimbuses();
        Assignment assignment = state.assignmentInfo(topoId, null);
// NOTE(review): incomplete fragment — the enclosing method and the closing
// braces are not visible; `bases` and `tds` are declared outside this span.
synchronized (submitLock) {
    bases = state.topologyBases();
    // All topology ids that currently have an assignment.
    List<String> assignedTopologyIds = state.assignments(null);
    Map<String, Assignment> existingAssignments = new HashMap<>();
    for (String id : assignedTopologyIds) {
        Assignment currentAssignment = state.assignmentInfo(id, null);
        // Back-fill the owner on assignments written before owner tracking
        // existed, and persist the repaired assignment back to cluster state.
        if (!currentAssignment.is_set_owner()) {
            TopologyDetails td = tds.get(id);
            if (td != null) {
                currentAssignment.set_owner(td.getTopologySubmitter());
                state.setAssignment(id, currentAssignment, td.getConf());
// NOTE(review): incomplete fragment — `id` and `newCredsMap` are not declared in
// the visible span, and the closing braces are cut off below.
Map<String, StormBase> assignedBases = state.topologyBases();
if (assignedBases != null) {
    for (Entry<String, StormBase> entry : assignedBases.entrySet()) {
        // Per-topology conf: cluster conf overlaid with the stored topology conf.
        Map<String, Object> topoConf = Collections.unmodifiableMap(Utils.merge(conf, tryReadTopoConf(id, topoCache)));
        // Credential reads/writes are serialized against concurrent updates.
        synchronized (credUpdateLock) {
            Credentials origCreds = state.credentials(id, null);
            if (origCreds != null) {
                Map<String, String> origCredsMap = origCreds.get_creds();
                state.setCredentials(id, new Credentials(newCredsMap), topoConf);
// NOTE(review): incomplete fragment — `supervisorIds` is not declared in the
// visible span and the loop bodies are cut off below.
getSupervisorPageInfoCalls.mark();
IStormClusterState state = stormClusterState;
// Snapshot of every supervisor, keyed by supervisor id.
Map<String, SupervisorInfo> superInfos = state.allSupervisorInfo();
Map<String, List<String>> hostToSuperId = new HashMap<>();
for (Entry<String, SupervisorInfo> entry : superInfos.entrySet()) {
    // NOTE(review): assignmentsInfo() is fetched inside the per-supervisor loop
    // and looks loop-invariant — confirm against the full method before hoisting.
    Map<String, Assignment> topoToAssignment = state.assignmentsInfo();
    for (String sid : supervisorIds) {
        SupervisorInfo info = superInfos.get(sid);
/**
 * Admin command that removes corrupt topologies: any topology id that is active
 * in cluster state but has no matching key in the blob store is removed.
 */
@Override
public void run(String[] args, Map<String, Object> conf, String command) throws Exception {
    try (BlobStore nimbusBlobStore = ServerUtils.getNimbusBlobStore(conf, NimbusInfo.fromConf(conf), null)) {
        IStormClusterState stormClusterState =
            ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
        // Topology ids that still have blobs backing them.
        Set<String> blobStoreTopologyIds =
            nimbusBlobStore.filterAndListKeys(ConfigUtils::getIdFromBlobKey);
        Set<String> activeTopologyIds = new HashSet<>(stormClusterState.activeStorms());
        // Active but blob-less => corrupt.
        Sets.SetView<String> diffTopology = Sets.difference(activeTopologyIds, blobStoreTopologyIds);
        LOG.info("active-topology-ids [{}] blob-topology-ids [{}] diff-topology [{}]",
                 activeTopologyIds, blobStoreTopologyIds, diffTopology);
        for (String corruptId : diffTopology) {
            stormClusterState.removeStorm(corruptId);
        }
    }
}
/**
 * Create or update an existing key.
 *
 * @param serviceType the type of service to create a token for
 * @param user the user the token is for
 * @param topologyId the topology the token is for
 * @return a newly generated token that should be good to start using from now until it expires.
 */
public WorkerToken createOrUpdateTokenFor(WorkerTokenServiceType serviceType, String user, String topologyId) {
    // Bump the key version so this token cannot collide with an older one.
    long nextVersion = state.getNextPrivateWorkerKeyVersion(serviceType, topologyId);
    SecretKey topoSecret = getCurrentSecret();
    long expirationTimeMillis = Time.currentTimeMillis() + tokenLifetimeMillis;
    // The signed payload: who the token is for and when it stops being valid.
    WorkerTokenInfo info = new WorkerTokenInfo(user, topologyId, nextVersion, expirationTimeMillis);
    byte[] serializedInfo = ClientAuthUtils.serializeWorkerTokenInfo(info);
    byte[] signature = WorkerTokenSigner.createPassword(serializedInfo, topoSecret);
    WorkerToken token = new WorkerToken(serviceType, ByteBuffer.wrap(serializedInfo), ByteBuffer.wrap(signature));
    // Persist the secret so the signature can be verified later.
    PrivateWorkerKey key = new PrivateWorkerKey(ByteBuffer.wrap(topoSecret.getEncoded()), user, expirationTimeMillis);
    state.addPrivateWorkerKey(serviceType, topologyId, nextVersion, key);
    LOG.info("Created new WorkerToken for user {} topology {} on service {}", user, topologyId, serviceType);
    return token;
}
// NOTE(review): incomplete fragment — `topoId`, `credentials`, and `topoConf`
// are declared outside the visible span; the closing brace is cut off.
Credentials origCreds = state.credentials(topoId, null);
if (origCreds != null) {
    // Overwrite the incoming credentials' map with the stored one, then persist.
    // NOTE(review): despite the name, no merging is visible here — mergedCreds is
    // just origCreds.get_creds(); confirm intent against the full method.
    Map<String, String> mergedCreds = origCreds.get_creds();
    credentials.set_creds(mergedCreds);
    state.setCredentials(topoId, credentials, topoConf);
// NOTE(review): incomplete fragment — statements from a worker shutdown path;
// the enclosing method and the declarations of topologyId/assignmentId/port are
// not visible here.
workerState.runWorkerShutdownHooks();
// Remove this worker's heartbeat so the supervisor stops tracking it.
workerState.stormClusterState.removeWorkerHeartbeat(topologyId, assignmentId, (long) port);
LOG.info("Disconnecting from storm cluster state context");
workerState.stormClusterState.disconnect();
workerState.stateStorage.close();
LOG.info("Shut down worker {} {} {}", topologyId, assignmentId, port);
// NOTE(review): incomplete fragment — `base` is not declared in the visible
// span, and the enclosing method signature/body continuation are cut off.
checkAuthorization(null, null, "getOwnerResourceSummaries");
IStormClusterState state = stormClusterState;
// Snapshots used to summarize per-owner resource usage.
Map<String, Assignment> topoIdToAssignments = state.assignmentsInfo();
Map<String, StormBase> topoIdToBases = state.topologyBases();
Map<String, Number> clusterSchedulerConfig = scheduler.config();
// Resolve the topology id from its name; a missing id means it is not alive.
String topoId = state.getTopoId(base.get_name())
    .orElseThrow(() -> new WrappedNotAliveException(base.get_name() + " is not alive"));
TopologyResources resources = getResourcesForTopology(topoId, base);
// NOTE(review): incomplete fragment — the enclosing method and the loop's
// closing brace are not visible here.
// Register this nimbus instance (marked not-leader) in cluster state.
state.addNimbusHost(hpi.getHost(),
    new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
// Join the leadership queue; leadership is acquired asynchronously.
leaderElector.addToLeaderLockQueue();
// Kick every active topology through its STARTUP transition.
for (String topoId : state.activeStorms()) {
    transition(topoId, TopologyActions.STARTUP, null);
/**
 * Take a snapshot of all assignments currently stored in cluster state.
 *
 * @param stormClusterState the cluster state to read from
 * @return topology id mapped to its current {@link Assignment}
 * @throws Exception if the cluster state cannot be read
 */
protected Map<String, Assignment> getAssignmentsSnapshot(IStormClusterState stormClusterState) throws Exception {
    Map<String, Assignment> snapshot = stormClusterState.assignmentsInfo();
    return snapshot;
}
/**
 * Check whether the named topology is already active, or is in the process of
 * becoming active.
 */
private static boolean isTopologyActiveOrActivating(IStormClusterState state, String topoName) {
    if (isTopologyActive(state, topoName)) {
        return true;
    }
    // NOTE(review): activeStorms() appears elsewhere in this code to yield
    // topology ids, while this method receives a name — confirm the intended
    // comparison against the callers.
    return state.activeStorms().contains(topoName);
}
/**
 * Look up the pending profiler requests for each of the given topologies.
 *
 * @param stormClusterState cluster state to query
 * @param stormIds topology ids to look up
 * @return topology id mapped to its list of profile requests
 * @throws Exception if cluster state cannot be read
 */
protected Map<String, List<ProfileRequest>> getProfileActions(IStormClusterState stormClusterState,
                                                              List<String> stormIds) throws Exception {
    Map<String, List<ProfileRequest>> requestsByTopology = new HashMap<>();
    for (String stormId : stormIds) {
        requestsByTopology.put(stormId, stormClusterState.getTopologyProfileRequests(stormId));
    }
    return requestsByTopology;
}