/**
 * Builds an {@link IStormClusterState} handle for this process, acting as nimbus.
 *
 * @param conf daemon configuration used both for storage setup and ACL context
 * @return a freshly created cluster-state handle
 * @throws Exception if the backing state storage cannot be created
 */
private static IStormClusterState makeStormClusterState(Map<String, Object> conf) throws Exception {
    ClusterStateContext context = new ClusterStateContext(DaemonType.NIMBUS, conf);
    return ClusterUtils.mkStormClusterState(conf, context);
}
/**
 * Persists a private worker key under the secret-keys subtree in the state storage.
 * Only nimbus may store these keys; the data is written with service-type-specific
 * secret ACLs obtained from the cluster-state context.
 *
 * @param type       service the workers will use this key to connect to
 * @param topologyId topology the key belongs to
 * @param keyVersion version number of the key
 * @param key        private key payload to serialize and store
 */
@Override
public void addPrivateWorkerKey(WorkerTokenServiceType type, String topologyId, long keyVersion, PrivateWorkerKey key) {
    // Writing worker secrets is a nimbus-only operation.
    assert context.getDaemonType() == DaemonType.NIMBUS;
    stateStorage.mkdirs(ClusterUtils.SECRET_KEYS_SUBTREE, defaultAcls);
    String keyPath = ClusterUtils.secretKeysPath(type, topologyId, keyVersion);
    List<ACL> keyAcls = context.getZkSecretAcls(type);
    LOG.info("Storing private key for {} connecting to a {} at {} with ACL {}", topologyId, type, keyPath, keyAcls);
    stateStorage.set_data(keyPath, Utils.serialize(key), keyAcls);
}
public ZKStateStorage(Map<String, Object> conf, Map<String, Object> authConf, ClusterStateContext context) throws Exception { this.conf = conf; this.authConf = authConf; if (context.getDaemonType().equals(DaemonType.NIMBUS)) { this.isNimbus = true; } // just mkdir STORM_ZOOKEEPER_ROOT dir CuratorFramework zkTemp = mkZk(context.getDaemonType()); String rootPath = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT)); ClientZookeeper.mkdirs(zkTemp, rootPath, context.getDefaultZkAcls()); zkTemp.close(); active = new AtomicBoolean(true); zkWriter = mkZk(new ZkWatcherCallBack(), context.getDaemonType()); if (isNimbus) { zkReader = mkZk(new ZkWatcherCallBack(), context.getDaemonType()); } else { zkReader = zkWriter; } }
public ZKStateStorage(Map<Object, Object> conf, Map authConf, List<ACL> acls, ClusterStateContext context) throws Exception { this.conf = conf; this.authConf = authConf; if (context.getDaemonType().equals(DaemonType.NIMBUS)) this.isNimbus = true; // just mkdir STORM_ZOOKEEPER_ROOT dir CuratorFramework zkTemp = mkZk(acls); String rootPath = String.valueOf(conf.get(Config.STORM_ZOOKEEPER_ROOT)); Zookeeper.mkdirs(zkTemp, rootPath, acls); zkTemp.close(); active = new AtomicBoolean(true); zkWriter = mkZk(acls, new ZkWatcherCallBack()); if (isNimbus) { zkReader = mkZk(acls, new ZkWatcherCallBack()); } else { zkReader = zkWriter; } }
this.defaultAcls = context.getDefaultZkAcls();
this.context = context;
// Fixed garbled identifier: "assignmentsassignmentsBackend" (a duplicated prefix)
// could not resolve to any name; the intended source is the backend parameter.
// NOTE(review): the enclosing constructor signature is outside this view — confirm
// the parameter is named "assignmentsBackend".
this.assignmentsBackend = assignmentsBackend;
/**
 * Lazily builds cluster state only when worker tokens are enabled on the server
 * side for the given connection type; returns {@code null} otherwise so no
 * state-storage connection is opened unnecessarily.
 *
 * @param conf           daemon configuration
 * @param connectionType the thrift connection type being served
 * @return a cluster-state handle, or {@code null} when worker tokens are disabled
 */
private static IStormClusterState buildStateIfNeeded(Map<String, Object> conf, ThriftConnectionType connectionType) {
    if (!ClientAuthUtils.areWorkerTokensEnabledServer(connectionType, conf)) {
        return null;
    }
    try {
        return ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.UNKNOWN, conf));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/**
 * Prints supervisor info: with no arguments, dumps every known supervisor;
 * otherwise prints only the requested supervisor ids.
 *
 * @param args    optional supervisor ids to print
 * @param conf    storm configuration
 * @param command the admin sub-command name (unused here)
 * @throws Exception if cluster state cannot be created or read
 */
@Override
public void run(String[] args, Map<String, Object> conf, String command) throws Exception {
    IStormClusterState stormClusterState =
        ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
    Map<String, SupervisorInfo> infos = stormClusterState.allSupervisorInfo();
    if (args.length == 0) {
        for (Map.Entry<String, SupervisorInfo> entry : infos.entrySet()) {
            System.out.println(entry.getKey() + ":");
            System.out.println(prettyPrint(entry.getValue()));
        }
    } else {
        for (String arg : args) {
            System.out.println(arg + ":");
            SupervisorInfo info = infos.get(arg);
            if (info == null) {
                // Guard against unknown ids rather than pretty-printing null.
                System.out.println("Supervisor " + arg + " not found.");
            } else {
                System.out.println(prettyPrint(info));
            }
        }
    }
}
@Override public void run(String[] args, Map<String, Object> conf, String command) throws Exception { // We are pretending to be nimbus here. IStormClusterState state = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf)); for (String topologyId: args) { System.out.println(topologyId + ":"); Credentials creds = state.credentials(topologyId, null); if (creds != null) { for (String key : creds.get_creds().keySet()) { System.out.println("\t" + key); } } } }
/**
 * Prints assignment info after forcing a sync of remote assignments and ids into
 * the local backend. With no arguments, dumps every assignment; otherwise prints
 * only the requested topology ids.
 *
 * @param args    optional topology ids to print
 * @param conf    storm configuration
 * @param command the admin sub-command name (unused here)
 * @throws Exception if cluster state cannot be created or read
 */
@Override
public void run(String[] args, Map<String, Object> conf, String command) throws Exception {
    IStormClusterState stormClusterState =
        ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
    // Pull remote state into the local assignments backend before reading it.
    stormClusterState.syncRemoteAssignments(null);
    stormClusterState.syncRemoteIds(null);
    stormClusterState.setAssignmentsBackendSynchronized();
    Map<String, Assignment> infos = stormClusterState.assignmentsInfo();
    if (args.length == 0) {
        for (Map.Entry<String, Assignment> entry : infos.entrySet()) {
            System.out.println(entry.getKey() + ":");
            System.out.println(prettyPrint(entry.getValue()));
        }
    } else {
        for (String arg : args) {
            System.out.println(arg + ":");
            Assignment assignment = infos.get(arg);
            if (assignment == null) {
                // Guard against unknown ids rather than pretty-printing null.
                System.out.println("Assignment for " + arg + " not found.");
            } else {
                System.out.println(prettyPrint(assignment));
            }
        }
    }
}
public static void main(String[] args) throws Exception { if (args.length < 2) { throw new IllegalArgumentException("Command and path arguments must be provided."); } String command = args[0]; String path = args[1]; Map<String, Object> conf = Utils.readStormConfig(); IStateStorage cluster = ClusterUtils.mkStateStorage(conf, conf, new ClusterStateContext()); LOG.info("Command: [{}]", command); switch (command) { case "list": handleListCommand(cluster, path); break; case "get": handleGetCommand(cluster, path); break; default: LOG.info("Usage: heartbeats [list|get] path"); } try { cluster.close(); } catch (Exception e) { LOG.info("Caught exception: {} on close.", e); } // force process to be terminated System.exit(0); }
/**
 * Initializes the blob store: opens the ZK client, creates the on-disk blob
 * directory, the ACL handler, and the nimbus-scoped cluster state.
 *
 * @param conf          storm configuration
 * @param overrideBase  base directory override, or {@code null} to use the configured default
 * @param nimbusInfo    identity of this nimbus instance
 * @param leaderElector elector used to check leadership
 */
@Override
public void prepare(Map<String, Object> conf, String overrideBase, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
    this.conf = conf;
    this.nimbusInfo = nimbusInfo;
    zkClient = BlobStoreUtils.createZKClient(conf, DaemonType.NIMBUS);
    if (overrideBase == null) {
        overrideBase = ConfigUtils.absoluteStormBlobStoreDir(conf);
    }
    File baseDir = new File(overrideBase, BASE_BLOBS_DIR_NAME);
    try {
        fbs = new FileBlobStoreImpl(baseDir, conf);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    _aclHandler = new BlobStoreAclHandler(conf);
    try {
        this.stormClusterState = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
    } catch (Exception e) {
        // Previously this swallowed the failure (printStackTrace) and left
        // stormClusterState null, deferring an NPE to first use. Fail fast,
        // matching the FileBlobStoreImpl handling above.
        throw new RuntimeException(e);
    }
    timer = new Timer("BLOB-STORE-TIMER", true);
    this.leaderElector = leaderElector;
}
/**
 * Removes "corrupt" topologies: topologies that are active in cluster state but
 * have no corresponding keys left in the blob store.
 *
 * @param args    unused
 * @param conf    storm configuration
 * @param command the admin sub-command name (unused here)
 * @throws Exception if the blob store or cluster state cannot be accessed
 */
@Override
public void run(String[] args, Map<String, Object> conf, String command) throws Exception {
    try (BlobStore nimbusBlobStore = ServerUtils.getNimbusBlobStore(conf, NimbusInfo.fromConf(conf), null)) {
        IStormClusterState stormClusterState =
            ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
        Set<String> blobStoreTopologyIds =
            nimbusBlobStore.filterAndListKeys(key -> ConfigUtils.getIdFromBlobKey(key));
        Set<String> activeTopologyIds = new HashSet<>(stormClusterState.activeStorms());
        // Active topologies with no backing blobs are considered corrupt.
        Sets.SetView<String> diffTopology = Sets.difference(activeTopologyIds, blobStoreTopologyIds);
        LOG.info("active-topology-ids [{}] blob-topology-ids [{}] diff-topology [{}]",
            activeTopologyIds, blobStoreTopologyIds, diffTopology);
        for (String corruptId : diffTopology) {
            stormClusterState.removeStorm(corruptId);
        }
    }
}
// Test fixture setup: builds a StormClusterStateImpl over a mocked storage layer
// with the default local assignments backend and an empty context.
@Before
public void init() throws Exception {
    storage = Mockito.mock(IStateStorage.class);
    context = new ClusterStateContext();
    // Last argument is the "solo" flag, passed as false here.
    state = new StormClusterStateImpl(storage, LocalAssignmentsBackendFactory.getDefault(), context, false /*solo*/);
}
// NOTE(review): fragment — the enclosing method and the closing braces of this
// try/catch are outside this view.
// Builds worker-scoped cluster state, reusing the worker's existing state storage.
try {
    this.stormClusterState = ClusterUtils.mkStormClusterState(workerData.getStateStorage(),
        new ClusterStateContext(DaemonType.WORKER, topoConf));
} catch (Exception e) {
    throw Utils.wrapInRuntime(e);
// Build a WORKER-scoped context, then derive both the raw state storage and the
// higher-level cluster-state view from it.
ClusterStateContext csContext = new ClusterStateContext(DaemonType.WORKER, topologyConf);
IStateStorage stateStorage = ClusterUtils.mkStateStorage(conf, topologyConf, csContext);
// Second argument is passed as null here — presumably selecting a default for that
// overload; verify against the ClusterUtils.mkStormClusterState signatures.
IStormClusterState stormClusterState = ClusterUtils.mkStormClusterState(stateStorage, null, csContext);
// NOTE(review): fragment — the call this argument belongs to opens on a line
// outside this view; braces here are unbalanced for the same reason.
new ClusterStateContext(DaemonType.SUPERVISOR, conf)); } catch (Exception e) {
    // NOTE(review): the caught exception is not included in the log call;
    // consider LOG.error(message, e) so the stack trace is preserved.
    LOG.error("supervisor can't create stormClusterState");
// NOTE(review): fragment — the variable "state" used below is declared on a line
// outside this view, presumably as the assignment target of this first call.
ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
// Exercise worker-token auth: mint tokens via the manager, then connect as "bob".
WorkerTokenManager wtMan = new WorkerTokenManager(conf, state);
Subject bob = testConnectWithTokenFor(wtMan, conf, server, "bob", "topo-bob");
// NOTE(review): fragment — the try block enclosing this assignment opens outside
// this view. Uses the single-argument ClusterStateContext(DaemonType) constructor,
// unlike the (DaemonType, conf) form seen elsewhere; confirm which overload is intended.
this.stormClusterState = ClusterUtils.mkStormClusterState(conf, acls, new ClusterStateContext(DaemonType.SUPERVISOR));
} catch (Exception e) {
    // NOTE(review): the caught exception is not included in the log call;
    // consider LOG.error(message, e) so the stack trace is preserved.
    LOG.error("supervisor can't create stormClusterState");