@Override
public void stopAutoIndexingProperty( String propName )
{
    // Copy-on-write removal: publish a fresh snapshot so concurrent readers
    // never observe a partially mutated set.
    propertyKeysToInclude.getAndUpdate( existing ->
    {
        Set<String> withoutProperty = new HashSet<>( existing );
        withoutProperty.remove( propName );
        return withoutProperty;
    } );
}
@Override
public void startAutoIndexingProperty( String propName )
{
    // Copy-on-write insertion: swap in a new snapshot containing the key
    // instead of mutating the currently published set.
    propertyKeysToInclude.getAndUpdate( existing ->
    {
        Set<String> next = new HashSet<>( existing );
        next.add( propName );
        return next;
    } );
}
/**
 * Returns the executor set for the given topology, computing and caching it
 * on a cache miss.
 */
private Set<List<Integer>> getOrUpdateExecutors(String topoId, StormBase base, Map<String, Object> topoConf,
                                                StormTopology topology)
    throws IOException, AuthorizationException, InvalidTopologyException, KeyNotFoundException {
    // Fast path: already cached for this topology id.
    Set<List<Integer>> cached = idToExecutors.get().get(topoId);
    if (cached != null) {
        return cached;
    }
    // Miss: compute the executors and publish them into the cache.
    Set<List<Integer>> computed = new HashSet<>(computeExecutors(topoId, base, topoConf, topology));
    idToExecutors.getAndUpdate(new Assoc<>(topoId, computed));
    return computed;
}
/**
 * Closes any file related access if the upload is on
 * disk and releases the buffer for the file.
 */
void destroy() {
    // Use getAndSet(null) instead of getAndUpdate: getAndUpdate's update
    // function may be invoked more than once under contention, and the
    // original performed the close() side effect inside that function,
    // risking repeated close attempts. getAndSet atomically claims the
    // channel exactly once; the close happens outside the atomic operation.
    // NOTE(review): assumes fileAccess holds a FileChannel (matches the log
    // message) — confirm against the field declaration.
    FileChannel channel = fileAccess.getAndSet(null);
    if (channel != null) {
        try {
            channel.close();
        } catch (IOException e) {
            LOG.warn("Error closing file channel for disk file upload", e);
        }
    }
    data.release();
}
private String recolorMessage(boolean transparent, String message, ChatMessageType messageType) { final Collection<ChatColor> chatColors = colorCache.get(messageType); final AtomicReference<String> resultMessage = new AtomicReference<>(message); // Replace custom formatting with actual colors chatColors.stream() .filter(chatColor -> chatColor.isTransparent() == transparent) .forEach(chatColor -> resultMessage.getAndUpdate(oldMessage -> oldMessage.replaceAll( "<col" + chatColor.getType().name() + ">", ColorUtil.colorTag(chatColor.getColor())))); return resultMessage.get(); }
@Override
public void start(BundleContext context) throws Exception {
    // Atomically claim the core-bundle slot: the update keeps any existing
    // non-null context, so only the first bundle to start wins. getAndUpdate
    // returns the *previous* value; null means we installed ours.
    BundleContext currentContext = CORE_BUNDLE.getAndUpdate(current -> current == null ? context : current);
    if (currentContext == null) {
        String greeting = "Detected OSGi Environment (core is in bundle: " + context.getBundle() + ")";
        // OSGi-based service loading can be disabled via the OSGI_LOADING
        // system/framework property ("false" = disabled).
        if ("false".equalsIgnoreCase(context.getProperty(OSGI_LOADING))) {
            SafeOsgi.disableOSGiServiceLoading();
            LOGGER.info(greeting + ": OSGi Based Service Loading Disabled Via System/Framework Property - Extensions Outside This Bundle Will Not Be Detected");
            // Debug aid: list every ServiceFactory visible to plain JDK
            // service loading (i.e. what will be seen without OSGi).
            LOGGER.debug("JDK Service Loading Sees:\n\t" + stream(spliterator(ClassLoading.servicesOfType(ServiceFactory.class).iterator(), Long.MAX_VALUE, 0), false)
                .map(sf -> sf.getServiceType().getName()).collect(joining("\n\t")));
        } else {
            SafeOsgi.enableOSGiServiceLoading();
            LOGGER.info(greeting + ": Using OSGi Based Service Loading");
        }
    } else {
        // Another bundle already registered against the same core classes —
        // a deployment error, so fail fast with both bundle identities.
        throw new IllegalStateException("Multiple bundle instances running against the same core classes: existing bundle: " + currentContext.getBundle() + " new bundle: " + context.getBundle());
    }
}
/**
 * Looks up the per-slot worker resources for a topology, consulting the
 * cluster-state assignment and caching the result on a miss.
 */
private Map<WorkerSlot, WorkerResources> getWorkerResourcesForTopology(String topoId) {
    // Serve from the cache when possible.
    Map<WorkerSlot, WorkerResources> cached = idToWorkerResources.get().get(topoId);
    if (cached != null) {
        return cached;
    }
    Map<WorkerSlot, WorkerResources> resources = new HashMap<>();
    Assignment assignment = stormClusterState.assignmentInfo(topoId, null);
    if (assignment != null && assignment.is_set_worker_resources()) {
        assignment.get_worker_resources().forEach((nodeInfo, workerResources) -> {
            WorkerSlot slot = new WorkerSlot(nodeInfo.get_node(), nodeInfo.get_port_iterator().next());
            resources.put(slot, workerResources);
        });
        // Only cache when real resource data was present, matching the
        // behavior of skipping unknown/empty assignments.
        idToWorkerResources.getAndUpdate(new Assoc<>(topoId, resources));
    }
    return resources;
}
cachedNodeToPortSocket.getAndUpdate(prev -> { Map<NodeInfo, IConnection> next = new HashMap<>(prev); for (NodeInfo nodeInfo : newConnections) { cachedNodeToPortSocket.getAndUpdate(prev -> { Map<NodeInfo, IConnection> next = new HashMap<>(prev); removeConnections.forEach(next::remove);
/**
 * Resets every logger whose temporary level override has passed its timeout
 * back to its configured reset level, pruning expired entries from the
 * published log-config snapshot.
 */
public void resetLogLevels() {
    TreeMap<String, LogLevel> levelMap = latestLogConfig.get();
    LOG.debug("Resetting log levels: Latest log config is {}", levelMap);
    LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false);
    // Walk the loggers in descending name order, expiring stale overrides.
    for (Map.Entry<String, LogLevel> entry : levelMap.descendingMap().entrySet()) {
        String loggerName = entry.getKey();
        LogLevel loggerSetting = entry.getValue();
        // Skip entries whose reset timeout has not yet elapsed.
        if (loggerSetting.get_reset_log_level_timeout_epoch() >= Time.currentTimeMillis()) {
            continue;
        }
        String resetLogLevel = loggerSetting.get_reset_log_level();
        LOG.info("{}: Resetting level to {}", loggerName, resetLogLevel);
        setLoggerLevel(loggerContext, loggerName, resetLogLevel);
        // Publish a new immutable snapshot without the expired entry.
        latestLogConfig.getAndUpdate(previous -> {
            TreeMap<String, LogLevel> next = new TreeMap<>(previous);
            next.remove(loggerName);
            return next;
        });
    }
    loggerContext.updateLoggers();
}
/**
 * Atomically transitions the circuit breaker to {@code newState}, publishing
 * a transition event only when the state actually changed.
 */
private void stateTransition(State newState, Function<CircuitBreakerState, CircuitBreakerState> newStateGenerator) {
    // No-op when already in the target state; otherwise generate the
    // replacement from the current state inside the atomic update.
    CircuitBreakerState previous = stateReference.getAndUpdate(current ->
        current.getState() == newState ? current : newStateGenerator.apply(current));
    if (previous.getState() != newState) {
        publishStateTransitionEvent(StateTransition.transitionBetween(previous.getState(), newState));
    }
}
@Override
public void accept(Long sample) {
    // Unbox once, then atomically fold the sample into the running
    // count/total pair as a fresh immutable Metrics value.
    final long value = sample;
    foo.getAndUpdate(previous -> new Metrics(previous.count + 1, previous.total + value));
}
fileAccess.getAndUpdate(channel -> { if (channel == null) { try {
// Activates a newly submitted topology: builds its StormBase record,
// registers it in cluster state, seeds the executor cache, and notifies
// action listeners. Caller must have validated/normalized topoConf.
private void startTopology(String topoName, String topoId, TopologyStatus initStatus, String owner, String principal, Map<String, Object> topoConf, StormTopology stormTopology) throws KeyNotFoundException, AuthorizationException, IOException, InvalidTopologyException {
    // Only ACTIVE/INACTIVE are valid initial states.
    assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
    IStormClusterState state = stormClusterState;
    // Count the starting executors per component of the *system* topology
    // (user topology plus system components).
    Map<String, Integer> numExecutors = new HashMap<>();
    StormTopology topology = StormCommon.systemTopology(topoConf, stormTopology);
    for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
        numExecutors.put(entry.getKey(), StormCommon.numStartExecutors(entry.getValue()));
    }
    LOG.info("Activating {}: {}", topoName, topoId);
    StormBase base = new StormBase();
    base.set_name(topoName);
    // Topology version is optional in the conf.
    if (topoConf.containsKey(Config.TOPOLOGY_VERSION)) {
        base.set_topology_version(ObjectReader.getString(topoConf.get(Config.TOPOLOGY_VERSION)));
    }
    base.set_launch_time_secs(Time.currentTimeSecs());
    base.set_status(initStatus);
    // Defaults to 0 workers when TOPOLOGY_WORKERS is unset.
    base.set_num_workers(ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 0));
    base.set_component_executors(numExecutors);
    base.set_owner(owner);
    base.set_principal(principal);
    base.set_component_debug(new HashMap<>());
    // Publish the topology to cluster state before seeding local caches.
    state.activateStorm(topoId, base, topoConf);
    // Seed the executor cache so later lookups need not recompute.
    idToExecutors.getAndUpdate(new Assoc<>(topoId, new HashSet<>(computeExecutors(topoId, base, topoConf, stormTopology))));
    notifyTopologyActionListener(topoName, "activate");
}
/**
 * Forces the circuit breaker back to the CLOSED state, publishing a
 * transition event if it was not already closed, then a reset event.
 */
@Override
public void reset() {
    // getAndSet replaces the previous getAndUpdate whose update function
    // ignored its argument: getAndUpdate may re-invoke the function under
    // contention, allocating a fresh ClosedState on every retry. getAndSet
    // installs a single instance unconditionally and atomically — exactly
    // the semantics reset() needs.
    CircuitBreakerState previousState = stateReference.getAndSet(new ClosedState(this));
    if (previousState.getState() != CLOSED) {
        publishStateTransitionEvent(StateTransition.transitionBetween(previousState.getState(), CLOSED));
    }
    publishResetEvent();
}
// Removes all cluster-state, blob-store, and on-disk artifacts for topologies
// that are finished and eligible for cleanup. Leader-only; the teardown calls
// below are order-sensitive, so keep the sequence intact.
@VisibleForTesting
public void doCleanup() throws Exception {
    if (!isLeader()) {
        LOG.info("not a leader, skipping cleanup");
        return;
    }
    IStormClusterState state = stormClusterState;
    Set<String> toClean;
    // Snapshot the cleanup candidates under the submit lock so a concurrent
    // submission cannot race with candidate selection.
    synchronized (submitLock) {
        toClean = topoIdsToClean(state, blobStore, this.conf);
    }
    if (toClean != null) {
        for (String topoId : toClean) {
            LOG.info("Cleaning up {}", topoId);
            state.teardownHeartbeats(topoId);
            state.teardownTopologyErrors(topoId);
            state.removeAllPrivateWorkerKeys(topoId);
            state.removeBackpressure(topoId);
            rmDependencyJarsInTopology(topoId);
            forceDeleteTopoDistDir(topoId);
            rmTopologyKeys(topoId);
            heartbeatsCache.removeTopo(topoId);
            // Finally drop the cached executor set for the dead topology.
            idToExecutors.getAndUpdate(new Dissoc<>(topoId));
        }
    }
}
typeVariable = typeVariable.getFirstTypeVariable().orElse(Argument.OBJECT_ARGUMENT); dataReference.subject.getAndUpdate(subject -> { if (subject == null) { ReplaySubject childSubject = ReplaySubject.create(); dataReference.upload.getAndUpdate(upload -> { if (upload == null) { return new NettyStreamingFileUpload(
void doRebalance(String topoId, StormBase stormBase) throws Exception { RebalanceOptions rbo = stormBase.get_topology_action_options().get_rebalance_options(); StormBase updated = new StormBase(); updated.set_topology_action_options(null); updated.set_component_debug(Collections.emptyMap()); if (rbo.is_set_num_executors()) { updated.set_component_executors(rbo.get_num_executors()); } if (rbo.is_set_num_workers()) { updated.set_num_workers(rbo.get_num_workers()); } stormClusterState.updateStorm(topoId, updated); updateBlobStore(topoId, rbo, ServerUtils.principalNameToSubject(rbo.get_principal())); idToExecutors.getAndUpdate(new Dissoc<>(topoId)); // remove the executors cache to let it recompute. mkAssignments(topoId); }
@Override
public void startAutoIndexingProperty( String propName )
{
    propertyKeysToInclude.getAndUpdate( current ->
    {
        // Build a replacement snapshot containing the new key; the published
        // set itself is never mutated in place.
        Set<String> withProperty = new HashSet<>( current );
        withProperty.add( propName );
        return withProperty;
    } );
}
/**
 * Cancels all in-flight work and the keep-alive task (if scheduled) when the
 * WebSocket connection closes.
 */
@Override
public void afterConnectionClosed(WebSocketSession session, CloseStatus status) {
    cancelAll();
    if (taskScheduler != null) {
        // getAndSet(null) replaces the previous getAndUpdate: getAndUpdate's
        // function may be invoked more than once under contention, and it
        // performed task.cancel(false) as a side effect inside that retry
        // loop. getAndSet claims the task exactly once; cancellation happens
        // outside the atomic operation.
        // NOTE(review): assumes keepAlive holds a ScheduledFuture — confirm
        // against the field declaration.
        ScheduledFuture<?> task = this.keepAlive.getAndSet(null);
        if (task != null) {
            task.cancel(false);
        }
    }
}
/**
 * Atomically appends a host to the published healthy-host snapshot.
 */
private void addHost(final HostMetaData host) {
    this.healthyHosts.getAndUpdate(current -> {
        // Copy-on-write: never mutate the published host list.
        final List<HostMetaData> updatedHosts = new ArrayList<>(current.getHosts());
        // Fix: the original wrapped the append in
        // "if (updatedHosts.add(host)) ... else return current", but
        // ArrayList.add always returns true (per the Collection.add contract
        // for lists), making the else branch unreachable dead code. Append
        // unconditionally instead. If de-duplication was the intent, a Set
        // would be required — behavior here matches the original exactly.
        updatedHosts.add(host);
        return HealthyHosts.healthy(updatedHosts);
    });
}