/**
 * Converts a map of active workflow instances and their run states into a flat list of
 * {@link InstanceState} pairs.
 *
 * @param activeStatesMap mapping from workflow instance to its current run state
 * @return a list containing one {@link InstanceState} per entry of {@code activeStatesMap}
 */
public static List<InstanceState> getActiveInstanceStates(
    Map<WorkflowInstance, RunState> activeStatesMap) {
  final Stream<InstanceState> instanceStates = activeStatesMap.entrySet().stream()
      .map(mapEntry -> InstanceState.create(mapEntry.getKey(), mapEntry.getValue()));
  return instanceStates.collect(toList());
}
/**
 * Determines which active workflow instances have exceeded the TTL configured for their
 * current run state.
 *
 * @param workflows    known workflows keyed by id; an instance whose workflow is absent is
 *                     still evaluated (with an empty workflow) by {@code hasTimedOut}
 * @param activeStates the instances to examine, evaluated in parallel
 * @param instant      the point in time against which timeouts are measured
 * @param ttl          per-state TTL configuration
 * @return the set of workflow instances considered timed out
 */
public static Set<WorkflowInstance> getTimedOutInstances(Map<WorkflowId, Workflow> workflows,
    List<InstanceState> activeStates, Instant instant, TimeoutConfig ttl) {
  return activeStates.parallelStream()
      .filter(instanceState -> {
        final RunState runState = instanceState.runState();
        final Optional<Workflow> workflow =
            Optional.ofNullable(workflows.get(instanceState.workflowInstance().workflowId()));
        return hasTimedOut(workflow, runState, instant, ttl.ttlOf(runState.state()));
      })
      .map(InstanceState::workflowInstance)
      .collect(toSet());
}
/**
 * Batch-loads the workflow definitions referenced by the given active instance states.
 *
 * @param activeStates the active instances whose workflow ids should be resolved
 * @return the workflows found in storage, keyed by workflow id
 */
private Map<WorkflowId, Workflow> getWorkflows(final List<InstanceState> activeStates) {
  final Set<WorkflowId> referencedIds = activeStates.stream()
      .map(InstanceState::workflowInstance)
      .map(WorkflowInstance::workflowId)
      .collect(toSet());
  // Single batched read instead of one lookup per instance.
  return storage.workflows(referencedIds);
}
/**
 * Fans out a dequeue evaluation for every eligible instance onto {@code executor} and waits
 * for all of them to finish.
 *
 * <p>Each instance is processed by {@code dequeueInstance} in its own task; the tasks share a
 * memoized per-resource exhaustion cache and the running resource-demand counters. Failures of
 * individual tasks are logged and do not abort the remaining waits; an interrupt re-interrupts
 * the thread and is rethrown wrapped in a {@link RuntimeException}.
 *
 * @param config                     current Styx configuration
 * @param resources                  known resources keyed by id
 * @param workflowResourceReferences resource ids referenced per workflow
 * @param workflows                  known workflows keyed by id
 * @param eligibleInstances          instances eligible for dequeue (assumed distinct)
 * @param currentResourceDemand      accumulated resource demand, shared across tasks
 */
private void dequeueInstances(
    StyxConfig config,
    Map<String, Resource> resources,
    Map<WorkflowId, Set<String>> workflowResourceReferences,
    Map<WorkflowId, Workflow> workflows,
    List<InstanceState> eligibleInstances,
    AtomicLongMap<String> currentResourceDemand) {

  // Shared across all tasks so each resource's exhaustion check is computed at most once.
  final ConcurrentMap<String, Boolean> exhaustionCache = new ConcurrentHashMap<>();

  final Map<WorkflowInstance, CompletableFuture<Void>> pending = eligibleInstances.stream()
      .collect(toMap(
          InstanceState::workflowInstance,
          state -> CompletableFuture.runAsync(
              () -> dequeueInstance(
                  config,
                  resources,
                  workflowResourceReferences,
                  Optional.ofNullable(workflows.get(state.workflowInstance().workflowId())),
                  state,
                  exhaustionCache,
                  currentResourceDemand),
              executor)));

  for (Map.Entry<WorkflowInstance, CompletableFuture<Void>> pendingEntry : pending.entrySet()) {
    try {
      pendingEntry.getValue().get();
    } catch (InterruptedException e) {
      LOG.warn("Interrupted", e);
      // Preserve the interrupt status for callers further up the stack.
      Thread.currentThread().interrupt();
      throw new RuntimeException(e);
    } catch (ExecutionException e) {
      // Best effort: a single failed instance must not block the others from being awaited.
      LOG.error("Failed to process instance for dequeue: " + pendingEntry.getKey(), e);
    }
  }
}
/**
 * Counts, per resource id, how many active instances are currently consuming that resource.
 *
 * <p>Timed-out instances and instances whose run state does not consume resources are
 * excluded. Each remaining instance is expanded to its (possibly decorated) resource set and
 * the occurrences are tallied concurrently.
 *
 * @param globalConcurrencyEnabled whether the global concurrency resource applies
 * @param activeStates             the active instances, evaluated in parallel
 * @param timedOutInstances        instances that have timed out and must be ignored
 * @param resourceDecorator        decorator that may add/alter resources per workflow
 * @param workflows                known workflows keyed by id
 * @return a concurrent map from resource id to its current usage count
 */
public static ConcurrentMap<String, Long> getResourceUsage(boolean globalConcurrencyEnabled,
    List<InstanceState> activeStates, Set<WorkflowInstance> timedOutInstances,
    WorkflowResourceDecorator resourceDecorator, Map<WorkflowId, Workflow> workflows) {
  return activeStates.parallelStream()
      // Skip timed-out instances and states that hold no resources.
      .filter(state -> !timedOutInstances.contains(state.workflowInstance())
                       && isConsumingResources(state.runState().state()))
      .flatMap(state ->
          pairWithResources(globalConcurrencyEnabled, state, workflows, resourceDecorator))
      .collect(groupingByConcurrent(
          ResourceWithInstance::resource,
          ConcurrentHashMap::new,
          counting()));
}
/**
 * Expands a single instance state into a stream of (resource, instance) pairs.
 *
 * <p>The base resource set comes from {@code workflowResources}; when the workflow definition
 * is known, the decorator may replace/extend that set. When the workflow is unknown, the
 * undecorated base set is used as-is.
 *
 * @param globalConcurrencyEnabled whether the global concurrency resource applies
 * @param instanceState            the instance to pair with its resources
 * @param workflows                known workflows keyed by id
 * @param resourceDecorator        decorator applied when the workflow definition is present
 * @return one {@link ResourceWithInstance} per resource the instance consumes
 */
private static Stream<ResourceWithInstance> pairWithResources(boolean globalConcurrencyEnabled,
    InstanceState instanceState, Map<WorkflowId, Workflow> workflows,
    WorkflowResourceDecorator resourceDecorator) {
  final WorkflowId workflowId = instanceState.workflowInstance().workflowId();
  final Optional<Workflow> workflow = Optional.ofNullable(workflows.get(workflowId));
  final Set<String> baseResources = workflowResources(globalConcurrencyEnabled, workflow);

  final Set<String> effectiveResources = workflow
      .map(wf -> resourceDecorator.decorateResources(
          instanceState.runState(), wf.configuration(), baseResources))
      .orElse(baseResources);

  return effectiveResources.stream()
      .map(resource -> ResourceWithInstance.create(resource, instanceState));
}
/**
 * Emits a dequeue event for the given instance, logging whether this is the first attempt
 * or a retry.
 *
 * @param instanceState the instance being dequeued, carrying its current run state
 * @param resourceIds   the resource ids the dequeue event should reference
 */
private void sendDequeue(InstanceState instanceState, Set<String> resourceIds) {
  final WorkflowInstance workflowInstance = instanceState.workflowInstance();
  final RunState state = instanceState.runState();

  if (state.data().tries() != 0) {
    LOG.info("Executing {}, retry #{}", workflowInstance, state.data().tries());
  } else {
    LOG.info("Executing {}", workflowInstance);
  }

  // receiveIgnoreClosed: a concurrently closed instance is not an error here.
  stateManager.receiveIgnoreClosed(
      Event.dequeue(workflowInstance, resourceIds),
      state.counter());
}
LOG.debug("Evaluating instance for dequeue: {}", instanceState.workflowInstance()); workflowResourceReferences.getOrDefault(instanceState.workflowInstance().workflowId(), emptySet()); instanceState.runState(), workflow.configuration(), workflowResourceRefs)) .orElse(workflowResourceRefs); Event.runError(instanceState.workflowInstance(), String.format("Referenced resources not found: %s", unknownResources)), instanceState.runState().counter()); return; if (!depletedResources.isEmpty()) { LOG.debug("Resource limit reached for instance, not dequeueing: {}: exhausted resources={}", instanceState.workflowInstance(), depletedResources); MessageUtil.emitResourceLimitReachedMessage(stateManager, instanceState.runState(), depletedResources); return; Optional<ExecutionBlocker> blocker = Optional.empty(); try { blocker = gate.executionBlocker(instanceState.workflowInstance()) .toCompletableFuture().get(10, TimeUnit.SECONDS); } catch (InterruptedException e) { } catch (ExecutionException | TimeoutException e) { LOG.warn("Failed to check execution blocker for {}, assuming there is no blocker", instanceState.workflowInstance(), e); stateManager.receiveIgnoreClosed(Event.retryAfter(instanceState.workflowInstance(),
.filter(entry -> !timedOutInstances.contains(entry.workflowInstance())) .filter(entry -> shouldExecute(entry.runState())) .sorted(comparingLong(i -> i.runState().timestamp())) .collect(toList());