public MetricsWorkflowExecutorListener(MetricRegistry metricRegistry, ExecutorDao executors) {
  this.metricRegistry = metricRegistry;
  this.nflowExecutorGroup = executors.getExecutorGroup();
  this.nflowExecutorId = executors.getExecutorId();
}
public synchronized int getExecutorId() {
  if (executorId == -1) {
    executorId = allocateExecutorId();
  }
  return executorId;
}
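// Minimal sketch of the backing field assumed by getExecutorId() above; the -1 sentinel is implied
// by the check, but the field declaration itself is an assumption, not copied from the source.
private int executorId = -1; // -1 means "no id allocated in nflow_executor yet"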
@Transactional
public void recoverWorkflowInstance(final int instanceId, final WorkflowInstanceAction action) {
  int executorId = executorInfo.getExecutorId();
  // Release the instance back to the queue, but only if its current owner is another executor in
  // this group whose lease has already expired.
  int updated = jdbc.update("update nflow_workflow set executor_id = null, status = " + sqlVariants.workflowStatus(inProgress)
      + " where id = ? and executor_id in (select id from nflow_executor where " + executorInfo.getExecutorGroupCondition()
      + " and id <> " + executorId + " and expires < current_timestamp)", instanceId);
  if (updated > 0) {
    insertWorkflowInstanceAction(action);
  }
}
public void updateActiveTimestamp() {
  updateWithPreparedStatement("update nflow_executor set active=current_timestamp, expires="
      + sqlVariants.currentTimePlusSeconds(timeoutSeconds) + " where id = " + getExecutorId());
}
public void markShutdown() {
  try {
    jdbc.update("update nflow_executor set expires=current_timestamp where executor_group = ? and id = ?", executorGroup,
        getExecutorId());
  } catch (DataAccessException e) {
    logger.warn("Failed to mark executor as expired", e);
  }
}
}
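// Illustrative usage sketch (not from the source): expiring this executor's row on JVM shutdown
// lets the other nodes of the group reclaim its instances via recoverWorkflowInstance(...) above.
// The executorDao reference and thread name are assumptions.
Runtime.getRuntime().addShutdownHook(new Thread(executorDao::markShutdown, "nflow-executor-shutdown"));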
private void updateNextWorkflowInstancesWithMultipleUpdates(List<OptimisticLockKey> instances, List<Integer> ids) {
  boolean raceConditionDetected = false;
  for (OptimisticLockKey instance : instances) {
    // Optimistic locking: the update succeeds only if the row is still unassigned and unmodified
    // since it was polled.
    int updated = jdbc.update(updateInstanceForExecutionQuery() + " where id = ? and modified = ? and executor_id is null",
        instance.id, instance.modified);
    if (updated == 1) {
      ids.add(instance.id);
    } else {
      raceConditionDetected = true;
    }
  }
  if (raceConditionDetected && ids.isEmpty()) {
    throw new PollingRaceConditionException("Race condition in polling workflow instances detected. "
        + "Multiple pollers using same name (" + executorInfo.getExecutorGroup() + ")");
  }
}
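// Hedged sketch of the OptimisticLockKey value object used by the two polling strategies; only the
// two fields referenced above (id and modified) come from the source, the types and constructor are
// assumptions.
static class OptimisticLockKey {
  final int id;                // workflow instance id read by the polling query
  final Timestamp modified;    // value of the row's modified column at poll time

  OptimisticLockKey(int id, Timestamp modified) {
    this.id = id;
    this.modified = modified;
  }
}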
while (!shutdownRequested) {
  try {
    executor.waitUntilQueueSizeLowerThanThreshold(executorRecovery.getMaxWaitUntil());
    executorRecovery.tick();
    int potentiallyStuckProcessors = stateProcessorFactory.getPotentiallyStuckProcessors();
    if (potentiallyStuckProcessors > 0) {
      executorRecovery.markShutdown();
      logger.info("Shutdown finished.");
      shutdownDone.countDown();
conditions.add(executorInfo.getExecutorGroupCondition());
if (!isEmpty(query.ids)) {
  conditions.add("w.id in (:ids)");
  // bind the ids referenced by the condition above
  params.addValue("ids", query.ids);
}
params.addValue("executor_group", executorInfo.getExecutorGroup());
sql += " where " + collectionToDelimitedString(conditions, " and ") + " order by w.created desc";
sql = sqlVariants.limit(sql, ":limit");
public void tick() {
  if (nextUpdate.isAfterNow()) {
    return;
  }
  nextUpdate = now().plusSeconds(keepaliveIntervalSeconds);
  updateActiveTimestamp();
  recoverWorkflowInstancesFromDeadNodes();
}
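// Sketch of the throttling state assumed by tick() above; the Joda-Time type and the epoch
// initializer are assumptions, chosen so that the first call refreshes the keepalive timestamp
// immediately.
private DateTime nextUpdate = new DateTime(0);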
@Transactional
public boolean wakeUpWorkflowExternally(int workflowInstanceId) {
  String sql = "update nflow_workflow set next_activation = (case when executor_id is null then "
      + "least(current_timestamp, coalesce(next_activation, current_timestamp)) else next_activation end), "
      + "external_next_activation = current_timestamp where " + executorInfo.getExecutorGroupCondition()
      + " and id = ? and next_activation is not null";
  return jdbc.update(sql, workflowInstanceId) == 1;
}
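// Illustrative caller sketch (the workflowInstanceDao reference and logging are assumptions): a
// false return means no row matched, e.g. the instance belongs to another executor group or has no
// pending next_activation.
boolean woken = workflowInstanceDao.wakeUpWorkflowExternally(workflowInstanceId);
if (!woken) {
  logger.info("Workflow instance {} was not waiting for activation", workflowInstanceId);
}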
/**
 * Return all workflow executors of this executor group.
 *
 * @return The workflow executors.
 */
public List<WorkflowExecutor> getWorkflowExecutors() {
  return executorDao.getExecutors();
}
}
@Inject
public WorkflowDispatcher(WorkflowInstanceExecutor executor, WorkflowInstanceDao workflowInstances,
    WorkflowStateProcessorFactory stateProcessorFactory, ExecutorDao executorRecovery, Environment env) {
  this.executor = executor;
  this.workflowInstances = workflowInstances;
  this.stateProcessorFactory = stateProcessorFactory;
  this.executorRecovery = executorRecovery;
  this.sleepTime = env.getRequiredProperty("nflow.dispatcher.sleep.ms", Long.class);
  this.stuckThreadThresholdSeconds = env.getRequiredProperty("nflow.executor.stuckThreadThreshold.seconds", Integer.class);
  if (!executorRecovery.isTransactionSupportEnabled()) {
    throw new BeanCreationException("Transaction support must be enabled");
  }
}
@Inject
public void setEnvironment(Environment env) {
  this.executorGroup = trimToNull(env.getRequiredProperty("nflow.executor.group"));
  this.executorGroupCondition = createWhereCondition(executorGroup);
  timeoutSeconds = env.getRequiredProperty("nflow.executor.timeout.seconds", Integer.class);
  keepaliveIntervalSeconds = env.getRequiredProperty("nflow.executor.keepalive.seconds", Integer.class);
}
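// Example of the properties the setter above requires; the values are illustrative, not nFlow
// defaults. MockEnvironment (from spring-test) is used only to keep the sketch self-contained.
MockEnvironment env = new MockEnvironment()
    .withProperty("nflow.executor.group", "nflow")
    .withProperty("nflow.executor.timeout.seconds", "900")
    .withProperty("nflow.executor.keepalive.seconds", "60");
executorDao.setEnvironment(env);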
String updateInstanceForExecutionQuery() {
  return "update nflow_workflow set executor_id = " + executorInfo.getExecutorId() + ", status = "
      + sqlVariants.workflowStatus(executing) + ", " + "external_next_activation = null";
}
private void updateNextWorkflowInstancesWithBatchUpdate(List<OptimisticLockKey> instances, List<Integer> ids) {
  List<Object[]> batchArgs = new ArrayList<>(instances.size());
  for (OptimisticLockKey instance : instances) {
    batchArgs.add(new Object[] { instance.id, instance.modified });
    ids.add(instance.id);
  }
  int[] updateStatuses = jdbc
      .batchUpdate(updateInstanceForExecutionQuery() + " where id = ? and modified = ? and executor_id is null", batchArgs);
  Iterator<Integer> idIt = ids.iterator();
  for (int status : updateStatuses) {
    idIt.next();
    if (status == 0) {
      // Another poller grabbed this instance first; drop it from the result.
      idIt.remove();
      if (ids.isEmpty()) {
        throw new PollingRaceConditionException("Race condition in polling workflow instances detected. "
            + "Multiple pollers using same name (" + executorInfo.getExecutorGroup() + ")");
      }
      continue;
    }
    if (status != 1 && status != Statement.SUCCESS_NO_INFO) {
      throw new PollingRaceConditionException("Race condition in polling workflow instances detected. "
          + "Multiple pollers using same name (" + executorInfo.getExecutorGroup() + ")");
    }
  }
}
private QueueStatistics queuedStatistics() {
  String sql = "select count(1) items, min(next_activation) oldest, max(next_activation) newest, current_timestamp dbtime "
      + "from nflow_workflow "
      + "where next_activation < current_timestamp and executor_id is null and " + executorInfo.getExecutorGroupCondition();
  return jdbc.query(sql, new StatisticsExtractor(false));
}
public void storeWorkflowDefinition(AbstractWorkflowDefinition<? extends WorkflowState> definition) {
  StoredWorkflowDefinition storedDefinition = convert(definition);
  MapSqlParameterSource params = new MapSqlParameterSource();
  params.addValue("type", definition.getType());
  String serializedDefinition = serializeDefinition(storedDefinition);
  params.addValue("definition_sha1", sha1(serializedDefinition));
  params.addValue("definition", serializedDefinition, sqlVariants.longTextType());
  params.addValue("modified_by", executorInfo.getExecutorId());
  params.addValue("executor_group", executorInfo.getExecutorGroup());
  // Update only if a row of this type exists with a different sha1; if nothing was updated, try to
  // insert and tolerate the case where another executor stored the definition first.
  String sql = "update nflow_workflow_definition "
      + "set definition = :definition, modified_by = :modified_by, definition_sha1 = :definition_sha1 "
      + "where type = :type and executor_group = :executor_group and definition_sha1 <> :definition_sha1";
  int updatedRows = namedJdbc.update(sql, params);
  if (updatedRows == 0) {
    sql = "insert into nflow_workflow_definition(type, definition_sha1, definition, modified_by, executor_group) "
        + "values (:type, :definition_sha1, :definition, :modified_by, :executor_group)";
    try {
      namedJdbc.update(sql, params);
    } catch (DataIntegrityViolationException dex) {
      logger.debug("Another executor already stored the definition.", dex);
    }
  }
}
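// Hedged sketch of the sha1(...) helper referenced above (the real implementation may differ);
// requires java.security.MessageDigest, java.security.NoSuchAlgorithmException and
// java.nio.charset.StandardCharsets.
private static String sha1(String value) {
  try {
    MessageDigest digest = MessageDigest.getInstance("SHA-1");
    StringBuilder sb = new StringBuilder();
    for (byte b : digest.digest(value.getBytes(StandardCharsets.UTF_8))) {
      sb.append(String.format("%02x", b));
    }
    return sb.toString();
  } catch (NoSuchAlgorithmException e) {
    throw new IllegalStateException("SHA-1 algorithm not available", e);
  }
}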
private String updateWorkflowInstanceSql() {
  return "update nflow_workflow set status = " + sqlVariants.workflowStatus() + ", state = ?, state_text = ?, "
      + "next_activation = " + sqlVariants.nextActivationUpdate()
      + ", external_next_activation = null, executor_id = ?, retries = ? where id = ? and executor_id = "
      + executorInfo.getExecutorId();
}
private int insertWorkflowInstanceWithCte(WorkflowInstance instance) {
  try {
    StringBuilder sqlb = new StringBuilder(256);
    sqlb.append("with wf as (" + insertWorkflowInstanceSql() + " returning id)");
    Object[] instanceValues = new Object[] { instance.type, instance.rootWorkflowId, instance.parentWorkflowId,
        instance.parentActionId, instance.businessKey, instance.externalId, executorInfo.getExecutorGroup(),
        instance.status.name(), instance.state, abbreviate(instance.stateText, instanceStateTextLength),
        toTimestamp(instance.nextActivation) };
    int pos = instanceValues.length;
    Object[] args = Arrays.copyOf(instanceValues, pos + instance.stateVariables.size() * 2);
    // Append one CTE per state variable so the instance and its variables are inserted in a single statement.
    for (Entry<String, String> var : instance.stateVariables.entrySet()) {
      sqlb.append(", ins").append(pos).append(" as (").append(insertWorkflowInstanceStateSql())
          .append(" select wf.id,0,?,? from wf)");
      args[pos++] = var.getKey();
      args[pos++] = var.getValue();
    }
    sqlb.append(" select wf.id from wf");
    return jdbc.queryForObject(sqlb.toString(), Integer.class, args);
  } catch (DuplicateKeyException e) {
    logger.warn("Failed to insert workflow instance", e);
    return -1;
  }
}
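// For illustration: with the eleven instance columns above, the first state variable produces a CTE
// named ins11, so the generated SQL has roughly this shape (the column lists come from the
// insertWorkflowInstance*Sql() helpers, which are not shown in this excerpt):
//   with wf as (insert into nflow_workflow (...) values (...) returning id),
//        ins11 as (insert into ... select wf.id, 0, ?, ? from wf)
//   select wf.id from wf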
private QueueStatistics executionStatistics() {
  String sql = "select count(1) items, current_timestamp oldest, current_timestamp newest, current_timestamp dbtime "
      + "from nflow_workflow "
      + "where executor_id is not null and " + executorInfo.getExecutorGroupCondition();
  return jdbc.query(sql, new StatisticsExtractor(true));
}