private void validatePublishments() { Collection<PolicyDefinition> definitions = context.getPolicies().values(); for (Publishment p : context.getPublishments().values()) { //TODO: check type; check serializer types; check dedup fields existence; check extend deduplicator... Set<String> unknown = p.getPolicyIds().stream().filter(pid -> definitions.stream().anyMatch(pd -> pd.getName().equals(pid))).collect(Collectors.toSet()); if (unknown.size() > 0) { state.appendPublishemtnValidation(p.getName(), String.format("publishment %s reference unknown/uneabled policy %s!", p.getName(), unknown)); } } }
/**
 * Copy constructor: snapshots every piece of scheduling state from the given
 * context into fresh mutable maps, so later mutations on this in-memory
 * context do not leak back into (or be affected by) the source context.
 *
 * @param context the context whose state is copied
 */
public InMemScheduleConext(IScheduleContext context) {
    this.topologies = new HashMap<>(context.getTopologies());
    this.usages = new HashMap<>(context.getTopologyUsages());
    this.policies = new HashMap<>(context.getPolicies());
    this.datasources = new HashMap<>(context.getDataSourceMetadata());
    this.policyAssignments = new HashMap<>(context.getPolicyAssignments());
    this.schemas = new HashMap<>(context.getStreamSchemas());
    this.monitoredStreams = new HashMap<>(context.getMonitoredStreams());
    this.publishments = new HashMap<>(context.getPublishments());
}
private List<WorkItem> findWorkingSets() { // find the unassigned definition List<WorkItem> workSets = new LinkedList<WorkItem>(); for (PolicyDefinition def : context.getPolicies().values()) { int expectParal = def.getParallelismHint(); if (expectParal == 0) { expectParal = policyDefaultParallelism; } // how to handle expand of an policy in a smooth transition manner // TODO policy fix PolicyAssignment assignment = context.getPolicyAssignments().get(def.getName()); if (assignment != null) { LOG.info("policy {} already allocated", def.getName()); continue; } WorkItem item = new WorkItem(def, expectParal); workSets.add(item); } LOG.info("work set calculation: {}", workSets); return workSets; }
private void validateStreams() { Collection<Kafka2TupleMetadata> datasources = context.getDataSourceMetadata().values(); Collection<PolicyDefinition> definitions = context.getPolicies().values(); for (StreamDefinition sd : context.getStreamSchemas().values()) { if (!datasources.stream().anyMatch(d -> d.getName().equals(sd.getDataSource()))) { state.appendStreamValidation(sd.getStreamId(), String.format("stream %s reference unknown data source %s !", sd.getStreamId(), sd.getDataSource())); } if (!definitions.stream().anyMatch(p -> p.getInputStreams().contains(sd.getStreamId()))) { state.appendUnusedStreams(sd.getStreamId()); } // more on columns if (sd.getColumns() == null || sd.getColumns().size() == 0) { state.appendStreamValidation(sd.getStreamId(), String.format("stream %s have empty columns!", sd.getStreamId())); } } }
PolicyDefinition definition = context.getPolicies().get(p); if (definition == null) { continue;
private void validatePolicies() { Collection<Publishment> pubs = context.getPublishments().values(); for (PolicyDefinition pd : context.getPolicies().values()) { if (!pubs.stream().anyMatch(p -> p.getPolicyIds().contains(pd.getName()))) { state.appendUnPublishedPolicies(pd.getName());
public ScheduleState generate(List<WorkItem> expandworkSets) { // topologyId -> SpoutSpec Map<String, SpoutSpec> topoSpoutSpecsMap = generateSpoutMonitorMetadata(); // grp-by meta spec(sort & grp) Map<String, RouterSpec> groupSpecsMap = generateGroupbyMonitorMetadata(); // alert bolt spec Map<String, AlertBoltSpec> alertSpecsMap = generateAlertMonitorMetadata(); Map<String, PublishSpec> publishSpecsMap = generatePublishMetadata(); String uniqueVersion = generateVersion(); ScheduleState status = new ScheduleState(uniqueVersion, topoSpoutSpecsMap, groupSpecsMap, alertSpecsMap, publishSpecsMap, context.getPolicyAssignments().values(), context.getMonitoredStreams().values(), context.getPolicies().values(), context.getStreamSchemas().values()); return status; }
PolicyDefinition definition = context.getPolicies().get(policyName); alertSpec.addBoltPolicy(boltUsage.getBoltId(), definition.getName());
PolicyDefinition def = context.getPolicies().get(policyName);
for (Entry<String, PolicyDefinition> entry : context.getPolicies().entrySet()) { if (entry.getKey().endsWith("_nodata_alert")) { policyDefinition = entry.getValue(); Assert.assertEquals(3, context.getPolicies().size());