public InMemScheduleConext(IScheduleContext context) {
    this.topologies = new HashMap<String, Topology>(context.getTopologies());
    this.usages = new HashMap<String, TopologyUsage>(context.getTopologyUsages());
    this.policies = new HashMap<String, PolicyDefinition>(context.getPolicies());
    this.datasources = new HashMap<String, Kafka2TupleMetadata>(context.getDataSourceMetadata());
    this.policyAssignments = new HashMap<String, PolicyAssignment>(context.getPolicyAssignments());
    this.schemas = new HashMap<String, StreamDefinition>(context.getStreamSchemas());
    this.monitoredStreams = new HashMap<StreamGroup, MonitoredStream>(context.getMonitoredStreams());
    this.publishments = new HashMap<String, Publishment>(context.getPublishments());
}
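// A hedged usage sketch, not from the source ('builder' is an assumed
// ScheduleContextBuilder): the constructor above copies each map shallowly,
// so entries can be added or removed on the in-memory copy without mutating
// the source context, while the contained metadata objects remain shared.
IScheduleContext base = builder.buildContext();
InMemScheduleConext working = new InMemScheduleConext(base);
working.getPolicies().remove("obsoletePolicy"); // hypothetical id; 'base' keeps its entry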
private void validatePublishments() {
    Collection<PolicyDefinition> definitions = context.getPolicies().values();
    for (Publishment p : context.getPublishments().values()) {
        // TODO: check type; check serializer types; check dedup fields existence; check extend deduplicator...
        // A policy id is unknown when no definition carries that name, hence noneMatch.
        Set<String> unknown = p.getPolicyIds().stream()
                .filter(pid -> definitions.stream().noneMatch(pd -> pd.getName().equals(pid)))
                .collect(Collectors.toSet());
        if (!unknown.isEmpty()) {
            state.appendPublishemtnValidation(p.getName(),
                    String.format("publishment %s references unknown/unenabled policy %s!", p.getName(), unknown));
        }
    }
}
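// A self-contained illustration of the membership test above, using plain
// strings instead of the real metadata types: noneMatch keeps exactly the
// policy ids that have no matching definition.
List<String> defined = Arrays.asList("p1", "p2");
Set<String> unknown = Stream.of("p1", "p3")
        .filter(pid -> defined.stream().noneMatch(d -> d.equals(pid)))
        .collect(Collectors.toSet()); // -> {"p3"}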
public ScheduleState generate(List<WorkItem> expandworkSets) {
    // topologyId -> SpoutSpec
    Map<String, SpoutSpec> topoSpoutSpecsMap = generateSpoutMonitorMetadata();
    // group-by metadata spec (sort & group)
    Map<String, RouterSpec> groupSpecsMap = generateGroupbyMonitorMetadata();
    // alert bolt spec
    Map<String, AlertBoltSpec> alertSpecsMap = generateAlertMonitorMetadata();
    Map<String, PublishSpec> publishSpecsMap = generatePublishMetadata();
    String uniqueVersion = generateVersion();
    ScheduleState status = new ScheduleState(uniqueVersion,
            topoSpoutSpecsMap,
            groupSpecsMap,
            alertSpecsMap,
            publishSpecsMap,
            context.getPolicyAssignments().values(),
            context.getMonitoredStreams().values(),
            context.getPolicies().values(),
            context.getStreamSchemas().values());
    return status;
}
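// A hedged sketch of a caller, not from the source ('generator' and 'client'
// are assumed names): generate() bundles every spec map under one version
// string so the whole schedule can be persisted and shipped atomically.
List<WorkItem> workItems = findWorkingSets();
ScheduleState state = generator.generate(workItems);
client.addScheduleState(state); // hypothetical persistence call on the metadata service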
Map<String, StreamDefinition> streamSchemaMap = context.getStreamSchemas();
Map<String, Kafka2TupleMetadata> datasourcesMap = context.getDataSourceMetadata();
for (TopologyUsage usage : context.getTopologyUsages().values()) {
    Topology topo = context.getTopologies().get(usage.getTopoName());

    PolicyDefinition def = context.getPolicies().get(policyName);
    PolicyAssignment assignment = context.getPolicyAssignments().get(policyName);
    if (assignment == null) {
        LOG.error("cannot find assignment for policy {}!", policyName);
for (Publishment pub : context.getPublishments().values()) {
    for (String policyId : pub.getPolicyIds()) {
        List<Publishment> policyPubs = policyToPub.get(policyId);

for (TopologyUsage u : context.getTopologyUsages().values()) {
    PublishSpec pubSpec = pubSpecs.get(u.getTopoName());
    if (pubSpec == null) {
        pubSpec = new PublishSpec(u.getTopoName(), context.getTopologies().get(u.getTopoName()).getPubBoltId());
        pubSpecs.put(u.getTopoName(), pubSpec);
    }

    PolicyDefinition definition = context.getPolicies().get(p);
    if (definition == null) {
        continue;
    }
private Map<String, AlertBoltSpec> generateAlertMonitorMetadata() {
    Map<String, AlertBoltSpec> alertSpecs = new HashMap<String, AlertBoltSpec>();
    for (TopologyUsage u : context.getTopologyUsages().values()) {
        AlertBoltSpec alertSpec = alertSpecs.get(u.getTopoName());
        if (alertSpec == null) {

        PolicyDefinition definition = context.getPolicies().get(policyName);
        alertSpec.addBoltPolicy(boltUsage.getBoltId(), definition.getName());
        for (Publishment publish : context.getPublishments().values()) {
            if (!publish.getPolicyIds().contains(definition.getName())) {
                continue;
            }
for (Entry<String, PolicyDefinition> entry : context.getPolicies().entrySet()) {
    if (entry.getKey().endsWith("_nodata_alert")) {
        policyDefinition = entry.getValue();

Assert.assertEquals(3, context.getPolicies().size());

for (Entry<String, Kafka2TupleMetadata> entry : context.getDataSourceMetadata().entrySet()) {
    if ("nodata_alert_aggregation_ds".equals(entry.getKey())) {
        datasource = entry.getValue();

for (Entry<String, Publishment> entry : context.getPublishments().entrySet()) {
    if (publishmentName.equals(entry.getKey())) {
        publishment = entry.getValue();
Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
Assert.assertEquals(0, context.getMonitoredStreams().size());
TopologyUsage topologyUsage = context.getTopologyUsages().get(slot.topologyName);
Set<String> topoPolicies = topologyUsage.getPolicies();
Assert.assertFalse(topoPolicies.contains(TEST_DATASOURCE_1));
private void placePolicyToQueue(PolicyDefinition def, StreamWorkSlotQueue queue, Map<String, PolicyAssignment> newAssignments) {
    for (WorkSlot slot : queue.getWorkingSlots()) {
        Topology targetTopology = context.getTopologies().get(slot.getTopologyName());
        TopologyUsage usage = context.getTopologyUsages().get(slot.getTopologyName());
        AlertBoltUsage alertBoltUsage = usage.getAlertBoltUsage(slot.getBoltId());
        placePolicy(def, alertBoltUsage, targetTopology, usage);
    }
    // queue.placePolicy(def);
    PolicyAssignment assignment = new PolicyAssignment(def.getName(), queue.getQueueId());
    context.getPolicyAssignments().put(def.getName(), assignment);
    newAssignments.put(def.getName(), assignment);
}
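// A hedged driver sketch ('findWorkSlotQueue' is an assumed helper, and
// 'item.def' assumes WorkItem exposes its PolicyDefinition): each unassigned
// policy is routed to a queue, and the resulting PolicyAssignment lands in
// both the context and the per-run 'newAssignments' map.
Map<String, PolicyAssignment> newAssignments = new HashMap<String, PolicyAssignment>();
for (WorkItem item : findWorkingSets()) {
    StreamWorkSlotQueue queue = findWorkSlotQueue(item); // hypothetical lookup
    if (queue != null) {
        placePolicyToQueue(item.def, queue, newAssignments);
    }
}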
private void validateStreams() {
    Collection<Kafka2TupleMetadata> datasources = context.getDataSourceMetadata().values();
    Collection<PolicyDefinition> definitions = context.getPolicies().values();
    for (StreamDefinition sd : context.getStreamSchemas().values()) {
        if (datasources.stream().noneMatch(d -> d.getName().equals(sd.getDataSource()))) {
            state.appendStreamValidation(sd.getStreamId(),
                    String.format("stream %s references unknown data source %s!", sd.getStreamId(), sd.getDataSource()));
        }
        if (definitions.stream().noneMatch(p -> p.getInputStreams().contains(sd.getStreamId()))) {
            state.appendUnusedStreams(sd.getStreamId());
        }
        // more on columns
        if (sd.getColumns() == null || sd.getColumns().isEmpty()) {
            state.appendStreamValidation(sd.getStreamId(),
                    String.format("stream %s has empty columns!", sd.getStreamId()));
        }
    }
}
private void validatePolicies() {
    Collection<Publishment> pubs = context.getPublishments().values();
    for (PolicyDefinition pd : context.getPolicies().values()) {
        if (pubs.stream().noneMatch(p -> p.getPolicyIds().contains(pd.getName()))) {
            state.appendUnPublishedPolicies(pd.getName());
        }

        StringBuilder builder = new StringBuilder();
        for (String inputStream : pd.getInputStreams()) {
            if (context.getStreamSchemas().get(inputStream) == null) {
                state.appendPublishemtnValidation(pd.getName(),
                        String.format("policy %s contains unknown stream %s!", pd.getName(), inputStream));
                isStreamMiss = true;
                break;
            }
            builder.append(buildStreamDefinition(context.getStreamSchemas().get(inputStream)));
            builder.append("\n");
@Test
public void test_renamed_topologies() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    Topology t = client.listTopologies().get(0);
    t.setName("newName");
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
}
private List<WorkItem> findWorkingSets() {
    // find the unassigned definitions
    List<WorkItem> workSets = new LinkedList<WorkItem>();
    for (PolicyDefinition def : context.getPolicies().values()) {
        int expectParal = def.getParallelismHint();
        if (expectParal == 0) {
            expectParal = policyDefaultParallelism;
        }
        // how to handle expansion of a policy in a smooth-transition manner
        // TODO policy fix
        PolicyAssignment assignment = context.getPolicyAssignments().get(def.getName());
        if (assignment != null) {
            LOG.info("policy {} already allocated", def.getName());
            continue;
        }
        WorkItem item = new WorkItem(def, expectParal);
        workSets.add(item);
    }
    LOG.info("work set calculation: {}", workSets);
    return workSets;
}
private boolean getQueueOnTopology(int size, List<WorkSlot> slots, Topology t) {
    TopologyUsage u = context.getTopologyUsages().get(t.getName());
    if (!isTopologyAvailable(u)) {
        return false;
    }
    List<String> bolts = new ArrayList<String>();
    for (AlertBoltUsage alertUsage : u.getAlertUsages().values()) {
        if (isBoltAvailable(alertUsage)) {
            bolts.add(alertUsage.getBoltId());
        }
        if (bolts.size() == size) {
            break;
        }
    }
    if (bolts.size() == size) {
        for (String boltId : bolts) {
            WorkSlot slot = new WorkSlot(t.getName(), boltId);
            slots.add(slot);
        }
        return true;
    }
    return false;
}
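// A hedged usage sketch ('topology3' is an assumed fixture): the method is
// all-or-nothing, so 'slots' is only populated when 'size' free bolts exist
// on the topology.
List<WorkSlot> slots = new ArrayList<WorkSlot>();
boolean placed = getQueueOnTopology(5, slots, topology3);
// placed == true  -> slots holds exactly 5 WorkSlots on topology3
// placed == false -> slots is left untouched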
@Test
public void test_remove_policy() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);

    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();

    client.removePolicy(0);
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    WorkSlot slot = queue.getWorkingSlots().get(0);
    Set<String> topoPolicies = context.getTopologyUsages().get(slot.topologyName).getPolicies();
    Assert.assertFalse(topoPolicies.contains(TEST_DATASOURCE_1));
    Assert.assertEquals(0, topoPolicies.size());
}
Iterator<Topology> it = context.getTopologies().values().stream()
        .filter((t) -> t.getNumOfAlertBolt() >= size)
        .iterator();

context.getTopologies().put(topoMeta.topologyId, topoMeta.topology);
context.getTopologyUsages().put(topoMeta.topologyId, topoMeta.usage);
boolean placed = getQueueOnTopology(size, slots, topoMeta.topology);
if (!placed) {
/**
 * The work queue is not a root-level object, so we need to build a map from
 * the MonitoredStreams for later quick lookup.
 */
private Map<String, StreamWorkSlotQueue> buildQueueMap() {
    Map<String, StreamWorkSlotQueue> queueMap = new HashMap<String, StreamWorkSlotQueue>();
    for (MonitoredStream ms : context.getMonitoredStreams().values()) {
        for (StreamWorkSlotQueue queue : ms.getQueues()) {
            queueMap.put(queue.getQueueId(), queue);
        }
    }
    return queueMap;
}
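// A hedged lookup sketch ("samplePolicy" is a hypothetical id): once the map
// is built, an assignment's queue resolves by id in O(1) instead of
// re-walking every MonitoredStream.
Map<String, StreamWorkSlotQueue> queueMap = buildQueueMap();
PolicyAssignment assignment = context.getPolicyAssignments().get("samplePolicy");
StreamWorkSlotQueue queue = queueMap.get(assignment.getQueueId());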
private void validateDataSources() {
    Collection<StreamDefinition> sds = context.getStreamSchemas().values();
    for (Kafka2TupleMetadata ds : context.getDataSourceMetadata().values()) {
private void validateTopology() {
    for (Topology t : context.getTopologies().values()) {
        // TODO: no topology-level validation rules are implemented yet
    }
}
private List<String> findDatasource(PolicyDefinition def) {
    List<String> result = new ArrayList<String>();
    List<String> inputStreams = def.getInputStreams();
    Map<String, StreamDefinition> schemaMaps = context.getStreamSchemas();
    for (String is : inputStreams) {
        // assumes every input stream has a registered schema;
        // validatePolicies() reports the missing ones separately
        StreamDefinition ss = schemaMaps.get(is);
        result.add(ss.getDataSource());
    }
    return result;
}
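// A hedged usage sketch ('def' is an assumed PolicyDefinition fixture): the
// result names one data source per input stream, duplicates included, which
// a caller can resolve back through the data source metadata.
List<String> sources = findDatasource(def);
for (String source : sources) {
    Kafka2TupleMetadata meta = context.getDataSourceMetadata().get(source);
    // 'meta' describes the Kafka topic feeding this policy's input streams
}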