public void init(IScheduleContext context, TopologyMgmtService mgmtService) {
    this.context = new InMemScheduleConext(context);
    this.mgmtService = mgmtService;
}
public static <T, K> Map<K, T> listToMap(List<T> collections) {
    Map<K, T> maps = new HashMap<K, T>(collections.size());
    for (T t : collections) {
        maps.put(getKey(t), t);
    }
    return maps;
}
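The getKey helper is not shown in this listing. A plausible sketch, assuming each metadata type is keyed by its natural identifier (name for topologies and policies, streamId for stream definitions, policy name for assignments):

@SuppressWarnings("unchecked")
private static <T, K> K getKey(T t) {
    // Hypothetical dispatch on the concrete metadata type; the unchecked cast
    // mirrors the unchecked generics of listToMap itself.
    if (t instanceof StreamDefinition) {
        return (K) ((StreamDefinition) t).getStreamId();
    } else if (t instanceof PolicyAssignment) {
        return (K) ((PolicyAssignment) t).getPolicyName();
    } else if (t instanceof Topology) {
        return (K) ((Topology) t).getName();
    } else if (t instanceof PolicyDefinition) {
        return (K) ((PolicyDefinition) t).getName();
    }
    // other metadata types (Kafka2TupleMetadata, Publishment, ...) would be
    // handled the same way, keyed by their name
    throw new IllegalArgumentException("unsupported metadata type " + t.getClass());
}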
public MetadataValdiator(IMetadataServiceClient client) {
    List<Topology> topologies = client.listTopologies();
    List<Kafka2TupleMetadata> datasources = client.listDataSources();
    List<StreamDefinition> streams = client.listStreams();
    // NOTE: listPolicies() returns all policies; disabled policies are not
    // actually filtered out here yet, despite the variable name
    List<PolicyDefinition> enabledPolicies = client.listPolicies();
    List<Publishment> publishments = client.listPublishment();
    context = new InMemScheduleConext(
        ScheduleContextBuilder.listToMap(topologies),
        new HashMap<>(),
        ScheduleContextBuilder.listToMap(datasources),
        ScheduleContextBuilder.listToMap(enabledPolicies),
        ScheduleContextBuilder.listToMap(publishments),
        ScheduleContextBuilder.listToMap(streams),
        new HashMap<>(),
        new HashMap<>());
    this.state = new ValidateState();
}
topologies = listToMap(client.listTopologies());
kafkaSources = listToMap(client.listDataSources());
policies = listToMap(enabledPolicies);
publishments = listToMap(client.listPublishment());
streamDefinitions = listToMap(client.listStreams());
// generate the nodata-alert metadata (datasources, policies, publishments)
// derived from the stream definitions
new NodataMetadataGenerator().execute(config, streamDefinitions, kafkaSources, policies, publishments);
// reconcile state from the previous schedule: drop assignments and monitored
// streams that no longer match the freshly loaded metadata
assignments = listToMap(state == null
    ? new ArrayList<PolicyAssignment>()
    : detectAssignmentsChange(state.getAssignments(), state));
monitoredStreamMap = listToMap(state == null
    ? new ArrayList<MonitoredStream>()
    : detectMonitoredStreams(state.getMonitoredStreams()));
usages = buildTopologyUsage();
builtContext = new InMemScheduleConext(topologies, assignments, kafkaSources, policies, publishments,
    streamDefinitions, monitoredStreamMap, usages);
return builtContext;
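detectAssignmentsChange and detectMonitoredStreams are not shown in this listing. A minimal sketch of what detectAssignmentsChange is assumed to do, consistent with the tests below (an assignment only survives while its policy still exists; the real implementation would also verify the assigned queue still maps onto a live topology, which is what test_renamed_topologies exercises):

// Hypothetical sketch: keep only assignments whose policy is still present in
// the freshly loaded metadata; stale assignments are dropped so the scheduler
// can rebuild them.
private List<PolicyAssignment> detectAssignmentsChange(List<PolicyAssignment> oldAssignments, ScheduleState state) {
    List<PolicyAssignment> result = new ArrayList<>();
    for (PolicyAssignment pa : oldAssignments) {
        if (policies.containsKey(pa.getPolicyName())) {
            result.add(pa);
        }
    }
    return result;
}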
@Test
public void test_renamed_topologies() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    Topology t = client.listTopologies().get(0);
    t.setName("newName");
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
}
private InMemScheduleConext loadContext(String base) throws Exception {
    InMemScheduleConext context = new InMemScheduleConext();
    // k, p, pub, def, t, u and anid are deserialized from the JSON fixtures
    // under `base` (deserialization code elided in this listing)
    context.addDataSource(k);
    context.addPoilcy(p);
    context.addPublishment(pub);
    context.addSchema(def);
    context.addTopology(t);
    u.getAlertUsages().put(anid, new AlertBoltUsage(anid));
    context.addTopologyUsages(u);
    return context;
}
private InMemScheduleConext createScheduleContext(TestTopologyMgmtService mgmtService) {
    InMemScheduleConext context = new InMemScheduleConext();
    // two topologies with their usages (pair1/pair2 are created elsewhere in the test)
    context.addTopology(pair1.getLeft());
    context.addTopologyUsages(pair1.getRight());
    context.addTopology(pair2.getLeft());
    context.addTopologyUsages(pair2.getRight());
    // one kafka datasource
    ds.setTopic(TEST_TOPIC);
    ds.setCodec(new Tuple2StreamMetadata());
    context.addDataSource(ds);
    // two stream schemas bound to the same datasource (presumably two distinct
    // StreamDefinition instances in the full test; this listing reuses `schema`)
    schema.setValidate(false);
    schema.setDataSource(DS_NAME);
    context.addSchema(schema);
    schema.setValidate(false);
    schema.setDataSource(DS_NAME);
    context.addSchema(schema);
    // ... remaining setup elided in this listing
    return context;
}
Map<String, StreamWorkSlotQueue> queueMap = new HashMap<String, StreamWorkSlotQueue>();
preBuildQueue2TopoMap(topo2MonitorStream, topo2Policies, bolt2Policies, bolt2Partition, bolt2QueueIds, queueMap);
u.getAlertUsages().put(alertBolt, alertUsage);
addBoltUsageInfo(bolt2Policies, bolt2Partition, bolt2QueueIds, uniqueBoltId, alertUsage, queueMap);
buildTopologyDataSource(u);
@Before
public void setup() {
    generator = new NodataMetadataGenerator();
}
@Test
public void testNormal() throws Exception {
    StreamDefinition sd = createStreamDefinitionWithNodataAlert();
    Map<String, StreamDefinition> streamDefinitionsMap = new HashMap<String, StreamDefinition>();
    streamDefinitionsMap.put(sd.getStreamId(), sd);
    Map<String, Kafka2TupleMetadata> kafkaSources = new HashMap<String, Kafka2TupleMetadata>();
    Map<String, PolicyDefinition> policies = new HashMap<String, PolicyDefinition>();
    Map<String, Publishment> publishments = new HashMap<String, Publishment>();

    generator.execute(config, streamDefinitionsMap, kafkaSources, policies, publishments);

    Assert.assertEquals(2, kafkaSources.size());
    kafkaSources.forEach((key, value) ->
        LOG.info("KafkaSources > {}: {}", key, ToStringBuilder.reflectionToString(value)));

    Assert.assertEquals(2, policies.size());
    policies.forEach((key, value) ->
        LOG.info("Policies > {}: {}", key, ToStringBuilder.reflectionToString(value)));

    Assert.assertEquals(4, publishments.size());
    publishments.forEach((key, value) ->
        LOG.info("Publishments > {}: {}", key, ToStringBuilder.reflectionToString(value)));
}
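createStreamDefinitionWithNodataAlert is not shown here. A plausible sketch, assuming StreamColumn carries the nodata-alert expression that NodataMetadataGenerator keys off; the stream id, datasource and expression format below are illustrative only:

private StreamDefinition createStreamDefinitionWithNodataAlert() {
    StreamDefinition sd = new StreamDefinition();
    sd.setStreamId("testStreamWithNodataAlert");  // hypothetical id
    sd.setDataSource("testDataSource");           // hypothetical datasource

    // a column flagged with a nodata expression is assumed to be what triggers
    // the generated metadata; expression format assumed (window,type,count,field)
    StreamColumn hostColumn = new StreamColumn();
    hostColumn.setName("host");
    hostColumn.setType(StreamColumn.Type.STRING);
    hostColumn.setNodataExpression("PT1M,dynamic,1,host");

    sd.setColumns(Arrays.asList(hostColumn));
    return sd;
}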
private void createSamplePolicy(InMemScheduleConext context, String policyName, String stream, int hint) {
    PolicyDefinition pd = new PolicyDefinition();
    pd.setParallelismHint(hint);
    Definition def = new Definition();
    pd.setDefinition(def);
    pd.setName(policyName);
    pd.setInputStreams(Arrays.asList(stream));
    pd.setOutputStreams(Arrays.asList("outputStream2"));
    StreamPartition par = new StreamPartition();
    par.setColumns(Arrays.asList("col1"));
    par.setType(StreamPartition.Type.GLOBAL);
    par.setStreamId(stream);
    pd.setPartitionSpec(Arrays.asList(par));
    context.addPoilcy(pd);
}
private void buildTopologyDataSource(TopologyUsage u) {
    for (String policyName : u.getPolicies()) {
        PolicyDefinition def = policies.get(policyName);
        if (def != null) {
            u.getDataSources().addAll(findDatasource(def));
        } else {
            LOG.error("policy {} not found, but is referenced in topology usage {}!", policyName, u.getTopoName());
        }
    }
}
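findDatasource is not shown above. A plausible sketch, assuming each stream definition names the datasource it is ingested from:

// Hypothetical sketch: resolve a policy's input streams to datasource names
// through the stream definitions loaded earlier in buildContext().
private Set<String> findDatasource(PolicyDefinition def) {
    Set<String> result = new HashSet<>();
    for (String streamId : def.getInputStreams()) {
        StreamDefinition sd = streamDefinitions.get(streamId);
        if (sd != null) {
            result.add(sd.getDataSource());
        }
    }
    return result;
}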
// register the slot's policies, queue id and partition under its bolt id;
// the put-if-absent guard only initializes the set, the add always runs
String boltId = getUniqueBoltId(slot);
if (!bolt2Policies.containsKey(boltId)) {
    bolt2Policies.put(boltId, new HashSet<String>());
}
bolt2Policies.get(boltId).addAll(policiesOnQ);

if (!bolt2QueueIds.containsKey(boltId)) {
    bolt2QueueIds.put(boltId, new HashSet<String>());
}
bolt2QueueIds.get(boltId).add(q.getQueueId());

if (!bolt2Partition.containsKey(boltId)) {
    bolt2Partition.put(boltId, new HashSet<StreamGroup>());
}
bolt2Partition.get(boltId).add(stream.getStreamGroup());
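The same put-if-absent pattern can be written more compactly with Map.computeIfAbsent (Java 8+), which also avoids the double lookup:

bolt2Policies.computeIfAbsent(boltId, k -> new HashSet<>()).addAll(policiesOnQ);
bolt2QueueIds.computeIfAbsent(boltId, k -> new HashSet<>()).add(q.getQueueId());
bolt2Partition.computeIfAbsent(boltId, k -> new HashSet<>()).add(stream.getStreamGroup());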
@Override
public void run() {
    if (Coordinator.isPeriodicallyForceBuildEnable()) {
        LOG.info("CoordinatorTrigger started ... ");
        Stopwatch watch = Stopwatch.createStarted();
        ZKConfig zkConfig = ZKConfigBuilder.getZKConfig(config);
        try (ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig)) {
            executor.execute(Coordinator.GREEDY_SCHEDULER_ZK_PATH, () -> {
                // schedule
                IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
                TopologyMgmtService mgmtService = new TopologyMgmtService();
                IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();
                scheduler.init(context, mgmtService);
                ScheduleState state = scheduler.schedule(new ScheduleOption());
                // try-with-resources closes the producer automatically
                try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) {
                    Coordinator.postSchedule(client, state, producer);
                }
                watch.stop();
                LOG.info("CoordinatorTrigger ended, used time {} ms.", watch.elapsed(TimeUnit.MILLISECONDS));
            });
        } catch (Exception e) {
            LOG.error("trigger schedule failed!", e);
        }
    } else {
        LOG.info("CoordinatorTrigger found isPeriodicallyForceBuildEnable = false, skipped build");
    }
}
@Test
public void validate() throws Exception {
    InMemScheduleConext context = new InMemScheduleConext();
    MetadataValdiator mv = new MetadataValdiator(context);
    // om.readValue(TestMetadataValidator.class.getResourceAsStream("/validation/datasources.json"), new Gene);
    // TODO: add more tests here.
}
}
private void createJoinPolicy(InMemScheduleConext context, String policyName, List<String> asList) {
    PolicyDefinition pd = new PolicyDefinition();
    pd.setParallelismHint(5);
    Definition def = new Definition();
    pd.setDefinition(def);
    pd.setName(policyName);
    pd.setInputStreams(asList);
    pd.setOutputStreams(Arrays.asList("outputStream2"));
    // one GROUPBY partition per input stream
    for (String streamId : pd.getInputStreams()) {
        StreamPartition par = new StreamPartition();
        par.setColumns(Arrays.asList("col1"));
        par.setType(StreamPartition.Type.GROUPBY);
        par.setStreamId(streamId);
        pd.addPartition(par);
    }
    context.addPoilcy(pd);
}
@Test
public void test() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    IScheduleContext context = builder.buildContext();

    // assert topology usage
    Map<String, TopologyUsage> usages = context.getTopologyUsages();
    Assert.assertEquals(1, usages.get(TOPO1).getMonitoredStream().size());
    Assert.assertTrue(usages.get(TOPO1).getPolicies().contains(TEST_POLICY_1));

    String alertBolt0 = TOPO1 + "-alert-" + "0";
    String alertBolt1 = TOPO1 + "-alert-" + "1";
    String alertBolt2 = TOPO1 + "-alert-" + "2";
    for (AlertBoltUsage u : usages.get(TOPO1).getAlertUsages().values()) {
        if (u.getBoltId().equals(alertBolt0) || u.getBoltId().equals(alertBolt1) || u.getBoltId().equals(alertBolt2)) {
            Assert.assertEquals(1, u.getPolicies().size());
            Assert.assertTrue(u.getPolicies().contains(TEST_POLICY_1));
            Assert.assertEquals(1, u.getPartitions().size());
            Assert.assertEquals(1, u.getReferQueues().size());
        }
    }
}
@Test
public void test_remove_policy() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);

    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();

    client.removePolicy(0);
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    // the removed policy must also be gone from the topology usage
    WorkSlot slot = queue.getWorkingSlots().get(0);
    Set<String> topoPolicies = context.getTopologyUsages().get(slot.topologyName).getPolicies();
    Assert.assertFalse(topoPolicies.contains(TEST_POLICY_1));
    Assert.assertEquals(0, topoPolicies.size());
}
@Test
public void test_changed_policy_definition() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);

    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();

    // changing only the policy definition keeps the existing queue assignment
    PolicyDefinition pd1 = client.listPolicies().get(0);
    pd1.getDefinition().value = "define.. new...";
    context = builder.buildContext();
    PolicyAssignment assignmentNew = context.getPolicyAssignments().values().iterator().next();
    StreamWorkSlotQueue queueNew = SchedulerTest.getQueue(context, assignmentNew.getQueueId()).getRight();
    Assert.assertNotNull(queueNew); // just to make sure queueNew is present
    Assert.assertEquals(queue.getQueueId(), queueNew.getQueueId());
}
InMemMetadataServiceClient client = getSampleMetadataServiceWithNodataAlert();
ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
IScheduleContext context = builder.buildContext();