public void init(IScheduleContext context, TopologyMgmtService mgmtService) {
    this.mgmtService = mgmtService;
    // Defensive copy: wrap the caller's context in a mutable in-memory snapshot
    // so scheduling work never mutates the original context directly.
    this.context = new InMemScheduleConext(context);
}
// Builds an in-memory schedule context populated with one of each metadata
// entity (datasource, policy, publishment, stream schema, topology, usage).
// NOTE(review): this snippet is truncated — the method body is not closed and
// it references k, p, pub, def, t, u, anid that are not declared in view;
// presumably they are loaded from files under `base`. Confirm against the
// full source before editing.
private InMemScheduleConext loadContext(String base) throws Exception { InMemScheduleConext context = new InMemScheduleConext(); context.addDataSource(k); context.addPoilcy(p); context.addPublishment(pub); context.addSchema(def); context.addTopology(t); u.getAlertUsages().put(anid, new AlertBoltUsage(anid)); context.addTopologyUsages(u);
// Builds a two-topology in-memory context: registers both topology/usage pairs,
// one Kafka datasource on TEST_TOPIC, and stream schemas bound to DS_NAME.
// NOTE(review): snippet is truncated (method not closed; pair1/pair2/ds/schema
// declarations are not in view). The same `schema` object appears to be
// configured and added twice — verify in the full source whether a second,
// distinct schema was intended.
private InMemScheduleConext createScheduleContext(TestTopologyMgmtService mgmtService) { InMemScheduleConext context = new InMemScheduleConext(); context.addTopology(pair1.getLeft()); context.addTopologyUsages(pair1.getRight()); context.addTopology(pair2.getLeft()); context.addTopologyUsages(pair2.getRight()); ds.setTopic(TEST_TOPIC); ds.setCodec(new Tuple2StreamMetadata()); context.addDataSource(ds); schema.setValidate(false); schema.setDataSource(DS_NAME); context.addSchema(schema); schema.setValidate(false); schema.setDataSource(DS_NAME); context.addSchema(schema);
// Exercises work-slot reservation on an empty context, then after topology
// creation. NOTE(review): snippet is truncated (loop not closed) and code is
// elided between assertions — slots.size() is asserted 0 and then 5, so a
// second reserveWorkSlots call presumably happens in between; `strategy`,
// `ws` and `LOG` are declared outside this view.
@Test public void test() { InMemScheduleConext context = new InMemScheduleConext(); List<WorkSlot> slots = strategy.reserveWorkSlots(5, false, new HashMap<String, Object>()); Assert.assertEquals(0, slots.size()); Assert.assertEquals(1, context.getTopologies().size()); Assert.assertEquals(5, slots.size()); LOG.info(slots.get(0).getTopologyName()); Assert.assertEquals(2, context.getTopologies().size()); Assert.assertEquals(2, context.getTopologyUsages().size()); Assert.assertEquals(slots.get(0).getTopologyName(), ws.getTopologyName()); Iterator<TopologyUsage> it = context.getTopologyUsages().values().iterator(); TopologyUsage usage = it.next(); for (AlertBoltUsage u : usage.getAlertUsages().values()) {
// Fragment of a test driven by application-multiplestreams2.conf: prints the
// topology usages after each (elided) scheduling step. NOTE(review): truncated
// — the enclosing method signature and the code between the print() calls are
// not in view; `usage` is fetched but its use is elided.
System.setProperty("config.resource", "/application-multiplestreams2.conf"); InMemScheduleConext context = new InMemScheduleConext(); print(context.getTopologyUsages().values()); TopologyUsage usage = context.getTopologyUsages().values().iterator().next(); print(context.getTopologyUsages().values()); print(context.getTopologyUsages().values()); print(context.getTopologyUsages().values());
/**
 * Registers a minimal test policy on the given context: one input stream,
 * output "outputStream2", a GLOBAL partition on "col1", and the requested
 * parallelism hint.
 */
private void createSamplePolicy(InMemScheduleConext context, String policyName, String stream, int hint) {
    PolicyDefinition policy = new PolicyDefinition();
    policy.setName(policyName);
    policy.setParallelismHint(hint);
    policy.setDefinition(new Definition());
    policy.setInputStreams(Arrays.asList(stream));
    policy.setOutputStreams(Arrays.asList("outputStream2"));

    StreamPartition partition = new StreamPartition();
    partition.setStreamId(stream);
    partition.setType(StreamPartition.Type.GLOBAL);
    partition.setColumns(Arrays.asList("col1"));
    policy.setPartitionSpec(Arrays.asList(partition));

    context.addPoilcy(policy);
}
// Verifies scheduling of a policy whose parallelism hint exceeds the
// per-topology bolt count (mgmt service created with 5 bolts, hint = default + 2):
// the scheduler must still allocate router queues rather than fail.
@Test public void testIrregularPolicyParallelismHint() { Config config = ConfigFactory.load(); int defaultParallelism = config.getInt("coordinator.policyDefaultParallelism"); TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(5, 12); InMemScheduleConext context = createScheduleContext(mgmtService); // recreate test policy: drop the samples added by createScheduleContext context.getPolicies().clear(); // make the hint bigger than the per-topology bolt number int irregularParallelism = defaultParallelism + 2; createSamplePolicy(context, "irregularPolicy", STREAM1, irregularParallelism); GreedyPolicyScheduler ps = new GreedyPolicyScheduler(); ps.init(context, mgmtService); ScheduleState scheduled = ps.schedule(new ScheduleOption()); Assert.assertEquals(2, scheduled.getSpoutSpecs().size()); Assert.assertEquals(2, scheduled.getGroupSpecs().size()); Assert.assertEquals(2, scheduled.getAlertSpecs().size()); // assertion on the first topology's router spec RouterSpec spec = scheduled.getGroupSpecs().get(TOPO1); Assert.assertTrue(spec.getRouterSpecs().size() > 0); // must be allocated for (StreamRouterSpec routerSpec : spec.getRouterSpecs()) { Assert.assertEquals(1, routerSpec.getTargetQueue().size()); // irregularParallelism is promoted to 2 * defaultParallelism = 10 — TODO confirm the promotion rule in GreedyPolicyScheduler Assert.assertEquals(10, routerSpec.getTargetQueue().get(0).getWorkers().size()); } }
// Tests overlapping slot reservation across two requests: records the first
// queue's topology/bolt, then checks every bolt usage got partitions assigned.
// NOTE(review): snippet is truncated — the for-loops are not closed and code
// is elided (topologies grow 1 → 2 between assertions); `queue`, `topo1`,
// `bolt1` are declared outside this view.
@SuppressWarnings("unused") @Test public void test2_overlap() { InMemScheduleConext context = new InMemScheduleConext(); topo1 = queue.getWorkingSlots().get(0).getTopologyName(); bolt1 = queue.getWorkingSlots().get(0).getBoltId(); Assert.assertEquals(1, context.getTopologies().size()); Assert.assertEquals(1, context.getTopologyUsages().size()); LOG.info(queue.getWorkingSlots().get(0).getTopologyName()); for (WorkSlot ws : queue.getWorkingSlots()) { TopologyUsage usage = context.getTopologyUsages().values().iterator().next(); for (AlertBoltUsage u : usage.getAlertUsages().values()) { Assert.assertTrue(u.getPartitions().size() > 0); Assert.assertEquals(2, context.getTopologies().size()); Assert.assertEquals(2, context.getTopologyUsages().size());
// Fragment of a test driven by application-multiplestreams.conf: prints the
// topology usages after each (elided) scheduling step. NOTE(review): truncated
// — the enclosing method signature and the code between the print() calls are
// not in view; structurally identical to the multiplestreams2 fragment above.
System.setProperty("config.resource", "/application-multiplestreams.conf"); InMemScheduleConext context = new InMemScheduleConext(); print(context.getTopologyUsages().values()); TopologyUsage usage = context.getTopologyUsages().values().iterator().next(); print(context.getTopologyUsages().values()); print(context.getTopologyUsages().values()); print(context.getTopologyUsages().values());
/**
 * Registers a join-style test policy on the given context: it consumes every
 * stream in {@code asList}, emits "outputStream2", carries a fixed parallelism
 * hint of 5, and adds a GROUPBY partition on "col1" per input stream.
 */
private void createJoinPolicy(InMemScheduleConext context, String policyName, List<String> asList) {
    PolicyDefinition policy = new PolicyDefinition();
    policy.setName(policyName);
    policy.setParallelismHint(5);
    policy.setDefinition(new Definition());
    policy.setInputStreams(asList);
    policy.setOutputStreams(Arrays.asList("outputStream2"));

    // One GROUPBY partition per input stream, all keyed on "col1".
    for (String streamId : policy.getInputStreams()) {
        StreamPartition partition = new StreamPartition();
        partition.setStreamId(streamId);
        partition.setType(StreamPartition.Type.GROUPBY);
        partition.setColumns(Arrays.asList("col1"));
        policy.addPartition(partition);
    }

    context.addPoilcy(policy);
}
// Snapshots all metadata from the service client and builds an in-memory
// schedule context for validation; assignments, monitored streams and usages
// start as empty maps.
public MetadataValdiator(IMetadataServiceClient client) { List<Topology> topologies = client.listTopologies(); List<Kafka2TupleMetadata> datasources = client.listDataSources(); List<StreamDefinition> streams = client.listStreams(); // NOTE(review): the original comment said "filter out disabled policies", but no filtering is applied — listPolicies() is used as-is. Confirm whether disabled policies should be excluded before validation. List<PolicyDefinition> enabledPolicies = client.listPolicies(); List<Publishment> publishments = client.listPublishment(); context = new InMemScheduleConext(ScheduleContextBuilder.listToMap(topologies), new HashMap<>(), ScheduleContextBuilder.listToMap(datasources), ScheduleContextBuilder.listToMap(enabledPolicies), ScheduleContextBuilder.listToMap(publishments), ScheduleContextBuilder.listToMap(streams), new HashMap<>(), new HashMap<>()); this.state = new ValidateState(); }
// Placeholder test: only exercises construction of the validator over an empty
// in-memory context; no validation result is asserted yet.
// NOTE(review): the commented-out line below is itself truncated ("new Gene")
// — recover the original reader call before re-enabling it.
@Test public void validate() throws Exception { InMemScheduleConext context = new InMemScheduleConext(); MetadataValdiator mv = new MetadataValdiator(context); // om.readValue(TestMetadataValidator.class.getResourceAsStream("/validation/datasources.json"), new Gene); // TODO add more test here. } }
// Tail of a builder method (start not in view): assembles the final context
// from the eight accumulated metadata maps and caches it in `builtContext`
// before returning. NOTE(review): argument order must match the
// InMemScheduleConext constructor used in MetadataValdiator above.
builtContext = new InMemScheduleConext(topologies, assignments, kafkaSources, policies, publishments, streamDefinitions, monitoredStreamMap, usages); return builtContext;