/**
 * Copy constructor: snapshots every metadata map of the given context into fresh
 * HashMaps, so later mutations of this in-memory context never leak back into
 * (or are affected by) the source context.
 *
 * NOTE(review): class name appears to misspell "Context"; kept as-is since it is
 * the public type name callers reference.
 */
public InMemScheduleConext(IScheduleContext context) {
    // Static metadata snapshots.
    this.topologies = new HashMap<String, Topology>(context.getTopologies());
    this.datasources = new HashMap<String, Kafka2TupleMetadata>(context.getDataSourceMetadata());
    this.schemas = new HashMap<String, StreamDefinition>(context.getStreamSchemas());
    this.policies = new HashMap<String, PolicyDefinition>(context.getPolicies());
    this.publishments = new HashMap<String, Publishment>(context.getPublishments());
    // Runtime scheduling state snapshots.
    this.usages = new HashMap<String, TopologyUsage>(context.getTopologyUsages());
    this.policyAssignments = new HashMap<String, PolicyAssignment>(context.getPolicyAssignments());
    this.monitoredStreams = new HashMap<StreamGroup, MonitoredStream>(context.getMonitoredStreams());
}
private List<WorkItem> findWorkingSets() { // find the unassigned definition List<WorkItem> workSets = new LinkedList<WorkItem>(); for (PolicyDefinition def : context.getPolicies().values()) { int expectParal = def.getParallelismHint(); if (expectParal == 0) { expectParal = policyDefaultParallelism; } // how to handle expand of an policy in a smooth transition manner // TODO policy fix PolicyAssignment assignment = context.getPolicyAssignments().get(def.getName()); if (assignment != null) { LOG.info("policy {} already allocated", def.getName()); continue; } WorkItem item = new WorkItem(def, expectParal); workSets.add(item); } LOG.info("work set calculation: {}", workSets); return workSets; }
public ScheduleState generate(List<WorkItem> expandworkSets) { // topologyId -> SpoutSpec Map<String, SpoutSpec> topoSpoutSpecsMap = generateSpoutMonitorMetadata(); // grp-by meta spec(sort & grp) Map<String, RouterSpec> groupSpecsMap = generateGroupbyMonitorMetadata(); // alert bolt spec Map<String, AlertBoltSpec> alertSpecsMap = generateAlertMonitorMetadata(); Map<String, PublishSpec> publishSpecsMap = generatePublishMetadata(); String uniqueVersion = generateVersion(); ScheduleState status = new ScheduleState(uniqueVersion, topoSpoutSpecsMap, groupSpecsMap, alertSpecsMap, publishSpecsMap, context.getPolicyAssignments().values(), context.getMonitoredStreams().values(), context.getPolicies().values(), context.getStreamSchemas().values()); return status; }
private void placePolicyToQueue(PolicyDefinition def, StreamWorkSlotQueue queue, Map<String, PolicyAssignment> newAssignments) { for (WorkSlot slot : queue.getWorkingSlots()) { Topology targetTopology = context.getTopologies().get(slot.getTopologyName()); TopologyUsage usage = context.getTopologyUsages().get(slot.getTopologyName()); AlertBoltUsage alertBoltUsage = usage.getAlertBoltUsage(slot.getBoltId()); placePolicy(def, alertBoltUsage, targetTopology, usage); } // queue.placePolicy(def); PolicyAssignment assignment = new PolicyAssignment(def.getName(), queue.getQueueId()); context.getPolicyAssignments().put(def.getName(), assignment); newAssignments.put(def.getName(), assignment); }
PolicyDefinition def = context.getPolicies().get(policyName); PolicyAssignment assignment = context.getPolicyAssignments().get(policyName); if (assignment == null) { LOG.error(" can not find assignment for policy {} ! ", policyName);
/**
 * Renaming a topology must invalidate assignments that pointed at the old name:
 * the rebuilt context should no longer carry the policy's assignment.
 */
@Test
public void test_renamed_topologies() {
    InMemMetadataServiceClient metadataClient = getSampleMetadataService();
    ScheduleContextBuilder contextBuilder = new ScheduleContextBuilder(config, metadataClient);

    // Baseline: the sample policy starts out assigned.
    IScheduleContext context = contextBuilder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    // Rename the topology the assignment referenced, then rebuild.
    Topology renamed = metadataClient.listTopologies().get(0);
    renamed.setName("newName");
    context = contextBuilder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
}
@Test public void test_changed_policy_parallelism() { InMemMetadataServiceClient client = getSampleMetadataService(); ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client); PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0); IScheduleContext context = builder.buildContext(); Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1)); StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight(); PolicyDefinition pd1 = client.listPolicies().get(0); pd1.setParallelismHint(4); // default queue is 5 , change to smaller, same like change bigger context = builder.buildContext(); Assert.assertFalse(context.getPolicyAssignments().values().iterator().hasNext()); //PolicyAssignment assignmentNew = context.getPolicyAssignments().values().iterator().next(); //StreamWorkSlotQueue queueNew = SchedulerTest.getQueue(context, assignmentNew.getQueueId()).getRight(); //Assert.assertNotNull(queueNew); // just to make sure queueNew is present //Assert.assertEquals(queue.getQueueId(), queueNew.getQueueId()); // default queue is 5 , change to bigger 6, policy assignment removed pd1.setParallelismHint(queue.getQueueSize() + 1); context = builder.buildContext(); Assert.assertFalse(context.getPolicyAssignments().values().iterator().hasNext()); }
/**
 * Removing a policy must clear its assignment and purge it from the usage
 * bookkeeping of the topology whose slots it occupied.
 */
@Test
public void test_remove_policy() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);

    // Baseline: the sample policy starts out assigned to a queue.
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();

    // Remove the policy and rebuild: the assignment must be gone.
    client.removePolicy(0);
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    // The topology that hosted the policy's slots must no longer track it.
    // Fix: original asserted TEST_DATASOURCE_1 (a datasource name) against the
    // *policies* set, which is vacuously true; the intent is the removed policy.
    WorkSlot slot = queue.getWorkingSlots().get(0);
    Set<String> topoPolicies = context.getTopologyUsages().get(slot.topologyName).getPolicies();
    Assert.assertFalse(topoPolicies.contains(TEST_POLICY_1));
    Assert.assertEquals(0, topoPolicies.size());
}
@Test public void test_changed_policy_definition() { InMemMetadataServiceClient client = getSampleMetadataService(); ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client); PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0); IScheduleContext context = builder.buildContext(); Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1)); StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight(); PolicyDefinition pd1 = client.listPolicies().get(0); pd1.getDefinition().value = "define.. new..."; context = builder.buildContext(); PolicyAssignment assignmentNew = context.getPolicyAssignments().values().iterator().next(); StreamWorkSlotQueue queueNew = SchedulerTest.getQueue(context, assignmentNew.getQueueId()).getRight(); Assert.assertNotNull(queueNew); // just to make sure queueNew is present Assert.assertEquals(queue.getQueueId(), queueNew.getQueueId()); }
Assert.assertTrue(context.getPolicyAssignments().containsKey(JOIN_POLICY_1)); Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1)); PolicyAssignment pa1 = context.getPolicyAssignments().get(JOIN_POLICY_1); PolicyAssignment pa2 = context.getPolicyAssignments().get(TEST_POLICY_1); Assert.assertNotEquals(pa1.getQueueId(), pa2.getQueueId());
Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1)); Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
Assert.assertEquals(0, gs.startSequence); PolicyAssignment pa1 = context.getPolicyAssignments().get(TEST_POLICY_1); PolicyAssignment pa2 = context.getPolicyAssignments().get(TEST_POLICY_2); Assert.assertNotNull(pa1); Assert.assertNotNull(pa2); PolicyAssignment pa1 = context.getPolicyAssignments().get(TEST_POLICY_1); PolicyAssignment pa2 = context.getPolicyAssignments().get(TEST_POLICY_2); PolicyAssignment pa3 = context.getPolicyAssignments().get(TEST_POLICY_3); Assert.assertNotNull(pa1); Assert.assertNotNull(pa2);
PolicyAssignment pa1 = context.getPolicyAssignments().get(TEST_POLICY_1); PolicyAssignment pa2 = context.getPolicyAssignments().get(TEST_POLICY_2); Assert.assertNotNull(pa1); Assert.assertNotNull(pa2);
Assert.assertEquals(0, gs.startSequence); Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));