/**
 * Builds the full monitor-metadata snapshot (spout/router/alert/publish specs) for the
 * current schedule context.
 *
 * @param expandworkSets work items produced by this scheduling round
 * @param newAssignments assignments created this round
 *                       NOTE(review): this parameter is not used — the generator reads
 *                       assignments from {@code context} instead; confirm it can be dropped
 *                       or should be passed through.
 * @return the generated {@link ScheduleState}
 */
private ScheduleState generateMonitorMetadata(List<WorkItem> expandworkSets, Map<String, PolicyAssignment> newAssignments) {
    MonitorMetadataGenerator generator = new MonitorMetadataGenerator(context);
    return generator.generate(expandworkSets);
}
/**
 * Runs every metadata validation pass in order and returns the accumulated result.
 * Each validateXxx() call records its findings into the shared {@code state};
 * the ordering (topology -> data sources -> streams -> policies -> publishments)
 * is kept as-is since later passes may depend on earlier ones — TODO confirm.
 *
 * @return the populated validation state
 */
public ValidateState validate() {
    validateTopology();
    validateDataSources();
    validateStreams();
    validatePolicies();
    validatePublishments();
    return state;
}
/**
 * Static factory for the default policy scheduler implementation.
 * Currently always returns a new {@link GreedyPolicyScheduler}; callers depend
 * only on the {@link IPolicyScheduler} interface.
 *
 * @return a fresh scheduler instance
 */
public static IPolicyScheduler createScheduler() {
    return new GreedyPolicyScheduler();
}
}
/**
 * Schedules an initial policy, then incrementally adds more policies and re-runs the
 * scheduler, asserting the resulting state after each round:
 * 1) first policy scheduled alone;
 * 2) second policy on the same stream lands on the same queue;
 * 3) third policy on a different stream of the same topic gets its own placement.
 */
@Test
public void test_schedule_add2() {
    TestTopologyMgmtService mgmtService = createMgmtService();
    IScheduleContext context = createScheduleContext(mgmtService);
    GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
    ps.init(context, mgmtService);

    ScheduleOption option = new ScheduleOption();
    ps.schedule(option);
    ScheduleState status = ps.getState();
    context = ps.getContext(); // context updated!
    assertFirstPolicyScheduled(context, status);

    createSamplePolicy((InMemScheduleConext) context, TEST_POLICY_2, STREAM1, PARALELLISM);
    ps.init(context, mgmtService); // reinit
    ps.schedule(option);
    status = ps.getState();
    context = ps.getContext(); // context updated!
    // now assert two policy on the same queue
    assertSecondPolicyCreated(context, status);

    // add one policy on different stream of the same topic
    createSamplePolicy((InMemScheduleConext) context, TEST_POLICY_3, STREAM2, PARALELLISM);
    ps.init(context, mgmtService); // re-init
    ps.schedule(option);
    status = ps.getState();
    context = ps.getContext(); // context updated!
    assertThridPolicyScheduled(context, status);
}
private StreamWorkSlotQueue findWorkSlotQueue(MonitoredStream targetdStream, PolicyDefinition def) { StreamWorkSlotQueue targetQueue = null; for (StreamWorkSlotQueue queue : targetdStream.getQueues()) { if (isQueueAvailable(queue, def)) { targetQueue = queue; break; } } if (targetQueue == null) { WorkQueueBuilder builder = new WorkQueueBuilder(context, mgmtService); // TODO : get the properties from policy definiton targetQueue = builder.createQueue(targetdStream, def.isDedicated(), getQueueSize(def.getParallelismHint()), new HashMap<String, Object>()); } return targetQueue; }
/**
 * Loads a multi-datasource context fixture and verifies the scheduler produces
 * the expected number of assignments and alert specs: 2 assignments, 1 alert
 * spec, and 10 bolt->policy entries for topology "alertUnitTopology_1".
 */
@Test
public void testDataSources() throws Exception {
    InMemScheduleConext context = loadContext("/multi/");
    // 4 topologies, 10 bolts each — TODO confirm constructor argument meaning
    TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(4, 10);
    GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
    ps.init(context, mgmtService);
    ScheduleState state = ps.schedule(new ScheduleOption());
    Assert.assertNotNull(state);
    Assert.assertEquals(2, state.getAssignments().size());
    Assert.assertEquals(1, state.getAlertSpecs().size());
    Assert.assertEquals(10, state.getAlertSpecs().get("alertUnitTopology_1").getBoltPolicyIdsMap().size());
}
/**
 * Assembles a complete, versioned {@link ScheduleState} from the four metadata
 * families derived from the schedule context: spout specs, group-by router specs,
 * alert bolt specs, and publish specs.
 *
 * @param expandworkSets work items from the current scheduling round
 *                       NOTE(review): not referenced in this body — the spec
 *                       generators read from {@code context}; confirm intent.
 * @return the new schedule state tagged with a freshly generated version string
 */
public ScheduleState generate(List<WorkItem> expandworkSets) {
    // topologyId -> SpoutSpec
    Map<String, SpoutSpec> topoSpoutSpecsMap = generateSpoutMonitorMetadata();

    // grp-by meta spec(sort & grp)
    Map<String, RouterSpec> groupSpecsMap = generateGroupbyMonitorMetadata();

    // alert bolt spec
    Map<String, AlertBoltSpec> alertSpecsMap = generateAlertMonitorMetadata();

    Map<String, PublishSpec> publishSpecsMap = generatePublishMetadata();

    String uniqueVersion = generateVersion();
    ScheduleState status = new ScheduleState(uniqueVersion, topoSpoutSpecsMap, groupSpecsMap, alertSpecsMap,
        publishSpecsMap, context.getPolicyAssignments().values(), context.getMonitoredStreams().values(),
        context.getPolicies().values(), context.getStreamSchemas().values());
    return status;
}
public StreamWorkSlotQueue createQueue(MonitoredStream stream, boolean isDedicated, int size, Map<String, Object> properties) { // FIXME: make extensible and configurable IWorkSlotStrategy strategy = new SameTopologySlotStrategy(context, stream.getStreamGroup(), mgmtService); List<WorkSlot> slots = strategy.reserveWorkSlots(size, isDedicated, properties); if (slots.size() < size) { LOG.error("allocate stream work queue failed, required size"); return null; } StreamWorkSlotQueue queue = new StreamWorkSlotQueue(stream.getStreamGroup(), isDedicated, properties, slots); calculateGroupIndexAndCount(queue); assignQueueSlots(stream, queue);// build reverse reference stream.addQueues(queue); return queue; }
/**
 * Validates the current metadata by delegating to a fresh {@link MetadataValdiator}
 * bound to this client.
 *
 * @return the validation result
 */
public ValidateState validate() {
    MetadataValdiator validator = new MetadataValdiator(client);
    return validator.validate();
}
// Fragment of schedule(): compute the pending work items, then place each policy.
List<WorkItem> workSets = findWorkingSets();
Map<String, PolicyAssignment> newAssignments = new HashMap<String, PolicyAssignment>();
for (WorkItem item : workSets) {
    ScheduleResult r = schedulePolicy(item, newAssignments);
    results.add(r);
    // NOTE(review): the full metadata state is regenerated on EVERY loop iteration;
    // if only the final state is used, this belongs after the loop — confirm.
    state = generateMonitorMetadata(workSets, newAssignments);
    if (LOG.isDebugEnabled()) {
        LOG.debug("calculated schedule state: {}", JsonUtils.writeValueAsString(state));
// Fragment of schedulePolicy(): reject policies without a partition spec, otherwise
// find (or create) a queue and place the policy onto it.
if (item.def.getPartitionSpec().isEmpty()) {
    LOG.error(" policy {} partition spec is empty! ", policyName);
    ScheduleResult result = new ScheduleResult();
    result.policyName = policyName;
    result.code = 400;
// NOTE(review): `result` is declared a second time below — as shown this is a
// duplicate local-variable declaration and would not compile; the original file
// presumably returns/branches between these two sections. Verify against full source.
ScheduleResult result = new ScheduleResult();
result.policyName = policyName;
StreamWorkSlotQueue queue = findWorkSlotQueue(targetdStream, item.def);
if (queue == null) {
    // 400: no capacity anywhere for this policy's parallelism requirement
    result.code = 400;
    result.message = String.format("unable to allocate work queue resource for policy %s !", policyName);
} else {
    placePolicyToQueue(item.def, queue, newAssignments);
    result.code = 200;
    result.message = "OK";
private void placePolicy(PolicyDefinition def, AlertBoltUsage alertBoltUsage, Topology targetTopology, TopologyUsage usage) { String policyName = def.getName(); // topology usage update alertBoltUsage.addPolicies(def); // update alert policy usage.getPolicies().add(policyName); // update source topics updateDataSource(usage, def); // update group-by updateGrouping(usage, def); }
/**
 * Placeholder test: constructs a MetadataValdiator over an empty in-memory
 * context. No assertions yet — kept so the validator wiring at least executes.
 */
@Test
public void validate() throws Exception {
    InMemScheduleConext context = new InMemScheduleConext();
    MetadataValdiator mv = new MetadataValdiator(context);
    // om.readValue(TestMetadataValidator.class.getResourceAsStream("/validation/datasources.json"), new Gene);
    // TODO add more test here.
}
}
/**
 * Merges every data source (topic) referenced by the policy into the
 * topology usage's data-source set.
 *
 * @param usage topology usage record to update
 * @param def   policy whose input data sources are resolved
 */
private void updateDataSource(TopologyUsage usage, PolicyDefinition def) {
    usage.getDataSources().addAll(findDatasource(def));
}
private List<WorkItem> findWorkingSets() { // find the unassigned definition List<WorkItem> workSets = new LinkedList<WorkItem>(); for (PolicyDefinition def : context.getPolicies().values()) { int expectParal = def.getParallelismHint(); if (expectParal == 0) { expectParal = policyDefaultParallelism; } // how to handle expand of an policy in a smooth transition manner // TODO policy fix PolicyAssignment assignment = context.getPolicyAssignments().get(def.getName()); if (assignment != null) { LOG.info("policy {} already allocated", def.getName()); continue; } WorkItem item = new WorkItem(def, expectParal); workSets.add(item); } LOG.info("work set calculation: {}", workSets); return workSets; }
/**
 * Decides whether an existing queue can host the given policy: the queue must be
 * at least as wide as the policy's parallelism hint, and every bolt backing the
 * queue must still have capacity for the policy.
 *
 * @param queue candidate work-slot queue
 * @param def   policy to place
 * @return true when the queue can accept the policy
 */
private boolean isQueueAvailable(StreamWorkSlotQueue queue, PolicyDefinition def) {
    if (queue.getQueueSize() < def.getParallelismHint()) {
        return false;
    }
    for (WorkSlot slot : queue.getWorkingSlots()) {
        AlertBoltUsage boltUsage = context.getTopologyUsages()
            .get(slot.getTopologyName())
            .getAlertBoltUsage(slot.getBoltId());
        if (!isBoltAvailable(boltUsage, def)) {
            return false;
        }
    }
    return true;
}
private void placePolicyToQueue(PolicyDefinition def, StreamWorkSlotQueue queue, Map<String, PolicyAssignment> newAssignments) { for (WorkSlot slot : queue.getWorkingSlots()) { Topology targetTopology = context.getTopologies().get(slot.getTopologyName()); TopologyUsage usage = context.getTopologyUsages().get(slot.getTopologyName()); AlertBoltUsage alertBoltUsage = usage.getAlertBoltUsage(slot.getBoltId()); placePolicy(def, alertBoltUsage, targetTopology, usage); } // queue.placePolicy(def); PolicyAssignment assignment = new PolicyAssignment(def.getName(), queue.getQueueId()); context.getPolicyAssignments().put(def.getName(), assignment); newAssignments.put(def.getName(), assignment); }
/**
 * Smoke test: schedules the default context once and verifies a spout spec is
 * generated per topology (2 expected, including TOPO1) and that the first
 * policy lands where expected.
 */
@Test
public void test01_simple() throws Exception {
    GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
    TestTopologyMgmtService mgmtService = createMgmtService();
    IScheduleContext context = createScheduleContext(mgmtService);
    ps.init(context, mgmtService);
    ps.schedule(new ScheduleOption());
    ScheduleState status = ps.getState();
    context = ps.getContext(); // context updated!
    Map<String, SpoutSpec> spec = status.getSpoutSpecs();
    LOG.info(mapper.writeValueAsString(spec));
    Assert.assertEquals(2, spec.size());
    Assert.assertTrue(spec.containsKey(TOPO1));
    assertFirstPolicyScheduled(context, status);
}
/**
 * Verifies scheduling of a policy whose parallelism hint exceeds the default
 * bolt count: the hint should be promoted to a multiple of the default
 * parallelism (here 2 * 5 = 10 workers on the routed queue).
 */
@Test
public void testIrregularPolicyParallelismHint() {
    Config config = ConfigFactory.load();
    int defaultParallelism = config.getInt("coordinator.policyDefaultParallelism");
    TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(5, 12);
    InMemScheduleConext context = createScheduleContext(mgmtService);
    // recreate test policy
    context.getPolicies().clear();
    // make the hint bigger than bolt number
    int irregularParallelism = defaultParallelism + 2;
    createSamplePolicy(context, "irregularPolicy", STREAM1, irregularParallelism);
    GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
    ps.init(context, mgmtService);

    ScheduleState scheduled = ps.schedule(new ScheduleOption());
    Assert.assertEquals(2, scheduled.getSpoutSpecs().size());
    Assert.assertEquals(2, scheduled.getGroupSpecs().size());
    Assert.assertEquals(2, scheduled.getAlertSpecs().size());
    // assertion
    RouterSpec spec = scheduled.getGroupSpecs().get(TOPO1);
    Assert.assertTrue(spec.getRouterSpecs().size() > 0); // must be allocated
    for (StreamRouterSpec routerSpec : spec.getRouterSpecs()) {
        Assert.assertEquals(1, routerSpec.getTargetQueue().size());
        // irregularParallelism is prompted to 2 * defaultParallelism = 10
        Assert.assertEquals(10, routerSpec.getTargetQueue().get(0).getWorkers().size());
    }
}
// Fragment of a test body (enclosing method signature not visible): schedules a
// policy with policiesPerBolt=1, then adds a second policy on the same stream and
// re-schedules — presumably asserting queue growth afterwards; confirm in full source.
TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(6, 10);
GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
createSamplePolicy(context, TEST_POLICY_4, STREAM1, PARALELLISM);
ps.init(context, mgmtService);
ScheduleOption option = new ScheduleOption();
option.setPoliciesPerBolt(1);
ps.schedule(option);
ScheduleState state = ps.getState();
createSamplePolicy(context, TEST_POLICY_5, STREAM1, PARALELLISM);
ps.init(context, mgmtService);
ps.schedule(option);
state = ps.getState();