/**
 * Factory method for the default policy scheduler implementation.
 *
 * @return a freshly constructed greedy policy scheduler
 */
public static IPolicyScheduler createScheduler() {
    IPolicyScheduler scheduler = new GreedyPolicyScheduler();
    return scheduler;
}
}
/**
 * Appends the data sources referenced by the given policy definition to the
 * topology usage's data source list.
 *
 * @param usage topology usage record to update in place
 * @param def   policy whose data sources are looked up via {@code findDatasource}
 */
private void updateDataSource(TopologyUsage usage, PolicyDefinition def) {
    usage.getDataSources().addAll(findDatasource(def));
}
// Fragment of the schedule flow (the enclosing method signature and the declarations of
// `results` and `state` are outside this chunk; braces opened here close past its end).
List<WorkItem> workSets = findWorkingSets();
Map<String, PolicyAssignment> newAssignments = new HashMap<String, PolicyAssignment>();
// Schedule each pending work item, accumulating per-policy results and assignments.
for (WorkItem item : workSets) {
    ScheduleResult r = schedulePolicy(item, newAssignments);
    results.add(r);
    // NOTE(review): generateMonitorMetadata is invoked on every loop iteration; if the
    // metadata depends only on the final assignments it likely belongs after the loop —
    // confirm against the complete method body.
    state = generateMonitorMetadata(workSets, newAssignments);
    if (LOG.isDebugEnabled()) {
        LOG.debug("calculated schedule state: {}", JsonUtils.writeValueAsString(state));
private StreamWorkSlotQueue findWorkSlotQueue(MonitoredStream targetdStream, PolicyDefinition def) { StreamWorkSlotQueue targetQueue = null; for (StreamWorkSlotQueue queue : targetdStream.getQueues()) { if (isQueueAvailable(queue, def)) { targetQueue = queue; break; } } if (targetQueue == null) { WorkQueueBuilder builder = new WorkQueueBuilder(context, mgmtService); // TODO : get the properties from policy definiton targetQueue = builder.createQueue(targetdStream, def.isDedicated(), getQueueSize(def.getParallelismHint()), new HashMap<String, Object>()); } return targetQueue; }
/** * Add policy after add policy */ @Test public void test_schedule_add2() { TestTopologyMgmtService mgmtService = createMgmtService(); IScheduleContext context = createScheduleContext(mgmtService); GreedyPolicyScheduler ps = new GreedyPolicyScheduler(); ps.init(context, mgmtService); ScheduleOption option = new ScheduleOption(); ps.schedule(option); ScheduleState status = ps.getState(); context = ps.getContext(); // context updated! assertFirstPolicyScheduled(context, status); createSamplePolicy((InMemScheduleConext) context, TEST_POLICY_2, STREAM1, PARALELLISM); ps.init(context, mgmtService); // reinit ps.schedule(option); status = ps.getState(); context = ps.getContext(); // context updated! // now assert two policy on the same queue assertSecondPolicyCreated(context, status); // add one policy on different stream of the same topic createSamplePolicy((InMemScheduleConext) context, TEST_POLICY_3, STREAM2, PARALELLISM); ps.init(context, mgmtService); // re-init ps.schedule(option); status = ps.getState(); context = ps.getContext(); // context updated! assertThridPolicyScheduled(context, status); }
/**
 * Loads a multi-datasource context from test resources, runs one schedule pass, and
 * verifies the assignment count, alert-spec count, and the size of the bolt-to-policy
 * map for the first alert topology.
 */
@Test
public void testDataSources() throws Exception {
    InMemScheduleConext context = loadContext("/multi/");
    TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(4, 10);

    GreedyPolicyScheduler scheduler = new GreedyPolicyScheduler();
    scheduler.init(context, mgmtService);
    ScheduleState state = scheduler.schedule(new ScheduleOption());

    Assert.assertNotNull(state);
    Assert.assertEquals(2, state.getAssignments().size());
    Assert.assertEquals(1, state.getAlertSpecs().size());
    Assert.assertEquals(10, state.getAlertSpecs().get("alertUnitTopology_1").getBoltPolicyIdsMap().size());
}
// Fragment of schedulePolicy (the enclosing method signature and the declaration of
// `result` are outside this chunk; the else-branch brace closes past its end).
result.policyName = policyName;
// Find (or create) a queue on the target stream able to host this policy.
StreamWorkSlotQueue queue = findWorkSlotQueue(targetdStream, item.def);
if (queue == null) {
    // HTTP-style status codes are reused to report the schedule outcome.
    result.code = 400;
    result.message = String.format("unable to allocate work queue resource for policy %s !", policyName);
} else {
    placePolicyToQueue(item.def, queue, newAssignments);
    result.code = 200;
    result.message = "OK";
/**
 * Decides whether a queue can host the given policy: the queue must be at least as
 * large as the policy's parallelism hint, and every alert bolt backing its working
 * slots must still have capacity for the policy.
 *
 * @param queue candidate work slot queue
 * @param def   policy to place
 * @return true when the queue can accept the policy
 */
private boolean isQueueAvailable(StreamWorkSlotQueue queue, PolicyDefinition def) {
    boolean bigEnough = queue.getQueueSize() >= def.getParallelismHint();
    if (!bigEnough) {
        return false;
    }
    for (WorkSlot slot : queue.getWorkingSlots()) {
        TopologyUsage topologyUsage = context.getTopologyUsages().get(slot.getTopologyName());
        AlertBoltUsage boltUsage = topologyUsage.getAlertBoltUsage(slot.getBoltId());
        if (!isBoltAvailable(boltUsage, def)) {
            return false;
        }
    }
    return true;
}
@Test public void test01_simple() throws Exception { GreedyPolicyScheduler ps = new GreedyPolicyScheduler(); TestTopologyMgmtService mgmtService = createMgmtService(); IScheduleContext context = createScheduleContext(mgmtService); ps.init(context, mgmtService); ps.schedule(new ScheduleOption()); ScheduleState status = ps.getState(); context = ps.getContext(); // context updated! Map<String, SpoutSpec> spec = status.getSpoutSpecs(); LOG.info(mapper.writeValueAsString(spec)); Assert.assertEquals(2, spec.size()); Assert.assertTrue(spec.containsKey(TOPO1)); assertFirstPolicyScheduled(context, status); }
@Test public void testIrregularPolicyParallelismHint() { Config config = ConfigFactory.load(); int defaultParallelism = config.getInt("coordinator.policyDefaultParallelism"); TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(5, 12); InMemScheduleConext context = createScheduleContext(mgmtService); // recreate test poicy context.getPolicies().clear(); // make the hint bigger than bolt number int irregularParallelism = defaultParallelism + 2; createSamplePolicy(context, "irregularPolicy", STREAM1, irregularParallelism); GreedyPolicyScheduler ps = new GreedyPolicyScheduler(); ps.init(context, mgmtService); ScheduleState scheduled = ps.schedule(new ScheduleOption()); Assert.assertEquals(2, scheduled.getSpoutSpecs().size()); Assert.assertEquals(2, scheduled.getGroupSpecs().size()); Assert.assertEquals(2, scheduled.getAlertSpecs().size()); // assertion RouterSpec spec = scheduled.getGroupSpecs().get(TOPO1); Assert.assertTrue(spec.getRouterSpecs().size() > 0); // must be allocated for (StreamRouterSpec routerSpec : spec.getRouterSpecs()) { Assert.assertEquals(1, routerSpec.getTargetQueue().size()); // irregularParallelism is prompted to 2 * defaultParallelism = 10 Assert.assertEquals(10, routerSpec.getTargetQueue().get(0).getWorkers().size()); } }
// Fragment of a test body (the method signature and the declaration of `context` are
// outside this chunk): schedules TEST_POLICY_4, then adds TEST_POLICY_5 on the same
// stream and re-schedules with at most one policy per bolt.
TestTopologyMgmtService mgmtService = new TestTopologyMgmtService(6, 10);
GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
createSamplePolicy(context, TEST_POLICY_4, STREAM1, PARALELLISM);
ps.init(context, mgmtService);

ScheduleOption option = new ScheduleOption();
// Force each bolt to host a single policy, so the second policy needs fresh slots.
option.setPoliciesPerBolt(1);
ps.schedule(option);
ScheduleState state = ps.getState();

createSamplePolicy(context, TEST_POLICY_5, STREAM1, PARALELLISM);
ps.init(context, mgmtService);
ps.schedule(option);
state = ps.getState();
// Fragment of a test body (the method signature and the assertions that follow are
// outside this chunk): runs one schedule pass with default options over a freshly
// built context and captures the resulting state and updated context.
TestTopologyMgmtService mgmtService = new TestTopologyMgmtService();
IScheduleContext context = createScheduleContext(mgmtService);
GreedyPolicyScheduler ps = new GreedyPolicyScheduler();
ps.init(context, mgmtService);
ScheduleOption option = new ScheduleOption();
ps.schedule(option);
ScheduleState state = ps.getState();
context = ps.getContext(); // context updated!