// (fragment of an enclosing usage-building method — start/end not visible here)
// Builds the queue lookup used below, registers this alert bolt's usage on the
// topology usage object, fills in bolt-level policy/partition/queue info, and
// finally resolves the topology's data sources from its bound policies.
Map<String, StreamWorkSlotQueue> queueMap = new HashMap<String, StreamWorkSlotQueue>();
// Pre-compute queueId -> queue map so per-bolt lookups below are O(1).
preBuildQueue2TopoMap(topo2MonitorStream, topo2Policies, bolt2Policies, bolt2Partition, bolt2QueueIds, queueMap);
u.getAlertUsages().put(alertBolt, alertUsage);
addBoltUsageInfo(bolt2Policies, bolt2Partition, bolt2QueueIds, uniqueBoltId, alertUsage, queueMap);
buildTopologyDataSource(u);
// (fragment of an enclosing context-build method — start/end not visible here)
// Snapshot all metadata from the service client into keyed maps.
topologies = listToMap(client.listTopologies());
kafkaSources = listToMap(client.listDataSources());
policies = listToMap(enabledPolicies);
publishments = listToMap(client.listPublishment());
streamDefinitions = listToMap(client.listStreams());
// When no prior schedule state exists, start from empty assignments/streams;
// otherwise reconcile the previous state against the freshly loaded metadata.
assignments = listToMap(state == null ? new ArrayList<PolicyAssignment>() : detectAssignmentsChange(state.getAssignments(), state));
monitoredStreamMap = listToMap(state == null ? new ArrayList<MonitoredStream>() : detectMonitoredStreams(state.getMonitoredStreams()));
// Usage must be derived last — it depends on the maps populated above.
usages = buildTopologyUsage();
/**
 * Resolves the data sources referenced by each policy bound to the given
 * topology usage and records them on the usage object.
 *
 * <p>Mutates {@code u.getDataSources()} in place; logs (but does not fail) when
 * a referenced policy is missing from the loaded policy map.
 *
 * @param u topology usage whose policy names are resolved to data sources
 */
private void buildTopologyDataSource(TopologyUsage u) {
    for (String policyName : u.getPolicies()) {
        PolicyDefinition def = policies.get(policyName);
        if (def != null) {
            u.getDataSources().addAll(findDatasource(def));
        } else {
            // Metadata inconsistency: the usage references a policy that is no
            // longer present in the policy map. (Message wording fixed; the old
            // text "policy not find {} ..." was garbled.)
            LOG.error("policy {} not found, but referenced in topology usage {}!", policyName, u.getTopoName());
        }
    }
}
// (fragment of an enclosing scheduling method — start/end not visible here)
// Bootstrap a scheduling pass: time it, rebuild the context from current
// metadata, and create the management service + scheduler that will run it.
ScheduleState state = null;
Stopwatch watch = Stopwatch.createStarted();
IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
TopologyMgmtService mgmtService = new TopologyMgmtService();
IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();
/**
 * Converts a list into a map keyed by {@code getKey(element)}.
 *
 * <p>If two elements share a key, the later element silently overwrites the
 * earlier one — callers rely on metadata names being unique.
 *
 * @param collections source list; must be non-null
 * @param <T> element type
 * @param <K> key type produced by {@code getKey}
 * @return a new mutable map of key to element
 */
public static <T, K> Map<K, T> listToMap(List<T> collections) {
    // Presize to avoid rehashing for typical metadata list sizes.
    Map<K, T> maps = new HashMap<>(collections.size());
    for (T t : collections) {
        maps.put(getKey(t), t);
    }
    return maps;
}
// (fragment) Presumably prunes monitored streams that are no longer referenced
// before the context is rebuilt — TODO confirm against clearMonitoredStreams.
clearMonitoredStreams(monitoredStreams);
@Override public void run() { if (Coordinator.isPeriodicallyForceBuildEnable()) { LOG.info("CoordinatorTrigger started ... "); Stopwatch watch = Stopwatch.createStarted(); ZKConfig zkConfig = ZKConfigBuilder.getZKConfig(config); try (ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig)) { executor.execute(Coordinator.GREEDY_SCHEDULER_ZK_PATH, () -> { // schedule IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext(); TopologyMgmtService mgmtService = new TopologyMgmtService(); IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler(); scheduler.init(context, mgmtService); ScheduleState state = scheduler.schedule(new ScheduleOption()); // use try catch to use AutoCloseable interface to close producer automatically try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) { Coordinator.postSchedule(client, state, producer); } watch.stop(); LOG.info("CoordinatorTrigger ended, used time {} sm.", watch.elapsed(TimeUnit.MILLISECONDS)); }); } catch (Exception e) { LOG.error("trigger schedule failed!", e); } } else { LOG.info("CoordinatorTrigger found isPeriodicallyForceBuildEnable = false, skipped build"); } }
@Test
public void test_renamed_topologies() {
    // Renaming a topology must invalidate assignments that point at the old name.
    InMemMetadataServiceClient metadataClient = getSampleMetadataService();
    ScheduleContextBuilder contextBuilder = new ScheduleContextBuilder(config, metadataClient);

    // Baseline: the sample policy is assigned.
    IScheduleContext scheduleContext = contextBuilder.buildContext();
    Assert.assertTrue(scheduleContext.getPolicyAssignments().containsKey(TEST_POLICY_1));

    // Rename the (only) topology, then rebuild the context.
    Topology renamedTopology = metadataClient.listTopologies().get(0);
    renamedTopology.setName("newName");
    scheduleContext = contextBuilder.buildContext();

    // The stale assignment against the old topology name is gone.
    Assert.assertFalse(scheduleContext.getPolicyAssignments().containsKey(TEST_POLICY_1));
}
@Test public void test_changed_policy_parallelism() { InMemMetadataServiceClient client = getSampleMetadataService(); ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client); PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0); IScheduleContext context = builder.buildContext(); Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1)); StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight(); PolicyDefinition pd1 = client.listPolicies().get(0); pd1.setParallelismHint(4); // default queue is 5 , change to smaller, same like change bigger context = builder.buildContext(); Assert.assertFalse(context.getPolicyAssignments().values().iterator().hasNext()); //PolicyAssignment assignmentNew = context.getPolicyAssignments().values().iterator().next(); //StreamWorkSlotQueue queueNew = SchedulerTest.getQueue(context, assignmentNew.getQueueId()).getRight(); //Assert.assertNotNull(queueNew); // just to make sure queueNew is present //Assert.assertEquals(queue.getQueueId(), queueNew.getQueueId()); // default queue is 5 , change to bigger 6, policy assignment removed pd1.setParallelismHint(queue.getQueueSize() + 1); context = builder.buildContext(); Assert.assertFalse(context.getPolicyAssignments().values().iterator().hasNext()); }
/**
 * Removing a policy must drop its assignment and remove it from the hosting
 * topology's usage. Fixes a copy-paste slip: the original asserted
 * {@code TEST_DATASOURCE_1} was absent from a set of <em>policy</em> names
 * (vacuously true given the size check); the intended constant is
 * {@code TEST_POLICY_1}.
 */
@Test
public void test_remove_policy() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);

    // Baseline: policy assigned, queue resolvable.
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();

    client.removePolicy(0);
    context = builder.buildContext();
    Assert.assertFalse(context.getPolicyAssignments().containsKey(TEST_POLICY_1));

    // The topology that hosted the removed policy no longer references it.
    WorkSlot slot = queue.getWorkingSlots().get(0);
    Set<String> topoPolicies = context.getTopologyUsages().get(slot.topologyName).getPolicies();
    Assert.assertFalse(topoPolicies.contains(TEST_POLICY_1));
    Assert.assertEquals(0, topoPolicies.size());
}
/**
 * Changing only the policy definition text must keep the policy on the same
 * queue — the assignment is preserved, not rescheduled.
 */
@Test public void test_changed_policy_definition() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);
    // Baseline: policy assigned; capture the queue it currently occupies.
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    StreamWorkSlotQueue queue = SchedulerTest.getQueue(context, assignment1.getQueueId()).getRight();
    // Mutate the definition body only (no parallelism/partition change).
    PolicyDefinition pd1 = client.listPolicies().get(0);
    pd1.getDefinition().value = "define.. new...";
    context = builder.buildContext();
    PolicyAssignment assignmentNew = context.getPolicyAssignments().values().iterator().next();
    StreamWorkSlotQueue queueNew = SchedulerTest.getQueue(context, assignmentNew.getQueueId()).getRight();
    Assert.assertNotNull(queueNew); // just to make sure queueNew is present
    // Same queue id as before the definition change: assignment preserved.
    Assert.assertEquals(queue.getQueueId(), queueNew.getQueueId());
}
@Test public void test() { InMemMetadataServiceClient client = getSampleMetadataService(); ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client); IScheduleContext context = builder.buildContext(); // assert topology usage Map<String, TopologyUsage> usages = context.getTopologyUsages(); Assert.assertEquals(1, usages.get(TOPO1).getMonitoredStream().size()); Assert.assertTrue(usages.get(TOPO1).getPolicies().contains(TEST_POLICY_1)); String alertBolt0 = TOPO1 + "-alert-" + "0"; String alertBolt1 = TOPO1 + "-alert-" + "1"; String alertBolt2 = TOPO1 + "-alert-" + "2"; for (AlertBoltUsage u : usages.get(TOPO1).getAlertUsages().values()) { if (u.getBoltId().equals(alertBolt0) || u.getBoltId().equals(alertBolt1) || u.getBoltId().equals(alertBolt2)) { Assert.assertEquals(1, u.getPolicies().size()); Assert.assertTrue(u.getPolicies().contains(TEST_POLICY_1)); Assert.assertEquals(1, u.getPartitions().size()); Assert.assertEquals(1, u.getReferQueues().size()); } } }
// (fragment of an enclosing test method — start/end not visible here)
// Build a schedule context from the sample metadata that includes a
// no-data-alert policy.
InMemMetadataServiceClient client = getSampleMetadataServiceWithNodataAlert();
ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
IScheduleContext context = builder.buildContext();
// NOTE(review): this method appears truncated in the visible source — `pd1`
// and `par` are used without visible declarations and the closing assertions/
// brace are missing. Left byte-identical; recover the full body before editing.
@Test public void test_changed_policy_partition() {
    InMemMetadataServiceClient client = getSampleMetadataService();
    ScheduleContextBuilder builder = new ScheduleContextBuilder(config, client);
    PolicyAssignment assignment1 = client.getVersionedSpec().getAssignments().get(0);
    IScheduleContext context = builder.buildContext();
    Assert.assertTrue(context.getPolicyAssignments().containsKey(TEST_POLICY_1));
    // Add a partition spec to the policy and rebuild — presumably the
    // assignment should be invalidated; confirm once the full body is visible.
    pd1.getPartitionSpec().add(par);
    context = builder.buildContext();