public MetadataValdiator(IScheduleContext context) {
    this.context = context;
    this.state = new ValidateState();
}
public void execute(String path, final Runnable r) throws TimeoutException {
    // default timeout: max retry attempts times the wait interval per attempt
    execute(path, r, ACQUIRE_LOCK_MAX_RETRIES_TIMES * ACQUIRE_LOCK_WAIT_INTERVAL_MS);
}
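For context, a minimal sketch of the lock-guarded execution pattern this method fronts, built on Apache Curator's InterProcessMutex. The class ZkExclusiveRunner and its constructor are hypothetical illustrations, not the project's actual ExclusiveExecutor:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.framework.recipes.locks.InterProcessMutex;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    // Hypothetical sketch of exclusive execution over a ZooKeeper lock:
    // acquire the lock at `path`, run the task, always release.
    public final class ZkExclusiveRunner implements AutoCloseable {
        private final CuratorFramework client;

        public ZkExclusiveRunner(String zkQuorum) {
            this.client = CuratorFrameworkFactory.newClient(zkQuorum, new ExponentialBackoffRetry(1000, 3));
            this.client.start();
        }

        public void execute(String path, Runnable task, long timeoutMs) throws Exception {
            InterProcessMutex lock = new InterProcessMutex(client, path);
            if (!lock.acquire(timeoutMs, TimeUnit.MILLISECONDS)) {
                throw new TimeoutException("could not acquire lock at " + path + " within " + timeoutMs + " ms");
            }
            try {
                task.run();
            } finally {
                lock.release();
            }
        }

        @Override
        public void close() {
            client.close();
        }
    }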
@Override
public void contextInitialized(ServletContextEvent sce) {
    LOG.info("Starting coordinator background tasks...");
    Coordinator.startSchedule();
}
@Override
public void run() {
    if (Coordinator.isPeriodicallyForceBuildEnable()) {
        LOG.info("CoordinatorTrigger started ...");
        Stopwatch watch = Stopwatch.createStarted();
        ZKConfig zkConfig = ZKConfigBuilder.getZKConfig(config);
        try (ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig)) {
            executor.execute(Coordinator.GREEDY_SCHEDULER_ZK_PATH, () -> {
                // build the schedule context and compute a new assignment
                IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
                TopologyMgmtService mgmtService = new TopologyMgmtService();
                IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();
                scheduler.init(context, mgmtService);
                ScheduleState state = scheduler.schedule(new ScheduleOption());
                // try-with-resources closes the producer automatically
                try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) {
                    Coordinator.postSchedule(client, state, producer);
                }
                watch.stop();
                LOG.info("CoordinatorTrigger ended, used time {} ms.", watch.elapsed(TimeUnit.MILLISECONDS));
            });
        } catch (Exception e) {
            LOG.error("trigger schedule failed!", e);
        }
    } else {
        LOG.info("CoordinatorTrigger found isPeriodicallyForceBuildEnable = false, skipped build");
    }
}
public synchronized ScheduleState schedule(ScheduleOption option) throws TimeoutException {
    ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig);
    AtomicReference<ScheduleState> reference = new AtomicReference<>();
    try {
        executor.execute(GREEDY_SCHEDULER_ZK_PATH, () -> {
            Stopwatch watch = Stopwatch.createStarted();
            IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
            TopologyMgmtService mgmtService = new TopologyMgmtService();
            IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();
            scheduler.init(context, mgmtService);
            ScheduleState state = scheduler.schedule(option);
            postSchedule(client, state, producer);
            watch.stop();
            LOG.info("schedule used time {} ms.", watch.elapsed(TimeUnit.MILLISECONDS));
            // hand the computed state out of the lambda to the caller
            reference.set(state);
        });
    } finally {
        try {
            executor.close();
        } catch (IOException e) {
            LOG.error("Exception when close exclusive executor, log and ignore!", e);
        }
    }
    return reference.get();
}
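A hedged usage sketch of the scheduling entry point above; the coordinator variable and the getVersion() accessor are assumptions, not confirmed by this section:

    // hypothetical caller: run one exclusive schedule pass and inspect the result
    try {
        ScheduleState state = coordinator.schedule(new ScheduleOption());
        LOG.info("new schedule version: {}", state.getVersion()); // getVersion() assumed
    } catch (TimeoutException e) {
        // another instance holds GREEDY_SCHEDULER_ZK_PATH; retry later
        LOG.warn("schedule lock not acquired in time", e);
    }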
public InMemScheduleConext(IScheduleContext context) {
    this.topologies = new HashMap<>(context.getTopologies());
    this.usages = new HashMap<>(context.getTopologyUsages());
    this.policies = new HashMap<>(context.getPolicies());
    this.datasources = new HashMap<>(context.getDataSourceMetadata());
    this.policyAssignments = new HashMap<>(context.getPolicyAssignments());
    this.schemas = new HashMap<>(context.getStreamSchemas());
    this.monitoredStreams = new HashMap<>(context.getMonitoredStreams());
    this.publishments = new HashMap<>(context.getPublishments());
}
private void validateStreams() {
    Collection<Kafka2TupleMetadata> datasources = context.getDataSourceMetadata().values();
    Collection<PolicyDefinition> definitions = context.getPolicies().values();
    for (StreamDefinition sd : context.getStreamSchemas().values()) {
        if (datasources.stream().noneMatch(d -> d.getName().equals(sd.getDataSource()))) {
            state.appendStreamValidation(sd.getStreamId(),
                String.format("stream %s references unknown data source %s!", sd.getStreamId(), sd.getDataSource()));
        }
        if (definitions.stream().noneMatch(p -> p.getInputStreams().contains(sd.getStreamId()))) {
            state.appendUnusedStreams(sd.getStreamId());
        }
        // more on columns
        if (sd.getColumns() == null || sd.getColumns().isEmpty()) {
            state.appendStreamValidation(sd.getStreamId(),
                String.format("stream %s has empty columns!", sd.getStreamId()));
        }
    }
}
private void validatePublishments() {
    Collection<PolicyDefinition> definitions = context.getPolicies().values();
    for (Publishment p : context.getPublishments().values()) {
        // TODO: check type; check serializer types; check dedup field existence; check extended deduplicator...
        // a policy id is unknown when no policy definition carries that name
        Set<String> unknown = p.getPolicyIds().stream()
            .filter(pid -> definitions.stream().noneMatch(pd -> pd.getName().equals(pid)))
            .collect(Collectors.toSet());
        if (!unknown.isEmpty()) {
            state.appendPublishemtnValidation(p.getName(),
                String.format("publishment %s references unknown/disabled policy %s!", p.getName(), unknown));
        }
    }
}
@POST @Path("/build") public String build() throws Exception { ScheduleOption option = new ScheduleOption(); ScheduleState state = alertCoordinator.schedule(option); return JsonUtils.writeValueAsString(state); }
public MetadataValdiator(IMetadataServiceClient client) {
    List<Topology> topologies = client.listTopologies();
    List<Kafka2TupleMetadata> datasources = client.listDataSources();
    List<StreamDefinition> streams = client.listStreams();
    // TODO: filter out disabled policies; listPolicies() currently returns all of them
    List<PolicyDefinition> enabledPolicies = client.listPolicies();
    List<Publishment> publishments = client.listPublishment();
    context = new InMemScheduleConext(
        ScheduleContextBuilder.listToMap(topologies),
        new HashMap<>(),
        ScheduleContextBuilder.listToMap(datasources),
        ScheduleContextBuilder.listToMap(enabledPolicies),
        ScheduleContextBuilder.listToMap(publishments),
        ScheduleContextBuilder.listToMap(streams),
        new HashMap<>(),
        new HashMap<>());
    this.state = new ValidateState();
}
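The TODO above could be resolved with a status filter; a minimal sketch, assuming a hypothetical getPolicyStatus() accessor returning a "DISABLED" marker on PolicyDefinition:

    // hypothetical filter: keep only policies that are not disabled
    List<PolicyDefinition> enabledPolicies = client.listPolicies().stream()
            .filter(pd -> !"DISABLED".equals(pd.getPolicyStatus())) // accessor and status value assumed
            .collect(Collectors.toList());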
/**
 * Work queues are not root-level objects, so build a map out of the
 * MonitoredStreams for quick lookup by queue id later.
 */
private Map<String, StreamWorkSlotQueue> buildQueueMap() {
    Map<String, StreamWorkSlotQueue> queueMap = new HashMap<>();
    for (MonitoredStream ms : context.getMonitoredStreams().values()) {
        for (StreamWorkSlotQueue queue : ms.getQueues()) {
            queueMap.put(queue.getQueueId(), queue);
        }
    }
    return queueMap;
}
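A short usage sketch: the map resolves a queue id to its queue in O(1) instead of re-walking every MonitoredStream. The assignment variable and its getQueueId() accessor are assumptions for illustration:

    // hypothetical lookup: find the slot queue an assignment points at
    Map<String, StreamWorkSlotQueue> queueMap = buildQueueMap();
    StreamWorkSlotQueue queue = queueMap.get(assignment.getQueueId());
    if (queue == null) {
        // the assignment references a queue that no MonitoredStream owns
    }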
private void validateTopology() {
    // TODO: stub; no topology checks are implemented yet
    for (Topology t : context.getTopologies().values()) {
    }
}
@POST @Path("/disablePeriodicForceBuild") public void disablePeriodicallyBuild() { alertCoordinator.disablePeriodicallyBuild(); }
@POST @Path("/enablePeriodicForceBuild") public void enforcePeriodicallyBuild() { alertCoordinator.enforcePeriodicallyBuild(); }
@SuppressWarnings("static-access") @GET @Path("/periodicForceBuildState") public boolean statPeriodicallyBuild() { return alertCoordinator.isPeriodicallyForceBuildEnable(); }
@Override
public List<TopologyMeta> listTopologies() {
    return super.listTopologies();
}
@GET @Path("/assignments") public String getAssignments() throws Exception { ScheduleState state = alertCoordinator.getState(); return JsonUtils.writeValueAsString(state); }
@POST @Path("/validate") public String validate() throws Exception { ValidateState state = alertCoordinator.validate(); return JsonUtils.writeValueAsString(state); }
@Override
public TopologyMeta creatTopology() {
    if (enableCreateTopology) {
        TopologyMeta tm = new TopologyMeta();
        tm.topologyId = namePrefix + (i++);
        tm.clusterId = "default-cluster";
        tm.nimbusHost = "localhost";
        tm.nimbusPort = "3000";
        Pair<Topology, TopologyUsage> pair = createEmptyTopology(tm.topologyId);
        tm.topology = pair.getLeft();
        tm.usage = pair.getRight();
        return tm;
    } else {
        throw new UnsupportedOperationException("not supported yet!");
    }
}
@Test
public void test_Schedule() {
    Coordinator.startSchedule();
}