/**
 * Per-test cleanup: releases the configurations cache, the Curator client, and the
 * in-process ZooKeeper server stood up by setup().
 *
 * <p>Each close is performed in its own try/finally so that a failure closing one
 * resource does not prevent the remaining resources from being released (the original
 * sequential form leaked the client and ZK server if {@code cache.close()} threw).
 */
@After
public void teardown() throws Exception {
  try {
    if (cache != null) {
      cache.close();
    }
  } finally {
    try {
      if (client != null) {
        client.close();
      }
    } finally {
      if (zkComponent != null) {
        zkComponent.stop();
      }
    }
  }
}
/**
 * Drains the currently-available messages from the given Kafka topic and parses each
 * payload as a JSON object.
 *
 * @param kc    Kafka test component to read from
 * @param topic topic name to drain
 * @return the parsed messages as a set (duplicates collapse per JSONObject equality)
 * @throws IllegalStateException if any payload fails to parse as JSON
 */
private Set<JSONObject> readMessagesFromKafka(KafkaComponent kc, String topic) {
  Set<JSONObject> out = new HashSet<>();
  for (byte[] b : kc.readMessages(topic)) {
    try {
      // Decode explicitly as UTF-8: the charset-less String(byte[]) constructor uses
      // the platform default, which makes the test environment-dependent.
      JSONObject m = new JSONObject(
          JSONUtils.INSTANCE.load(
              new String(b, java.nio.charset.StandardCharsets.UTF_8),
              JSONUtils.MAP_SUPPLIER));
      out.add(m);
    } catch (IOException e) {
      // Wrap and preserve the cause; a malformed message is a test-setup failure.
      throw new IllegalStateException(e);
    }
  }
  return out;
}
/**
 * One-time fixture: starts an in-process ZooKeeper server and a mini YARN cluster via
 * the component runner, then connects a Curator client to the live ZK quorum.
 */
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // Quiet java.util.logging noise from the mini-clusters during startup.
  UnitTestHelper.setJavaLoggingLevel(Level.SEVERE);
  LOG.info("Starting up YARN cluster");

  zkServerComponent = new ZKServerComponent();
  yarnComponent =
      new YarnComponent()
          .withApplicationMasterClass(ApplicationMaster.class)
          .withTestName(MaasIntegrationTest.class.getSimpleName());

  runner =
      new ComponentRunner.Builder()
          .withComponent("yarn", yarnComponent)
          .withComponent("zk", zkServerComponent)
          .withMillisecondsBetweenAttempts(15000)
          .withNumRetries(10)
          .build();
  runner.start();

  // The connection string is only valid once the ZK server has started.
  String quorum = zkServerComponent.getConnectionString();
  RetryPolicy backoff = new ExponentialBackoffRetry(1000, 3);
  client = CuratorFrameworkFactory.newClient(quorum, backoff);
  client.start();
}
// NOTE(review): fragment of a larger test method — the enclosing signature and part of
// a config block (note the stray "}});" after globalConfigStr) are outside this view.
// Visible here: stand up ZK plus the enrichment/indexing/error Kafka topics, build and
// submit the Flux topology, write the input messages to the enrichment topic, and
// collect the processed output keyed by topic. Verify the missing middle against the
// full file before editing.
final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties); final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {{ add(new KafkaComponent.Topic(Constants.ENRICHMENT_TOPIC, 1)); add(new KafkaComponent.Topic(Constants.INDEXING_TOPIC, 1)); add(new KafkaComponent.Topic(ERROR_TOPIC, 1)); }}); String globalConfigStr = null; }}); FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder() .withTopologyLocation(new File(fluxPath())) .withTopologyName("test") .withTemplateLocation(new File(getTemplatePath())) .withTopologyProperties(topologyProperties) .build(); fluxComponent.submitTopology(); kafkaComponent.writeMessages(Constants.ENRICHMENT_TOPIC, inputMessages); ProcessorResult<Map<String, List<Map<String, Object>>>> result = runner.process(getProcessor()); Map<String,List<Map<String, Object>>> outputMessages = result.getResult();
// NOTE(review): fragment — the receiver of the .withSensorTypes(...) builder chain and
// the remainder of the try block are outside this view. Visible here: stand up ZK and
// a single Kafka topic for the sensor, point the topology properties at the live
// broker, then start the runner, feed the input messages, and process the resulting
// enrichment lookup records.
final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties); final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {{ add(new KafkaComponent.Topic(sensorType, 1)); }}); topologyProperties.setProperty("kafka.broker", kafkaComponent.getBrokerList()); .withSensorTypes(Collections.singletonList(sensorType)) .withTopologyProperties(topologyProperties) .withBrokerUrl(kafkaComponent.getBrokerList()) .withOutputTopic(parserConfig.getOutputTopic()) .build(); try { runner.start(); kafkaComponent.writeMessages(sensorType, inputMessages); ProcessorResult<List<LookupKV<EnrichmentKey, EnrichmentValue>>> result = runner.process(new Processor<List<LookupKV<EnrichmentKey, EnrichmentValue>>>() {
// Per-test fixture: stands up an in-process ZooKeeper server, connects a Curator
// client to it, and wraps that client in a ZKConfigurationsCache. The method's closing
// brace is outside this view; these resources are released by teardown().
@Before public void setup() throws Exception { zkComponent = new ZKServerComponent(); zkComponent.start(); client = ConfigurationsUtils.getClient(zkComponent.getConnectionString()); client.start(); cache = new ZKConfigurationsCache(client);
// NOTE(review): fragment — the catch/finally of this try block is outside this view.
// Starts the component runner and seeds two sensor topics with their respective input
// message sets (an empty-object sensor and a dummy sensor).
try { runner.start(); kafkaComponent.writeMessages(emptyObjectSensorType, emptyObjectInputMessages); kafkaComponent.writeMessages(dummySensorType, dummyInputMessages);
// NOTE(review): fragment — interleaved pieces of several CLI argument arrays for the
// MaaS YARN client (app-master jar, ZK quorum/root, master memory) plus what appear to
// be model-submission arguments (name/version, local model path, instance count), with
// the DS Client construction in between. The enclosing array literals / method calls
// are outside this view — consult the full file before editing.
"--jar", yarnComponent.getAppMasterJar(), "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--master_memory", "512", YarnConfiguration conf = yarnComponent.getConfig(); LOG.info("Initializing DS Client"); final Client client = new Client(new Configuration(conf)); "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--local_model_path", "src/test/resources/maas", "--name", "dummy", "--version", "1.0", "--zk_quorum", zkServerComponent.getConnectionString(), "--zk_root", configRoot, "--num_instances", "1",
// NOTE(review): fragment — builds one Kafka topic per sensor type plus the shared
// enrichment topic, then configures a parser-topology builder whose receiver is
// outside this view. The for-loop's closing brace is not visible here; presumably it
// closes after the per-sensor add — verify against the full file.
List<KafkaComponent.Topic> topics = new ArrayList<>(); for(String sensorType : sensorTypes) { topics.add(new KafkaComponent.Topic(sensorType, 1)); topics.add(new KafkaComponent.Topic(Constants.ENRICHMENT_TOPIC, 1)); kafkaComponent = getKafkaComponent(topologyProperties, topics); topologyProperties.setProperty("kafka.broker", kafkaComponent.getBrokerList()); .withSensorTypes(sensorTypes) .withTopologyProperties(topologyProperties) .withBrokerUrl(kafkaComponent.getBrokerList()) .withErrorTopic(parserConfigs.get(0).getErrorTopic()) .withOutputTopic(parserConfigs.get(0).getOutputTopic())
@Before public void setup() throws Exception { // a component that uploads the global configuration Map<String, Object> globals = new HashMap<>(); ConfigUploadComponent configUploader = new ConfigUploadComponent() .withGlobals(globals); // create zookeeper component properties = new Properties(); zkServer = getZKServerComponent(properties); // can only get the zookeeperUrl AFTER it has started zkServer.withPostStartCallback((zk) -> { zookeeperURL = zk.getConnectionString(); configUploader.withZookeeperURL(zookeeperURL); }); // start the integration test components runner = new ComponentRunner.Builder() .withComponent("zk", zkServer) .withComponent("config", configUploader) .build(); runner.start(); context = mock(InterpreterContext.class); }
/**
 * Shuts down the in-memory Storm cluster, tolerating Storm's known slow-slot-shutdown
 * failure mode. Cleanup is best-effort: any other failure is logged, never rethrown,
 * and the worker directory is always removed.
 */
@Override public void stop() { if (stormCluster != null) { try { try {
  // Kill the topology directly instead of sitting through the wait period
  killTopology(); stormCluster.shutdown(); } catch (IllegalStateException ise) {
  // Storm sometimes times out waiting for worker slots to stop and raises an
  // IllegalStateException whose message matches the two fragments below; anything
  // else is a genuine failure and is rethrown.
  if (!(ise.getMessage().contains("It took over") && ise.getMessage().contains("to shut down slot"))) { throw ise; } else {
  // Forcibly kill the stuck slots and log loudly so failures remain traceable.
  assassinateSlots(); LOG.error("Storm slots didn't shut down entirely cleanly *sigh*. " + "I gave them the old one-two-skadoo and killed the slots with prejudice. " + "If tests fail, we'll have to find a better way of killing them.", ise); } } } catch(Throwable t) {
  // Best-effort shutdown: never let cleanup failures fail the test run.
  LOG.error(t.getMessage(), t); } finally { cleanupWorkerDir(); } } }
// NOTE(review): only the opening of verifyContainerLog is visible — it resolves the
// NodeManager's log directory from the mini-YARN-cluster configuration (falling back
// to the default NM log dirs). The remainder of the method, presumably scanning
// container logs for expectedContent/expectedWord, is outside this view.
private int verifyContainerLog(int containerNum, List<String> expectedContent, boolean count, String expectedWord) { File logFolder = new File(yarnComponent.getYARNCluster().getNodeManager(0).getConfig() .get(YarnConfiguration.NM_LOG_DIRS, YarnConfiguration.DEFAULT_NM_LOG_DIRS));
// NOTE(review): fragment of a larger indexing test method — the enclosing signature
// and the tail of the method are outside this view. Visible here: stand up ZK plus
// the indexing/error Kafka topics, build and submit the Flux topology (after a short
// 100 ms pause), write the input messages to the indexing topic, process them, and
// assert the document count matches the input count.
final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties); final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {{ add(new KafkaComponent.Topic(Constants.INDEXING_TOPIC, 1)); add(new KafkaComponent.Topic(ERROR_TOPIC, 1)); }}); List<Map<String, Object>> inputDocs = new ArrayList<>(); FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder() .withTopologyLocation(new File(getFluxPath())) .withTopologyName("test") .withTemplateLocation(new File(getTemplatePath())) .withTopologyProperties(topologyProperties) .build(); Thread.sleep(100); fluxComponent.submitTopology(); kafkaComponent.writeMessages(Constants.INDEXING_TOPIC, inputMessages); List<Map<String, Object>> docs = cleanDocs(runner.process(getProcessor(inputMessages))); Assert.assertEquals(docs.size(), inputMessages.size());
// NOTE(review): fragment — the catch/finally of this try block is outside this view.
// Starts the runner, writes the input messages to the sensor topic, then gathers all
// results from the enrichment topic via an AllResultsProcessor.
try { runner.start(); kafkaComponent.writeMessages(sensorType, inputMessages); Processor allResultsProcessor = new AllResultsProcessor(inputMessages, Constants.ENRICHMENT_TOPIC); ProcessorResult<Set<JSONObject>> result = runner.process(allResultsProcessor);
/**
 * Readiness check: READY once every input message is accounted for, either as an
 * indexed document read from disk or as a message on the error topic.
 */
@Override
public ReadinessState process(ComponentRunner runner) {
  KafkaComponent kafka = runner.getComponent("kafka", KafkaComponent.class);
  try {
    docs = readDocsFromDisk(hdfsDir);
  } catch (IOException e) {
    throw new IllegalStateException("Unable to retrieve indexed documents.", e);
  }
  // All inputs landed on disk — done.
  if (docs.size() >= inputMessages.size()) {
    return ReadinessState.READY;
  }
  // Otherwise, the shortfall must be fully explained by errored messages.
  errors = kafka.readMessages(ERROR_TOPIC);
  boolean allAccountedFor =
      !errors.isEmpty() && errors.size() + docs.size() == inputMessages.size();
  return allAccountedFor ? ReadinessState.READY : ReadinessState.NOT_READY;
}
// NOTE(review): fragment — the catch/finally of this try block is outside this view.
// Starts the runner, writes the input messages to the sensor topic, and builds a
// KafkaProcessor that is satisfied once exactly 3 messages arrive on the output topic
// with no errors.
try { runner.start(); kafkaComponent.writeMessages(sensorType, inputMessages); KafkaProcessor<Map<String, List<JSONObject>>> kafkaProcessor = getKafkaProcessor( parserConfig.getOutputTopic(), parserConfig.getErrorTopic(), kafkaMessageSet -> kafkaMessageSet.getMessages().size() == 3 && kafkaMessageSet.getErrors().isEmpty());
/**
 * Readiness check: NOT_READY until the Elasticsearch index exists; then READY once
 * every input message is either indexed or present on the error topic.
 */
@Override
public ReadinessState process(ComponentRunner runner) {
  ElasticSearchComponent es = runner.getComponent("search", ElasticSearchComponent.class);
  KafkaComponent kafka = runner.getComponent("kafka", KafkaComponent.class);
  // Nothing to check until the index has been created.
  if (!es.hasIndex(index)) {
    return ReadinessState.NOT_READY;
  }
  try {
    docs = es.getAllIndexedDocs(index, testSensorType + "_doc");
  } catch (IOException e) {
    throw new IllegalStateException("Unable to retrieve indexed documents.", e);
  }
  // All inputs indexed — done.
  if (docs.size() >= inputMessages.size()) {
    return ReadinessState.READY;
  }
  // Otherwise, the shortfall must be fully explained by errored messages.
  errors = kafka.readMessages(ERROR_TOPIC);
  if (!errors.isEmpty() && errors.size() + docs.size() == inputMessages.size()) {
    return ReadinessState.READY;
  }
  return ReadinessState.NOT_READY;
}
// NOTE(review): fragment — the catch/finally of this try block is outside this view.
// Starts the runner, writes the input messages to the sensor topic, and builds a
// KafkaProcessor over the parser's output and error topics (no acceptance predicate
// supplied in this overload).
try { runner.start(); kafkaComponent.writeMessages(sensorType, inputMessages); KafkaProcessor<Map<String, List<JSONObject>>> kafkaProcessor = getKafkaProcessor( parserConfig.getOutputTopic(), parserConfig.getErrorTopic());