/**
 * Makes {@code jarDir} read-only (r-xr-xr-x for owner, group and others) so that
 * subsequent write attempts into it fail. If permissions cannot be changed
 * (e.g. non-POSIX file system), the test is skipped via Assume instead of failing.
 */
private void makeJarDirReadOnly() {
    try {
        // EnumSet is the idiomatic and more compact set type for enum constants
        // (vs. new HashSet<>(Arrays.asList(...))).
        Files.setPosixFilePermissions(jarDir, EnumSet.of(
                PosixFilePermission.OWNER_READ,
                PosixFilePermission.OWNER_EXECUTE,
                PosixFilePermission.GROUP_READ,
                PosixFilePermission.GROUP_EXECUTE,
                PosixFilePermission.OTHERS_READ,
                PosixFilePermission.OTHERS_EXECUTE));
    } catch (final Exception e) {
        // Not a test failure: e.g. Windows has no POSIX permissions — skip instead.
        Assume.assumeNoException(e);
    }
}
/**
 * Test stub: blocks the compaction thread on this object's monitor so the test
 * can observe a compaction "in progress", then returns an empty result once
 * another thread calls notify()/notifyAll().
 *
 * @param throughputController ignored by this stub
 * @param user ignored by this stub
 * @return an empty list — no files are actually compacted
 */
@Override
public List<Path> compact(ThroughputController throughputController, User user) throws IOException {
    try {
        isInCompact = true; // signal to the test that compaction has started
        synchronized (this) {
            this.wait(); // parked until the test releases us via notify()
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag before aborting via Assume so callers up
        // the stack still observe the interruption (standard InterruptedException hygiene).
        Thread.currentThread().interrupt();
        Assume.assumeNoException(e);
    }
    return new ArrayList<>();
}
@Test public void testRecursiveUploadForYarnS3n() throws Exception { try { Class.forName("org.apache.hadoop.fs.s3native.NativeS3FileSystem"); } catch (ClassNotFoundException e) { // not in the classpath, cannot run this test String msg = "Skipping test because NativeS3FileSystem is not in the class path"; log.info(msg); assumeNoException(msg, e); } testRecursiveUploadForYarn("s3n", "testYarn-s3n"); }
@Test public void testRecursiveUploadForYarnS3a() throws Exception { try { Class.forName("org.apache.hadoop.fs.s3a.S3AFileSystem"); } catch (ClassNotFoundException e) { // not in the classpath, cannot run this test String msg = "Skipping test because S3AFileSystem is not in the class path"; log.info(msg); assumeNoException(msg, e); } testRecursiveUploadForYarn("s3a", "testYarn-s3a"); } }
/**
 * Verifies that a failed cancel-with-savepoint does NOT cancel the job: the
 * savepoint directory is made unwritable so the savepoint must fail; the job
 * should then still be RUNNING and checkpoints should keep being triggered.
 */
@Test
public void testDoNotCancelJobIfSavepointFails() throws Exception {
    setUpWithCheckpointInterval(10L);

    try {
        // Strip all permissions so the savepoint write is guaranteed to fail.
        Files.setPosixFilePermissions(savepointDirectory, Collections.emptySet());
    } catch (IOException e) {
        // Cannot set up the failure condition (e.g. non-POSIX platform): skip.
        Assume.assumeNoException(e);
    }

    try {
        cancelWithSavepoint();
    } catch (Exception e) {
        // The failure must stem from the savepoint trigger, not a plain cancel.
        assertThat(ExceptionUtils.findThrowable(e, CheckpointTriggerException.class).isPresent(), equalTo(true));
    }

    // The job must have survived the failed cancel-with-savepoint.
    final JobStatus jobStatus = clusterClient.getJobStatus(jobGraph.getJobID()).get(60, TimeUnit.SECONDS);
    assertThat(jobStatus, equalTo(JobStatus.RUNNING));

    // assert that checkpoints are continued to be triggered
    // (presumably a checkpoint hook counts this latch down — TODO confirm against the test harness)
    triggerCheckpointLatch = new CountDownLatch(1);
    assertThat(triggerCheckpointLatch.await(60L, TimeUnit.SECONDS), equalTo(true));
}
/**
 * Creates the objects under test and connects to a locally running Oracle 11 XE
 * instance; the tests are skipped (via Assume) when no local database answers.
 */
@Before
public void setUp() {
    eventSchema = new EventSchema();
    testSubject = new Oracle11EventTableFactory();
    final String jdbcUrl = "jdbc:oracle:thin:@//localhost:1521/xe";
    try {
        connection = DriverManager.getConnection(jdbcUrl, "system", "oracle");
    } catch (SQLException e) {
        assumeNoException("Ignoring test. Machine does not have a local Oracle 11 instance running", e);
    }
}
/**
 * Builds the saga schema under test and connects to a locally running Oracle 11
 * XE instance; the tests are skipped (via Assume) when no local database answers.
 */
@Before
public void setUp() {
    // The schema must exist before the SQL-schema object that wraps it.
    sagaSchema = new SagaSchema();
    testSubject = new Oracle11SagaSqlSchema(sagaSchema);
    final String jdbcUrl = "jdbc:oracle:thin:@//localhost:1521/xe";
    try {
        connection = DriverManager.getConnection(jdbcUrl, "system", "oracle");
    } catch (SQLException e) {
        assumeNoException("Ignoring test. Machine does not have a local Oracle 11 instance running", e);
    }
}
/**
 * Builds a test cluster under build/cluster with the shared service fragment.
 * An invalid cluster definition skips the test via Assume instead of failing it.
 */
private static Cluster createCluster() {
    final File clusterDir = new File("build/cluster");
    try {
        return newCluster().in(clusterDir).withServiceFragment(RESOURCE_CONFIG).build();
    } catch (IllegalArgumentException e) {
        assumeNoException(e);
        return null; // unreachable: assumeNoException always throws
    }
}
/**
 * Starts a mini DFS cluster and applies an erasure-coding policy to the root
 * path via reflection (the EC API only exists on newer Hadoop versions), then
 * starts the mini HBase cluster with stream-capability enforcement enabled.
 * On Hadoop versions without EC the whole test is skipped via Assume.
 */
@BeforeClass
public static void setup() throws Exception {
    try {
        MiniDFSCluster cluster = util.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
        DistributedFileSystem fs = cluster.getFileSystem();

        // Reflection: these methods only exist on EC-capable Hadoop; NoSuchMethodException
        // below is the version probe that decides whether to skip.
        Method enableAllECPolicies =
                DFSTestUtil.class.getMethod("enableAllECPolicies", DistributedFileSystem.class);
        enableAllECPolicies.invoke(null, fs);

        DFSClient client = fs.getClient();
        Method setErasureCodingPolicy =
                DFSClient.class.getMethod("setErasureCodingPolicy", String.class, String.class);
        setErasureCodingPolicy.invoke(client, "/", "RS-3-2-1024k"); // try a built-in policy

        try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
            // If this comes back as having hflush then some test setup assumption is wrong.
            // Fail the test so that a developer has to look and triage
            assertFalse("Did not enable EC!", CommonFSUtils.hasCapability(out, HFLUSH));
        }
    } catch (NoSuchMethodException e) {
        // We're not testing anything interesting if EC is not available, so skip the rest of the test
        Assume.assumeNoException("Using an older version of hadoop; EC not available.", e);
    }

    util.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
    util.startMiniCluster();
}
/**
 * JUnit rule hook: probes a local Splunk instance with admin/admin credentials on
 * the configured port before the statement runs, skipping the tests (via Assume)
 * when connectivity fails. On success, publishes the port as the "splunk.port"
 * system property for downstream configuration.
 */
@Override
public Statement apply(Statement base, Description description) {
    try {
        final SplunkServer server = new SplunkServer();
        server.setUsername("admin");
        server.setPassword("admin");
        new SplunkServiceFactory(server).getService().open(this.port);
        System.setProperty("splunk.port", "" + this.port);
    } catch (Exception e) {
        logger.warn("Not executing tests because basic connectivity test failed");
        Assume.assumeNoException(e);
    }
    return super.apply(base, description);
}
/**
 * Resolves the JDK-internal PNG reader implementation class; the test is
 * skipped (via Assume) when this JRE does not ship that class.
 */
@Override
protected Class getReaderClass() {
    Class pngReader = null;
    try {
        pngReader = Class.forName("com.sun.imageio.plugins.png.PNGImageReader");
    } catch (ClassNotFoundException e) {
        assumeNoException(e); // not a failure: this JRE simply lacks the internal class
    }
    return pngReader;
}
/**
 * Resolves the JDK-internal JPEG reader implementation class; the test is
 * skipped (via Assume) when this JRE does not ship that class.
 */
@Override
protected Class getReaderClass() {
    Class jpegReader = null;
    try {
        jpegReader = Class.forName("com.sun.imageio.plugins.jpeg.JPEGImageReader");
    } catch (ClassNotFoundException e) {
        assumeNoException(e); // not a failure: this JRE simply lacks the internal class
    }
    return jpegReader;
}
/**
 * Runs the vertex program with proper traverser requirements and checks that
 * each reached vertex carries the expected multiset of path lengths in its
 * "pl" property. Providers that cannot execute this traversal form throw
 * VerificationException, which skips the test rather than failing it.
 */
@Test
@LoadGraphWith(MODERN)
public void shouldSucceedWithProperTraverserRequirements() throws Exception {
    final VertexProgramQ vp = VertexProgramQ.build().property("pl").create();

    // Expected path lengths per vertex name; entries are removed as they match,
    // so an empty map at the end means every expected vertex was seen exactly once.
    final Map<String, List<Integer>> expected = new HashMap<>();
    expected.put("vadas", Collections.singletonList(2));
    expected.put("lop", Arrays.asList(2, 2, 2, 3));
    expected.put("josh", Collections.singletonList(2));
    expected.put("ripple", Arrays.asList(2, 3));

    try {
        g.V().repeat(__.out()).emit().program(vp).dedup()
                .valueMap("name", "pl").forEachRemaining((Map<Object, Object> map) -> {
                    final String name = (String) ((List) map.get("name")).get(0);
                    final List<Integer> pathLengths = (List<Integer>) map.get("pl");
                    assertTrue(expected.containsKey(name));
                    final List<Integer> expectedPathLengths = expected.remove(name);
                    // Mutual containment: same elements regardless of order.
                    assertTrue(expectedPathLengths.containsAll(pathLengths));
                    assertTrue(pathLengths.containsAll(expectedPathLengths));
                });
        assertTrue(expected.isEmpty());
    } catch (VerificationException ex) {
        // Provider cannot run this traversal strategy: skip, don't fail.
        assumeNoException(ex);
    }
}
/**
 * Creates the JDK's reference JPEG {@link ImageReader} via the internal Sun SPI,
 * for comparison against the implementation under test. Any setup problem
 * (class missing, instantiation failure, null reader) skips the test via Assume.
 *
 * @return a fresh reference reader; never null (Assume aborts the test otherwise)
 */
private ImageReader createReferenceReader() {
    try {
        @SuppressWarnings("unchecked")
        Class<ImageReaderSpi> spiClass =
                (Class<ImageReaderSpi>) Class.forName("com.sun.imageio.plugins.jpeg.JPEGImageReaderSpi");
        // Class.newInstance() is deprecated (it propagates checked exceptions
        // undeclared); use the declared no-arg constructor instead.
        ImageReaderSpi provider = spiClass.getDeclaredConstructor().newInstance();
        ImageReader reader = provider.createReaderInstance();
        assumeNotNull(reader);
        return reader;
    } catch (Throwable t) {
        // Broad catch is deliberate: any setup problem means "skip", not "fail".
        assumeNoException(t);
    }
    return null; // unreachable: assumeNoException always throws
}
/**
 * Crawls with MD5 checksumming enabled and verifies the checksum stored for the
 * indexed document; skipped when this JVM has no MD5 provider.
 */
@Test
public void test_checksum_md5() throws Exception {
    try {
        MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
        assumeNoException(e); // no MD5 provider available on this JVM
    }
    Fs fs = startCrawlerDefinition().setChecksum("MD5").build();
    startCrawler(getCrawlerName(), fs, endCrawlerDefinition(getCrawlerName()), null);
    ESSearchResponse response =
            countTestHelper(new ESSearchRequest().withIndex(getCrawlerName()), 1L, null);
    for (ESSearchHit hit : response.getHits()) {
        Object checksum =
                extractFromPath(hit.getSourceAsMap(), Doc.FIELD_NAMES.FILE).get(File.FIELD_NAMES.CHECKSUM);
        assertThat(checksum, is("caa71e1914ecbcf5ae4f46cf85de8648"));
    }
}
/**
 * Crawls with SHA-1 checksumming enabled and verifies the checksum stored for
 * the indexed document; skipped when this JVM has no SHA-1 provider.
 */
@Test
public void test_checksum_sha1() throws Exception {
    try {
        MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        assumeNoException(e); // no SHA-1 provider available on this JVM
    }
    Fs fs = startCrawlerDefinition().setChecksum("SHA-1").build();
    startCrawler(getCrawlerName(), fs, endCrawlerDefinition(getCrawlerName()), null);
    ESSearchResponse response =
            countTestHelper(new ESSearchRequest().withIndex(getCrawlerName()), 1L, null);
    for (ESSearchHit hit : response.getHits()) {
        Object checksum =
                extractFromPath(hit.getSourceAsMap(), Doc.FIELD_NAMES.FILE).get(File.FIELD_NAMES.CHECKSUM);
        assertThat(checksum, is("81bf7dba781a1efbea6d9f2ad638ffe772ba4eab"));
    }
}
}
/**
 * Reverse-geocodes the given coordinates via apoc.spatial.reverseGeocode and
 * checks that every returned row carries non-null description/location/data
 * fields. Any provider error (e.g. exhausted quota) skips the test via Assume.
 */
private void testReverseGeocodeAddress(Object latitude, Object longitude) {
    try {
        testResult(db,
                "CALL apoc.spatial.reverseGeocode({latitude},{longitude})",
                map("latitude", latitude, "longitude", longitude),
                (result) -> result.forEachRemaining((record) -> {
                    assertNotNull(record.get("description"));
                    assertNotNull(record.get("location"));
                    assertNotNull(record.get("data"));
                }));
    } catch (Exception e) {
        Assume.assumeNoException("out of quota", e);
    }
}
}); } catch(Exception e) { Assume.assumeNoException("out of quota", e);
/**
 * JUnit rule hook: probes for an MQTT broker on tcp://localhost:&lt;port&gt; before
 * the statement runs, skipping the tests (via Assume) when no broker answers.
 * The probe client is always disconnected and closed afterwards.
 */
@Override
public Statement apply(Statement base, Description description) {
    assumeTrue(brokerOnline.get(port));
    String url = "tcp://localhost:" + port;
    IMqttClient client = null;
    try {
        client = new DefaultMqttPahoClientFactory().getClientInstance(url, "junit-" + System.currentTimeMillis());
        client.connect();
    } catch (MqttException e) {
        logger.warn("Tests not running because no broker on " + url + ":", e);
        assumeNoException(e);
    } finally {
        if (client != null) {
            try {
                client.disconnect();
                client.close();
            } catch (MqttException ignored) {
                // Best-effort cleanup of the probe client; failures here don't matter.
            }
        }
    }
    return super.apply(base, description);
}
/**
 * Runs the vertex program with traverser requirements disabled and asserts that
 * the "pl" property is never written. Providers that cannot execute this
 * traversal form throw VerificationException, which skips the test instead.
 */
@Test
@LoadGraphWith(MODERN)
public void shouldFailWithImproperTraverserRequirements() throws Exception {
    final VertexProgramQ program =
            VertexProgramQ.build().property("pl").useTraverserRequirements(false).create();
    try {
        g.V().repeat(__.out()).emit().program(program).dedup()
                .forEachRemaining((Vertex v) -> assertFalse(v.property("pl").isPresent()));
    } catch (VerificationException ex) {
        assumeNoException(ex); // provider cannot verify this traversal: skip
    }
}