/**
 * Recursively removes {@code stepOutputPath} when it is present on its file system.
 *
 * @param conf           job configuration used to resolve the target file system
 * @param stepOutputPath path to remove
 * @throws IOException if the file system cannot be reached or the check fails
 */
public static void deletePathIfExists(JobConf conf, String stepOutputPath) throws IOException {
    final Path target = new Path(stepOutputPath);
    final FileSystem fileSystem = target.getFileSystem(conf);
    if (fileSystem.exists(target)) {
        fileSystem.delete(target, true);
    }
}
/**
 * Recursively deletes {@code dir}, failing loudly if the directory is still present afterwards.
 *
 * @param dir    directory to remove
 * @param peerId peer identifier, used only to build the error message
 * @throws IOException if the directory could not be removed
 */
private void deleteDir(Path dir, String peerId) throws IOException {
    final boolean removed = fs.delete(dir, true);
    // A false return is only fatal when the directory actually survives the delete.
    if (!removed && fs.exists(dir)) {
        throw new IOException("Failed to remove dir " + dir + " for peer id=" + peerId);
    }
}
@Override public void createAlias(String storeName, String original, String alias) throws IOException { Path originalTablePath = new Path(new Path(this.storeRootDir, storeName), original); if (!this.fs.exists(originalTablePath)) { throw new IOException(String.format("State file %s does not exist for table %s", originalTablePath, original)); } Path aliasTablePath = new Path(new Path(this.storeRootDir, storeName), alias); Path tmpAliasTablePath = new Path(aliasTablePath.getParent(), new Path(TMP_FILE_PREFIX, aliasTablePath.getName())); // Make a copy of the original table as a work-around because // Hadoop version 1.2.1 has no support for symlink yet. HadoopUtils.copyFile(this.fs, originalTablePath, this.fs, aliasTablePath, tmpAliasTablePath, true, this.conf); }
/**
 * Asserts that {@code dir} does not yet exist, then creates it.
 *
 * NOTE(review): an already-existing dir raises an unchecked RuntimeException while a
 * failed mkdirs raises a checked IOException — asymmetric, but preserved so callers
 * that distinguish the two cases keep working.
 *
 * @throws IOException if the directory could not be created
 */
private static void checkDir(final FileSystem fs, final Path dir) throws IOException {
    final boolean alreadyThere = fs.exists(dir);
    if (alreadyThere) {
        throw new RuntimeException("The " + dir + " exists");
    }
    final boolean created = fs.mkdirs(dir);
    if (!created) {
        throw new IOException("Failed to create the " + dir);
    }
}
/**
 * Renames the operator output directory under {@code specPath} to its final name.
 *
 * @param specPath          spec path from which output/final paths are derived
 * @param ignoreNonExisting when true, a missing output path is silently tolerated
 * @throws IOException if the rename fails
 */
private void commitOutputPathToFinalPath(Path specPath, boolean ignoreNonExisting) throws IOException {
    final Path source = getOperatorOutputPath(specPath);
    final Path destination = getOperatorFinalPath(specPath);
    final FileSystem fileSystem = source.getFileSystem(hconf);
    // The existence probe is only made when a missing source is acceptable.
    if (ignoreNonExisting && !fileSystem.exists(source)) {
        return;
    }
    final boolean renamed = fileSystem.rename(source, destination);
    if (!renamed) {
        throw new IOException("Unable to rename output to: " + destination);
    }
}
/**
 * Test teardown: wipes and recreates the root test directory (when it exists),
 * then resets the environment-edge test helper.
 *
 * @throws Exception if the root dir cannot be deleted or recreated
 */
@After
public void cleanupFS() throws Exception {
    if (fs.exists(root)) {
        final boolean wiped = fs.delete(root, true);
        if (!wiped) {
            throw new IOException("Failed to delete root test dir: " + root);
        }
        final boolean recreated = fs.mkdirs(root);
        if (!recreated) {
            throw new IOException("Failed to create root test dir: " + root);
        }
    }
    EnvironmentEdgeManagerTestHelper.reset();
}
/**
 * Initializes the bulk-load staging machinery: RNG, user provider, UGI reference
 * counts, the file system handle, and the hidden base staging directory.
 *
 * @throws IOException if the file system or staging directory cannot be set up
 */
public void start() throws IOException {
    random = new SecureRandom();
    userProvider = UserProvider.instantiate(conf);
    ugiReferenceCounter = new ConcurrentHashMap<>();
    fs = FileSystem.get(conf);
    baseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
    final boolean deprecatedDirConfigured = conf.get("hbase.bulkload.staging.dir") != null;
    if (deprecatedDirConfigured) {
        LOG.warn("hbase.bulkload.staging.dir " + " is deprecated. Bulkload staging directory is "
            + baseStagingDir);
    }
    // Create the staging dir with restricted permissions if it is not there yet.
    if (!fs.exists(baseStagingDir)) {
        fs.mkdirs(baseStagingDir, PERM_HIDDEN);
    }
}
/**
 * Connects the stats aggregator to the single configured stats tmp directory.
 *
 * @param context stats-collection context supplying conf and tmp dirs
 * @return true when the stats directory exists; false on a missing dir or I/O failure
 */
@Override
public boolean connect(StatsCollectionContext context) {
    conf = context.getHiveConf();
    List<String> statsDirs = context.getStatsTmpDirs();
    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
    final Path statsDir = new Path(statsDirs.get(0));
    LOG.debug("Connecting to : " + statsDir);
    statsMap = new HashMap<String, Map<String,String>>();
    try {
        final FileSystem fileSystem = statsDir.getFileSystem(conf);
        return fileSystem.exists(statsDir);
    } catch (IOException e) {
        // A failed probe is reported as "not connected" rather than propagated.
        LOG.error("Failed to check if dir exists", e);
        return false;
    }
}
/**
 * Reads the "_metadata" file under {@code exportPath} and returns its contents
 * with line separators stripped.
 *
 * @param exportPath export directory expected to contain a "_metadata" file
 * @return the concatenated lines of the metadata file
 * @throws IOException if the file system or file cannot be read
 */
private static String getMetadataContents(String exportPath) throws IOException {
    Path mdFilePath = new Path(exportPath, "_metadata");
    FileSystem fs = FileSystem.get(mdFilePath.toUri(), hconf);
    // Fixed: the assertion message was missing a separating space ("...does not exist").
    assertTrue(mdFilePath.toUri().toString() + " does not exist", fs.exists(mdFilePath));
    StringBuilder sb = new StringBuilder();
    // Fixed: try-with-resources so the reader is closed even when readLine() throws;
    // the original only closed it on the happy path.
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(mdFilePath)))) {
        String line;
        while ((line = reader.readLine()) != null) {
            sb.append(line);
        }
    }
    return sb.toString();
}
/**
 * Shamelessly cloned from GenericOptionsParser.
 *
 * Resolves {@code newFile} against the local file system (when the path carries
 * no scheme) or the path's own file system, and returns the fully-qualified
 * path string, or {@code null} when the file does not exist.
 *
 * @throws IOException if the file system cannot be reached
 */
public static String realFile(String newFile, Configuration conf) throws IOException {
    final Path path = new Path(newFile);
    final URI pathURI = path.toUri();
    final FileSystem fs = pathURI.getScheme() == null
        ? FileSystem.getLocal(conf)
        : path.getFileSystem(conf);
    if (!fs.exists(path)) {
        return null;
    }
    return path.makeQualified(fs).toString();
}
/**
 * Flattens task output: moves the files of every child directory of
 * {@code specPath} up into {@code specPath} itself, then removes the children.
 *
 * @throws IOException   on file system failures
 * @throws HiveException propagated from the rename helper
 */
private void moveUpFiles(Path specPath, Configuration hconf, Logger log) throws IOException, HiveException {
    final FileSystem fs = specPath.getFileSystem(hconf);
    if (!fs.exists(specPath)) {
        return;
    }
    final FileStatus[] taskOutputDirs = fs.listStatus(specPath);
    if (taskOutputDirs == null) {
        return;
    }
    for (FileStatus dir : taskOutputDirs) {
        Utilities.renameOrMoveFiles(fs, dir.getPath(), specPath);
        fs.delete(dir.getPath(), true);
    }
}
/**
 * Opens a buffered writer on the export file-listing inside the export root.
 *
 * @return a writer positioned at the start of the freshly created listing file
 * @throws IllegalArgumentException if the listing file already exists
 * @throws IOException if the file cannot be created
 */
private BufferedWriter writer() throws IOException {
    Path exportToFile = new Path(exportRootDataDir, EximUtil.FILES_NAME);
    if (exportFileSystem.exists(exportToFile)) {
        // Fixed the error message grammar ("cant" -> "cannot").
        throw new IllegalArgumentException(
            exportToFile.toString() + " already exists and cannot export data from path(dir) "
                + dataPathList);
    }
    logger.debug("exporting data files in dir : " + dataPathList + " to " + exportToFile);
    return new BufferedWriter(
        new OutputStreamWriter(exportFileSystem.create(exportToFile))
    );
}
/**
 * Creates (if needed) the base temp dir under the current working directory and
 * returns a unique, UUID-named sub-path inside it.
 *
 * @param withDirCreated when true the unique sub-directory is created on disk;
 *                       otherwise only the path is returned
 * @return the unique temp directory path
 * @throws IOException if a required directory cannot be created
 */
private Path generateUniqTempDir(boolean withDirCreated) throws IOException {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    Path dir = new Path(fs.getWorkingDirectory(), NAME);
    // Fixed: mkdirs return values were silently ignored; a failed creation now
    // fails fast instead of handing back a path to a nonexistent directory.
    if (!fs.exists(dir) && !fs.mkdirs(dir)) {
        throw new IOException("Failed to create temp dir: " + dir);
    }
    Path newDir = new Path(dir, UUID.randomUUID().toString());
    if (withDirCreated && !fs.mkdirs(newDir)) {
        throw new IOException("Failed to create temp dir: " + newDir);
    }
    return newDir;
}
/**
 * Spins up a mini DFS cluster and verifies a basic mkdirs/delete round-trip on
 * a qualified path.
 *
 * Fixed: the cluster is now shut down in a finally block, so a failing
 * assertion no longer leaks the running mini cluster.
 */
@Test
public void testMiniDFSCluster() throws Exception {
    HBaseTestingUtility hbt = new HBaseTestingUtility();
    MiniDFSCluster cluster = hbt.startMiniDFSCluster(null);
    try {
        FileSystem dfs = cluster.getFileSystem();
        Path dir = new Path("dir");
        Path qualifiedDir = dfs.makeQualified(dir);
        LOG.info("dir=" + dir + ", qualifiedDir=" + qualifiedDir);
        assertFalse(dfs.exists(qualifiedDir));
        assertTrue(dfs.mkdirs(qualifiedDir));
        assertTrue(dfs.delete(qualifiedDir, true));
    } finally {
        hbt.shutdownMiniCluster();
    }
}
// Verifies that HiveInputBase.getJobWorkingDir creates the job working directory
// when getHiveTableDirCreateFirst() is enabled.
// NOTE(review): the mock stubs getHdfsWorkingDirectory() to "/tmp/kylin/", and the
// later KylinConfig.getInstanceFromEnv() call presumably resolves to that same
// thread-local mock inside the try-with-resources scope — confirm against
// setAndUnsetThreadLocalConfig semantics.
@Test public void TestGetJobWorkingDir() throws IOException { FileSystem fileSystem = FileSystem.get(new Configuration()); Path jobWorkDirPath = null; KylinConfig kylinConfig = mock(KylinConfig.class); try (SetAndUnsetThreadLocalConfig autoUnset = KylinConfig.setAndUnsetThreadLocalConfig(kylinConfig)) { when(kylinConfig.getHiveTableDirCreateFirst()).thenReturn(true); when(kylinConfig.getHdfsWorkingDirectory()).thenReturn("/tmp/kylin/"); DefaultChainedExecutable defaultChainedExecutable = mock(DefaultChainedExecutable.class); defaultChainedExecutable.setId(RandomUtil.randomUUID().toString()); String jobWorkingDir = HiveInputBase.getJobWorkingDir(defaultChainedExecutable, KylinConfig.getInstanceFromEnv().getHdfsWorkingDirectory()); jobWorkDirPath = new Path(jobWorkingDir); Assert.assertTrue(fileSystem.exists(jobWorkDirPath)); } finally { /* best-effort cleanup: remove the created dir on JVM exit */ if (jobWorkDirPath != null) fileSystem.deleteOnExit(jobWorkDirPath); } }
/**
 * Loads the Druid segment descriptors written under {@code segmentDescriptorDir}.
 * A missing directory yields an empty list (e.g. a create statement or an insert
 * of zero rows produced no segments).
 *
 * @throws IOException if the directory or descriptors cannot be read
 */
private List<DataSegment> fetchSegmentsMetadata(Path segmentDescriptorDir) throws IOException {
    final boolean descriptorDirExists =
        segmentDescriptorDir.getFileSystem(getConf()).exists(segmentDescriptorDir);
    if (descriptorDirExists) {
        return DruidStorageHandlerUtils.getCreatedSegments(segmentDescriptorDir, getConf());
    }
    LOG.info("Directory {} does not exist, ignore this if it is create statement or inserts of 0 rows,"
        + " no Druid segments to move, cleaning working directory {}",
        segmentDescriptorDir.toString(), getStagingWorkingDir().toString());
    return Collections.emptyList();
}