/**
 * Moves every segment tar file from the staging output directory
 * ({@code <stagingDir>/output/segmentTar}) into the final output directory,
 * keeping each file's original name.
 *
 * @param fs filesystem holding both the staging and output directories
 * @throws Exception if listing or renaming fails
 */
protected void moveToOutputDirectory(FileSystem fs) throws Exception {
  LOGGER.info("Moving Segment Tar files from {} to: {}", _stagingDir + "/output/segmentTar", _outputDir);
  Path segmentTarDir = new Path(_stagingDir + "/output/segmentTar");
  for (FileStatus segmentStatus : fs.listStatus(segmentTarDir)) {
    Path source = segmentStatus.getPath();
    fs.rename(source, new Path(_outputDir, source.getName()));
  }
}
/**
 * Recursively collects the string form of every regular file path under
 * {@code path}. Hidden and side files are excluded by
 * {@code FileDump.HIDDEN_AND_SIDE_FILE_FILTER}; if {@code path} itself is a
 * regular file, it is returned as the single element.
 *
 * @param path file or directory to walk
 * @param conf configuration used to resolve the filesystem
 * @return paths of all matching regular files under {@code path}
 * @throws IOException if the filesystem cannot be listed
 */
public static Collection<String> getAllFilesInPath(final Path path, final Configuration conf)
    throws IOException {
  List<String> filesInPath = new ArrayList<>();
  FileSystem fs = path.getFileSystem(conf);
  FileStatus fileStatus = fs.getFileStatus(path);
  // isDirectory() replaces the deprecated FileStatus.isDir().
  if (fileStatus.isDirectory()) {
    FileStatus[] fileStatuses = fs.listStatus(path, FileDump.HIDDEN_AND_SIDE_FILE_FILTER);
    for (FileStatus fileInPath : fileStatuses) {
      if (fileInPath.isDirectory()) {
        // Recurse into subdirectories; the filter applies again on each level.
        filesInPath.addAll(getAllFilesInPath(fileInPath.getPath(), conf));
      } else {
        filesInPath.add(fileInPath.getPath().toString());
      }
    }
  } else {
    filesInPath.add(path.toString());
  }
  return filesInPath;
}
// NOTE(review): this closing brace was present in the original chunk and
// presumably closes the enclosing class — kept to preserve file structure.
}
/**
 * Recursively walks {@code fileStatus}, adding every non-directory entry to
 * {@code results}. Directory listings are restricted by {@code filter}.
 *
 * <p>Fix: the original recursed via a call that dropped {@code filter}, so the
 * filter was only applied at the first level; it is now propagated all the way
 * down.
 *
 * @param fs filesystem to list
 * @param fileStatus starting file or directory
 * @param filter filter applied to every directory listing
 * @param results output list receiving all leaf (non-directory) statuses
 * @throws IOException if a listing fails
 */
public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, PathFilter filter,
    List<FileStatus> results) throws IOException {
  // isDirectory() replaces the deprecated FileStatus.isDir().
  if (fileStatus.isDirectory()) {
    for (FileStatus stat : fs.listStatus(fileStatus.getPath(), filter)) {
      listStatusRecursively(fs, stat, filter, results);
    }
  } else {
    results.add(fileStatus);
  }
}
/** * @param fs * @param rootdir * @return All the table directories under <code>rootdir</code>. Ignore non table hbase folders such as * .logs, .oldlogs, .corrupt folders. * @throws IOException */ public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir) throws IOException { // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); List<Path> tabledirs = new ArrayList<>(dirs.length); for (FileStatus dir: dirs) { tabledirs.add(dir.getPath()); } return tabledirs; }
/**
 * Lists the string paths of all (non-hidden) bucket files directly under a
 * partition location.
 *
 * @param location partition directory to list
 * @param pGraphContext parse context supplying the Hadoop configuration
 * @return file paths under {@code location}; empty if the listing is null
 * @throws SemanticException wrapping any {@link IOException} from the listing
 */
public static List<String> getBucketFilePathsOfPartition(
    Path location, ParseContext pGraphContext) throws SemanticException {
  List<String> fileNames = new ArrayList<String>();
  try {
    FileSystem fs = location.getFileSystem(pGraphContext.getConf());
    // Use the Path directly; the original round-tripped it through
    // toString()/new Path(...), which is redundant.
    FileStatus[] files = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
    if (files != null) {
      for (FileStatus file : files) {
        fileNames.add(file.getPath().toString());
      }
    }
  } catch (IOException e) {
    // Preserve the cause so callers can see the underlying filesystem error.
    throw new SemanticException(e);
  }
  return fileNames;
}
/**
 * Per-test fixture: resets the MVCC instance and wipes every entry under the
 * filesystem root so each test starts from an empty filesystem.
 */
@Before
public void setUp() throws Exception {
  mvcc = new MultiVersionConcurrencyControl();
  for (FileStatus rootEntry : fs.listStatus(new Path("/"))) {
    // Recursive delete: entries may be directories.
    fs.delete(rootEntry.getPath(), true);
  }
}
/**
 * Verifies that after {@code setWorkingDirectory}, a file created with a
 * relative path resolves under the working directory.
 *
 * @throws Exception If failed.
 */
@Test
public void testSetWorkingDirectory() throws Exception {
  Path workDir = new Path("/tmp/nested/dir");
  Path relFile = new Path("file");

  fs.mkdirs(workDir);
  fs.setWorkingDirectory(workDir);

  // Create the file via its relative name; it should land in workDir.
  fs.create(relFile).close();

  String resolved = fs.getFileStatus(new Path(workDir, relFile)).getPath().toString();
  assertTrue(resolved.contains("/tmp/nested/dir/file"));
}
/**
 * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
 * snapshots.
 *
 * <p>The staging root is taken from {@code BackupRestoreConstants.CONF_STAGING_ROOT},
 * defaulting to the filesystem's working directory. Failures to delete an
 * individual entry are logged and skipped rather than aborting the sweep.
 *
 * @param conf configuration used to resolve the filesystem and staging root
 * @throws IOException exception
 */
protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  Path stagingDir = new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT,
      fs.getWorkingDirectory().toString()));
  FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
  if (files == null) {
    // Nothing staged (or directory missing) — nothing to clean.
    return;
  }
  for (FileStatus file : files) {
    if (file.getPath().getName().startsWith("exportSnapshot-")) {
      LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
      // '!' replaces the original '== false' comparison.
      if (!FSUtils.delete(fs, file.getPath(), true)) {
        LOG.warn("Can not delete " + file.getPath());
      }
    }
  }
}
// NOTE(review): this is an incomplete fragment — the for/if blocks opened here
// are closed outside the visible chunk, so the code is left byte-identical.
// Intent (from the visible lines): resolve <inputPath>/<intervalPath>, then
// spider the tree and collect, into a sorted set, every file path matching
// filePattern. TODO confirm how `paths` is consumed after the loop closes.
Path betaInput = new Path(inputPath); FileSystem fs = betaInput.getFileSystem(job.getConfiguration()); Set<String> paths = new TreeSet<>(); Pattern fileMatcher = Pattern.compile(filePattern); Path granularPath = new Path(betaInput, intervalPath); log.info("Checking path[%s]", granularPath); for (FileStatus status : FSSpideringIterator.spiderIterable(fs, granularPath)) { final Path filePath = status.getPath(); if (fileMatcher.matcher(filePath.toString()).matches()) { paths.add(filePath.toString());
/** * Calculate the target filePath of the jar file to be copied on HDFS, * given the {@link FileStatus} of a jarFile and the path of directory that contains jar. */ private Path calculateDestJarFile(FileStatus status, Path jarFileDir) { // SNAPSHOT jars should not be shared, as different jobs may be using different versions of it Path baseDir = status.getPath().getName().contains("SNAPSHOT") ? this.unsharedJarsDir : jarFileDir; // DistributedCache requires absolute path, so we need to use makeQualified. return new Path(this.fs.makeQualified(baseDir), status.getPath().getName()); }
protected Iterator<String> listKeys(Path path) throws IOException { ArrayList<String> ret = new ArrayList<String>(); FileStatus[] files = _fs.listStatus(new Path[]{path}); if (files != null) { for (FileStatus sub : files) { try { ret.add(sub.getPath().getName().toString()); } catch (IllegalArgumentException e) { //Ignored the file did not match LOG.debug("Found an unexpected file in {} {}", path, sub.getPath().getName()); } } } return ret.iterator(); }
/**
 * Flattens task output: moves the contents of each child directory of
 * {@code specPath} up into {@code specPath} itself, then deletes the
 * now-emptied child directory. No-op if {@code specPath} does not exist.
 *
 * @param specPath target directory whose children are collapsed into it
 * @param hconf configuration used to resolve the filesystem
 * @param log logger (kept for interface compatibility; unused here)
 * @throws IOException if listing, moving, or deleting fails
 * @throws HiveException propagated from the move helper
 */
private void moveUpFiles(Path specPath, Configuration hconf, Logger log)
    throws IOException, HiveException {
  FileSystem fs = specPath.getFileSystem(hconf);
  if (!fs.exists(specPath)) {
    return;
  }
  FileStatus[] taskOutputDirs = fs.listStatus(specPath);
  if (taskOutputDirs == null) {
    return;
  }
  for (FileStatus taskOutputDir : taskOutputDirs) {
    Utilities.renameOrMoveFiles(fs, taskOutputDir.getPath(), specPath);
    fs.delete(taskOutputDir.getPath(), true);
  }
}
/**
 * Sets the permission bits of {@code path} to {@code mode}.
 *
 * @param path filesystem path whose permissions are changed
 * @param mode POSIX-style permission bits
 * @throws IOException if the path cannot be stat'ed or the permission set fails
 */
@Override
public void setMode(String path, short mode) throws IOException {
  FileSystem hdfs = getFs();
  try {
    FileStatus fileStatus = hdfs.getFileStatus(new Path(path));
    hdfs.setPermission(fileStatus.getPath(), new FsPermission(mode));
  } catch (IOException e) {
    // Pass the exception as the last SLF4J argument so the full stack trace
    // is logged; the original logged only e.getMessage().
    LOG.warn("Fail to set permission for {} with perm {}", path, mode, e);
    throw e;
  }
}
/**
 * Computes the total size in bytes of all regular files under {@code path},
 * recursing into subdirectories.
 *
 * @param fs filesystem to query
 * @param path file or directory to measure
 * @return sum of file lengths under {@code path}; 0 if the listing is null
 * @throws IOException if a listing fails
 */
private long sizeOfPath(FileSystem fs, Path path) throws IOException {
  long size = 0;
  FileStatus[] statuses = fs.listStatus(path);
  if (statuses != null) {
    for (FileStatus status : statuses) {
      // isDirectory() replaces the deprecated FileStatus.isDir();
      // braces added around both branches.
      if (status.isDirectory()) {
        size += sizeOfPath(fs, status.getPath());
      } else {
        size += status.getLen();
      }
    }
  }
  return size;
}