/**
 * Returns the {@link org.apache.hadoop.hbase.TableName} object representing
 * the table directory under path rootdir.
 *
 * @param tablePath path of table
 * @return {@link org.apache.hadoop.hbase.TableName} for table
 */
public static TableName getTableName(Path tablePath) {
  return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
}
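A minimal usage sketch (assuming the standard HBase layout, where the table directory's parent is the namespace directory; the path value is illustrative):

    Path tablePath = new Path("hdfs://nn/hbase/data/ns1/mytable");
    TableName tn = getTableName(tablePath);     // namespace "ns1", qualifier "mytable"
    System.out.println(tn.getNameAsString());   // prints "ns1:mytable"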
private Path getReplSyncUpPath(Path path) throws IOException {
  FileStatus[] rss = fs.listStatus(manager.getLogDir());
  for (FileStatus rs : rss) {
    Path p = rs.getPath();
    FileStatus[] logs = fs.listStatus(p);
    for (FileStatus log : logs) {
      p = new Path(p, log.getPath().getName());
      if (p.getName().equals(path.getName())) {
        LOG.info("Log " + p.getName() + " found at " + p);
        return p;
      }
    }
  }
  LOG.error("Didn't find path for: " + path.getName());
  return path;
}
public static Path backupOutputPath(FileSystem fs, Path outpath, JobConf job)
    throws IOException, HiveException {
  if (fs.exists(outpath)) {
    Path backupPath = new Path(outpath.getParent(), BACKUP_PREFIX + outpath.getName());
    Utilities.rename(fs, outpath, backupPath);
    return backupPath;
  } else {
    return null;
  }
}
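A small sketch of the backup-path construction above (the BACKUP_PREFIX value is an assumption for demonstration, not taken from the source):

    String BACKUP_PREFIX = "_backup.";           // assumed value
    Path outpath = new Path("/warehouse/tmp/out_dir");
    Path backupPath = new Path(outpath.getParent(), BACKUP_PREFIX + outpath.getName());
    System.out.println(backupPath);              // /warehouse/tmp/_backup.out_dir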
private Path renameCompletedFile(Path file) throws IOException {
  String fileName = file.toString();
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String newName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(archiveDirPath + Path.SEPARATOR + newName);
  LOG.info("Completed consuming file {}", fileNameMinusSuffix);
  if (!hdfs.rename(file, newFile)) {
    throw new IOException("Rename failed for file: " + file);
  }
  LOG.debug("Renamed file {} to {}", file, newFile);
  return newFile;
}
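A quick sketch of the suffix-stripping step (assuming inprogress_suffix holds a value like ".inprogress"; both the suffix and the file name are illustrative):

    String inprogressSuffix = ".inprogress";       // assumed value
    String fileName = "/data/in/events.log.inprogress";
    String minusSuffix = fileName.substring(0, fileName.indexOf(inprogressSuffix));
    String newName = new Path(minusSuffix).getName();
    System.out.println(newName);                   // "events.log"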
/**
 * Returns the corresponding input file in the 'sourceDirPath' for the specified lock file.
 * If no such file is found, returns null.
 */
private Path getFileForLockFile(Path lockFile, Path sourceDirPath) throws IOException {
  String lockFileName = lockFile.getName();
  // The file may still be in progress (carrying the in-progress suffix) ...
  Path dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName + inprogress_suffix);
  if (hdfs.exists(dataFile)) {
    return dataFile;
  }
  // ... or it may already have been renamed back to its original name.
  dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName);
  if (hdfs.exists(dataFile)) {
    return dataFile;
  }
  return null;
}
protected Iterator<String> listKeys(Path path) throws IOException {
  ArrayList<String> ret = new ArrayList<String>();
  FileStatus[] files = _fs.listStatus(new Path[]{path});
  if (files != null) {
    for (FileStatus sub : files) {
      try {
        ret.add(sub.getPath().getName());
      } catch (IllegalArgumentException e) {
        // Ignored: the file did not match the expected key format.
        LOG.debug("Found an unexpected file in {} {}", path, sub.getPath().getName());
      }
    }
  }
  return ret.iterator();
}
/**
 * Calculate the target filePath of the jar file to be copied on HDFS,
 * given the {@link FileStatus} of a jarFile and the path of the directory that contains the jar.
 */
private Path calculateDestJarFile(FileStatus status, Path jarFileDir) {
  // SNAPSHOT jars should not be shared, as different jobs may be using different versions of them.
  Path baseDir = status.getPath().getName().contains("SNAPSHOT")
      ? this.unsharedJarsDir : jarFileDir;
  // DistributedCache requires an absolute path, so we need to use makeQualified.
  return new Path(this.fs.makeQualified(baseDir), status.getPath().getName());
}
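A hedged illustration of the SNAPSHOT routing rule (directory values are made up; makeQualified resolves against the FileSystem's default URI, here a local FileSystem for demonstration):

    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path unshared = new Path("/user/joe/unshared");
    Path shared = new Path("/shared/jars");
    String jarName = "app-1.2-SNAPSHOT.jar";
    Path baseDir = jarName.contains("SNAPSHOT") ? unshared : shared;
    System.out.println(new Path(fs.makeQualified(baseDir), jarName));
    // file:/user/joe/unshared/app-1.2-SNAPSHOT.jar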
public String getStagingPartitionLocation() {
  Path originalPartitionLocation = getLocation();
  if (PartitionUtils.isUnixTimeStamp(originalPartitionLocation.getName())) {
    // Leaf directory is already a timestamp: replace it with this.timeStamp.
    return StringUtils.join(
        Arrays.asList(originalPartitionLocation.getParent().toString(), this.timeStamp), '/');
  } else {
    // Otherwise nest the timestamp under the existing location.
    return StringUtils.join(
        Arrays.asList(originalPartitionLocation.toString(), this.timeStamp), '/');
  }
}
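A sketch of the two branches above (it assumes PartitionUtils.isUnixTimeStamp treats an epoch-style leaf name such as "1500000000000" as a timestamp; all values are made up):

    Path tsLocation = new Path("/warehouse/db/table/1500000000000");
    String timeStamp = "1600000000000";
    // timestamp leaf: replace it with the new timestamp
    System.out.println(StringUtils.join(
        Arrays.asList(tsLocation.getParent().toString(), timeStamp), '/'));
    // -> /warehouse/db/table/1600000000000
    Path plainLocation = new Path("/warehouse/db/table/part=a");
    // non-timestamp leaf: nest the timestamp under it
    System.out.println(StringUtils.join(
        Arrays.asList(plainLocation.toString(), timeStamp), '/'));
    // -> /warehouse/db/table/part=a/1600000000000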
/**
 * @return a path with a writer for that path; the caller should close it.
 */
WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
  String tmpDirName = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
      HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path regionedits = getRegionSplitEditsPath(entry, fileBeingSplit.getPath().getName(),
      tmpDirName, conf);
  if (regionedits == null) {
    return null;
  }
  FileSystem walFs = FSUtils.getWALFileSystem(conf);
  if (walFs.exists(regionedits)) {
    LOG.warn("Found old edits file. It could be the result of a previous failed split attempt. "
        + "Deleting " + regionedits + ", length=" + walFs.getFileStatus(regionedits).getLen());
    if (!walFs.delete(regionedits, false)) {
      LOG.warn("Failed delete of old {}", regionedits);
    }
  }
  Writer w = createWriter(regionedits);
  LOG.debug("Creating writer path={}", regionedits);
  return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
}
private FileStatus[] listFilesInDir(Path path) throws IOException {
  // Skip hidden and temporary entries such as "_SUCCESS", "_temporary", and ".staging".
  return dataFileSystem.listStatus(path, p -> {
    String name = p.getName();
    return !name.startsWith("_") && !name.startsWith(".");
  });
}
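A quick sketch of what the filter admits (file names are illustrative; PathFilter is Hadoop's single-method filter interface, so a lambda works):

    PathFilter visibleOnly = p -> {
      String name = p.getName();
      return !name.startsWith("_") && !name.startsWith(".");
    };
    System.out.println(visibleOnly.accept(new Path("/out/part-00000")));  // true
    System.out.println(visibleOnly.accept(new Path("/out/_SUCCESS")));    // false
    System.out.println(visibleOnly.accept(new Path("/out/.crc")));        // false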
protected void moveToOutputDirectory(FileSystem fs) throws Exception {
  LOGGER.info("Moving Segment Tar files from {} to {}", _stagingDir + "/output/segmentTar", _outputDir);
  FileStatus[] segmentArr = fs.listStatus(new Path(_stagingDir + "/output/segmentTar"));
  for (FileStatus segment : segmentArr) {
    fs.rename(segment.getPath(), new Path(_outputDir, segment.getPath().getName()));
  }
}
private void markFileAsBad(Path file) {
  String fileName = file.toString();
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String originalName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(badFilesDirPath + Path.SEPARATOR + originalName);
  LOG.info("Moving bad file {} to {}. Processed it till offset {}. SpoutID = {}",
      originalName, newFile, tracker.getCommitPosition(), spoutId);
  try {
    // Rename can fail either by returning false or by throwing an exception;
    // normalize the false return value into an exception.
    if (!hdfs.rename(file, newFile)) {
      throw new IOException("Move failed for bad file: " + file);
    }
  } catch (IOException e) {
    LOG.warn("Error moving bad file: " + file + " to destination " + newFile
        + " SpoutId = " + spoutId, e);
  }
  closeReaderAndResetTrackers();
}
private void initReplLogger() {
  try {
    Path dbDumpPath = currentDatabaseIterator.dbLevelPath();
    FileSystem fs = dbDumpPath.getFileSystem(hiveConf);
    long numTables = getSubDirs(fs, dbDumpPath).length;
    long numFunctions = 0;
    Path funcPath = new Path(dbDumpPath, ReplicationSemanticAnalyzer.FUNCTIONS_ROOT_DIR_NAME);
    if (fs.exists(funcPath)) {
      numFunctions = getSubDirs(fs, funcPath).length;
    }
    String dbName = StringUtils.isBlank(dbNameToLoadIn) ? dbDumpPath.getName() : dbNameToLoadIn;
    replLogger = new BootstrapLoadLogger(dbName, dumpDirectory, numTables, numFunctions);
    replLogger.startLog();
  } catch (IOException e) {
    // Ignore the exception
  }
}
/**
 * Chooses one representative file from {@code baseOrDeltaDir}.
 * This assumes that all files in the dir are of the same type: either written by an acid
 * write or Load Data. This should always be the case for an Acid table.
 */
private static Path chooseFile(Path baseOrDeltaDir, FileSystem fs) throws IOException {
  if (!(baseOrDeltaDir.getName().startsWith(BASE_PREFIX)
      || baseOrDeltaDir.getName().startsWith(DELTA_PREFIX))) {
    throw new IllegalArgumentException(baseOrDeltaDir + " is not a base/delta");
  }
  FileStatus[] dataFiles = fs.listStatus(new Path[] {baseOrDeltaDir}, originalBucketFilter);
  return dataFiles != null && dataFiles.length > 0 ? dataFiles[0].getPath() : null;
}
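For orientation, a hedged sketch of the base/delta guard (assuming the usual Hive directory-name conventions BASE_PREFIX = "base_" and DELTA_PREFIX = "delta_"; directory names are illustrative):

    String BASE_PREFIX = "base_", DELTA_PREFIX = "delta_";   // assumed values
    for (String dir : new String[] {"base_0000005", "delta_0000005_0000010", "part-00000"}) {
      boolean isBaseOrDelta = dir.startsWith(BASE_PREFIX) || dir.startsWith(DELTA_PREFIX);
      System.out.println(dir + " -> " + isBaseOrDelta);       // true, true, false
    }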