@Override
public Path getBucketPath(Clock clock, Path basePath, Tuple4<Integer, Long, Integer, String> element) {
    return basePath.suffix(String.valueOf(element.f0));
}
private Path getPendingPathFor(Path path) {
    return new Path(path.getParent(), pendingPrefix + path.getName()).suffix(pendingSuffix);
}
private Path getValidLengthPathFor(Path path) {
    return new Path(path.getParent(), validLengthPrefix + path.getName()).suffix(validLengthSuffix);
}
private Path getInProgressPathFor(Path path) {
    return new Path(path.getParent(), inProgressPrefix + path.getName()).suffix(inProgressSuffix);
}
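For reference: Path.suffix(String) appends to the final name component of a path, equivalent to new Path(path.getParent(), path.getName() + suffix). That is why the helpers above have to build a prefixed name through the parent explicitly before suffixing the result. A minimal standalone sketch; the sample path and the "_", ".pending", ".in-progress" markers are illustrative stand-ins for the prefix/suffix fields, whose values are not shown above:

import org.apache.hadoop.fs.Path;

public class SuffixDemo {
    public static void main(String[] args) {
        Path part = new Path("/data/out/part-0-0");

        // suffix() concatenates onto the last name component:
        // prints /data/out/part-0-0.pending
        System.out.println(part.suffix(".pending"));

        // A prefix has to go through the parent, as in the helpers above:
        // prints /data/out/_part-0-0.in-progress
        System.out.println(new Path(part.getParent(), "_" + part.getName()).suffix(".in-progress"));
    }
}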
public static Path getPeerReplayWALDir(Path remoteWALDir, String peerId) {
    return getPeerRemoteWALDir(remoteWALDir, peerId).suffix(REMOTE_WAL_REPLAY_SUFFIX);
}

// Overload taking the remote WAL directory as a String rather than a Path.
public static Path getPeerSnapshotWALDir(String remoteWALDir, String peerId) {
    return getPeerRemoteWALDir(remoteWALDir, peerId).suffix(REMOTE_WAL_SNAPSHOT_SUFFIX);
}

public static Path getPeerSnapshotWALDir(Path remoteWALDir, String peerId) {
    return getPeerRemoteWALDir(remoteWALDir, peerId).suffix(REMOTE_WAL_SNAPSHOT_SUFFIX);
}
@Override
public Path getPathForURI(Path jobConfDirPath, URI uri) {
    return super.getPathForURI(jobConfDirPath, uri).suffix(CONF_EXTENSION);
}
/** Sort calls this to generate the final merged output. */
private int mergePass(Path tmpDir) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("running merge pass");
    }
    Writer writer = cloneFileAttributes(outFile.suffix(".0"), outFile, null);
    RawKeyValueIterator r = merge(outFile.suffix(".0"), outFile.suffix(".0.index"), tmpDir);
    writeFile(r, writer);
    writer.close();
    return 0;
}
private Path getWALSplitDir(ServerName serverName) {
    Path logDir = new Path(this.rootDir, AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
    return logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
}
private void createCheckpoint(Path trashRoot, Date date) throws IOException {
    if (!fs.exists(new Path(trashRoot, CURRENT))) {
        return;
    }

    Path checkpointBase;
    synchronized (CHECKPOINT) {
        checkpointBase = new Path(trashRoot, CHECKPOINT.format(date));
    }
    Path checkpoint = checkpointBase;
    Path current = new Path(trashRoot, CURRENT);

    int attempt = 0;
    while (true) {
        try {
            fs.rename(current, checkpoint, Rename.NONE);
            LOG.info("Created trash checkpoint: " + checkpoint.toUri().getPath());
            break;
        } catch (FileAlreadyExistsException e) {
            if (++attempt > 1000) {
                throw new IOException("Failed to checkpoint trash: " + checkpoint);
            }
            checkpoint = checkpointBase.suffix("-" + attempt);
        }
    }
}
/**
 * Move a path to trash. The absolute path of the input path will be replicated under the trash directory.
 *
 * @param path {@link org.apache.hadoop.fs.FileSystem} path to move to trash.
 * @return true if the move to trash was done successfully.
 * @throws IOException
 */
@Override
public boolean moveToTrash(Path path) throws IOException {
    Path fullyResolvedPath = path.isAbsolute() ? path : new Path(this.fs.getWorkingDirectory(), path);
    Path targetPathInTrash = PathUtils.mergePaths(this.trashLocation, fullyResolvedPath);

    if (!this.fs.exists(targetPathInTrash.getParent())) {
        this.fs.mkdirs(targetPathInTrash.getParent());
    } else if (this.fs.exists(targetPathInTrash)) {
        // Avoid clobbering an earlier trash entry for the same path.
        targetPathInTrash = targetPathInTrash.suffix("_" + System.currentTimeMillis());
    }

    return this.fs.rename(fullyResolvedPath, targetPathInTrash);
}
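createCheckpoint and moveToTrash resolve naming collisions the same way: keep the intended location and append a distinguishing suffix (an attempt counter or a timestamp). A hypothetical helper distilling that idiom; firstFreePath does not exist in either project:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CollisionNaming {
    // Suffix "-1", "-2", ... onto the base name until no file with that name exists.
    static Path firstFreePath(FileSystem fs, Path base) throws IOException {
        Path candidate = base;
        int attempt = 0;
        while (fs.exists(candidate)) {
            candidate = base.suffix("-" + (++attempt));
        }
        return candidate;
    }
}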
private boolean containMetaWals(ServerName serverName) throws IOException {
    Path logDir = new Path(master.getWALRootDir(), AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
    Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
    Path checkDir = master.getFileSystem().exists(splitDir) ? splitDir : logDir;
    return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0;
}
private void corrupt(FileStatus file) throws IOException {
    LOG.info("Corrupt " + file);
    Path tmpFile = file.getPath().suffix(".tmp");
    // Remove the last byte to make the trailer corrupted.
    try (FSDataInputStream in = fs.open(file.getPath());
         FSDataOutputStream out = fs.create(tmpFile)) {
        ByteStreams.copy(ByteStreams.limit(in, file.getLen() - 1), out);
    }
    fs.delete(file.getPath(), false);
    fs.rename(tmpFile, file.getPath());
}
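corrupt() follows the common stage-then-swap idiom: write the modified bytes next to the target under a ".tmp" suffix, then replace the original. A hedged standalone sketch of the same pattern; rewriteInPlace and its signature are invented for illustration:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class StageAndSwap {
    // Stage new contents under target + ".tmp", then swap them into place.
    static void rewriteInPlace(FileSystem fs, Path target, byte[] contents) throws IOException {
        Path tmp = target.suffix(".tmp");
        try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale tmp file
            out.write(contents);
        }
        fs.delete(target, false);
        fs.rename(tmp, target);
    }
}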
private FileStatus[] getTrashContents() throws Exception {
    FileSystem fs = FileSystem.get(hiveConf);
    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(hiveConf, fs);
    return fs.globStatus(trashDir.suffix("/*"));
}
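Note that suffix() is plain string concatenation on the last path component, so appending "/*" yields a glob over the directory's children, which globStatus then expands. A minimal sketch of that trick; listChildren is a hypothetical helper:

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class GlobDemo {
    // Lists the direct children of a directory by globbing dir + "/*".
    static FileStatus[] listChildren(FileSystem fs, Path dir) throws IOException {
        return fs.globStatus(dir.suffix("/*"));
    }
}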
Table table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_2"), "q2");
transaction.getMetastore(schemaName)
        .createTable(session, table, privileges, Optional.empty(), false, EMPTY_TABLE_STATISTICS);

// Reuse the variable: redeclaring "table" in the same scope would not compile.
table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_3"), "q3");
transaction.getMetastore(schemaName)
        .createTable(session, table, privileges, Optional.empty(), true, EMPTY_TABLE_STATISTICS);

table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_4"), "q4");
transaction.getMetastore(schemaName)
        .createTable(session, table, privileges, Optional.empty(), true, EMPTY_TABLE_STATISTICS);
@Test
public void testDataDeletion() throws HiveException, IOException, TException {
    Database db = new Database();
    db.setName(dbName);
    hive.createDatabase(db);

    Table table = new Table(dbName, tableName);
    table.setDbName(dbName);
    table.setInputFormatClass(TextInputFormat.class);
    table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
    table.setPartCols(partCols);
    hive.createTable(table);
    table = hive.getTable(dbName, tableName);

    Path fakeTable = table.getPath().getParent().suffix(Path.SEPARATOR + "faketable");
    fs = fakeTable.getFileSystem(hive.getConf());
    fs.mkdirs(fakeTable);
    fs.deleteOnExit(fakeTable);

    Path fakePart = new Path(table.getDataLocation().toString(), "fakepartition=fakevalue");
    fs.mkdirs(fakePart);
    fs.deleteOnExit(fakePart);

    hive.dropTable(dbName, tableName, true, true);
    assertFalse(fs.exists(fakePart));
    hive.dropDatabase(dbName);
    assertFalse(fs.exists(fakeTable));
}