@Override
public Void run() throws IOException {
  FileSystem fs = renamedScriptPath.getFileSystem(conf);
  fs.rename(new Path(jstormMasterContext.scriptPath), renamedScriptPath);
  return null;
}
});
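// Hedged sketch of the enclosing call: the fragment above is the run() body of a
// PrivilegedExceptionAction, typically submitted via UserGroupInformation.doAs so the
// rename executes as a specific user. The 'ugi' variable below is an assumption for
// illustration, not taken from the original source.
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.doAs(new PrivilegedExceptionAction<Void>() {
  @Override
  public Void run() throws IOException {
    // ... rename logic as in the snippet above ...
    return null;
  }
});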
public static void deletePathIfExists(JobConf conf, String stepOutputPath) throws IOException {
  Path path = new Path(stepOutputPath);
  FileSystem fs = path.getFileSystem(conf);
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
}
static String getQualifiedPath(Configuration conf, Path path) throws IOException {
  if (path == null) {
    return null;
  }
  FileSystem fs = path.getFileSystem(conf);
  return fs.makeQualified(path).toString();
}
private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
  Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
  FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
  return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
}
/**
 * Check whether the backup path exists.
 * @param backupStr backup path string
 * @param conf configuration
 * @return true if the path exists
 * @throws IOException exception
 */
public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
  boolean isExist = false;
  Path backupPath = new Path(backupStr);
  FileSystem fileSys = backupPath.getFileSystem(conf);
  String targetFsScheme = fileSys.getUri().getScheme();
  if (LOG.isTraceEnabled()) {
    LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
  }
  if (fileSys.exists(backupPath)) {
    isExist = true;
  }
  return isExist;
}
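// Hypothetical usage of checkPathExist; the backup URL below is an assumption for
// illustration only.
Configuration conf = new Configuration();
if (!checkPathExist("hdfs://namenode:8020/backup/full_2020_01_01", conf)) {
  throw new IOException("Backup path does not exist");
}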
public static String runTask(String[] args) throws Exception {
  String workingPath = args[0];
  log.info("Deleting indexing hadoop working path [%s].", workingPath);
  Path p = new Path(workingPath);
  FileSystem fs = p.getFileSystem(new Configuration());
  fs.delete(p, true);
  return null;
}
protected static void removeFile() throws IOException {
  // 'file' and 'conf' are static fields of the enclosing class.
  FileSystem fs = file.getFileSystem(conf);
  if (fs.exists(file)) {
    fs.delete(file, true);
  }
}
@Test(expected = MetastoreException.class)
public void testInvalidPartitionKeyName()
    throws HiveException, AlreadyExistsException, IOException, MetastoreException {
  Table table = createTestTable();
  List<Partition> partitions = hive.getPartitions(table);
  assertEquals(2, partitions.size());
  // add a fake partition dir on fs
  fs = partitions.get(0).getDataLocation().getFileSystem(hive.getConf());
  Path fakePart = new Path(table.getDataLocation().toString(), "fakedate=2009-01-01/fakecity=sanjose");
  fs.mkdirs(fakePart);
  fs.deleteOnExit(fakePart);
  checker.checkMetastore(catName, dbName, tableName, null, new CheckResult());
}
public static String externalTableLocation(HiveConf hiveConf, String location) throws SemanticException {
  String baseDir = hiveConf.get(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname);
  Path basePath = new Path(baseDir);
  Path currentPath = new Path(location);
  String targetPathWithoutSchemeAndAuth = basePath.toUri().getPath() + currentPath.toUri().getPath();
  Path dataLocation;
  try {
    dataLocation = PathBuilder.fullyQualifiedHDFSUri(
        new Path(targetPathWithoutSchemeAndAuth),
        basePath.getFileSystem(hiveConf)
    );
  } catch (IOException e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
  }
  LOG.info("Incoming external table location: {} , new location: {}", location, dataLocation.toString());
  return dataLocation.toString();
}
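// Worked example with assumed values: if REPL_EXTERNAL_TABLE_BASE_DIR is "/replica/base"
// and the incoming location is "hdfs://source:8020/warehouse/ext_tab", then
// targetPathWithoutSchemeAndAuth becomes "/replica/base/warehouse/ext_tab", which is then
// fully qualified against the scheme and authority of basePath's FileSystem.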
/**
 * Given the target rootPath, check whether a common properties file already exists.
 * Returns false if one does.
 * @param rootPath the directory to scan
 * @param noExtFileName the base file name (without extension) to look for
 * @return true if no child of rootPath matches noExtFileName
 */
private boolean checkCommonPropExistance(Path rootPath, String noExtFileName) throws IOException {
  Configuration conf = new Configuration();
  FileStatus[] children = rootPath.getFileSystem(conf).listStatus(rootPath);
  for (FileStatus aChild : children) {
    if (aChild.getPath().getName().contains(noExtFileName)) {
      return false;
    }
  }
  return true;
}
/**
 * createTezDir creates a temporary directory in the scratchDir folder to
 * be used with Tez. Assumes scratchDir exists.
 */
private Path createTezDir(String sessionId) throws IOException {
  // Tez needs its own scratch dir (per session).
  Path tezDir = new Path(SessionState.get().getHdfsScratchDirURIString(), TEZ_DIR);
  tezDir = new Path(tezDir, sessionId);
  FileSystem fs = tezDir.getFileSystem(conf);
  FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION));
  fs.mkdirs(tezDir, fsPermission);
  // Make sure the path is normalized (we expect validation to pass since we just created it).
  tezDir = DagUtils.validateTargetDir(tezDir, conf).getPath();
  // Directory removal will be handled by cleanup at the SessionState level.
  return tezDir;
}
private void createDirIfNotExists(Path path) throws IOException {
  FileSystem fileSystem = path.getFileSystem(conf);
  try {
    if (!fileSystem.exists(path)) {
      fileSystem.mkdirs(path);
      fileSystem.setPermission(path, DIR_PERMISSION);
    }
  } catch (IOException e) {
    // Ignore this exception; if there is a real problem, it will surface when trying to read or write.
    LOG.warn("Error while trying to set permission: ", e);
  }
}
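// A possible simplification, assuming the exists() check is only an optimization:
// FileSystem.mkdirs succeeds when the directory already exists, so the check can be
// dropped at the cost of running setPermission on every call. The method name below is
// hypothetical.
private void createDirUnconditionally(Path path) throws IOException {
  FileSystem fileSystem = path.getFileSystem(conf);
  fileSystem.mkdirs(path);
  // setPermission is called explicitly because the permission passed to
  // mkdirs(Path, FsPermission) is subject to the client umask on HDFS.
  fileSystem.setPermission(path, DIR_PERMISSION);
}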
@Override
public void killAll() throws IOException {
  log.info("Deleting all segment files from hdfs dir [%s].", storageDirectory.toUri().toString());
  final FileSystem fs = storageDirectory.getFileSystem(config);
  fs.delete(storageDirectory, true);
}
private List<DataSegment> fetchSegmentsMetadata(Path segmentDescriptorDir) throws IOException {
  if (!segmentDescriptorDir.getFileSystem(getConf()).exists(segmentDescriptorDir)) {
    LOG.info("Directory {} does not exist; ignore this if it is a create statement or an insert of 0 rows,"
        + " no Druid segments to move, cleaning working directory {}",
        segmentDescriptorDir.toString(), getStagingWorkingDir().toString());
    return Collections.emptyList();
  }
  return DruidStorageHandlerUtils.getCreatedSegments(segmentDescriptorDir, getConf());
}
private void moveUpFiles(Path specPath, Configuration hconf, Logger log)
    throws IOException, HiveException {
  FileSystem fs = specPath.getFileSystem(hconf);
  if (fs.exists(specPath)) {
    FileStatus[] taskOutputDirs = fs.listStatus(specPath);
    if (taskOutputDirs != null) {
      for (FileStatus dir : taskOutputDirs) {
        Utilities.renameOrMoveFiles(fs, dir.getPath(), specPath);
        fs.delete(dir.getPath(), true);
      }
    }
  }
}
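// Illustration with an assumed layout: given specPath = /out containing task directories
// /out/_tmp.task_0 and /out/_tmp.task_1, each task directory's files are moved up into
// /out and the now-empty task directories are deleted.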
@Override
public boolean connect(StatsCollectionContext context) {
  conf = context.getHiveConf();
  List<String> statsDirs = context.getStatsTmpDirs();
  assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
  Path statsDir = new Path(statsDirs.get(0));
  LOG.debug("Connecting to : " + statsDir);
  statsMap = new HashMap<String, Map<String, String>>();
  try {
    return statsDir.getFileSystem(conf).exists(statsDir);
  } catch (IOException e) {
    LOG.error("Failed to check if dir exists", e);
    return false;
  }
}
@SuppressWarnings("unchecked") public static HadoopDruidIndexerConfig fromDistributedFileSystem(String path) { try { Path pt = new Path(path); FileSystem fs = pt.getFileSystem(new Configuration()); Reader reader = new InputStreamReader(fs.open(pt), StandardCharsets.UTF_8); return fromMap( HadoopDruidIndexerConfig.JSON_MAPPER.readValue(reader, JacksonUtils.TYPE_REFERENCE_MAP_STRING_OBJECT) ); } catch (Exception e) { throw Throwables.propagate(e); } }
@Override
public void killAll() throws IOException {
  log.info("Deleting all task logs from hdfs dir [%s].", config.getDirectory());
  Path taskLogDir = new Path(config.getDirectory());
  FileSystem fs = taskLogDir.getFileSystem(hadoopConfig);
  fs.delete(taskLogDir, true);
}
public static void clearWork(Configuration conf) {
  Path mapPath = getPlanPath(conf, MAP_PLAN_NAME);
  Path reducePath = getPlanPath(conf, REDUCE_PLAN_NAME);
  // If neither plan path has been initialized just return, nothing to clean.
  if (mapPath == null && reducePath == null) {
    return;
  }
  try {
    // Resolve the FileSystem from whichever plan path is set, and guard each delete,
    // so a query with only one of the two plans does not hit a NullPointerException.
    FileSystem fs = (mapPath != null ? mapPath : reducePath).getFileSystem(conf);
    if (mapPath != null && fs.exists(mapPath)) {
      fs.delete(mapPath, true);
    }
    if (reducePath != null && fs.exists(reducePath)) {
      fs.delete(reducePath, true);
    }
  } catch (Exception e) {
    LOG.warn("Failed to clean-up tmp directories.", e);
  } finally {
    // Where a single process works with multiple plans, we must clear
    // the cache before working with the next plan.
    clearWorkMapForConf(conf);
  }
}
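// Hedged usage sketch: clearWork is typically invoked after plan execution so stale plan
// files do not leak into the next query; 'jobConf' is an assumption for illustration.
try {
  // ... execute the plan ...
} finally {
  clearWork(jobConf);
}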