Refine search
/**
 * Gets the qualified root dir of the mob files.
 *
 * @param conf The current configuration.
 * @return The qualified root dir.
 * @throws IOException if the filesystem for the mob dir cannot be obtained
 */
public static Path getQualifiedMobRootDir(Configuration conf) throws IOException {
  // Mob data lives directly under the HBase root dir in a fixed subdirectory.
  Path rootDir = new Path(new Path(conf.get(HConstants.HBASE_DIR)), MobConstants.MOB_DIR_NAME);
  FileSystem fileSystem = rootDir.getFileSystem(conf);
  // Qualify with the fs URI so the returned path is absolute and scheme-complete.
  return rootDir.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
}
private URI initializeFromURI(String fromPath, boolean isLocal) throws IOException, URISyntaxException, SemanticException { URI fromURI = new Path(fromPath).toUri(); try { path = new String(URLCodec.decodeUrl( new Path(System.getProperty("user.dir"), fromPath).toUri().toString() .getBytes("US-ASCII")), "US-ASCII"); } catch (DecoderException de) { } else { URI defaultURI = FileSystem.get(conf).getUri(); fromScheme = defaultURI.getScheme(); fromAuthority = defaultURI.getAuthority(); URI defaultURI = FileSystem.get(conf).getUri(); fromAuthority = defaultURI.getAuthority();
private URI initializeFromURI(String fromPath, boolean isLocal) throws IOException, URISyntaxException { URI fromURI = new Path(fromPath).toUri(); if (isLocal) { path = URIUtil.decode( new Path(System.getProperty("user.dir"), fromPath).toUri().toString()); } else { path = new Path(new Path("/user/" + System.getProperty("user.name")), path).toString(); } else { URI defaultURI = FileSystem.get(conf).getUri(); fromScheme = defaultURI.getScheme(); fromAuthority = defaultURI.getAuthority(); URI defaultURI = FileSystem.get(conf).getUri(); fromAuthority = defaultURI.getAuthority();
/**
 * Returns the hbase root directory from configuration as a fully qualified {@link Path}.
 *
 * @param c configuration
 * @return {@link Path} to hbase root directory from configuration as a qualified Path.
 * @throws IOException e
 */
public static Path getRootDir(final Configuration c) throws IOException {
  final Path rootDir = new Path(c.get(HConstants.HBASE_DIR));
  final FileSystem fileSystem = rootDir.getFileSystem(c);
  // Qualify so callers always get a scheme+authority-complete path.
  return rootDir.makeQualified(fileSystem.getUri(), fileSystem.getWorkingDirectory());
}
/**
 * Returns the {@link ParallelRunner} associated with the given {@link FileSystem},
 * creating and registering one (keyed by the fs URI) on first use.
 *
 * <p>Uses {@link java.util.Map#computeIfAbsent} instead of the original
 * containsKey/put/get triple lookup: one map access, and atomic if the backing
 * map is concurrent.</p>
 *
 * @param fs the file system whose runner is requested
 * @return the cached or newly created ParallelRunner for {@code fs}
 */
private ParallelRunner getParallelRunner(FileSystem fs) {
  String uri = fs.getUri().toString();
  // Register the new runner with the closer so it is shut down with this instance.
  return this.parallelRunners.computeIfAbsent(uri,
      key -> this.parallelRunnerCloser.register(new ParallelRunner(this.parallelRunnerThreads, fs)));
}
@Override public int doWork() throws IOException, InterruptedException { if (remoteDir != null) { URI defaultFs = remoteDir.getFileSystem(conf).getUri(); FSUtils.setFsDefault(conf, new Path(defaultFs)); FSUtils.setRootDir(conf, remoteDir); LOG.debug("fs=" + fs.getUri().toString() + " root=" + rootDir);
/**
 * Creates a state store rooted at the given store URL.
 *
 * @param storeUrl URL of the store root; its scheme selects the file system
 * @param stateClass class of the state objects persisted by this store
 * @throws IOException if the file system for {@code storeUrl} cannot be obtained
 */
public FsStateStore(String storeUrl, Class<T> stateClass) throws IOException {
  this.conf = getConf(null);
  Path rootPath = new Path(storeUrl);
  this.fs = rootPath.getFileSystem(this.conf);
  // On non-atomic-rename schemes we must write through a temp file on put.
  String scheme = this.fs.getUri().getScheme();
  this.useTmpFileForPut = !FS_SCHEMES_NON_ATOMIC.contains(scheme);
  this.storeRootDir = rootPath.toUri().getPath();
  this.stateClass = stateClass;
}
/**
 * Get the StorageStatistics for this FileSystem object. These statistics are
 * per-instance. They are not shared with any other FileSystem object.
 *
 * <p>This is a default method which is intended to be overridden by
 * subclasses. The default implementation returns an empty storage statistics
 * object.</p>
 *
 * @return The StorageStatistics for this FileSystem instance.
 *         Will never be null.
 */
public StorageStatistics getStorageStatistics() {
  // Name the empty statistics after this file system's URI.
  String statisticsName = getUri().toString();
  return new EmptyStorageStatistics(statisticsName);
}
private List<String> getSeqFileContents(FileSystem fs, Path... seqFiles) throws IOException { ArrayList<String> result = new ArrayList<>(); for (Path seqFile : seqFiles) { Path file = new Path(fs.getUri().toString() + seqFile.toString()); SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file)); try { Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf); Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf); while (reader.next(key, value)) { String keyValStr = Arrays.asList(key, value).toString(); result.add(keyValStr); } } finally { reader.close(); } }// for return result; }
/**
 * Check whether the backup path exist
 *
 * @param backupStr backup
 * @param conf configuration
 * @return Yes if path exists
 * @throws IOException exception
 */
public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException {
  Path backupPath = new Path(backupStr);
  FileSystem fileSys = backupPath.getFileSystem(conf);
  if (LOG.isTraceEnabled()) {
    String targetFsScheme = fileSys.getUri().getScheme();
    LOG.trace("Schema of given url: " + backupStr + " is: " + targetFsScheme);
  }
  // Return the existence check directly instead of threading it through a flag;
  // also compute the scheme only when trace logging is enabled.
  return fileSys.exists(backupPath);
}
/** * Specify job should run in MR mode. */ public EmbeddedGobblin mrMode() throws IOException { this.sysConfigOverrides.put(ConfigurationKeys.JOB_LAUNCHER_TYPE_KEY, JobLauncherFactory.JobLauncherType.MAPREDUCE.name()); this.builtConfigMap.put(ConfigurationKeys.FS_URI_KEY, FileSystem.get(new Configuration()).getUri().toString()); this.builtConfigMap.put(ConfigurationKeys.MR_JOB_ROOT_DIR_KEY, "/tmp/EmbeddedGobblin_" + System.currentTimeMillis()); this.distributeJarsFunction = new Runnable() { @Override public void run() { // Add jars needed at runtime to the sys config so MR job launcher will add them to distributed cache. EmbeddedGobblin.this.sysConfigOverrides.put(ConfigurationKeys.JOB_JAR_FILES_KEY, Joiner.on(",").join(getPrioritizedDistributedJars())); } }; return this; }
@BeforeClass public static void setup() throws Exception { System.clearProperty("mapred.job.tracker"); String testDir = System.getProperty("test.tmp.dir", "./"); testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/"; workDir = new File(new File(testDir).getCanonicalPath()); FileUtil.fullyDelete(workDir); workDir.mkdirs(); warehousedir = new Path(System.getProperty("test.warehouse.dir")); HiveConf metastoreConf = new HiveConf(); metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString()); // Run hive metastore server MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf); // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on // the same server warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE)); // LocalJobRunner does not work with mapreduce OutputCommitter. So need // to use MiniMRCluster. MAPREDUCE-2350 Configuration conf = new Configuration(true); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100"); FileSystem fs = FileSystem.get(conf); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf)); mrConf = mrCluster.createJobConf(); initializeSetup(metastoreConf); warehousedir.getFileSystem(conf).mkdirs(warehousedir); }
/**
 * Hadoop File System reverse lookups paths with raw ip addresses The File
 * System URI always contains the canonical DNS name of the Namenode.
 * Subsequently, operations on paths with raw ip addresses cause an exception
 * since they don't match the file system URI.
 *
 * This routine solves this problem by replacing the scheme and authority of a
 * path with the scheme and authority of the FileSystem that it maps to.
 *
 * @param path Path to be canonicalized
 * @return Path with canonical scheme and authority
 */
public static Path getDnsPath(Path path, Configuration conf) throws MetaException {
  FileSystem fs = getFs(path, conf);
  // Fall back to the root path when the URI carries no path component.
  String pathComponent = path.toUri().getPath();
  if (StringUtils.isEmpty(pathComponent)) {
    pathComponent = "/";
  }
  // Rebuild with the fs's canonical scheme + authority, keeping the path part.
  String canonicalScheme = fs.getUri().getScheme();
  String canonicalAuthority = fs.getUri().getAuthority();
  return new Path(canonicalScheme, canonicalAuthority, pathComponent);
}
/**
 * Builds a rename commit step from the given builder, resolving source and
 * destination file systems lazily from configuration when the builder did not
 * supply them.
 *
 * @param builder populated builder carrying paths, optional file systems and flags
 * @throws IOException if a fallback file system must be created and fails
 */
private FsRenameCommitStep(Builder<? extends Builder<?>> builder) throws IOException {
  super(builder);
  this.srcPath = builder.srcPath;
  this.dstPath = builder.dstPath;
  // Only resolve a file system from config when the builder left it unset,
  // so no unnecessary getFileSystem() call is made.
  if (builder.srcFs != null) {
    this.srcFs = builder.srcFs;
  } else {
    this.srcFs = getFileSystem(
        this.props.getProp(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
  }
  this.srcFsUri = this.srcFs.getUri().toString();
  if (builder.dstFs != null) {
    this.dstFs = builder.dstFs;
  } else {
    this.dstFs = getFileSystem(
        this.props.getProp(ConfigurationKeys.FS_URI_KEY, ConfigurationKeys.LOCAL_FS_URI));
  }
  this.dstFsUri = this.dstFs.getUri().toString();
  this.overwrite = builder.overwrite;
}
@BeforeClass public static void setup() throws Exception { File workDir = handleWorkDir(); Path tmpDir = new Path(System.getProperty("test.tmp.dir", "target" + File.separator + "test" + File.separator + "tmp")); conf.set("yarn.app.mapreduce.am.staging-dir", tmpDir + File.separator + testName mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf)); mrConf = mrCluster.createJobConf();
/**
 * Returns the {@link ParallelRunner} cached for the given file system (keyed by
 * its URI plus home directory), creating and registering one on first use.
 *
 * <p>Uses {@link java.util.Map#computeIfAbsent} instead of the original
 * containsKey/put/get triple lookup: one map access, and atomic if the backing
 * map is concurrent.</p>
 *
 * @param fs file system whose runner is requested
 * @param closer closer that will shut the runner down
 * @param parallelRunnerThreads thread count for a newly created runner
 * @param parallelRunners cache of runners keyed by fs URI + home dir
 * @return the cached or newly created ParallelRunner for {@code fs}
 */
private static ParallelRunner getParallelRunner(FileSystem fs, Closer closer, int parallelRunnerThreads,
    Map<String, ParallelRunner> parallelRunners) {
  // Key on URI + home dir so the same scheme with different users is not conflated.
  String uriAndHomeDir = new Path(new Path(fs.getUri()), fs.getHomeDirectory()).toString();
  return parallelRunners.computeIfAbsent(uriAndHomeDir,
      key -> closer.register(new ParallelRunner(parallelRunnerThreads, fs)));
}
}
fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName(); assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS))); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);