public static boolean isDefaultFs(DistributedFileSystem fs) { URI uri = fs.getUri(); String scheme = uri.getScheme(); if (scheme == null) return true; // Assume that relative URI resolves to default FS. URI defaultUri = FileSystem.getDefaultUri(fs.getConf()); if (!defaultUri.getScheme().equalsIgnoreCase(scheme)) return false; // Mismatch. String defaultAuthority = defaultUri.getAuthority(), authority = uri.getAuthority(); if (authority == null) return true; // Schemes match, no authority - assume default. if (defaultAuthority == null) return false; // TODO: What does this even mean? if (!defaultUri.getHost().equalsIgnoreCase(uri.getHost())) return false; // Mismatch. int defaultPort = defaultUri.getPort(), port = uri.getPort(); if (port == -1) return true; // No port, assume default. // Note - this makes assumptions that are DFS-specific; DFS::getDefaultPort is not visible. return (defaultPort == -1) ? (port == NameNode.DEFAULT_PORT) : (port == defaultPort); } }
// NOTE(review): mid-method fragment — reads the filesystem URI and branches when
// no explicit port is present (URI.getPort() returns -1 for portless URIs).
URI uri = fs.getUri(); int port = uri.getPort(); if (port < 0) {
/**
 * Per-test initialization: caches the mini-cluster's FileSystem handle and
 * derives the base HDFS URI (with a trailing slash) used by the tests.
 */
@Before
public void setup() throws Exception {
  fs = dfsClusterRule.getDfscluster().getFileSystem();
  hdfsURI = fs.getUri() + "/";
}
/**
 * Boots a 3-node mini DFS cluster, points the WAL root directory at it, then
 * creates and starts the WAL-backed procedure store and recovers its lease.
 */
private void setupDFS() throws Exception {
  final Configuration configuration = UTIL.getConfiguration();
  final MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3);
  // WAL root lives under the cluster's default FS.
  final Path walRoot = new Path(configuration.get("fs.defaultFS"), "/tmp/wal");
  CommonFSUtils.setWALRootDir(configuration, walRoot);
  final Path walLogDir = new Path(new Path(cluster.getFileSystem().getUri()), "/test-logs");
  store = ProcedureTestingUtility.createWalStore(configuration, walLogDir);
  store.registerListener(stopProcedureListener);
  store.start(8);
  store.recoverLease();
}
// NOTE(review): mid-method fragment — writes the schema bytes, closes the stream,
// and builds an HDFS URL for the schema file from the mini-cluster's FS URI.
out.writeBytes(RECORD_SCHEMA); out.close(); String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";
// NOTE(review): mid-method fragment — checks whether the DFS URI is an HA logical URI
// (per HAUtilClient), triggers a namenode metasave, and reports where the file landed.
// isHaEnabled is presumably consumed by code past this fragment — confirm in full source.
DistributedFileSystem dfs = getDFS(); Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); dfs.metaSave(pathname); System.out.println("Created metasave file " + pathname + " in the log " + "directory of namenode " + dfs.getUri());
// NOTE(review): repeated mid-method fragments — each derives the DFS URI and asks
// HAUtilClient whether it is a logical (HA) URI; presumably used to branch between
// HA-aware and direct-namenode handling in the surrounding code — confirm in full source.
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) {
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
URI dfsUri = dfs.getUri();
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
URI dfsUri = dfs.getUri(); boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaAndLogicalUri) {
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
URI dfsUri = dfs.getUri(); boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
// NOTE(review): mid-method fragment — launches a MiniMRCluster against the mini DFS's
// URI (null racks/hosts = defaults), then exposes the job tracker address as a system
// property; the trailing catch belongs to an enclosing try not visible here.
String[] racks = null; String[] hosts = null; miniMR = new MiniMRCluster(numTaskTrackers, miniDFS.getFileSystem().getUri().toString(), numTaskTrackerDirectories, racks, hosts, new JobConf(conf)); JobConf jobConf = miniMR.createJobConf(new JobConf(conf)); System.out.println("-------" + jobConf.get("fs.defaultFS")); System.out.println("-------" + miniDFS.getFileSystem().getUri().toString()); System.setProperty("mapred.job.tracker", jobConf.get("mapred.job.tracker")); } catch (IOException e) {
/**
 * Per-test initialization: builds a 15-datanode mini cluster with the erasure
 * coding policy applied at "/", writes one striped test file, and points the
 * input-format configuration at that file.
 */
@Before
public void setup() throws Exception {
  final Configuration clusterConf = new HdfsConfiguration();
  clusterConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  // Name and edits dirs share one directory under the mini-cluster base dir.
  final String nameDirPath =
      new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
  clusterConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirPath);
  clusterConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirPath);
  clusterConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(clusterConf).numDataNodes(15).build();
  fs = cluster.getFileSystem();
  fs.enableErasureCodingPolicy(ecPolicy.getName());
  fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
  cluster.waitActive();
  conf = new Configuration();
  submitDir = new Path("/");
  testFile = new Path("/testfile");
  DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
  conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
}
@Override
public Path doCall(final Path p) throws IOException {
  // Fetch link metadata for the path; a null status means the path does not exist.
  final HdfsFileStatus status = dfs.getFileLinkInfo(getPathName(p));
  if (status == null) {
    throw new FileNotFoundException("File does not exist: " + p);
  }
  return status.makeQualified(getUri(), p).getSymlink();
}
@Override
/**
 * Boots a 3-node mini DFS cluster, then creates and starts the procedure WAL
 * store against a log directory on that cluster and recovers its lease.
 */
private void setupDFS() throws Exception {
  final MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3);
  final Path walLogDir = new Path(new Path(cluster.getFileSystem().getUri()), "/test-logs");
  store = ProcedureTestingUtility.createWalStore(UTIL.getConfiguration(), walLogDir);
  store.registerListener(stopProcedureListener);
  store.start(8);
  store.recoverLease();
}