/**
 * Interprets the passed string as a URI. In case of error it
 * assumes the specified string is a file.
 *
 * @param s the string to interpret
 * @return the resulting URI
 */
static URI stringAsURI(String s) throws IOException {
  URI u = null;
  // try to make a URI
  try {
    u = new URI(s);
  } catch (URISyntaxException e) {
    LOG.error("Syntax error in URI " + s
        + ". Please check hdfs configuration.", e);
  }

  // if URI is null or scheme is undefined, then assume it's file://
  if (u == null || u.getScheme() == null) {
    LOG.info("Assuming 'file' scheme for path " + s + " in configuration.");
    u = fileAsURI(new File(s));
  }
  return u;
}
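A minimal sketch (plain JDK, no Hadoop dependency) of why a bare path lands in the file:// fallback above: new URI(s) parses a scheme-less path without throwing, but reports a null scheme, so the common misconfiguration is caught by the scheme check rather than the exception handler. The class name and paths are hypothetical.

// Illustrative only; not from the Hadoop source.
import java.net.URI;
import java.net.URISyntaxException;

public class SchemeCheckDemo {
  public static void main(String[] args) throws URISyntaxException {
    URI withScheme = new URI("hdfs://namenode:8020/data/name");
    URI barePath = new URI("/data/name");       // parses fine, no exception
    System.out.println(withScheme.getScheme()); // prints "hdfs"
    System.out.println(barePath.getScheme());   // prints "null" -> file:// fallback
  }
}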
/**
 * Return the storage directory corresponding to the passed URI.
 *
 * @param uri URI of a storage directory
 * @return the matching storage directory, or null if none found
 */
public StorageDirectory getStorageDirectory(URI uri) {
  try {
    uri = Util.fileAsURI(new File(uri));
    Iterator<StorageDirectory> it = dirIterator();
    while (it.hasNext()) {
      StorageDirectory sd = it.next();
      if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
        return sd;
      }
    }
  } catch (IOException ioe) {
    LOG.warn("Error converting file to URI", ioe);
  }
  return null;
}
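A hedged usage sketch for the lookup above. Because the method re-normalizes its argument through Util.fileAsURI, callers can pass a URI built from a plain local path; the `storage` receiver and the path here are assumptions, not from the source.

// Hypothetical caller; "storage" stands in for an NNStorage instance in scope.
URI query = Util.fileAsURI(new File("/data/dfs/name1"));
StorageDirectory sd = storage.getStorageDirectory(query);
if (sd == null) {
  LOG.warn("No storage directory registered for " + query);
}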
/**
 * Return the list of locations being used for a specific purpose,
 * e.g. image or edit log storage.
 *
 * @param dirType purpose of the locations requested
 * @throws IOException if a storage root cannot be converted to a URI
 */
Collection<URI> getDirectories(NameNodeDirType dirType) throws IOException {
  ArrayList<URI> list = new ArrayList<>();
  Iterator<StorageDirectory> it =
      (dirType == null) ? dirIterator() : dirIterator(dirType);
  while (it.hasNext()) {
    StorageDirectory sd = it.next();
    try {
      list.add(Util.fileAsURI(sd.getRoot()));
    } catch (IOException e) {
      throw new IOException("Exception while processing StorageDirectory "
          + sd.getRoot(), e);
    }
  }
  return list;
}
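For illustration, how a caller might enumerate the image directories in use. NameNodeDirType.IMAGE is the enum constant for image storage; the `storage` receiver is an assumption here.

// Hypothetical caller; "storage" is an assumed NNStorage/FSImage instance.
Collection<URI> imageDirs = storage.getDirectories(NameNodeDirType.IMAGE);
for (URI dir : imageDirs) {
  LOG.info("Image storage location: " + dir);
}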
private Configuration getConf() throws IOException {
  String baseDir = MiniDFSCluster.getBaseDirectory();
  String nameDirs = fileAsURI(new File(baseDir, "name1")) + ","
      + fileAsURI(new File(baseDir, "name2"));

  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  return conf;
}
/**
 * secnn-6
 * Checkpoint where edits and image use the same directory.
 * @throws IOException
 */
public void testChkpointStartup2() throws IOException {
  LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
  // different name dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "edits")).toString());
  // same checkpoint dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());

  createCheckPoint();
  corruptNameNodeFiles();
  checkNameNodeFiles();
}
/**
 * Return the storage directory corresponding to the passed URI.
 *
 * @param uri URI of a storage directory
 * @return the matching storage directory, or null if none found
 */
StorageDirectory getStorageDirectory(URI uri) {
  try {
    uri = Util.fileAsURI(new File(uri));
    Iterator<StorageDirectory> it = dirIterator();
    while (it.hasNext()) {
      StorageDirectory sd = it.next();
      if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
        return sd;
      }
    }
  } catch (IOException ioe) {
    LOG.warn("Error converting file to URI", ioe);
  }
  return null;
}
/**
 * secnn-8
 * Checkpoint where edits and image use different directories.
 * @throws IOException
 */
public void testChkpointStartup1() throws IOException {
  LOG.info("--starting testChkpointStartup1 - different directories for checkpoint");
  // different name dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "edits")).toString());
  // different checkpoint dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt_edits")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());

  createCheckPoint();
  corruptNameNodeFiles();
  checkNameNodeFiles();
}
/**
 * Build a file: URI for the shared edits directory used by NameNodes
 * minNN through maxNN under the given base directory.
 */
public static URI formatSharedEditsDir(File baseDir, int minNN, int maxNN)
    throws IOException {
  return fileAsURI(new File(baseDir,
      "shared-edits-" + minNN + "-through-" + maxNN));
}
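A hedged usage sketch: since fileAsURI resolves through the local filesystem, for baseDir /tmp/dfs, minNN 0 and maxNN 2 the helper yields a URI like file:/tmp/dfs/shared-edits-0-through-2, which can then feed the HA shared-edits setting. The concrete values below are assumptions.

// Hypothetical values for illustration.
File baseDir = new File("/tmp/dfs");
URI sharedEdits = formatSharedEditsDir(baseDir, 0, 2);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedEdits.toString());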
protected void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());

  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");

  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
}
/**
 * Start the BackupNode.
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
  String dataDir = getTestingDir();
  // Set up testing environment directories
  hdfsDir = new File(dataDir, "backupNode");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  File currDir = new File(hdfsDir, "name2");
  File currDir2 = new File(currDir, "current");
  File currDir3 = new File(currDir, "image");
  assertTrue(currDir.mkdirs());
  assertTrue(currDir2.mkdirs());
  assertTrue(currDir3.mkdirs());

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name2")).toString());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${dfs.name.dir}");

  // Start BackupNode
  String[] args = new String[] { StartupOption.BACKUP.getName() };
  BackupNode bu = (BackupNode) NameNode.createNameNode(args, conf);
  return bu;
}
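The "${dfs.name.dir}" value above leans on Hadoop Configuration's variable expansion: ${...} references are resolved when a property is read, so the edits dir tracks whatever dfs.name.dir holds at that moment. A small sketch of the mechanism, using made-up keys:

// Illustrative only: Configuration expands ${...} references on get().
Configuration demo = new Configuration();
demo.set("demo.base", "file:/tmp/backup/name2");
demo.set("demo.alias", "${demo.base}");
System.out.println(demo.get("demo.alias")); // prints file:/tmp/backup/name2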
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  String dataDir = getTestingDir();
  hdfsDir = new File(dataDir, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes)
    throws IOException {
  StringBuilder sb = new StringBuilder();
  assert storageTypes == null || storageTypes.length == storagesPerDatanode;
  for (int j = 0; j < storagesPerDatanode; ++j) {
    File dir = getInstanceStorageDir(dnIndex, j);
    dir.mkdirs();
    if (!dir.isDirectory()) {
      throw new IOException("Mkdirs failed to create directory for DataNode "
          + dir);
    }
    sb.append(j > 0 ? "," : "")
        .append('[')
        .append(storageTypes == null ? StorageType.DEFAULT : storageTypes[j])
        .append(']')
        .append(fileAsURI(dir));
  }
  return sb.toString();
}
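For clarity, a hedged sketch of the value this builds. With two storages per DataNode and explicit types, the returned string resembles [DISK]file:/base/data/dn0-dir0,[SSD]file:/base/data/dn0-dir1, the typed-storage form accepted by dfs.datanode.data.dir; the index and storage types below are illustrative assumptions.

// Hypothetical usage; index and storage types chosen for illustration.
String dataDirs = makeDataNodeDirs(0,
    new StorageType[] { StorageType.DISK, StorageType.SSD });
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirs);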
/**
 * secnn-6
 * Checkpoint where edits and image use the same directory.
 * @throws IOException
 */
@Test
public void testChkpointStartup2() throws IOException {
  LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
  // different name dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "edits")).toString());
  // same checkpoint dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());

  createCheckPoint(1);
  corruptNameNodeFiles();
  checkNameNodeFiles();
}
@Before
public void setUp() throws IOException {
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
          "namenode")).toString());
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  fs = null;
  fsIsReady = true;
}
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());

  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");

  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
}
/**
 * Sets up a MiniDFSCluster, configures it to create one edits file,
 * and starts the DelegationTokenSecretManager (to get security op codes).
 *
 * @param dfsDir DFS directory (where to set up the MiniDFS cluster)
 */
public void startCluster(String dfsDir) throws IOException {
  // same as manageDfsDirs but only one edits file instead of two
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      Util.fileAsURI(new File(dfsDir, "namesecondary1")).toString());
  // block size for concat (file size must be a multiple of the block size)
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  // for security to work (fake JobTracker user)
  config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);

  cluster =
      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
  cluster.waitClusterUp();
}
/**
 * Start the namenode.
 */
public NameNode startNameNode(boolean withService) throws IOException {
  hdfsDir = new File(TEST_DATA_DIR, "dfs");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name1")).toString());
  FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
  if (withService) {
    NameNode.setServiceAddress(config, THIS_HOST);
  }
  config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
  DFSTestUtil.formatNameNode(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}