/** Instantiate a single datanode object, along with its secure resources.
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently.
 */
public static DataNode instantiateDataNode(String args[], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, getHostName(conf));
  return makeInstance(dataLocations, conf, resources);
}
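// A minimal caller sketch (an assumption for illustration, not part of the
// original source): it shows the contract stated in the javadoc above --
// instantiate the datanode first, then start it with runDatanodeDaemon().
// Passing null for SecureResources is assumed here for a non-secure cluster.
public static void startDataNodeSketch(String[] args) throws IOException {
  Configuration conf = new HdfsConfiguration();
  DataNode dn = DataNode.instantiateDataNode(args, conf, null);
  if (dn != null) {
    dn.runDatanodeDaemon();  // start the datanode's daemon threads
    dn.join();               // block until the datanode shuts down
  }
}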
/** Instantiate a single datanode object. This must be run by invoking
 * {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.
 */
public static DataNode instantiateDataNode(String args[],
    Configuration conf) throws IOException {
  if (conf == null)
    conf = new Configuration();
  if (!parseArguments(args, conf)) {
    printUsage();
    return null;
  }
  if (conf.get("dfs.network.script") != null) {
    LOG.error("This configuration for rack identification is not supported" +
        " anymore. RackID resolution is handled by the NameNode.");
    System.exit(-1);
  }
  String[] dataDirs = conf.getStrings("dfs.data.dir");
  dnThreadName = "DataNode: [" +
      StringUtils.arrayToString(dataDirs) + "]";
  return makeInstance(dataDirs, conf);
}
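// Hedged usage sketch for the older, non-secure variant above (an illustration,
// not taken from the original source): here runDatanodeDaemon is the static
// form that takes the DataNode instance, as referenced by the javadoc's
// {@link} tag.
public static void startDataNodeSketch(String[] args) throws IOException {
  Configuration conf = new Configuration();
  DataNode dn = DataNode.instantiateDataNode(args, conf);
  if (dn != null) {
    DataNode.runDatanodeDaemon(dn);  // spawn the daemon threads for this datanode
  }
}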
/** Instantiate a single datanode object. This must be run by invoking
 * {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.
 * @param resources Secure resources needed to run under Kerberos
 */
public static DataNode instantiateDataNode(String args[],
    Configuration conf, SecureResources resources) throws IOException {
  if (conf == null)
    conf = new Configuration();
  if (!parseArguments(args, conf)) {
    printUsage();
    System.exit(-2);
  }
  if (conf.get("dfs.network.script") != null) {
    LOG.error("This configuration for rack identification is not supported" +
        " anymore. RackID resolution is handled by the NameNode.");
    System.exit(-1);
  }
  String[] dataDirs = conf.getStrings(DATA_DIR_KEY);
  dnThreadName = "DataNode: [" +
      StringUtils.arrayToString(dataDirs) + "]";
  DefaultMetricsSystem.initialize("DataNode");
  return makeInstance(dataDirs, conf, resources);
}
/**
 * Process the given arg list as command line arguments to the DataNode
 * to make sure we get the expected result. If the expected result is
 * success then further validate that the parsed startup option is the
 * same as what was expected.
 *
 * @param expectSuccess
 * @param expectedOption
 * @param conf
 * @param arg
 */
private static void checkExpected(boolean expectSuccess,
    StartupOption expectedOption, Configuration conf, String... arg) {

  String[] args = new String[arg.length];
  int i = 0;
  for (String currentArg : arg) {
    args[i++] = currentArg;
  }

  boolean returnValue = DataNode.parseArguments(args, conf);
  StartupOption option = DataNode.getStartupOption(conf);
  assertThat(returnValue, is(expectSuccess));

  if (expectSuccess) {
    assertThat(option, is(expectedOption));
  }
}
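// A possible invocation from a test method (an illustrative assumption, not
// taken from the original test class): a successful parse also asserts the
// resulting StartupOption, while a failed parse only asserts the return value.
@Test
public void testParseArgumentsSketch() {
  Configuration conf = new HdfsConfiguration();
  checkExpected(true, StartupOption.REGULAR, conf, "-regular");
  checkExpected(true, StartupOption.ROLLBACK, conf, "-rollback");
  checkExpected(false, null, conf, "-unknownOption");
}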
/** Instantiate a single datanode object, along with its secure resources.
 * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
 * subsequently.
 */
public static DataNode instantiateDataNode(String args[], Configuration conf,
    SecureResources resources) throws IOException {
  if (conf == null)
    conf = new HdfsConfiguration();

  if (args != null) {
    // parse generic hadoop options
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
  }

  if (!parseArguments(args, conf)) {
    printUsage(System.err);
    return null;
  }
  Collection<StorageLocation> dataLocations = getStorageLocations(conf);
  UserGroupInformation.setConfiguration(conf);
  SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
      DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
  return makeInstance(dataLocations, conf, resources);
}