/** * Given the configuration for this node, return a list of configurations * for the other nodes in an HA setup. * * @param myConf the configuration of this node * @return a list of configuration of other nodes in an HA setup */ public static List<Configuration> getConfForOtherNodes( Configuration myConf) { String nsId = DFSUtil.getNamenodeNameServiceId(myConf); List<String> otherNodes = getNameNodeIdOfOtherNodes(myConf, nsId); // Look up the address of the other NNs List<Configuration> confs = new ArrayList<Configuration>(otherNodes.size()); myConf = new Configuration(myConf); // unset independent properties for (String idpKey : HA_SPECIAL_INDEPENDENT_KEYS) { myConf.unset(idpKey); } for (String nn : otherNodes) { Configuration confForOtherNode = new Configuration(myConf); NameNode.initializeGenericKeys(confForOtherNode, nsId, nn); confs.add(confForOtherNode); } return confs; }
/**
 * @return a collection of all configured NN Kerberos principals.
 */
public static Set<String> getAllNnPrincipals(Configuration conf)
    throws IOException {
  Set<String> principals = new HashSet<String>();
  for (String nsId : DFSUtilClient.getNameServiceIds(conf)) {
    if (HAUtil.isHAEnabled(conf, nsId)) {
      // HA: one principal per configured NN id in the nameservice.
      for (String nnId : DFSUtilClient.getNameNodeIds(conf, nsId)) {
        principals.add(getNnPrincipal(conf, nsId, nnId));
      }
    } else {
      // Non-HA: a single NN with no NN id.
      principals.add(getNnPrincipal(conf, nsId, null));
    }
  }
  return principals;
}

/**
 * Resolve the Kerberos principal of a single NameNode, substituting the
 * NN's hostname into the configured principal pattern.
 *
 * @param conf the base configuration (not modified)
 * @param nsId the nameservice id
 * @param nnId the namenode id, or null for a non-HA nameservice
 * @return the server principal for the given NN
 * @throws IOException if the principal cannot be resolved
 */
private static String getNnPrincipal(Configuration conf, String nsId,
    String nnId) throws IOException {
  Configuration confForNn = new Configuration(conf);
  NameNode.initializeGenericKeys(confForNn, nsId, nnId);
  return SecurityUtil.getServerPrincipal(
      confForNn.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
      DFSUtilClient.getNNAddress(confForNn).getHostName());
}
/**
 * Create a SecondaryNameNode. Refuses to start in an HA-enabled cluster,
 * where the Standby NameNode performs checkpointing instead.
 *
 * @param conf the configuration to use
 * @param commandLineOpts parsed command-line options
 * @throws IOException if HA is enabled or initialization fails
 */
public SecondaryNameNode(Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  try {
    String nsId = DFSUtil.getSecondaryNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      throw new IOException(
          "Cannot use SecondaryNameNode in an HA cluster." +
          " The Standby Namenode will perform checkpointing.");
    }
    NameNode.initializeGenericKeys(conf, nsId, null);
    initialize(conf, commandLineOpts);
  } catch (IOException | HadoopIllegalArgumentException e) {
    // Release any partially-initialized resources before rethrowing.
    shutdown();
    throw e;
  }
}
/**
 * Roll back the file system state to the pre-upgrade checkpoint.
 *
 * @param conf the configuration to use
 * @param isConfirmationNeeded whether to prompt the user before rolling back
 * @return true if the rollback was aborted at the prompt; false once the
 *     rollback has been performed
 * @throws IOException if the rollback fails
 */
@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  // Fixed misplaced period: was "your recent.\nupgrade", which printed as
  // "...initiating your recent. upgrade. ..."
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your most recent\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;  // aborted: no state was changed
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
// Resolve the nameservice/NN-specific config keys for this NN id into
// their generic forms on this conf.
NameNode.initializeGenericKeys(conf, nsId, nnId);
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
    throws IOException {
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  final FSImage image = new FSImage(conf);
  final FSNamesystem namesystem = new FSNamesystem(conf, image, false);
  return image.recoverTransitionRead(
      StartupOption.METADATAVERSION, namesystem, null);
}
// Resolve ns/NN-specific keys before entering Metadata Recovery mode.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
// Below FORCE_ALL the operator is prompted before recovery proceeds.
if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
  if (!confirmPrompt("You have selected Metadata Recovery mode. " +
// Resolve the generic config keys for the target NN on its own conf copy.
NameNode.initializeGenericKeys(targetConf, nsId, nnId);
// Resolve ns/NN-specific keys, then verify formatting is permitted.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
checkAllowFormat(conf);
/**
 * Create a DFSZKFailoverController targeting the local NameNode.
 *
 * Requires HA to be enabled and the local NN id to be resolvable from
 * the configuration; otherwise a HadoopIllegalArgumentException is thrown.
 */
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }

  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    throw new HadoopIllegalArgumentException(
        "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.");
  }

  // Resolve generic keys and copy over the ZKFC-specific conf keys.
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

  NNHAServiceTarget localTarget =
      new NNHAServiceTarget(localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
// Resolve the nameservice and NN id for this node, then fold the
// suffixed config keys into their generic forms.
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
this.haContext = createHAContext();
try {
  // Resolve ns/NN-specific keys before initializing the NameNode.
  initializeGenericKeys(conf, nsId, namenodeId);
  initialize(getConf());
  try {
/** * Given the configuration for this node, return a Configuration object for * the other node in an HA setup. * * @param myConf the configuration of this node * @return the configuration of the other node in an HA setup */ public static Configuration getConfForOtherNode( Configuration myConf) { String nsId = DFSUtil.getNamenodeNameServiceId(myConf); String otherNn = getNameNodeIdOfOtherNode(myConf, nsId); // Look up the address of the active NN. Configuration confForOtherNode = new Configuration(myConf); NameNode.initializeGenericKeys(confForOtherNode, nsId, otherNn); return confForOtherNode; }
/**
 * Given the configuration for this node, return a Configuration object for
 * the other node in an HA setup.
 *
 * @param myConf the configuration of this node
 * @return the configuration of the other node in an HA setup
 */
public static Configuration getConfForOtherNode(
    Configuration myConf) {
  String nsId = DFSUtil.getNamenodeNameServiceId(myConf);
  String otherNn = getNameNodeIdOfOtherNode(myConf, nsId);

  // Look up the address of the active NN.
  // Clone the conf so the caller's configuration is left untouched.
  Configuration confForOtherNode = new Configuration(myConf);
  NameNode.initializeGenericKeys(confForOtherNode, nsId, otherNn);
  return confForOtherNode;
}
/**
 * Create a SecondaryNameNode. Refuses to start in an HA-enabled cluster,
 * where the Standby NameNode performs checkpointing instead.
 *
 * @param conf the configuration to use
 * @param commandLineOpts parsed command-line options
 * @throws IOException if HA is enabled or initialization fails
 */
public SecondaryNameNode(Configuration conf,
    CommandLineOpts commandLineOpts) throws IOException {
  try {
    String nsId = DFSUtil.getSecondaryNameServiceId(conf);
    if (HAUtil.isHAEnabled(conf, nsId)) {
      throw new IOException(
          "Cannot use SecondaryNameNode in an HA cluster." +
          " The Standby Namenode will perform checkpointing.");
    }
    NameNode.initializeGenericKeys(conf, nsId, null);
    initialize(conf, commandLineOpts);
  } catch (IOException | HadoopIllegalArgumentException e) {
    // Release any partially-initialized resources before rethrowing.
    shutdown();
    throw e;
  }
}
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @return the result of FSImage#recoverTransitionRead for METADATAVERSION
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
    throws IOException {
  // Fold ns/NN-suffixed keys into their generic forms first.
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
      StartupOption.METADATAVERSION, fs, null);
}
/**
 * Verify that configured directories exist, then print the metadata versions
 * of the software and the image.
 *
 * @param conf configuration to use
 * @return the result of FSImage#recoverTransitionRead for METADATAVERSION
 * @throws IOException
 */
private static boolean printMetadataVersion(Configuration conf)
    throws IOException {
  // Fold ns/NN-suffixed keys into their generic forms first.
  final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, namenodeId);
  final FSImage fsImage = new FSImage(conf);
  final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
  return fsImage.recoverTransitionRead(
      StartupOption.METADATAVERSION, fs, null);
}
@Test public void testGetOtherNNHttpAddress() throws IOException { // Use non-local addresses to avoid host address matching Configuration conf = getHAConf("ns1", "1.2.3.1", "1.2.3.2"); conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1"); // This is done by the NN before the StandbyCheckpointer is created NameNode.initializeGenericKeys(conf, "ns1", "nn1"); // Since we didn't configure the HTTP address, and the default is // 0.0.0.0, it should substitute the address from the RPC configuration // above. StandbyCheckpointer checkpointer = new StandbyCheckpointer(conf, fsn); assertEquals(new URL("http", "1.2.3.2", DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, ""), checkpointer.getActiveNNAddress()); }
/**
 * Create a DFSZKFailoverController for the local NameNode.
 *
 * Requires HA to be enabled and the local NN id to be resolvable from the
 * configuration; throws HadoopIllegalArgumentException otherwise.
 */
public static DFSZKFailoverController create(Configuration conf) {
  // Work on a copy with security configuration applied.
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  // Resolve generic keys and copy over the ZKFC-specific conf keys.
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
 * Federation is enabled.
 *
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");

  // fs.defaultFS is not yet derived from the RPC address.
  assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));

  // Neither a nameservice id nor a namenode id is configured.
  NameNode.initializeGenericKeys(conf, null, null);
  assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}