HdfsServerConstants$StartupOption.getName

How to use getName method in org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption

Best Java code snippets using org.apache.hadoop.hdfs.server.common.HdfsServerConstants$StartupOption.getName (showing top results out of 315)
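Each StartupOption constant wraps the command-line flag it represents, and getName() returns that flag string; every snippet below follows the same pattern of matching a raw argument against getName() with equalsIgnoreCase. A minimal sketch of that pattern, assuming the flag values ("-format", "-regular", "-rollback") carried by the enum constants in the Hadoop source:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class StartupOptionGetNameDemo {
 public static void main(String[] args) {
  // getName() yields the CLI flag tied to each constant,
  // e.g. "-format", "-regular", "-rollback".
  System.out.println(StartupOption.FORMAT.getName());
  System.out.println(StartupOption.REGULAR.getName());
  System.out.println(StartupOption.ROLLBACK.getName());

  // Typical use: case-insensitive matching of a command-line token.
  String cmd = "-ROLLBACK";
  if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
   System.out.println("matched " + StartupOption.ROLLBACK.getName());
  }
 }
}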

origin: org.apache.hadoop/hadoop-hdfs (also in io.prestosql.hadoop/hadoop-apache and ch.cern.hadoop/hadoop-hdfs)

/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
 StartupOption startOpt = StartupOption.REGULAR;
 int i = 0;
 if (args != null && args.length != 0) {
  String cmd = args[i++];
  if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
   LOG.error("-r, --rack arguments are not supported anymore. RackID " +
     "resolution is handled by the NameNode.");
   return false;
  } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
   startOpt = StartupOption.ROLLBACK;
  } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
   startOpt = StartupOption.REGULAR;
  } else {
   return false;
  }
 }
 setStartupOption(conf, startOpt);
 return (args == null || i == args.length);    // Fail if more than one cmd specified!
}
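This parser (apparently DataNode.parseArguments, judging by the rack-resolution message) accepts at most one startup command. A hypothetical same-package JUnit sketch of that contract; the test class and method names are assumptions, but the three outcomes follow directly from the code above:

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.conf.Configuration;
import org.junit.Test;

// Hypothetical test class placed in org.apache.hadoop.hdfs.server.datanode,
// so the package-private @VisibleForTesting method is reachable.
public class TestParseArgumentsContract {
 @Test
 public void atMostOneStartupCommand() {
  Configuration conf = new Configuration();
  // A single recognized option is accepted.
  assertTrue(DataNode.parseArguments(new String[] { "-rollback" }, conf));
  // An unrecognized option is rejected.
  assertFalse(DataNode.parseArguments(new String[] { "-bogus" }, conf));
  // A second command is rejected: i stops after the first token,
  // so i != args.length and the method returns false.
  assertFalse(DataNode.parseArguments(
    new String[] { "-rollback", "-regular" }, conf));
 }
}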
origin: org.apache.hadoop/hadoop-hdfs (also in ch.cern.hadoop/hadoop-hdfs and io.prestosql.hadoop/hadoop-apache)

+ "\" option if a rolling upgrade is already started;"
+ " or restart NameNode with the \""
+ StartupOption.UPGRADE.getName() + "\" option to start"
+ " a new upgrade.");
origin: org.apache.hadoop/hadoop-hdfs (also in io.prestosql.hadoop/hadoop-apache and ch.cern.hadoop/hadoop-hdfs)

for(int i=0; i < argsLen; i++) {
 String cmd = args[i];
 if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
  startOpt = StartupOption.FORMAT;
  for (i = i + 1; i < argsLen; i++) {
   if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
    i++;
    if (i >= argsLen) {
     // No value follows the flag.
     LOG.error("Must specify a valid cluster ID after the "
       + StartupOption.CLUSTERID.getName() + " flag");
     return null;
    }
    String clusterId = args[i];
    // Make sure an ID was given and not another flag.
    if (clusterId.isEmpty() ||
      clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) ||
      clusterId.equalsIgnoreCase(
        StartupOption.NONINTERACTIVE.getName())) {
     LOG.error("Must specify a valid cluster ID after the "
       + StartupOption.CLUSTERID.getName() + " flag");
     return null;
    }
    startOpt.setClusterId(clusterId);
   }
   if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
    startOpt.setForceFormat(true);
   }
   if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
    startOpt.setInteractiveFormat(false);
   }
  }
 } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
  startOpt = StartupOption.GENCLUSTERID;
 } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
  startOpt = StartupOption.REGULAR;
origin: ch.cern.hadoop/hadoop-hdfs (also in io.prestosql.hadoop/hadoop-apache and org.apache.hadoop/hadoop-hdfs)

"being upgraded but this NN has not been upgraded yet. You " +
"should restart this NameNode with the '" +
StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
"this NN in sync with the other.");
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Start the BackupNode
 */
public BackupNode startBackupNode(Configuration conf) throws IOException {
 // Set up testing environment directories
 hdfsDir = new File(TEST_DATA_DIR, "backupNode");
 if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
  throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
 }
 File currDir = new File(hdfsDir, "name2");
 File currDir2 = new File(currDir, "current");
 File currDir3 = new File(currDir, "image");
 
 assertTrue(currDir.mkdirs());
 assertTrue(currDir2.mkdirs());
 assertTrue(currDir3.mkdirs());
 
 conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
   fileAsURI(new File(hdfsDir, "name2")).toString());
 conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
   "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
 
 // Start BackupNode
 String[] args = new String [] { StartupOption.BACKUP.getName() };
 BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);
 return bu;
}

origin: ch.cern.hadoop/hadoop-hdfs

BackupNode startBackupNode(Configuration conf,
              StartupOption startupOpt,
              int idx) throws IOException {
 Configuration c = new HdfsConfiguration(conf);
 String dirs = getBackupNodeDir(startupOpt, idx);
 c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
 c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
   "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
 c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
   "127.0.0.1:0");
 c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
     "127.0.0.1:0");
 BackupNode bn = (BackupNode)NameNode.createNameNode(
   new String[]{startupOpt.getName()}, c);
 assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
 assertTrue(bn.getRole() + " must be in StandbyState",
       bn.getNamesystem().getHAState()
        .equalsIgnoreCase(HAServiceState.STANDBY.name()));
 return bn;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Stores the information related to a namenode in the cluster
 */
public static class NameNodeInfo {
 final NameNode nameNode;
 final Configuration conf;
 final String nameserviceId;
 final String nnId;
 StartupOption startOpt;
 NameNodeInfo(NameNode nn, String nameserviceId, String nnId,
   StartupOption startOpt, Configuration conf) {
  this.nameNode = nn;
  this.nameserviceId = nameserviceId;
  this.nnId = nnId;
  this.startOpt = startOpt;
  this.conf = conf;
 }
 
 public void setStartOpt(StartupOption startOpt) {
  this.startOpt = startOpt;
 }
}

origin: ch.cern.hadoop/hadoop-hdfs

static String getBackupNodeDir(StartupOption t, int idx) {
 return BASE_DIR + "name" + t.getName() + idx + "/";
}
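Because getName() includes the leading dash, the flag is embedded verbatim in the directory name. For instance, assuming BACKUP's flag string is "-backup":

// Hypothetical call: yields BASE_DIR + "name-backup1/"
String dir = getBackupNodeDir(StartupOption.BACKUP, 1);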
origin: io.prestosql.hadoop/hadoop-apache (also in ch.cern.hadoop/hadoop-hdfs and org.apache.hadoop/hadoop-hdfs)

public String getOptionString() {
 return StartupOption.ROLLINGUPGRADE.getName() + " "
   + StringUtils.toLowerCase(name());
}
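Note the two pieces here: getName() supplies the flag string of the outer StartupOption.ROLLINGUPGRADE constant, while name() is the enum constant's own identifier, lower-cased. So, assuming a STARTED constant and a "-rollingUpgrade" flag per the Hadoop source:

// RollingUpgradeStartupOption.STARTED.getOptionString()
// returns "-rollingUpgrade started"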
origin: ch.cern.hadoop/hadoop-hdfs

private static String[] createArgs(StartupOption operation) {
 if (operation == StartupOption.ROLLINGUPGRADE) {
  return new String[]{operation.getName(),
    operation.getRollingUpgradeStartupOption().name()};
 }
 String[] args = (operation == null ||
   operation == StartupOption.FORMAT ||
   operation == StartupOption.REGULAR) ?
     new String[] {} : new String[] {operation.getName()};
 return args;
}
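The resulting argument vectors, spelled out (flag strings assumed from the Hadoop source): null, FORMAT, and REGULAR produce an empty array; ROLLINGUPGRADE appends its sub-option; anything else is the bare flag:

// createArgs(null) => {}
// createArgs(StartupOption.UPGRADE) => { "-upgrade" }
// createArgs(StartupOption.ROLLINGUPGRADE) with sub-option STARTED
//   => { "-rollingUpgrade", "STARTED" }  (name() is upper-case here)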


Popular methods of HdfsServerConstants$StartupOption

  • getClusterId
  • getEnum
  • getRollingUpgradeStartupOption
  • setClusterId
  • setForceFormat
  • setInteractiveFormat
  • createRecoveryContext
  • getForce
  • getForceFormat
  • getInteractiveFormat
  • name
  • setForce
  • setRollingUpgradeStartupOption
  • toNodeRole
  • toString
  • valueOf
