Tabnine Logo
ExitStatus
Code IndexAdd Tabnine to your IDE (free)

How to use
ExitStatus
in
org.apache.hadoop.hdfs.server.balancer

Best Java code snippets using org.apache.hadoop.hdfs.server.balancer.ExitStatus (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

 /**
  * Parse arguments and run the Mover.
  *
  * @param args command specific arguments.
  * @return exit code. 0 indicates success, non-zero indicates failure.
  */
 @Override
 public int run(String[] args) throws Exception {
  final long startTime = Time.monotonicNow();
  final Configuration conf = getConf();
  try {
   final Map<URI, List<Path>> map = getNameNodePathsToMove(conf, args);
   return Mover.run(map, conf);
  } catch (IOException e) {
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.IO_EXCEPTION.getExitCode();
  } catch (InterruptedException e) {
   // Restore the interrupt flag so callers further up the stack still see it.
   Thread.currentThread().interrupt();
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.INTERRUPTED.getExitCode();
  } catch (ParseException | IllegalArgumentException e) {
   // Both signal bad command-line input; they map to the same exit code.
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode();
  } finally {
   // Always report elapsed time, even when exiting on an error.
   System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date()));
   System.out.println("Mover took " + StringUtils.formatTime(Time.monotonicNow()-startTime));
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Parse arguments and then run Balancer.
 * 
 * @param args command specific arguments.
 * @return exit code. 0 indicates success, non-zero indicates failure.
 */
@Override
public int run(String[] args) {
 final long startTime = Time.monotonicNow();
 final Configuration conf = getConf();
 try {
  checkReplicationPolicyCompatibility(conf);
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  return Balancer.run(namenodes, parse(args), conf);
 } catch (IOException e) {
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.IO_EXCEPTION.getExitCode();
 } catch (InterruptedException e) {
  // Restore the interrupt flag so callers further up the stack still see it.
  Thread.currentThread().interrupt();
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.INTERRUPTED.getExitCode();
 } finally {
  // Always report elapsed time, even when exiting on an error.
  System.out.format("%-24s ",
    DateFormat.getDateTimeInstance().format(new Date()));
  System.out.println("Balancing took "
    + time2Str(Time.monotonicNow() - startTime));
 }
}
origin: org.apache.hadoop/hadoop-hdfs

    return r.getExitCode();
 return ExitStatus.SUCCESS.getExitCode();
} finally {
 for (NameNodeConnector nnc : connectors) {
origin: org.apache.hadoop/hadoop-hdfs

    } else if (r.exitStatus != ExitStatus.SUCCESS) {
     return r.exitStatus.getExitCode();
return ExitStatus.SUCCESS.getExitCode();
origin: ch.cern.hadoop/hadoop-hdfs

 /**
  * Parse arguments and run the Mover.
  *
  * @param args command specific arguments.
  * @return exit code. 0 indicates success, non-zero indicates failure.
  */
 @Override
 public int run(String[] args) throws Exception {
  final long startTime = Time.monotonicNow();
  final Configuration conf = getConf();
  try {
   final Map<URI, List<Path>> map = getNameNodePathsToMove(conf, args);
   return Mover.run(map, conf);
  } catch (IOException e) {
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.IO_EXCEPTION.getExitCode();
  } catch (InterruptedException e) {
   // Restore the interrupt flag so callers further up the stack still see it.
   Thread.currentThread().interrupt();
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.INTERRUPTED.getExitCode();
  } catch (ParseException | IllegalArgumentException e) {
   // Both signal bad command-line input; they map to the same exit code.
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode();
  } finally {
   // Always report elapsed time, even when exiting on an error.
   System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date()));
   System.out.println("Mover took " + StringUtils.formatTime(Time.monotonicNow()-startTime));
  }
 }
}
origin: io.prestosql.hadoop/hadoop-apache

 /**
  * Parse arguments and run the Mover.
  *
  * @param args command specific arguments.
  * @return exit code. 0 indicates success, non-zero indicates failure.
  */
 @Override
 public int run(String[] args) throws Exception {
  final long startTime = Time.monotonicNow();
  final Configuration conf = getConf();
  try {
   final Map<URI, List<Path>> map = getNameNodePathsToMove(conf, args);
   return Mover.run(map, conf);
  } catch (IOException e) {
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.IO_EXCEPTION.getExitCode();
  } catch (InterruptedException e) {
   // Restore the interrupt flag so callers further up the stack still see it.
   Thread.currentThread().interrupt();
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.INTERRUPTED.getExitCode();
  } catch (ParseException | IllegalArgumentException e) {
   // Both signal bad command-line input; they map to the same exit code.
   System.out.println(e + ".  Exiting ...");
   return ExitStatus.ILLEGAL_ARGUMENTS.getExitCode();
  } finally {
   // Always report elapsed time, even when exiting on an error.
   System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date()));
   System.out.println("Mover took " + StringUtils.formatTime(Time.monotonicNow()-startTime));
  }
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Parse arguments and then run Balancer.
 * 
 * @param args command specific arguments.
 * @return exit code. 0 indicates success, non-zero indicates failure.
 */
@Override
public int run(String[] args) {
 final long startTime = Time.monotonicNow();
 final Configuration conf = getConf();
 try {
  checkReplicationPolicyCompatibility(conf);
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  return Balancer.run(namenodes, parse(args), conf);
 } catch (IOException e) {
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.IO_EXCEPTION.getExitCode();
 } catch (InterruptedException e) {
  // Restore the interrupt flag so callers further up the stack still see it.
  Thread.currentThread().interrupt();
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.INTERRUPTED.getExitCode();
 } finally {
  // Always report elapsed time, even when exiting on an error.
  System.out.format("%-24s ",
    DateFormat.getDateTimeInstance().format(new Date()));
  System.out.println("Balancing took "
    + time2Str(Time.monotonicNow() - startTime));
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

   } else if (r != ExitStatus.IN_PROGRESS) {
    return r.getExitCode();
 return ExitStatus.SUCCESS.getExitCode();
} finally {
 for (NameNodeConnector nnc : connectors) {
origin: io.prestosql.hadoop/hadoop-apache

   } else if (r != ExitStatus.IN_PROGRESS) {
    return r.getExitCode();
 return ExitStatus.SUCCESS.getExitCode();
} finally {
 for (NameNodeConnector nnc : connectors) {
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Parse arguments and then run Balancer.
 * 
 * @param args command specific arguments.
 * @return exit code. 0 indicates success, non-zero indicates failure.
 */
@Override
public int run(String[] args) {
 final long startTime = Time.monotonicNow();
 final Configuration conf = getConf();
 try {
  checkReplicationPolicyCompatibility(conf);
  final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
  return Balancer.run(namenodes, parse(args), conf);
 } catch (IOException e) {
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.IO_EXCEPTION.getExitCode();
 } catch (InterruptedException e) {
  // Restore the interrupt flag so callers further up the stack still see it.
  Thread.currentThread().interrupt();
  System.out.println(e + ".  Exiting ...");
  return ExitStatus.INTERRUPTED.getExitCode();
 } finally {
  // Always report elapsed time, even when exiting on an error.
  System.out.format("%-24s ",
    DateFormat.getDateTimeInstance().format(new Date()));
  System.out.println("Balancing took "
    + time2Str(Time.monotonicNow() - startTime));
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

   } else if (r.exitStatus != ExitStatus.SUCCESS) {
    return r.exitStatus.getExitCode();
return ExitStatus.SUCCESS.getExitCode();
origin: io.prestosql.hadoop/hadoop-apache

   } else if (r.exitStatus != ExitStatus.SUCCESS) {
    return r.exitStatus.getExitCode();
return ExitStatus.SUCCESS.getExitCode();
origin: ch.cern.hadoop/hadoop-hdfs

/** Runs the Mover against every internal nameservice and expects SUCCESS. */
private void runMover() throws Exception {
 final Map<URI, List<Path>> pathsPerNameNode = Maps.newHashMap();
 // A null path list tells the Mover to process the whole namespace.
 for (URI nameNodeUri : DFSUtil.getInternalNsRpcUris(conf)) {
  pathsPerNameNode.put(nameNodeUri, null);
 }
 Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(),
   Mover.run(pathsPerNameNode, conf));
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Runs the Balancer once and verifies it terminated, i.e. exited with either
 * SUCCESS or NO_MOVE_PROGRESS.
 */
private void runBalancerCanFinish(Configuration conf,
  long totalUsedSpace, long totalCapacity) throws Exception {
 waitForHeartBeat(totalUsedSpace, totalCapacity);
 // start rebalancing
 final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 final int exitCode = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
 final boolean finished =
   exitCode == ExitStatus.SUCCESS.getExitCode()
     || exitCode == ExitStatus.NO_MOVE_PROGRESS.getExitCode();
 Assert.assertTrue(finished);
 waitForHeartBeat(totalUsedSpace, totalCapacity);
 LOG.info("Rebalancing with default factor.");
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Runs the Balancer with the given parameters and verifies the exit code:
 * NO_MOVE_PROGRESS when concurrent moves are disabled, SUCCESS otherwise
 * (in which case the cluster is also checked for convergence).
 */
private void runBalancer(Configuration conf,
  long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
  int excludedNodes) throws Exception {
 waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 // start rebalancing
 final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 final int exitCode = runBalancer(namenodes, p, conf);
 final boolean movesDisabled = conf.getInt(
   DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
   DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0;
 if (movesDisabled) {
  // With zero concurrent moves allowed, no progress can ever be made.
  assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), exitCode);
  return;
 }
 assertEquals(ExitStatus.SUCCESS.getExitCode(), exitCode);
 waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 LOG.info("  .");
 waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
}
origin: ch.cern.hadoop/hadoop-hdfs

   } else if (r.exitStatus != ExitStatus.SUCCESS) {
    return r.exitStatus.getExitCode();
   } else {
    if (iteration > 0) {
return ExitStatus.SUCCESS.getExitCode();
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Runs the Balancer with default parameters, expects SUCCESS, and waits for
 * the cluster to converge.
 */
private void runBalancer(Configuration conf,
  long totalUsedSpace, long totalCapacity) throws Exception {
 waitForHeartBeat(totalUsedSpace, totalCapacity);
 // start rebalancing
 final int exitCode = Balancer.run(
   DFSUtil.getInternalNsRpcUris(conf), Balancer.Parameters.DEFAULT, conf);
 assertEquals(ExitStatus.SUCCESS.getExitCode(), exitCode);
 waitForHeartBeat(totalUsedSpace, totalCapacity);
 LOG.info("Rebalancing with default factor.");
 waitForBalancer(totalUsedSpace, totalCapacity);
}

origin: ch.cern.hadoop/hadoop-hdfs

  "-p", "/foo/bar", "/foo2");
int result = Mover.run(map, test.conf);
Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), result);
origin: ch.cern.hadoop/hadoop-hdfs

Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
origin: ch.cern.hadoop/hadoop-hdfs

 int exitCode = tool.run(args); // start balancing
 assertEquals("Exit status code mismatches",
   ExitStatus.IO_EXCEPTION.getExitCode(), exitCode);
   ExitStatus.SUCCESS.getExitCode(), exitCode);
} finally {
 cluster.shutdown();
org.apache.hadoop.hdfs.server.balancer.ExitStatus

Javadoc

Exit status - The value associated with each exit status is directly mapped to the process's exit code on the command line.

Most used methods

  • getExitCode

Popular in Java

  • Start an intent from android
  • findViewById (Activity)
  • requestLocationUpdates (LocationManager)
  • addToBackStack (FragmentTransaction)
  • FileReader (java.io)
    A specialized Reader that reads from a file in the file system. All read requests made by calling me
  • HashMap (java.util)
    HashMap is an implementation of Map. All optional operations are supported.All elements are permitte
  • HashSet (java.util)
    HashSet is an implementation of a Set. All optional operations (adding and removing) are supported.
  • TimerTask (java.util)
    The TimerTask class represents a task to run at a specified time. The task may be run once or repeat
  • Collectors (java.util.stream)
  • JComboBox (javax.swing)
  • Top PhpStorm plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now