public static boolean isHadoop23() {
    String version = org.apache.hadoop.util.VersionInfo.getVersion();
    return version.matches("\\b0\\.23\\..+\\b")
        || version.matches("\\b2\\..*")
        || version.matches("\\b3\\..*");
}
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);

    return maj > major || (maj == major && min >= minor);
}
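A minimal usage sketch, not taken from the original source: gating a version-dependent code path on the helper above. The method name truncateSupported is an assumption for illustration.

// Hypothetical caller: FileSystem#truncate only exists on Hadoop 2.7+
// (HDFS-3107), so callers can probe for it before taking that code path.
public static boolean truncateSupported() {
    return isMinHadoopVersion(2, 7);
}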
public AppConfig() {
    init();
    LOG.info("Using Hadoop version " + VersionInfo.getVersion());
}
/** * Return the "major" version of Hadoop currently on the classpath. * Releases in the 1.x and 2.x series are mapped to the appropriate * 0.x release series, e.g. 1.x is mapped to "0.20S" and 2.x * is mapped to "0.23". */ public static String getMajorVersion() { String vers = VersionInfo.getVersion(); String[] parts = vers.split("\\."); if (parts.length < 2) { throw new RuntimeException("Illegal Hadoop Version: " + vers + " (expected A.B.* format)"); } switch (Integer.parseInt(parts[0])) { case 2: case 3: return HADOOP23VERSIONNAME; default: throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers); } }
public static HadoopCompat getCompat() {
    String ver = VersionInfo.getVersion();
private static boolean isHadoopVersionGreaterThanOrEquals(final int major, final int minor) {
    final String[] splitVersion = VersionInfo.getVersion().split("\\.");
    final int[] versions = Arrays.stream(splitVersion).mapToInt(Integer::parseInt).toArray();
    // Compare (major, minor) pairs in order: 3.0 is >= 2.8, which the naive
    // "versions[0] >= major && versions[1] >= minor" check would get wrong.
    return versions[0] > major || (versions[0] == major && versions[1] >= minor);
}
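As a hedged sketch, the same comparison can be factored into a pure function of the version string, which is easier to exercise in tests; isAtLeast is a hypothetical name, not part of the original code.

// Hypothetical pure variant of the check above.
static boolean isAtLeast(String version, int major, int minor) {
    String[] parts = version.split("\\.");
    int maj = Integer.parseInt(parts[0]);
    int min = Integer.parseInt(parts[1]);
    return maj > major || (maj == major && min >= minor);
}

// e.g. isAtLeast("3.0.0", 2, 8) -> true, isAtLeast("2.7.3", 2, 8) -> false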
public static void main(String[] args) {
    LOG.debug("version: " + getVersion());
    System.out.println("Hadoop " + getVersion());
    System.out.println("Source code repository " + getUrl() + " -r " + getRevision());
    System.out.println("Compiled by " + getUser() + " on " + getDate());
    System.out.println("Compiled with protoc " + getProtocVersion());
    System.out.println("From source with checksum " + getSrcChecksum());
    System.out.println("This command was run using " + ClassUtil.findContainingJar(VersionInfo.class));
}
protected void dumpVersionInfo(PrintWriter out) {
    VersionInfo.writeTo(out);
    out.println("Hadoop " + org.apache.hadoop.util.VersionInfo.getVersion());
    out.println("Source code repository " + org.apache.hadoop.util.VersionInfo.getUrl()
        + " revision=" + org.apache.hadoop.util.VersionInfo.getRevision());
    out.println("Compiled by " + org.apache.hadoop.util.VersionInfo.getUser()
        + " on " + org.apache.hadoop.util.VersionInfo.getDate());
}
/**
 * Cap the number of locations in each split at SPLIT_MAX_NUM_LOCATIONS if it is
 * larger than SPLIT_MAX_NUM_LOCATIONS (MAPREDUCE-5186).
 */
private static List<InputSplit> cleanSplits(List<InputSplit> splits) throws IOException {
    // Note: String.compareTo is a lexicographic check and misorders two-digit
    // minors such as "2.10.0" (which sorts before "2.3.0"). The issue was fixed
    // in 2.3.0, so on newer versions the splits need no clean-up.
    if (VersionInfo.getVersion().compareTo("2.3.0") >= 0) {
        return splits;
    }
    List<InputSplit> cleanedSplits = Lists.newArrayList();
    for (int i = 0; i < splits.size(); i++) {
        CombineFileSplit oldSplit = (CombineFileSplit) splits.get(i);
        String[] locations = oldSplit.getLocations();
        Preconditions.checkNotNull(locations, "CombineFileSplit.getLocations() returned null");
        if (locations.length > SPLIT_MAX_NUM_LOCATIONS) {
            locations = Arrays.copyOf(locations, SPLIT_MAX_NUM_LOCATIONS);
        }
        cleanedSplits.add(new CombineFileSplit(oldSplit.getPaths(), oldSplit.getStartOffsets(),
            oldSplit.getLengths(), locations));
    }
    return cleanedSplits;
}
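The String.compareTo guard above misorders two-digit minor versions; a hedged sketch of a numeric comparison that could replace it follows. The name compareVersions is an assumption, not part of the original code.

// Hypothetical replacement: compare dotted version components numerically,
// treating non-numeric components (e.g. "alpha1") as zero.
static int compareVersions(String a, String b) {
    String[] pa = a.split("[.-]");
    String[] pb = b.split("[.-]");
    for (int i = 0; i < Math.max(pa.length, pb.length); i++) {
        int va = i < pa.length && pa[i].matches("\\d+") ? Integer.parseInt(pa[i]) : 0;
        int vb = i < pb.length && pb[i].matches("\\d+") ? Integer.parseInt(pb[i]) : 0;
        if (va != vb) {
            return Integer.compare(va, vb);
        }
    }
    return 0;
}

// compareVersions("2.10.0", "2.3.0") > 0, unlike "2.10.0".compareTo("2.3.0")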
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
    // Note: parsing only the first three characters misreads two-digit minors,
    // e.g. "2.10.0" becomes 2.1f and the test is skipped even though 2.10 >= 2.8.
    final String versionString = VersionInfo.getVersion();
    final String prefix = versionString.substring(0, 3);
    final float version = Float.parseFloat(prefix);
    Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);
    super.testMkdirsFailsForExistingFile();
}
/**
 * Generate the text for the startup/shutdown message of processes.
 * @param classname short classname of the class
 * @param hostname hostname
 * @param args Command arguments
 * @return a string to log.
 */
public static String createStartupShutdownMessage(String classname, String hostname,
                                                  String[] args) {
    return toStartupShutdownString("STARTUP_MSG: ", new String[] {
        "Starting " + classname,
        " host = " + hostname,
        " args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
        " version = " + VersionInfo.getVersion(),
        " classpath = " + System.getProperty("java.class.path"),
        " build = " + VersionInfo.getUrl() + " -r " + VersionInfo.getRevision()
            + "; compiled by '" + VersionInfo.getUser() + "' on " + VersionInfo.getDate(),
        " java = " + System.getProperty("java.version") }
    );
}
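A hedged usage sketch of the message builder above; the service name "NameNode" and the surrounding method are assumptions for illustration.

// Hypothetical caller: log the startup banner when a daemon boots.
public static void logStartupMessage(String[] args) {
    String hostname = "unknown";
    try {
        hostname = java.net.InetAddress.getLocalHost().getHostName();
    } catch (java.net.UnknownHostException e) {
        // keep the "unknown" placeholder if the hostname cannot be resolved
    }
    LOG.info(createStartupShutdownMessage("NameNode", hostname, args));
}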
private Response getHadoopVersion() throws IOException {
    String version = VersionInfo.getVersion();
    return JsonBuilder.create()
        .put("module", "hadoop")
        .put("version", version)
        .build();
}
@Test
public void testIsSameHdfs() throws IOException {
    String hadoopVersion = org.apache.hadoop.util.VersionInfo.getVersion();
    LOG.info("hadoop version is: " + hadoopVersion);
    boolean isHadoop3_0_0 = hadoopVersion.startsWith("3.0.0");
    if (isHadoop3_0_0) {
        // Hadoop 3.0.0-alpha1 through 3.0.0 GA changed the default NN port to 9820.
        // See HDFS-9427.
        testIsSameHdfs(9820);
    } else {
        // Hadoop releases before 3.0.0 default to port 8020, and Hadoop 3.0.1
        // changed it back to 8020. See HDFS-12990.
        testIsSameHdfs(8020);
    }
}
"HBase against Hadoop version " + org.apache.hadoop.util.VersionInfo.getVersion() + " or change your hadoop jars to start properly", t); } else {
@Test
public void testGetMethodReflectiveHadoop22() {
    assumeTrue(
        "Method getContainersFromPreviousAttempts is not supported by Hadoop: "
            + VersionInfo.getVersion(),
        isHadoopVersionGreaterThanOrEquals(2, 2));

    final RegisterApplicationMasterResponseReflector registerApplicationMasterResponseReflector =
        new RegisterApplicationMasterResponseReflector(LOG);

    final Method method = registerApplicationMasterResponseReflector.getMethod();
    assertThat(method, notNullValue());
}
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(org.apache.hadoop.util.VersionInfo.getVersion()), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(org.apache.hadoop.util.VersionInfo.getVersion()), jamonWriter);
public NamespaceInfo(int nsID, String clusterID, String bpID, long cT) {
    this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(), VersionInfo.getVersion());
}
@Override // ClientDatanodeProtocol
public DatanodeLocalInfo getDatanodeInfo() {
    long uptime = ManagementFactory.getRuntimeMXBean().getUptime() / 1000;
    return new DatanodeLocalInfo(VersionInfo.getVersion(), confVersion, uptime);
}
public NamespaceInfo(int nsID, String clusterID, String bpID, long cT, HAServiceState st) {
    this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(), VersionInfo.getVersion());
    this.state = st;
}