/**
 * Checks that a cluster ID file exists in the HBase root directory
 * @param fs the root directory FileSystem
 * @param rootdir the HBase root directory in HDFS
 * @param wait how long to wait between retries
 * @return <code>true</code> if the file exists, otherwise <code>false</code>
 * @throws IOException if checking the FileSystem fails
 */
public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
    int wait) throws IOException {
  while (true) {
    try {
      Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
      return fs.exists(filePath);
    } catch (IOException ioe) {
      if (wait > 0) {
        LOG.warn("Unable to check cluster ID file in " + rootdir.toString()
            + ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
        try {
          Thread.sleep(wait);
        } catch (InterruptedException e) {
          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
        }
      } else {
        throw ioe;
      }
    }
  }
}
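// A minimal usage sketch for checkClusterIdExists above. The root-directory
// lookup via HConstants.HBASE_DIR is standard HBase; the 1000 ms retry
// interval is an illustrative assumption.
Configuration conf = HBaseConfiguration.create();
Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
FileSystem fs = rootDir.getFileSystem(conf);
if (!checkClusterIdExists(fs, rootDir, 1000)) {
  LOG.warn("No cluster ID file found under " + rootDir);
}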
public static String joinWithSeparator(Iterable<?> strings) {
  return org.apache.hadoop.util.StringUtils.join(TXN_WRITE_EVENT_FILE_SEPARATOR, strings);
}
/**
 * Get the list of input {@link Path}s for the map-reduce job.
 *
 * @param conf The configuration of the job
 * @return the list of input {@link Path}s for the map-reduce job.
 * @throws IOException if no input directory has been configured
 */
static Path[] getInputPaths(Configuration conf) throws IOException {
  String dirs = conf.get("mapred.input.dir");
  if (dirs == null) {
    throw new IOException("Configuration mapred.input.dir is not defined.");
  }
  String[] list = StringUtils.split(dirs);
  Path[] result = new Path[list.length];
  for (int i = 0; i < list.length; i++) {
    result[i] = new Path(StringUtils.unEscapeString(list[i]));
  }
  return result;
}
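// Sketch of why the unescape step in getInputPaths matters: Hadoop's
// StringUtils.split breaks only on unescaped commas, so a path that itself
// contains a comma must be stored escaped. The property value below is a
// hypothetical example.
Configuration conf = new Configuration();
conf.set("mapred.input.dir",
    StringUtils.escapeString("/data/a,b") + "," + StringUtils.escapeString("/data/c"));
// getInputPaths(conf) now yields two paths: /data/a,b and /data/c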
/**
 * Take an encoded string and decode it into an array of strings.
 */
public static String[] decodeArray(String s) {
  if (s == null) {
    return null;
  }
  String[] escaped = StringUtils.split(s);
  String[] plain = new String[escaped.length];
  for (int i = 0; i < escaped.length; ++i) {
    plain[i] = StringUtils.unEscapeString(escaped[i]);
  }
  return plain;
}
/**
 * Take an array of strings and encode it into one string.
 */
public static String encodeArray(String[] plain) {
  if (plain == null) {
    return null;
  }
  String[] escaped = new String[plain.length];
  for (int i = 0; i < plain.length; ++i) {
    if (plain[i] == null) {
      plain[i] = "";
    }
    escaped[i] = StringUtils.escapeString(plain[i]);
  }
  return StringUtils.arrayToString(escaped);
}
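// Round-trip sketch for the encode/decode pair above, assuming both methods
// live in the same class. A value containing a comma survives because
// escapeString/unEscapeString protect the separator.
String encoded = encodeArray(new String[] {"plain", "with,comma"});
String[] decoded = decodeArray(encoded);
// decoded is {"plain", "with,comma"}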
// Non-contiguous excerpts from an XLearning ApplicationMaster; elided code is
// marked with "// ..." and missing closing braces are reconstructed.
if (conf.get(XLearningConfiguration.XLEARNING_INPUT_STRATEGY,
    XLearningConfiguration.DEFAULT_XLEARNING_INPUT_STRATEGY).equals("STREAM")) {
  buildInputStreamFileStatus();
} else {
  // ...
}

rmCallbackHandler.setNeededWorkerContainersCount(workerNum);
int allocateInterval = conf.getInt(XLearningConfiguration.XLEARNING_ALLOCATE_INTERVAL,
    XLearningConfiguration.DEFAULT_XLEARNING_ALLOCATE_INTERVAL);
amrmAsync.setHeartbeatInterval(allocateInterval);
startAllocatedTimeStamp = System.currentTimeMillis();
// ...
if (startAllocatedContainer && (System.currentTimeMillis() - startAllocatedTimeStamp)
    > conf.getInt(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS)) {
  this.appendMessage(failMessage, true);
  // ...
}

this.appendMessage("Unregister Application", true);
if (fs.exists(tmpResultPath)) {
  LOG.info("Move from " + tmpResultPath.toString() + " to " + finalResultPath.toString());
  fs.rename(tmpResultPath, finalResultPath);
}
if (fs.exists(tmpPath)) {
  // ...
}

// error path (inside a catch block binding "e"):
finalSuccess = false;
this.appendMessage("Some error occurs" + org.apache.hadoop.util.StringUtils.stringifyException(e), true);
diagnostics = e.getMessage();
// Non-contiguous excerpts from Hive's MoveTask; elided code is marked with
// "// ...". The catch (HiveException he) header is reconstructed from the
// excerpt's use of "he", and the dangling "+ StringUtils.stringifyException(he));"
// is rejoined to the printError call it evidently belongs to.
if (!destFs.exists(destPath.getParent())) {
  destFs.mkdirs(destPath.getParent());
}
// ...
dc = new DataContainer(table.getTTable());
if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
  Utilities.FILE_OP_LOGGER.trace("loadTable called from " + tbd.getSourcePath()
      /* message continues in the original */);
}
// ...
tableCols = table.getCols();
break;
// ...
} catch (HiveException he) { // reconstructed header
  int errorCode = 1;
  if (he.getCanonicalErrorMsg() != ErrorMsg.GENERIC_ERROR) {
    errorCode = he.getCanonicalErrorMsg().getErrorCode();
    // ...
    console.printError("Failed with exception " + he.getMessage(), "\n"
        + StringUtils.stringifyException(he));
    // ...
    console.printInfo("\n", StringUtils.stringifyException(he), false);
  }
  // ...
} catch (Exception e) {
  console.printError("Failed with exception " + e.getMessage(), "\n"
      + StringUtils.stringifyException(e));
  setException(e);
  return (1);
}
// Non-contiguous excerpts from a Hive merge-file task; elided code is marked
// with "// ..." and the reconstructed catch header is noted.
success = true;
// ...
HiveFileFormatUtils.prepareJobOutput(job);
job.setOutputFormat(HiveOutputFormatImpl.class);
job.setMapperClass(work.getMapperClass());
// ... error path:
LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
setException(e);
return 5;
// ...
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(NullWritable.class);
if (work.getNumMapTasks() != null) {
  // ...
}
Path tempOutPath = Utilities.toTempPath(outputPath);
try {
  FileSystem fs = tempOutPath.getFileSystem(job);
  if (!fs.exists(tempOutPath)) {
    fs.mkdirs(tempOutPath);
  }
} catch (IOException e) { // reconstructed catch header
  // ...
  LOG.error(mesg, org.apache.hadoop.util.StringUtils.stringifyException(e));
  setException(e);
}
public static void addInputPath(Configuration conf, Path path) throws IOException {
  path = path.getFileSystem(conf).makeQualified(path);
  String dirStr = org.apache.hadoop.util.StringUtils.escapeString(path.toString());
  String dirs = conf.get(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR);
  conf.set(GuaguaYarnConstants.GUAGUA_YARN_INPUT_DIR,
      dirs == null ? dirStr : dirs + "," + dirStr);
}
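// Usage sketch for addInputPath above: each call appends one escaped,
// qualified path to the Guagua input-dir property. The paths are illustrative.
Configuration conf = new Configuration();
addInputPath(conf, new Path("/guagua/input/part-0"));
addInputPath(conf, new Path("/guagua/input/part-1"));
// GUAGUA_YARN_INPUT_DIR now holds both paths, comma separated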
@SuppressWarnings("SameParameterValue") static void addDependencyJars(Configuration conf, Class<?>... classes) throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Set<String> jars = new HashSet<>(conf.getStringCollection("tmpjars")); for (Class<?> clazz : classes) { if (clazz == null) { continue; } final String path = Utilities.jarFinderGetJar(clazz); if (path == null) { throw new RuntimeException("Could not find jar for class " + clazz + " in order to ship it to the cluster."); } if (!localFs.exists(new Path(path))) { throw new RuntimeException("Could not validate jar file " + path + " for class " + clazz); } jars.add(path); } if (jars.isEmpty()) { return; } //noinspection ToArrayCallWithZeroLengthArrayArgument conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()]))); }
// Non-contiguous excerpts from Hive's ExecDriver.execute(); elided code is
// marked with "// ..." and the opening try is reconstructed to balance the catch.
try { // reconstructed
  FileSystem fs = emptyScratchDir.getFileSystem(job);
  fs.mkdirs(emptyScratchDir);
} catch (IOException e) {
  e.printStackTrace();
  console.printError("Error launching map-reduce job", "\n"
      + org.apache.hadoop.util.StringUtils.stringifyException(e));
  return 5;
}
job.setOutputFormat(HiveOutputFormatImpl.class);
job.setMapRunnerClass(ExecMapRunner.class);
job.setMapperClass(ExecMapper.class);
// ...
Path hdfsPath = mWork.getTmpHDFSPath();
// ...
hdfs.copyFromLocalFile(archivePath, hdfsFilePath);
// ... sampling error path:
LOG.error("Sampling error", e);
console.printError(e.toString(), "\n"
    + org.apache.hadoop.util.StringUtils.stringifyException(e));
// ... job failure path:
console.printError(mesg, "\n"
    + org.apache.hadoop.util.StringUtils.stringifyException(e));
returnVal = 3;
// ... commit failure path:
String mesg = "Job Commit failed with exception '" + Utilities.getNameMessage(e) + "'";
console.printError(mesg, "\n"
    + org.apache.hadoop.util.StringUtils.stringifyException(e));
// Fragment from HCatalog's input-path setup. The original excerpt only shows
// the loop body, so the loop header, the element fetch, the empty-string guard
// for the bare "continue", and the separator declaration are reconstructed
// (marked below).
StringBuilder str = new StringBuilder();
String separator = ""; // reconstructed
boolean ignoreInvalidPath = jobConf.getBoolean(
    HCatConstants.HCAT_INPUT_IGNORE_INVALID_PATH_KEY,
    HCatConstants.HCAT_INPUT_IGNORE_INVALID_PATH_DEFAULT);
Iterator<String> pathIterator = pathStrings.iterator();
while (pathIterator.hasNext()) {           // reconstructed
  String pathString = pathIterator.next(); // reconstructed
  if (pathString.isEmpty()) {              // reconstructed guard
    continue;
  }
  Path path = new Path(pathString);
  FileSystem fs = path.getFileSystem(jobConf);
  if (ignoreInvalidPath && !fs.exists(path)) {
    pathIterator.remove();
    continue;
  }
  final String qualifiedPath = fs.makeQualified(path).toString();
  str.append(separator).append(StringUtils.escapeString(qualifiedPath));
  separator = StringUtils.COMMA_STR;
}
jobConf.set("mapred.input.dir", str.toString());
@Before
public void setUp() {
  super.setUp();
  Path p = new Path(getWarehouseDir());
  try {
    FileSystem fs = FileSystem.get(new Configuration());
    fs.delete(p, true); // recursive delete; the single-argument overload is deprecated
  } catch (IOException e) {
    LOG.error("Setup fail with IOException: " + StringUtils.stringifyException(e));
    fail("Setup fail with IOException: " + StringUtils.stringifyException(e));
  }
}
// Excerpted fragment from a TestDFSIOEnh driver; the try/catch wrapping is
// reconstructed to balance the trailing error print, and elided code is
// marked with "// ...".
try { // reconstructed
  Configuration fsConfig = new Configuration(getConf());
  fsConfig.setInt("test.io.file.buffer.size", bufferSize);
  fsConfig.setInt("test.io.sampling.interval", tputSampleInterval);
  FileSystem fs = FileSystem.get(fsConfig);
  JobConf dummyConf = new JobConf(fsConfig, TestDFSIOEnh.class);
  JobClient jc = new JobClient(dummyConf);
  int maxreduces = jc.getDefaultReduces();
  // ...
} catch (Exception e) { // reconstructed
  System.err.print(StringUtils.stringifyException(e));
  return -1;
}
// Fragment of an addDependencyJars variant that warns and skips missing jars
// instead of failing. The method name, loop header, null guard, and jar lookup
// are reconstructed from the sibling method above; the original excerpt begins
// mid-signature.
static void addDependencyJars(Configuration conf, // reconstructed header
    Class<?>... classes) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  Set<String> jars = new HashSet<>();
  jars.addAll(conf.getStringCollection("tmpjars"));
  for (Class<?> clazz : classes) { // reconstructed
    if (clazz == null) {           // reconstructed guard for the bare "continue"
      continue;
    }
    Path path = new Path(Utilities.jarFinderGetJar(clazz)); // reconstructed lookup
    if (!localFs.exists(path)) {
      LOG.warn("Could not validate jar file " + path + " for class " + clazz);
      continue;
    }
    jars.add(path.toString());
  }
  conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()])));
}
@Override
protected void initialize(JobContext context) throws IOException {
  // Do we have to worry about mis-matches between the Configuration from setConf
  // and the one in this context?
  TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE));
  try {
    initializeTable(ConnectionFactory.createConnection(new Configuration(conf)), tableName);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
  }
}
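// A minimal sketch of the configuration the initializer above reads, assuming
// it belongs to HBase's mapreduce TableInputFormat; the table name is illustrative.
Configuration conf = HBaseConfiguration.create();
conf.set(TableInputFormat.INPUT_TABLE, "my_table");
// initialize(context) will then connect and call initializeTable for "my_table"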
// Non-contiguous excerpts from Hive's Hive.loadTable(); elided code is marked
// with "// ..." and closing braces are reconstructed where the excerpt cut off.
assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
boolean isTxnTable = AcidUtils.isTransactionalTable(tbl);
boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
  newFiles = Collections.synchronizedList(new ArrayList<Path>());
}
// ...
if (((isMmTable || isFullAcidTable) && loadPath.equals(tbl.getPath()))
    || (loadFileType == LoadFileType.IGNORE)) {
  // ...
  assert !isAcidIUDoperation;
  destPath = new Path(destPath, isInsertOverwrite
      ? AcidUtils.baseDir(writeId) : AcidUtils.deltaSubdir(writeId, writeId, stmtId));
  // ...
} else {
  try {
    FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
    copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
        loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles,
        tbl.getNumBuckets() > 0, isFullAcidTable, isManaged);
  } catch (IOException e) {
    throw new HiveException("addFiles: filesystem error in check phase", e);
  }
}
// ... later error path:
LOG.error(StringUtils.stringifyException(e));
throw new HiveException(e);
// Excerpted fragments from Hive's copyFiles; the first catch header is
// reconstructed to balance the existing try, and elided code is marked.
try {
  if (!fs.exists(destf)) {
    FileUtils.mkdir(fs, destf, conf);
  }
} catch (IOException e) { // reconstructed
  throw new HiveException(
      "copyFiles: error while checking/creating destination directory!!!", e);
}
// ...
FileSystem srcFs;
try {
  srcFs = srcf.getFileSystem(conf);
  srcs = srcFs.globStatus(srcf);
} catch (IOException e) {
  LOG.error(StringUtils.stringifyException(e));
  throw new HiveException("addFiles: filesystem error in check phase. " + e.getMessage(), e);
}
@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  String dirs = job.get("mapred.input.dir", "");
  String[] list = StringUtils.split(dirs);
  List<FileStatus> status = new ArrayList<FileStatus>();
  for (int i = 0; i < list.length; i++) {
    status.addAll(getAllSubFileStatus(job, new Path(list[i])));
  }
  return status.toArray(new FileStatus[0]);
}
/**
 * Set the array of string values for the <code>name</code> property as
 * comma delimited values.
 *
 * @param name property name.
 * @param values The values
 */
public void setStrings(String name, String... values) {
  set(name, StringUtils.arrayToString(values));
}
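// Round-trip sketch for setStrings, assuming Hadoop's Configuration: values
// are stored comma-separated and read back with getStrings. Note that
// arrayToString does not escape commas, so values containing commas will not
// round-trip. The property key is hypothetical.
Configuration conf = new Configuration();
conf.setStrings("my.prop", "a", "b", "c");
String[] values = conf.getStrings("my.prop"); // {"a", "b", "c"}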