private static void moveFile(FileSystem fs, FileStatus file, Path dst)
    throws IOException, HiveException {
  Path srcFilePath = file.getPath();
  String fileName = srcFilePath.getName();
  Path dstFilePath = new Path(dst, fileName);
  if (file.isDir()) {
    renameOrMoveFiles(fs, srcFilePath, dstFilePath);
  } else {
    // On a name collision, probe dst/<name>_1, dst/<name>_2, ... until a
    // free slot is found.
    if (fs.exists(dstFilePath)) {
      int suffix = 0;
      do {
        suffix++;
        dstFilePath = new Path(dst, fileName + "_" + suffix);
      } while (fs.exists(dstFilePath));
    }
    if (!fs.rename(srcFilePath, dstFilePath)) {
      throw new HiveException("Unable to move: " + srcFilePath + " to: " + dst);
    }
  }
}
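// A minimal, hypothetical sketch (class name and paths are illustrative, not
// from Hive) showing the suffix-on-collision behavior above against the local
// filesystem: the second file with the same name lands at dst/part-0000_1.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveFileDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dst = new Path("/tmp/movefile-demo/dst");
    fs.mkdirs(dst);
    // Two source files with the same name in different directories.
    Path src1 = new Path("/tmp/movefile-demo/a/part-0000");
    Path src2 = new Path("/tmp/movefile-demo/b/part-0000");
    fs.create(src1).close();
    fs.create(src2).close();
    fs.rename(src1, new Path(dst, "part-0000"));
    // Mirror the do/while probe from moveFile for the colliding second file.
    Path target = new Path(dst, "part-0000");
    int suffix = 0;
    while (fs.exists(target)) {
      suffix++;
      target = new Path(dst, "part-0000_" + suffix);
    }
    fs.rename(src2, target);
    System.out.println("second file moved to: " + target);
  }
}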
private CommandProcessorResponse handleHiveException(HiveException e, int ret, String rootMsg)
    throws CommandProcessorResponse {
  errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
  if (rootMsg != null) {
    errorMessage += "\n" + rootMsg;
  }
  // Prefer the canonical SQL state attached to the exception; fall back to a
  // lookup keyed on the exception message.
  SQLState = e.getCanonicalErrorMsg() != null ?
      e.getCanonicalErrorMsg().getSQLState() : ErrorMsg.findSQLState(e.getMessage());
  downstreamError = e;
  console.printError(errorMessage + "\n"
      + org.apache.hadoop.util.StringUtils.stringifyException(e));
  throw createProcessorResponse(ret);
}

private boolean requiresLock() {
public void close() {
  try {
    elems.close();
  } catch (HiveException e) {
    LOG.error(e.toString(), e);
  }
}
try {
  Path location = new Path(table.getSd().getLocation());
  FileSystem fs = location.getFileSystem(conf);
  HadoopShims.HdfsEncryptionShim shim =
      ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
  if (!shim.isPathEncrypted(location)) {
    // Unencrypted path: trash the whole directory, then recreate it with the
    // original permissions and group.
    HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
    FileStatus targetStatus = fs.getFileStatus(location);
    String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
    FileUtils.moveToTrash(fs, location, conf, isAutopurge);
    fs.mkdirs(location);
    HdfsUtils.setFullFileStatus(conf, status, targetGroup, fs, location, false);
  } else {
    // Encrypted path: delete the visible contents instead, since the
    // directory cannot be trashed across an encryption-zone boundary.
    FileStatus[] statuses = fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER);
    if ((statuses != null) && (statuses.length > 0)) {
      boolean success = Hive.trashFiles(fs, statuses, conf, isAutopurge);
      if (!success) {
        throw new HiveException("Error in deleting the contents of " + location.toString());
      }
    }
  }
} catch (IOException e) { // assumed handler: the snippet is truncated before its catch block
  throw new HiveException(e);
}
private static class ThreadLocalHive extends ThreadLocal<Hive> {
  @Override
  protected Hive initialValue() {
    return null;
  }

  @Override
  public synchronized void set(Hive hiveObj) {
    Hive currentHive = this.get();
    if (currentHive != hiveObj) {
      // Remove/close current thread-local Hive object before overwriting with new Hive object.
      remove();
      super.set(hiveObj);
    }
  }

  @Override
  public synchronized void remove() {
    Hive currentHive = this.get();
    if (currentHive != null) {
      // Close the metastore connections before removing it from thread local hiveDB.
      currentHive.close(false);
      super.remove();
    }
  }
}
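// Hedged sketch: the same close-before-replace ThreadLocal idiom, generalized
// to any AutoCloseable. The class name is illustrative, not part of Hive.
public class CloseOnSetThreadLocal<T extends AutoCloseable> extends ThreadLocal<T> {
  @Override
  public synchronized void set(T newValue) {
    T current = get();
    if (current != newValue) {
      remove(); // close the old value before installing the new one
      super.set(newValue);
    }
  }

  @Override
  public synchronized void remove() {
    T current = get();
    if (current != null) {
      try {
        current.close(); // release resources before dropping the reference
      } catch (Exception e) {
        // best effort: a failed close should not block removal
      }
      super.remove();
    }
  }
}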
private void listFilesCreatedByQuery(Path loadPath, long writeId, int stmtId,
    boolean isInsertOverwrite, List<Path> newFiles) throws HiveException {
  Path acidDir = new Path(loadPath,
      AcidUtils.baseOrDeltaSubdir(isInsertOverwrite, writeId, writeId, stmtId));
  try {
    FileSystem srcFs = loadPath.getFileSystem(conf);
    if (srcFs.exists(acidDir) && srcFs.isDirectory(acidDir)) {
      // list out all the files in the path
      listFilesInsideAcidDirectory(acidDir, srcFs, newFiles);
    } else {
      LOG.info("directory does not exist: " + acidDir);
    }
  } catch (IOException e) {
    LOG.error("Error listing files", e);
    throw new HiveException(e);
  }
}
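// Illustrative sketch (an assumption, not Hive's code) of the directory
// naming baseOrDeltaSubdir chooses between: an insert-overwrite writes a base
// directory, a plain insert writes a delta. The zero-padded widths shown here
// follow the common Hive ACID layout, e.g. base_0000007 and
// delta_0000007_0000007_0001.
static String baseOrDeltaSubdirSketch(boolean baseDirectory, long min, long max, int statementId) {
  if (baseDirectory) {
    return String.format("base_%07d", max);
  }
  return String.format("delta_%07d_%07d_%04d", min, max, statementId);
}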
public boolean isEmpty() throws HiveException {
  Preconditions.checkNotNull(getPath());
  try {
    FileSystem fs = FileSystem.get(getPath().toUri(), SessionState.getSessionConf());
    // Empty means the path is missing or holds no visible (non-hidden) files.
    return !fs.exists(getPath())
        || fs.listStatus(getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0;
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
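// Hypothetical, self-contained variant of the same emptiness check without
// Hive's FileUtils helper; the inline filter reproduces the usual hidden-file
// convention (names starting with "." or "_"). Names here are illustrative.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class EmptinessCheck {
  private static final PathFilter VISIBLE_FILES = p -> {
    String name = p.getName();
    return !name.startsWith(".") && !name.startsWith("_");
  };

  // True if the directory is missing or contains no visible entries.
  static boolean isEmptyDir(FileSystem fs, Path dir) throws IOException {
    return !fs.exists(dir) || fs.listStatus(dir, VISIBLE_FILES).length == 0;
  }
}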
private boolean isFunctionAlreadyLoaded(Path funcDumpRoot) throws HiveException, IOException {
  Path metadataPath = new Path(funcDumpRoot, EximUtil.METADATA_NAME);
  FileSystem fs = FileSystem.get(metadataPath.toUri(), context.hiveConf);
  MetaData metadata = EximUtil.readMetaData(fs, metadataPath);
  Function function;
  try {
    String dbName = StringUtils.isBlank(dbNameToLoadIn)
        ? metadata.function.getDbName() : dbNameToLoadIn;
    function = context.hiveDb.getFunction(dbName, metadata.function.getFunctionName());
  } catch (HiveException e) {
    if (e.getCause() instanceof NoSuchObjectException) {
      return false;
    }
    throw e;
  }
  return (function != null);
}
private void commitOneOutPath(int idx, FileSystem fs, List<Path> commitPaths)
    throws IOException, HiveException {
  if ((bDynParts || isSkewedStoredAsSubDirectories)
      && !fs.exists(finalPaths[idx].getParent())) {
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
      Utilities.FILE_OP_LOGGER.trace("commit making path for dyn/skew: "
          + finalPaths[idx].getParent());
    }
    FileUtils.mkdir(fs, finalPaths[idx].getParent(), hconf);
  }
  if (outPaths[idx] != null && fs.exists(outPaths[idx])) {
    if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
      Utilities.FILE_OP_LOGGER.trace("committing " + outPaths[idx] + " to "
          + finalPaths[idx]);
    }
    if (isMmTable) { // assumed guard: the original condition is truncated in the snippet
      assert outPaths[idx].equals(finalPaths[idx]);
      commitPaths.add(outPaths[idx]);
    } else if (!fs.rename(outPaths[idx], finalPaths[idx])) {
      // First rename failed; if a stale target exists, delete it and retry.
      FileStatus fileStatus = FileUtils.getFileStatusOrNull(fs, finalPaths[idx]);
      if (fileStatus != null) {
        LOG.warn("Target path " + finalPaths[idx] + " with a size "
            + fileStatus.getLen() + " exists. Trying to delete it.");
        if (!fs.delete(finalPaths[idx], true)) {
          throw new HiveException("Unable to delete existing target output: "
              + finalPaths[idx]);
        }
      }
      if (!fs.rename(outPaths[idx], finalPaths[idx])) {
        throw new HiveException("Unable to rename output from: " + outPaths[idx]
            + " to: " + finalPaths[idx]);
      }
    }
  }
}
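// Hedged sketch of the rename-with-retry pattern above: if the first rename
// fails and a stale target exists, delete it and rename once more. Assumes
// org.apache.hadoop.fs.{FileSystem,Path} and java.io.IOException; the method
// name is illustrative.
static void renameReplacingStaleTarget(FileSystem fs, Path src, Path dst) throws IOException {
  if (!fs.rename(src, dst)) {
    if (fs.exists(dst) && !fs.delete(dst, true)) {
      throw new IOException("Unable to delete existing target: " + dst);
    }
    if (!fs.rename(src, dst)) {
      throw new IOException("Unable to rename " + src + " to " + dst);
    }
  }
}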
FileSystem tgtFs;
FileSystem srcFs;
try { // assumed try; truncated in the snippet
  tgtFs = targetPath.getFileSystem(conf);
} catch (IOException e) {
  LOG.error("Failed to get dest fs", e);
  throw new HiveException(e.getMessage(), e);
}
try { // assumed try; truncated in the snippet
  srcFs = sourcePath.getFileSystem(conf);
} catch (IOException e) {
  LOG.error("Failed to get src fs", e);
  throw new HiveException(e.getMessage(), e);
}
if (srcFs.exists(sourcePath)) {
  Path deletePath = null;
  // Pre-create the multi-level target directory when needed, so the rename
  // below does not fail on a missing parent.
  if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS)) {
    deletePath = createTargetPath(targetPath, tgtFs);
  }
  if (!tgtFs.rename(sourcePath, targetPath)) { // assumed guard; the failing move call is truncated in the snippet
    try {
      if (deletePath != null) {
        tgtFs.delete(deletePath, true);
      }
    } catch (IOException e) { // assumed: lines between the delete and the throw were dropped
      LOG.warn("Unable to delete path created to facilitate rename: " + deletePath);
    }
    throw new HiveException("Unable to rename: " + sourcePath + " to: " + targetPath);
  }
} else if (!tgtFs.mkdirs(targetPath)) {
  throw new HiveException("Unable to make directory: " + targetPath);
}
Set<Path> validPartitions = new HashSet<Path>();
try {
  FileSystem fs = loadPath.getFileSystem(conf);
  FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs);
  for (FileStatus s : leafStatus) { // assumed loop header; truncated in the snippet
    if (!s.isDirectory()) {
      throw new HiveException("partition " + s.getPath() + " is not a directory!");
    }
    validPartitions.add(s.getPath());
  }
} catch (IOException e) { // assumed catch; truncated in the snippet
  throw new HiveException(e);
}
int partsToLoad = validPartitions.size(); // assumed: partsToLoad is used below but not declared in the snippet
if (partsToLoad > conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS)) {
  throw new HiveException("Number of dynamic partitions created is " + partsToLoad
      + ", which is more than "
      + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS)
      + ". To solve this try to set "
      + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname
      + " to at least " + partsToLoad + '.');
}
/**
 * Checks whether a partition is in a custom (non-standard) location.
 * @param tbl table the partition belongs to
 * @param p partition to check
 * @return true if the partition location is custom, false if it is standard
 */
boolean partitionInCustomLocation(Table tbl, Partition p) throws HiveException {
  String subdir = null;
  try {
    subdir = Warehouse.makePartName(tbl.getPartCols(), p.getValues());
  } catch (MetaException e) {
    throw new HiveException("Unable to get partition's directory", e);
  }
  Path tableDir = tbl.getDataLocation();
  if (tableDir == null) {
    throw new HiveException("Table has no location set");
  }
  // The standard location is <table dir>/<key1=val1/key2=val2/...>; anything
  // else counts as custom. Archived partitions are compared against their
  // original (pre-archive) location.
  String standardLocation = (new Path(tableDir, subdir)).toString();
  if (ArchiveUtils.isArchived(p)) {
    return !getOriginalLocation(p).equals(standardLocation);
  } else {
    return !p.getLocation().equals(standardLocation);
  }
}
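// Illustrative sketch (not Warehouse's implementation) of the key=value
// subdirectory naming makePartName produces, e.g. "ds=2024-01-01/hr=00".
// The real helper also escapes characters that are unsafe in paths, which
// this sketch omits; names and parameter types here are simplified.
static String makePartNameSketch(java.util.List<String> partCols, java.util.List<String> partVals) {
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < partCols.size(); i++) {
    if (i > 0) {
      sb.append('/');
    }
    sb.append(partCols.get(i)).append('=').append(partVals.get(i));
  }
  return sb.toString();
}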
try { // assumed try; truncated in the snippet
  FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
  FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
  for (FileStatus status : srcs) {
    if (status.getModificationTime() > Long.parseLong(indexTs)) {
      LOG.info("Index is stale on table '" + src.getTableName() + "'. Modified time ("
          + status.getModificationTime() + ") for '" + status.getPath()
          + "' is higher than index creation time (" + indexTs + ").");
      return false;
    }
  }
} catch (IOException e) { // assumed catch; truncated in the snippet
  throw new HiveException("Failed to grab timestamp information from table '"
      + src.getTableName() + "': " + e.getMessage(), e);
}
return true; // assumed: mirrors the partition-level check that follows
/**
 * Checks that an index partition of a partitioned table exists and is fresh.
 */
private static boolean isIndexPartitionFresh(Hive hive, Index index, Partition part)
    throws HiveException {
  LOG.info("checking index staleness...");
  try {
    String indexTs = index.getParameters().get(part.getSpec().toString());
    if (indexTs == null) {
      return false;
    }
    FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
    FileStatus[] parts = partFs.listStatus(part.getDataLocation(),
        FileUtils.HIDDEN_FILES_PATH_FILTER);
    for (FileStatus status : parts) {
      if (status.getModificationTime() > Long.parseLong(indexTs)) {
        LOG.info("Index is stale on partition '" + part.getName() + "'. Modified time ("
            + status.getModificationTime() + ") for '" + status.getPath()
            + "' is higher than index creation time (" + indexTs + ").");
        return false;
      }
    }
  } catch (IOException e) {
    throw new HiveException("Failed to grab timestamp information from partition '"
        + part.getName() + "': " + e.getMessage(), e);
  }
  return true;
}
public void cleanUpOneDirectoryForReplace(Path path, FileSystem fs, PathFilter pathFilter,
    HiveConf conf, boolean purge, boolean isNeedRecycle) throws IOException, HiveException {
  if (isNeedRecycle && conf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) {
    recycleDirToCmPath(path, purge);
  }
  FileStatus[] statuses = fs.listStatus(path, pathFilter);
  if (statuses == null || statuses.length == 0) {
    return;
  }
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    String s = "Deleting files under " + path + " for replace: ";
    for (FileStatus file : statuses) {
      s += file.getPath().getName() + ", ";
    }
    Utilities.FILE_OP_LOGGER.trace(s);
  }
  if (!trashFiles(fs, statuses, conf, purge)) {
    throw new HiveException("Old path " + path + " has not been cleaned up.");
  }
}
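// Hypothetical sketch approximating the per-file choice trashFiles makes:
// purge deletes permanently, otherwise the file goes to the Hadoop trash so
// it can be recovered. Assumes org.apache.hadoop.fs.{FileSystem,Path,Trash}
// and org.apache.hadoop.conf.Configuration; the method name is illustrative.
static boolean removeOneFile(FileSystem fs, Path p, Configuration conf, boolean purge)
    throws IOException {
  if (purge) {
    return fs.delete(p, true); // permanent delete
  }
  return Trash.moveToAppropriateTrash(fs, p, conf); // recoverable delete
}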
try { // assumed try; the snippet's catch target is truncated
  FileSystem fs = loadPath.getFileSystem(conf);
  int numDPCols = dpCtx.getNumDPCols();
  List<FileStatus> status = HiveStatsUtils.getFileStatusRecurse(loadPath, numDPCols, fs);
  for (int i = 0; i < status.size(); i++) { // assumed loop; 'i' is used but not declared in the snippet
    Path partPath = status.get(i).getPath();
    assert fs.getFileStatus(partPath).isDir()
        : "partitions " + partPath + " is not a directory !";
  }
} catch (IOException e) {
  throw new HiveException(e);
}
FileSystem srcFs;
FileStatus[] dirs;
ArrayList<FileStatus> files;
try { // assumed try; truncated in the snippet
  srcFs = tbd.getSourcePath().getFileSystem(conf);
  dirs = srcFs.globStatus(tbd.getSourcePath());
  files = new ArrayList<FileStatus>();
  for (int i = 0; (dirs != null && i < dirs.length); i++) {
    files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(),
        FileUtils.HIDDEN_FILES_PATH_FILTER)));
  }
} catch (IOException e) {
  throw new HiveException("addFiles: filesystem error in check phase", e);
}
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
  boolean flag = true;
  // The format check that sets 'flag' is truncated in the snippet; only the
  // failure path survives, so it is guarded here as assumed.
  if (!flag) {
    throw new HiveException(ErrorMsg.WRONG_FILE_FORMAT);
  }
}
/**
 * Creates the path where partitions matching the prefix should lie in the filesystem.
 * @param tbl table the partitions belong to
 * @return expected location of partitions matching the prefix in the filesystem
 */
public Path createPath(Table tbl) throws HiveException {
  String prefixSubdir;
  try {
    prefixSubdir = Warehouse.makePartName(fields, values);
  } catch (MetaException e) {
    throw new HiveException("Unable to get partitions directories prefix", e);
  }
  Path tableDir = tbl.getDataLocation();
  if (tableDir == null) {
    throw new HiveException("Table has no location set");
  }
  return new Path(tableDir, prefixSubdir);
}
private DataOutputStream getOutputStream(String resFile) throws HiveException {
  try {
    return getOutputStream(new Path(resFile));
  } catch (HiveException e) {
    throw e;
  } catch (Exception e) {
    throw new HiveException(e);
  }
}
Path srcPath = stat.getPath();
FileStatus[] origBucketStats;
try { // assumed try; truncated in the snippet
  origBucketStats = fs.listStatus(srcPath, AcidUtils.originalBucketFilter);
} catch (IOException e) {
  String msg = "Unable to look for bucket files in src path " + srcPath.toUri().toString();
  LOG.error(msg);
  throw new HiveException(msg, e);
}
for (FileStatus origBucketStat : origBucketStats) { // assumed loop; 'origBucketStat' is used but not declared in the snippet
  Path origBucketPath = origBucketStat.getPath();
  moveAcidDeltaFiles(AcidUtils.DELTA_PREFIX, AcidUtils.deltaFileFilter, fs, dst,
      origBucketPath, createdDeltaDirs, newFiles);
}