public void setupContext(List<Path> paths) throws HiveException {
  int segmentLen = paths.size();
  FetchOperator.setFetchOperatorContext(jobConf, fetchWork.getPartDir());
  FetchOperator[] segments = segmentsForSize(segmentLen);
  // One FetchOperator per input path, each with its own JobConf copy.
  for (int i = 0; i < segmentLen; i++) {
    Path path = paths.get(i);
    if (segments[i] == null) {
      segments[i] = new FetchOperator(fetchWork, new JobConf(jobConf));
    }
    segments[i].setupContext(Arrays.asList(path));
  }
  initialize(segmentLen);
  // Advance each segment to its first row and register the ones that produced one.
  for (int i = 0; i < segmentLen; i++) {
    if (nextHive(i)) {
      put(i);
    }
  }
  counter = 0;
}
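// segmentsForSize(int) is not shown above. A minimal sketch of what it
// plausibly does (lazily allocate or grow the backing FetchOperator array),
// assuming the array lives in a field named `segments`; the actual Hive
// implementation may differ.
private FetchOperator[] segmentsForSize(int segmentLen) {
  if (segments == null || segments.length < segmentLen) {
    FetchOperator[] newSegments = new FetchOperator[segmentLen];
    if (segments != null) {
      // Keep any operators created by an earlier, smaller setup.
      System.arraycopy(segments, 0, newSegments, 0, segments.length);
    }
    segments = newSegments;
  }
  return segments;
}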
  iterPath = work.getPartDir().iterator();
  iterPartDesc = work.getPartDesc().iterator();
} else {
if (partitionedTable) {
  LOG.info("Printing orc file dump for files from partitioned directory..");
  directories = fetchWork.getPartDir();
} else {
  LOG.info("Printing orc file dump for files from table directory..");
  currWork.mergeAliasedInput(alias, fetchWork.getTblDir(), partitionInfo);
} else {
  for (Path pathDir : fetchWork.getPartDir()) {
    currWork.mergeAliasedInput(alias, pathDir, partitionInfo);
    && scanLimit < fTask.getWork().getPartDir().size()) {
  throw new SemanticException(ErrorMsg.PARTITION_SCAN_LIMIT_EXCEEDED,
      "" + fTask.getWork().getPartDir().size(),
      "" + fTask.getWork().getTblDesc().getTableName(),
      "" + scanLimit);
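// For context, scanLimit is read from configuration before this check runs.
// A minimal sketch, assuming the limit is exposed through HiveConf; the
// exact ConfVars constant (HIVELIMITTABLESCANPARTITION, i.e.
// hive.limit.query.max.table.partition) is an assumption here, so verify it
// against the HiveConf of your release.
int scanLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITTABLESCANPARTITION);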
iterPath = FetchWork.convertStringToPathArray(work.getPartDir()).iterator();
iterPartDesc = work.getPartDesc().iterator();
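// This older variant keeps partition dirs as Strings and converts them on
// the fly. A minimal sketch of such a conversion helper, assuming String
// inputs and Hadoop Path outputs; the real FetchWork.convertStringToPathArray
// may differ in signature and null handling.
public static ArrayList<Path> convertStringToPathArray(List<String> paths) {
  if (paths == null) {
    return null;
  }
  ArrayList<Path> result = new ArrayList<Path>(paths.size());
  for (String path : paths) {
    result.add(new Path(path));  // org.apache.hadoop.fs.Path
  }
  return result;
}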
@Override
protected void localizeMRTmpFilesImpl(Context ctx) {
  // Rewrite the table dir if it points at MR scratch space, then localize
  // any partition dirs the same way.
  String s = work.getTblDir();
  if ((s != null) && ctx.isMRTmpFileURI(s)) {
    work.setTblDir(ctx.localizeMRTmpFileURI(s));
  }
  ArrayList<String> ls = work.getPartDir();
  if (ls != null) {
    ctx.localizePaths(ls);
  }
}
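// A plausible sketch of Context.localizePaths, built only from the two
// Context methods used above (isMRTmpFileURI, localizeMRTmpFileURI); the
// actual implementation is not shown here and may differ.
public void localizePaths(ArrayList<String> paths) {
  for (int i = 0; i < paths.size(); i++) {
    String path = paths.get(i);
    if (isMRTmpFileURI(path)) {
      paths.set(i, localizeMRTmpFileURI(path));
    }
  }
}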
  currWork.mergeAliasedInput(alias, fetchWork.getTblDir().toUri().toString(), partitionInfo);
} else {
  for (Path pathDir : fetchWork.getPartDir()) {
    currWork.mergeAliasedInput(alias, pathDir.toUri().toString(), partitionInfo);