public static ExecutionSetupException fromThrowable(String message, Throwable cause) {
  Throwable t = cause instanceof InvocationTargetException
      ? ((InvocationTargetException) cause).getTargetException() : cause;
  if (t instanceof ExecutionSetupException) {
    return ((ExecutionSetupException) t);
  }
  return new ExecutionSetupException(message, t);
}

public ExecutionSetupException() {
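// Illustrative sketch only (not from the source): shows how the fromThrowable factory above unwraps
// the target of an InvocationTargetException raised by a reflective call, so callers see the real cause.
// The instantiateReader helper and its readerClassName parameter are hypothetical.
import java.lang.reflect.InvocationTargetException;

static Object instantiateReader(String readerClassName) throws ExecutionSetupException {
  try {
    return Class.forName(readerClassName).getConstructor().newInstance();
  } catch (InvocationTargetException e) {
    // fromThrowable unwraps e.getTargetException(); an ExecutionSetupException cause is rethrown as-is
    throw ExecutionSetupException.fromThrowable("Failed to construct reader " + readerClassName, e);
  } catch (ReflectiveOperationException e) {
    throw new ExecutionSetupException("Failed to construct reader " + readerClassName, e);
  }
}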
@PUT
@Produces(MediaType.APPLICATION_JSON)
@Consumes(MediaType.APPLICATION_JSON)
public SourceUI putSource(SourceUI source) throws NamespaceException, UserNotFoundException, SourceNotFoundException {
  try {
    // Following are set at server side.
    source.setName(sourceName.getName());
    source.setCtime(System.currentTimeMillis());

    SourceConfig sourceConfig = sourceService.registerSourceWithRuntime(source);

    final SourceState sourceState = sourceService.getSourceState(sourcePath.getSourceName().getName());
    if (sourceState == null) {
      throw new SourceNotFoundException(sourcePath.getSourceName().getName());
    }
    source.setState(sourceState);
    source.setTag(sourceConfig.getTag());
    source.setId(sourceConfig.getId().getId());
    return source;
  } catch (ExecutionSetupException e) {
    // if we are getting an ExecutionSetupException it will most likely contain a "useful" error message
    throw UserExceptionMapper.withStatus(UserException.resourceError(e), Response.Status.INTERNAL_SERVER_ERROR)
        .message("Failure while attempting to register source: %s", e.getMessage())
        .build(logger);
  }
}
}
private static FSDataInputStream openFile(FileSystem fs, Path path) throws ExecutionSetupException {
  try {
    return fs.open(path);
  } catch (IOException e) {
    throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: " + path.getName(), e);
  }
}
DeprecatedSingleStreamPageReader(ColumnReader<?> parentStatus, FSDataInputStream inputStream, Path path,
    ColumnChunkMetaData columnChunkMetaData) throws ExecutionSetupException {
  super(parentStatus, inputStream, path, columnChunkMetaData);
  try {
    lastPosition = inputStream.getPos();
  } catch (IOException e) {
    throw new ExecutionSetupException("Error in getting current position for parquet file at location: " + path, e);
  }
  this.inputStream = inputStream;
}
public WriterOperator getWriterBatch(OperatorContext context, ParquetWriter writer) throws ExecutionSetupException {
  try {
    return new WriterOperator(context, writer.getOptions(), getRecordWriter(context, writer));
  } catch (IOException e) {
    throw new ExecutionSetupException(String.format("Failed to create the WriterRecordBatch. %s", e.getMessage()), e);
  }
}
public WriterOperator getWriterBatch(OperatorContext context, EasyWriter writer) throws ExecutionSetupException {
  try {
    return new WriterOperator(context, writer.getOptions(), getRecordWriter(context, writer));
  } catch (IOException e) {
    throw new ExecutionSetupException(String.format("Failed to create the WriterRecordBatch. %s", e.getMessage()), e);
  }
}
/**
 * Update Wrappers with allocations received from scheduler
 */
public void updateWithAllocations(Map<Integer, Map<CoordinationProtos.NodeEndpoint, Long>> allocations) throws ExecutionSetupException {
  for (Wrapper wrapper : fragmentMap.values()) {
    if (!wrapper.isEndpointsAssignmentDone()) {
      throw new ExecutionSetupException("Node assignment is not done for major Fragment: " + wrapper.getMajorFragmentId());
    }
    int majorFragmentId = wrapper.getMajorFragmentId();
    final Map<CoordinationProtos.NodeEndpoint, Long> majorFragmentAllocations = allocations.get(majorFragmentId);
    Preconditions.checkNotNull(majorFragmentAllocations);
    wrapper.assignMemoryAllocations(allocations.get(majorFragmentId));
  }
}
}
/**
 * To get quantity of resources per node
 * May need per major fragment though
 * @param planningSet
 * @return Map of MajorFragmentId to a Map of NodeEndPoint to number of those endpoints
 */
static Map<Integer, Map<NodeEndpoint, Integer>> getEndPoints(PlanningSet planningSet) throws ExecutionSetupException {
  Map<Integer, Map<NodeEndpoint, Integer>> endpointsPerMajorMap = Maps.newHashMap();
  for (Wrapper wrapper : planningSet) {
    if (!wrapper.isEndpointsAssignmentDone()) {
      throw new ExecutionSetupException("Node assignment is not done for major Fragment: " + wrapper.getMajorFragmentId());
    }
    final Map<NodeEndpoint, Integer> resourceNodeEndPointsMap = Maps.newHashMap();
    final List<NodeEndpoint> endPointsPerMajor = wrapper.getAssignedEndpoints();
    for (NodeEndpoint endpoint : endPointsPerMajor) {
      resourceNodeEndPointsMap.put(endpoint, Optional.ofNullable(resourceNodeEndPointsMap.get(endpoint)).orElse(0) + 1);
      endpointsPerMajorMap.put(wrapper.getMajorFragmentId(), resourceNodeEndPointsMap);
    }
  }
  return endpointsPerMajorMap;
}
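// Sketch only (not from the source): the Optional.ofNullable(...).orElse(0) + 1 pattern above is a
// per-endpoint occurrence counter; Map.merge expresses the same increment more directly. The class,
// method, and String keys below are hypothetical stand-ins for NodeEndpoint.
import java.util.HashMap;
import java.util.Map;

class EndpointCountSketch {
  static Map<String, Integer> countOccurrences(Iterable<String> endpoints) {
    final Map<String, Integer> counts = new HashMap<>();
    for (String endpoint : endpoints) {
      counts.merge(endpoint, 1, Integer::sum); // same effect as get() -> orElse(0) + 1 -> put()
    }
    return counts;
  }
}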
@Override
public void setup(final OutputMutator output) throws ExecutionSetupException {
  writer = new VectorContainerWriter(output);
  try {
    reader = new DataFileReader<>(new FsInput(hadoop, fsConf), new GenericDatumReader<GenericContainer>());
    logger.debug("Processing file : {}, start position : {}, end position : {} ", hadoop, start, end);
    reader.sync(this.start);
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
@Override
public RecordReader getRecordReader(OperatorContext context, FileSystemWrapper fs, FileStatus status) throws ExecutionSetupException {
  try {
    final ParquetMetadata footer = ParquetFileReader.readFooter(fsPlugin.getFsConf(), status, ParquetMetadataConverter.NO_FILTER);
    if (footer.getBlocks().size() == 0) {
      return null;
    }
    return new PreviewReader(context, fs, status, footer);
  } catch (IOException e) {
    throw new ExecutionSetupException(e);
  }
}
private org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> getRecordReader(
    final InputFormat<BytesWritable, BytesWritable> inputFormat,
    final JobConf jobConf) throws ExecutionSetupException {
  try {
    return inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
  } catch (IOException e) {
    throw new ExecutionSetupException(
        String.format("Error in creating sequencefile reader for file: %s, start: %d, length: %d",
            split.getPath(), split.getStart(), split.getLength()), e);
  }
}
PageReader(ColumnReader<?> parentStatus, FSDataInputStream inputStream, Path path,
    ColumnChunkMetaData columnChunkMetaData) throws ExecutionSetupException {
  this.parentColumnReader = parentStatus;
  allocatedDictionaryBuffers = new ArrayList<ByteBuf>();
  codecFactory = parentColumnReader.parentReader.getCodecFactory();
  this.stats = parentColumnReader.parentReader.parquetReaderStats;
  long start = columnChunkMetaData.getFirstDataPageOffset();
  this.inputStream = inputStream;
  try {
    this.dataReader = new ColumnDataReader(inputStream, start, columnChunkMetaData.getTotalSize());
    loadDictionaryIfExists(parentStatus, columnChunkMetaData, inputStream);
  } catch (IOException e) {
    throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: " + path.getName(), e);
  }
}
public VectorContainer loadDictionary(String fieldName) throws IOException, ExecutionSetupException {
  final StoragePluginId id = config.getDictionaryEncodedFields().get(fieldName).getStoragePluginId();
  final StoragePlugin storagePlugin = config.getCatalogService().getSource(id);
  if (storagePlugin instanceof FileSystemPlugin) {
    final FileSystemPlugin fsPlugin = (FileSystemPlugin) storagePlugin;
    final FileSystem fs = FileSystemWrapper.get(fsPlugin.getFsConf());
    return ParquetFormatPlugin.loadDictionary(fs, new Path(config.getDictionaryEncodedFields().get(fieldName).getDictionaryPath()), context.getAllocator());
  } else {
    throw new ExecutionSetupException(format("Storage plugin %s is not a filesystem plugin", id.getName()));
  }
}
  w = new TimeStampMilliWriter(f);
} else {
  throw new ExecutionSetupException(String.format("PojoRecord reader doesn't yet support conversions from type [%s].", type));
throw new ExecutionSetupException("Unable to get Hive table InputFormat class. There is neither " + "InputFormat class explicitly specified nor a StorageHandler class provided.");
public static GlobalDictionaries create(OperatorContext context, FileSystemWrapper fs,
    List<GlobalDictionaryFieldInfo> globalDictionaryColumns) throws ExecutionSetupException {
  if (globalDictionaryColumns != null && !globalDictionaryColumns.isEmpty()) {
    final Map<String, VectorContainer> dictionaries = Maps.newHashMap();
    context.getStats().startProcessing();
    try {
      for (GlobalDictionaryFieldInfo field : globalDictionaryColumns) {
        dictionaries.put(field.getFieldName(), ParquetFormatPlugin.loadDictionary(fs, new Path(field.getDictionaryPath()), context.getAllocator()));
      }
      return new GlobalDictionaries(dictionaries);
    } catch (IOException ioe) {
      throw new ExecutionSetupException(ioe);
    } finally {
      context.getStats().stopProcessing();
    }
  }
  return null; // if no columns should be global dictionary encoded.
}
/**
 * This method is a helper method added for DRILL-951
 * TextRecordReader to call this method to get field names out
 * @return array of field data strings
 */
public String[] getTextOutput() throws ExecutionSetupException {
  if (recordCount == 0 || fieldIndex == -1) {
    return null;
  }

  // Currently only first line header is supported. Return only first record.
  int retSize = fieldIndex + 1;
  String[] out = new String[retSize];
  try {
    ListVector listVector = output.addField(new Field(COL_NAME, true, MinorType.LIST.getType(), null), ListVector.class);
    List outputlist = (List) listVector.getObject((int) (recordCount - 1));

    for (int i = 0; i < retSize; i++) {
      out[i] = ((Text) outputlist.get(i)).toString();
    }
    return out;
  } catch (SchemaChangeException e) {
    throw new ExecutionSetupException(e);
  }
}
public static Class<? extends InputFormat<?, ?>> getInputFormatClass(final JobConf job, final Table table,
    final Partition partition) throws Exception {
  if (partition != null) {
    if (partition.getSd().getInputFormat() != null) {
      return (Class<? extends InputFormat<?, ?>>) Class.forName(partition.getSd().getInputFormat());
    }

    if (partition.getParameters().get(META_TABLE_STORAGE) != null) {
      final HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(job, partition.getParameters().get(META_TABLE_STORAGE));
      return (Class<? extends InputFormat<?, ?>>) storageHandler.getInputFormatClass();
    }
  }

  if (table.getSd().getInputFormat() != null) {
    return (Class<? extends InputFormat<?, ?>>) Class.forName(table.getSd().getInputFormat());
  }

  if (table.getParameters().get(META_TABLE_STORAGE) != null) {
    final HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(job, table.getParameters().get(META_TABLE_STORAGE));
    return (Class<? extends InputFormat<?, ?>>) storageHandler.getInputFormatClass();
  }

  throw new ExecutionSetupException("Unable to get Hive table InputFormat class. There is neither "
      + "InputFormat class explicitly specified nor a StorageHandler class provided.");
}
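// Hypothetical caller, not from the source: getInputFormatClass declares "throws Exception"
// (Class.forName and the StorageHandler lookup can fail), so a setup path could funnel any failure
// into ExecutionSetupException via the fromThrowable factory shown earlier. job, table, and
// partition are assumed to be in scope.
final Class<? extends InputFormat<?, ?>> inputFormatClass;
try {
  inputFormatClass = getInputFormatClass(job, table, partition);
} catch (Exception e) {
  throw ExecutionSetupException.fromThrowable("Failure while resolving Hive table InputFormat class", e);
}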
@Override
public void setup(OutputMutator output) throws ExecutionSetupException {
  final SequenceFileAsBinaryInputFormat inputFormat = new SequenceFileAsBinaryInputFormat();
  final JobConf jobConf = new JobConf(dfs.getConf());
  jobConf.setInputFormat(inputFormat.getClass());
  reader = getRecordReader(inputFormat, jobConf);
  final Field keyField = new Field(keySchema, true, getArrowTypeForMajorType(KEY_TYPE), null);
  final Field valueField = new Field(valueSchema, true, getArrowTypeForMajorType(VALUE_TYPE), null);
  try {
    keyVector = output.addField(keyField, VarBinaryVector.class);
    valueVector = output.addField(valueField, VarBinaryVector.class);
  } catch (SchemaChangeException sce) {
    throw new ExecutionSetupException("Error in setting up sequencefile reader.", sce);
  }
}
throw new ExecutionSetupException(e);