/**
 * Returns the delegate's dataset config after applying the configured mutator.
 *
 * @throws Exception if the delegate fails to produce its dataset
 */
@Override
public DatasetConfig getDataset() throws Exception {
  final DatasetConfig fetched = delegate.getDataset();
  return datasetMutator.apply(fetched);
}
/**
 * Retrieves the dataset definition and splits from {@code accessor} and persists them,
 * carrying fields over from {@code oldConfig} when a previously stored version exists.
 * Failures are logged and swallowed — this save is best-effort.
 */
void completeSave(SourceTableDefinition accessor, DatasetConfig oldConfig, NamespaceAttribute... attributes) {
  try {
    final DatasetConfig config = accessor.getDataset();
    if (oldConfig != null) {
      // Preserve identity/settings from the previously stored config.
      NamespaceUtils.copyFromOldConfig(oldConfig, config);
    }
    completeSave(config, accessor.getSplits(), attributes);
  } catch (Exception e) {
    // Best-effort: a failed metadata refresh must not propagate to the caller.
    logger.warn("Failure while retrieving and saving dataset {}.", accessor.getName(), e);
  }
}
/**
 * Fetches the dataset config from the delegate, converting any failure into a
 * user-facing data read error.
 */
public DatasetConfig getDataset() {
  try {
    return delegate.getDataset();
  } catch (Exception e) {
    // Surface metadata-retrieval failures to the user with the table name attached.
    throw UserException.dataReadError(e)
        .message("Failure while attempting to retrieve metadata information for table %s.", getName())
        .build(logger);
  }
}
/**
 * Lazily materializes {@code datasetPointer}; no-op when it is already set.
 *
 * If the stored namespace config already has a read definition, splits are resolved
 * from the namespace. Otherwise the metadata is (re)fetched from the source accessor,
 * persisted back to the namespace (keeping the existing id/tag), and a materialized
 * splits pointer is built from the freshly fetched splits.
 */
private void loadIfNecessary() {
  if(datasetPointer != null){
    // Already loaded — nothing to do.
    return;
  }
  SplitsPointer splitsPointer;
  if(datasetConfig.getReadDefinition() != null) {
    // Metadata is already present in the namespace; read splits from there.
    splitsPointer = DatasetSplitsPointer.of(ns, datasetConfig);
  } else {
    try{
      // No read definition yet: fetch fresh metadata from the source.
      final DatasetConfig newDatasetConfig = datasetAccessor.getDataset();
      // Keep the identity of the existing namespace entry so this is an update, not an insert.
      newDatasetConfig.setId(datasetConfig.getId());
      newDatasetConfig.setTag(datasetConfig.getTag());
      List<DatasetSplit> splits = datasetAccessor.getSplits();
      // Persist the refreshed metadata before using it locally.
      ns.addOrUpdateDataset(getName(), newDatasetConfig, splits);
      datasetConfig = newDatasetConfig;
      splitsPointer = MaterializedSplitsPointer.of(splits, splits.size());
    } catch (Exception e) {
      // NOTE(review): wraps with cause preserved; callers see an unchecked failure.
      throw new RuntimeException(e);
    }
  }
  datasetPointer = new TableMetadataImpl(pluginId, datasetConfig, user, splitsPointer);
}
private void saveInHomeSpace(NamespaceService namespaceService, SourceTableDefinition accessor, DatasetConfig nsConfig) { Preconditions.checkNotNull(nsConfig); final NamespaceKey key = new NamespaceKey(nsConfig.getFullPathList()); try{ // use key from namespace config DatasetConfig srcConfig = accessor.getDataset(); if (nsConfig.getId() == null) { nsConfig.setId(srcConfig.getId()); } // Merge namespace config with config obtained from underlying filesystem used to store user uploaded files. // Set schema, read definition and state from source accessor nsConfig.setRecordSchema(srcConfig.getRecordSchema()); nsConfig.setSchemaVersion(srcConfig.getSchemaVersion()); nsConfig.setReadDefinition(srcConfig.getReadDefinition()); // get splits from source List<DatasetSplit> splits = accessor.getSplits(); namespaceService.addOrUpdateDataset(key, nsConfig, splits); }catch(Exception ex){ logger.warn("Failure while retrieving and saving dataset {}.", key, ex); } }
/**
 * Builds a mocked {@link SourceTableDefinition} for a saveable physical file dataset at
 * {@code dsPath}, whose schema is a single nullable UTF-8 field named "string".
 *
 * Fix: the original builder chain called {@code setRecordSchema(ByteString.EMPTY)} and then
 * immediately overwrote it with {@code setRecordSchema(schema.toByteString())}; the redundant
 * first call has been removed.
 *
 * @param dsPath dotted schema path of the dataset, parsed via {@code SqlUtils.parseSchemaPath}
 * @return a mock whose name, dataset config, type, and saveable flag are stubbed
 */
private static SourceTableDefinition newDataset(final String dsPath) {
  final List<String> path = SqlUtils.parseSchemaPath(dsPath);
  final SourceTableDefinition ret = mock(SourceTableDefinition.class);
  final NamespaceKey datasetName = new NamespaceKey(path);
  when(ret.getName()).thenReturn(datasetName);

  final BatchSchema schema = BatchSchema.newBuilder()
      .addField(new Field("string", FieldType.nullable(ArrowType.Utf8.INSTANCE), null))
      .build();
  final DatasetConfig dsConfig = new DatasetConfig()
      .setName(Util.last(path))
      .setFullPathList(path)
      .setType(DatasetType.PHYSICAL_DATASET_SOURCE_FILE)
      .setPhysicalDataset(
          new PhysicalDataset()
              .setFormatSettings(null))
      .setSchemaVersion(DatasetHelper.CURRENT_VERSION)
      .setRecordSchema(schema.toByteString())
      .setReadDefinition(new ReadDefinition());
  try {
    when(ret.getDataset()).thenReturn(dsConfig);
  } catch (Exception ignored) {
    // Stubbing a mock cannot actually throw; the catch only satisfies the checked signature.
  }
  when(ret.getType()).thenReturn(DatasetType.PHYSICAL_DATASET_SOURCE_FILE);
  when(ret.isSaveable()).thenReturn(true);
  return ret;
}
// Fetch the latest definition and splits from the source table, then carry over
// identity/settings from the existing stored config into the fresh one.
// NOTE(review): fragment of a larger method not visible in this chunk.
final DatasetConfig newDatasetConfig = tableDefinition.getDataset(); final List<DatasetSplit> splits = tableDefinition.getSplits(); NamespaceUtils.copyFromOldConfig(datasetConfig, newDatasetConfig);