/**
 * Builds the coprocessor jar for this table, wiring in whichever coprocessors
 * the dataset's properties call for (transaction support and/or readless increments).
 */
@Override
protected CoprocessorJar createCoprocessorJar() throws IOException {
  // Both flags come from the same property map; read it once.
  Map<String, String> props = spec.getProperties();
  boolean transactional = DatasetsUtil.isTransactional(props);
  boolean supportsIncrement = TableProperties.getReadlessIncrementSupport(props);
  return createCoprocessorJarInternal(conf, coprocessorManager, tableUtil,
                                      transactional, supportsIncrement);
}
/**
 * Assembles the coprocessor jar required by this table's configuration:
 * transactionality and readless-increment support are both driven by the spec's properties.
 */
@Override
protected CoprocessorJar createCoprocessorJar() throws IOException {
  Map<String, String> properties = spec.getProperties();
  // Determine which coprocessor features are needed before delegating.
  boolean needsTx = DatasetsUtil.isTransactional(properties);
  boolean needsReadlessIncrements = TableProperties.getReadlessIncrementSupport(properties);
  return createCoprocessorJarInternal(conf, coprocessorManager, tableUtil,
                                      needsTx, needsReadlessIncrements);
}
/**
 * Constructs a time-partitioned fileset dataset over the given embedded fileset
 * and partition index table.
 *
 * @throws DataSetException if the spec lacks the partitioning property, which
 *         indicates a dataset created by CDAP 2.7 — a version no longer supported
 */
public TimePartitionedFileSetDataset(DatasetContext datasetContext, String name,
                                     FileSet fileSet, IndexedTable partitionTable,
                                     DatasetSpecification spec, Map<String, String> arguments,
                                     Provider<ExploreFacade> exploreFacadeProvider) {
  super(datasetContext, name, PARTITIONING, fileSet, partitionTable, spec, arguments,
        exploreFacadeProvider);
  // CDAP 2.7 created TPFS instances without the partitioning property; such
  // datasets cannot be read by this version, so fail fast here.
  if (null == PartitionedFileSetProperties.getPartitioning(spec.getProperties())) {
    throw new DataSetException("Unsupported version of TimePartitionedFileSet. Dataset '" + name + "' is missing " +
                               "the partitioning property. This probably means that it was created in CDAP 2.7, " +
                               "which is not supported any longer.");
  }
}
/**
 * Base constructor: registers the dataset name with the superclass, keeps a
 * reference to the backing table, and reads the row-partition interval size
 * from the spec's properties.
 */
TimeseriesDataset(DatasetSpecification spec, Table table) {
  super(spec.getName(), table);
  this.table = table;
  this.rowPartitionIntervalSize = getIntervalSize(spec.getProperties());
}
/**
 * Creates a TPFS dataset instance. Rejects specs without a partitioning
 * property, since those were produced by CDAP 2.7 and are unsupported.
 */
public TimePartitionedFileSetDataset(DatasetContext datasetContext, String name,
                                     FileSet fileSet, IndexedTable partitionTable,
                                     DatasetSpecification spec, Map<String, String> arguments,
                                     Provider<ExploreFacade> exploreFacadeProvider) {
  super(datasetContext, name, PARTITIONING, fileSet, partitionTable, spec, arguments,
        exploreFacadeProvider);
  // Guard against the CDAP 2.7 on-disk format, which did not record partitioning.
  if (PartitionedFileSetProperties.getPartitioning(spec.getProperties()) != null) {
    return;
  }
  throw new DataSetException("Unsupported version of TimePartitionedFileSet. Dataset '" + name + "' is missing " +
                             "the partitioning property. This probably means that it was created in CDAP 2.7, " +
                             "which is not supported any longer.");
}
/**
 * Returns all dataset specifications in the namespace whose properties contain
 * every (key, value) entry of {@code properties}.
 */
public Collection<DatasetSpecification> get(NamespaceId namespaceId, final Map<String, String> properties) {
  Predicate<DatasetSpecification> propertyFilter = input -> {
    if (input == null) {
      return false;
    }
    // entriesOnlyOnLeft() is empty exactly when every requested entry is
    // present (with the same value) in the spec's properties.
    return Maps.difference(properties, input.getProperties()).entriesOnlyOnLeft().isEmpty();
  };
  return getAll(namespaceId, propertyFilter);
}
/**
 * Fetches the specs in {@code namespaceId} whose property map is a superset of
 * the given {@code properties} (same values for all requested keys).
 */
public Collection<DatasetSpecification> get(NamespaceId namespaceId, final Map<String, String> properties) {
  Predicate<DatasetSpecification> matchesRequestedProperties =
    spec -> spec != null
      && Maps.difference(properties, spec.getProperties()).entriesOnlyOnLeft().isEmpty();
  return getAll(namespaceId, matchesRequestedProperties);
}
/**
 * Builds the composite admin for a cube dataset: one admin for the "entity"
 * metrics table plus one per configured time resolution.
 */
@Override
public DatasetAdmin getAdmin(DatasetContext datasetContext, DatasetSpecification spec,
                             ClassLoader classLoader) throws IOException {
  Map<String, DatasetAdmin> delegates = new HashMap<>();
  delegates.put("entity",
                metricsTableDef.getAdmin(datasetContext, spec.getSpecification("entity"), classLoader));
  // Each resolution is stored in its own table, keyed by the resolution value.
  for (int resolution : getResolutions(spec.getProperties())) {
    String tableName = String.valueOf(resolution);
    delegates.put(tableName,
                  tableDef.getAdmin(datasetContext, spec.getSpecification(tableName), classLoader));
  }
  return new CubeDatasetAdmin(spec, delegates);
}
/**
 * Creates the cube's composite admin: the entity-mapping table's admin plus one
 * admin per resolution table declared in the spec's properties.
 */
@Override
public DatasetAdmin getAdmin(DatasetContext datasetContext, DatasetSpecification spec,
                             ClassLoader classLoader) throws IOException {
  Map<String, DatasetAdmin> admins = new HashMap<>();
  admins.put("entity",
             metricsTableDef.getAdmin(datasetContext, spec.getSpecification("entity"), classLoader));
  int[] resolutions = getResolutions(spec.getProperties());
  for (int i = 0; i < resolutions.length; i++) {
    // Resolution tables are named by their numeric resolution value.
    String resolutionTable = String.valueOf(resolutions[i]);
    admins.put(resolutionTable,
               tableDef.getAdmin(datasetContext, spec.getSpecification(resolutionTable), classLoader));
  }
  return new CubeDatasetAdmin(spec, admins);
}
/**
 * Creates an in-memory table whose effective name is the dataset name prefixed
 * with its namespace (via {@code PrefixedNamespaces.namespace}); the spec's
 * properties are forwarded to the superclass unchanged.
 * NOTE(review): the boolean {@code false} argument's meaning is defined by the
 * superclass constructor, which is not visible here — confirm before relying on it.
 */
public InMemoryTable(DatasetContext datasetContext, DatasetSpecification spec, CConfiguration cConf) { super(PrefixedNamespaces.namespace(cConf, datasetContext.getNamespaceId(), spec.getName()), false, spec.getProperties()); }
/**
 * Constructs an in-memory table; the table name is namespaced with the dataset's
 * namespace id and the CConfiguration-derived prefix, and the spec's properties
 * pass straight through to the superclass.
 * NOTE(review): the hard-coded {@code false} flag's semantics live in the unseen
 * superclass constructor — verify there.
 */
public InMemoryTable(DatasetContext datasetContext, DatasetSpecification spec, CConfiguration cConf) { super(PrefixedNamespaces.namespace(cConf, datasetContext.getNamespaceId(), spec.getName()), false, spec.getProperties()); }
/**
 * Validates a reconfiguration request: the {@code NOT_RECONFIGURABLE} property
 * must be identical between the current spec and the new properties.
 *
 * @throws IncompatibleUpdateException if the property's value would change
 */
@Override
public DatasetSpecification reconfigure(String instanceName, DatasetProperties properties,
                                        DatasetSpecification currentSpec) throws IncompatibleUpdateException {
  String existing = currentSpec.getProperties().get(NOT_RECONFIGURABLE);
  String requested = properties.getProperties().get(NOT_RECONFIGURABLE);
  // Null-safe comparison: the property may legitimately be absent on both sides.
  if (!Objects.equals(existing, requested)) {
    throw new IncompatibleUpdateException(String.format("Can't change %s from %s to %s. ",
                                                        NOT_RECONFIGURABLE, existing, requested));
  }
  return configure(instanceName, properties);
}
/**
 * Creates a LevelDB-backed table. The physical table name is namespaced via
 * {@code PrefixedNamespaces.namespace}, and a {@code LevelDBTableCore} is set up
 * against the shared {@code LevelDBTableService}.
 * NOTE(review): the {@code false} flag forwarded to super has its meaning defined
 * by the unseen superclass constructor — confirm there.
 */
public LevelDBTable(DatasetContext datasetContext, String tableName, LevelDBTableService service, CConfiguration cConf, DatasetSpecification spec) throws IOException { super(PrefixedNamespaces.namespace(cConf, datasetContext.getNamespaceId(), tableName), false, spec.getProperties()); this.core = new LevelDBTableCore(getTableName(), service); }
/**
 * Constructs a LevelDB table: namespaces the table name, forwards the spec's
 * properties to the superclass, then binds the low-level {@code LevelDBTableCore}
 * using the (possibly superclass-normalized) name from {@code getTableName()}.
 * NOTE(review): semantics of the boolean {@code false} argument depend on the
 * superclass constructor, which is outside this view.
 */
public LevelDBTable(DatasetContext datasetContext, String tableName, LevelDBTableService service, CConfiguration cConf, DatasetSpecification spec) throws IOException { super(PrefixedNamespaces.namespace(cConf, datasetContext.getNamespaceId(), tableName), false, spec.getProperties()); this.core = new LevelDBTableCore(getTableName(), service); }
/**
 * Rejects any reconfiguration that would alter the dataset's schema.
 *
 * @throws IncompatibleUpdateException if the new properties' schema string does
 *         not match the schema recorded in the current spec
 */
@Override
public DatasetSpecification reconfigure(String instanceName, DatasetProperties newProperties,
                                        DatasetSpecification currentSpec) throws IncompatibleUpdateException {
  String currentSchema = currentSpec.getProperties().get("schema");
  String newSchema = getSchemaString(newProperties);
  // Bug fix: the original called currentSchema.equals(...) directly and would
  // throw NullPointerException for a spec with no "schema" property. Compare
  // null-safely so a missing-vs-present schema is reported as an incompatible
  // update rather than crashing.
  boolean schemaUnchanged = (currentSchema == null) ? newSchema == null
                                                    : currentSchema.equals(newSchema);
  if (!schemaUnchanged) {
    throw new IncompatibleUpdateException("Attempt to alter schema");
  }
  return configure(instanceName, newProperties);
}
/**
 * Creates an HBase-backed metrics table: resolves the namespaced HTable id,
 * initializes instance state from the configuration, and opens an HTable with a
 * default write buffer and auto-flush disabled (writes are batched until flush).
 * NOTE(review): a near-identical constructor elsewhere calls initializeV3Vars
 * instead of initializeVars — confirm which initializer this class should use.
 */
public HBaseMetricsTable(DatasetContext datasetContext, DatasetSpecification spec, Configuration hConf, HBaseTableUtil tableUtil, CConfiguration cConf) throws IOException { this.tableUtil = tableUtil; this.tableId = tableUtil.createHTableId(new NamespaceId(datasetContext.getNamespaceId()), spec.getName()); initializeVars(cConf, spec); HTable hTable = tableUtil.createHTable(hConf, tableId); // todo: make configurable hTable.setWriteBufferSize(HBaseTableUtil.DEFAULT_WRITE_BUFFER_SIZE); hTable.setAutoFlushTo(false); this.hTable = hTable; this.columnFamily = TableProperties.getColumnFamilyBytes(spec.getProperties()); }
/**
 * Creates an HBase-backed metrics table. Builds the namespaced HTable id, runs
 * the v3 initializer, then opens an HTable with the default write buffer size
 * and auto-flush off so puts are buffered client-side. The column family is
 * taken from the spec's table properties.
 * NOTE(review): a near-identical constructor elsewhere calls initializeVars
 * (no "V3") — confirm the intended initializer for this class.
 */
public HBaseMetricsTable(DatasetContext datasetContext, DatasetSpecification spec, Configuration hConf, HBaseTableUtil tableUtil, CConfiguration cConf) throws IOException { this.tableUtil = tableUtil; this.tableId = tableUtil.createHTableId(new NamespaceId(datasetContext.getNamespaceId()), spec.getName()); initializeV3Vars(cConf, spec); HTable hTable = tableUtil.createHTable(hConf, tableId); // todo: make configurable hTable.setWriteBufferSize(HBaseTableUtil.DEFAULT_WRITE_BUFFER_SIZE); hTable.setAutoFlushTo(false); this.hTable = hTable; this.columnFamily = TableProperties.getColumnFamilyBytes(spec.getProperties()); }
@Override public DatasetSpecification configure(String instanceName, DatasetProperties properties) { // Use ConflictDetection.NONE as we only need a flag whether a program uses a dataset/stream. // Having conflict detection will lead to failures when programs start, and all try to register at the same time. DatasetProperties datasetProperties = TableProperties.builder() .setConflictDetection(ConflictDetection.NONE) .addAll(properties.getProperties()) .build(); DatasetSpecification spec = tableDefinition.configure(instanceName, datasetProperties); return DatasetSpecification.builder(instanceName, getName()) .properties(spec.getProperties()) .build(); }
@Override public DatasetSpecification configure(String instanceName, DatasetProperties properties) { // Use ConflictDetection.NONE as we only need a flag whether a program uses a dataset/stream. // Having conflict detection will lead to failures when programs start, and all try to register at the same time. DatasetProperties datasetProperties = TableProperties.builder() .setConflictDetection(ConflictDetection.NONE) .addAll(properties.getProperties()) .build(); DatasetSpecification spec = tableDefinition.configure(instanceName, datasetProperties); return DatasetSpecification.builder(instanceName, getName()) .properties(spec.getProperties()) .build(); }
@Test public void testPermissions() throws Exception { // validate that the fileset permissions and group were applied to the embedded fileset (just sanity test) PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance); Location loc = pfs.getEmbeddedFileSet().getLocation("some/random/path"); loc.getOutputStream().close(); Assert.assertEquals(fsPermissions, loc.getPermissions()); Assert.assertEquals(group, loc.getGroup()); Map<String, String> props = dsFrameworkUtil.getSpec(pfsInstance).getSpecification("partitions").getProperties(); Assert.assertEquals(tablePermissions, TableProperties.getTablePermissions(props)); }