// ThreadLocal override: gives each thread its own reusable StorageKey built
// from this dataset's partition strategy, avoiding a fresh allocation on every
// key translation. NOTE(review): the trailing "};" closes an enclosing
// anonymous class declared outside this view.
@Override protected StorageKey initialValue() { return new StorageKey(descriptor.getPartitionStrategy()); } };
@Override
@SuppressWarnings("deprecation")
public long increment(Key key, String fieldName, long amount) {
  // Resolve the entity key against this dataset's partition strategy, then
  // delegate the atomic field increment to the underlying DAO.
  return dao.increment(
      keyFor(getDescriptor().getPartitionStrategy(), key),
      fieldName, amount);
}
@Override
@SuppressWarnings("deprecation")
public void delete(Key key) {
  // Translate the user-facing Key via the partition strategy and hand the
  // delete off to the DAO.
  dao.delete(
      keyFor(getDescriptor().getPartitionStrategy(), key));
}
@Override
@SuppressWarnings("deprecation")
public E get(Key key) {
  // Map the Key to a DAO partition key using this dataset's partition
  // strategy and fetch the entity.
  return dao.get(
      keyFor(getDescriptor().getPartitionStrategy(), key));
}
@Override
@SuppressWarnings("deprecation")
public E get(Key key) {
  // Delegate the lookup to the DAO after converting the Key into the
  // strategy-specific partition key form.
  return dao.get(keyFor(
      getDescriptor().getPartitionStrategy(), key));
}
@Override
@SuppressWarnings("deprecation")
public void delete(Key key) {
  // Convert the Key using the descriptor's partition strategy, then remove
  // the matching record through the DAO.
  dao.delete(keyFor(
      getDescriptor().getPartitionStrategy(), key));
}
@Override
@SuppressWarnings("deprecation")
public long increment(Key key, String fieldName, long amount) {
  // Build the partition key for this entity and ask the DAO to atomically
  // add `amount` to the named field, returning the new value.
  return dao.increment(keyFor(
      getDescriptor().getPartitionStrategy(), key),
      fieldName, amount);
}
@SuppressWarnings("deprecation")
PartitionKey toPartitionKey(MarkerRange.Boundary boundary) {
  // A usable boundary must exist and carry a bound; otherwise there is no
  // key constraint and we signal that with null.
  if (boundary != null && boundary.getBound() != null) {
    return keyFor(
        dataset.getDescriptor().getPartitionStrategy(), boundary.getBound());
  }
  return null;
}
@SuppressWarnings("deprecation")
PartitionKey toPartitionKey(MarkerRange.Boundary boundary) {
  // Unbounded (or absent) boundaries translate to "no partition key".
  if (boundary == null) {
    return null;
  }
  if (boundary.getBound() == null) {
    return null;
  }
  return keyFor(
      dataset.getDescriptor().getPartitionStrategy(), boundary.getBound());
}
/**
 * Construct a {@link Builder} for a {@link RandomAccessDataset}.
 */
public Builder(RandomAccessDataset dataset) {
  this.schema = dataset.getDescriptor().getSchema();
  this.strategy = dataset.getDescriptor().getPartitionStrategy();
  this.values = Maps.newHashMap();
  // Accept both the source field name and the partition field name for each
  // partitioner, so callers may set key values by either.
  this.fieldNames = Sets.newHashSet();
  for (FieldPartitioner partitioner : strategy.getFieldPartitioners()) {
    fieldNames.add(partitioner.getSourceName());
    fieldNames.add(partitioner.getName());
  }
}
/**
 * Precondition helper: verify that {@code descriptor} is partitioned and that
 * its strategy has a partitioner for {@code fieldName}.
 *
 * @throws IllegalArgumentException if either check fails
 */
public static void checkPartitionedBy(DatasetDescriptor descriptor,
                                      String fieldName) {
  Preconditions.checkArgument(descriptor.isPartitioned(),
      "Descriptor %s is not partitioned", descriptor);
  // The strategy-level check goes through Accessor because the partitioner
  // lookup is not part of the public PartitionStrategy API here.
  Preconditions.checkArgument(
      Accessor.getDefault().hasPartitioner(
          descriptor.getPartitionStrategy(), fieldName),
      "Descriptor %s is not partitioned by '%s'", descriptor, fieldName);
}
@Override
public AvroKeySchema parseKeySchema(String rawSchema) {
  // Route the raw literal through DatasetDescriptor.Builder so the schema is
  // validated before we build the key schema from it.
  DatasetDescriptor descriptor =
      new DatasetDescriptor.Builder().schemaLiteral(rawSchema).build();
  return new AvroKeySchema(
      descriptor.getSchema(), descriptor.getPartitionStrategy());
}
/**
 * Create an iterator over this view's partitions, filtered by the view's
 * key predicate.
 *
 * @throws DatasetException if the partition directories cannot be listed
 */
private FileSystemPartitionIterator partitionIterator() {
  DatasetDescriptor descriptor = dataset.getDescriptor();
  try {
    return new FileSystemPartitionIterator(
        fs, root, descriptor.getPartitionStrategy(), descriptor.getSchema(),
        getKeyPredicate());
  } catch (IOException ex) {
    // Fix: the original message read "...in view:<view>" with no separator;
    // add the space so the rendered view is readable. Preserve the cause.
    throw new DatasetException("Cannot list partitions in view: " + this, ex);
  }
}
/**
 * Construct a {@link Builder} for a partitioned {@link Dataset}.
 */
public Builder(Dataset dataset) {
  // Guard clause: a key builder only makes sense for partitioned datasets.
  if (!dataset.getDescriptor().isPartitioned()) {
    throw new DatasetException("Dataset is not partitioned");
  }
  this.values = Maps.newHashMap();
  this.strategy = dataset.getDescriptor().getPartitionStrategy();
}
@Override
public AvroKeySchema parseKeySchema(String rawSchema) {
  // DatasetDescriptor.Builder validates the schema literal for us; reuse it
  // rather than parsing the Avro schema directly.
  DatasetDescriptor parsed = new DatasetDescriptor.Builder()
      .schemaLiteral(rawSchema)
      .build();
  return new AvroKeySchema(parsed.getSchema(), parsed.getPartitionStrategy());
}
@Override public AvroKeySchema parseKeySchema(String rawSchema, PartitionStrategy partitionStrategy) { // use DatasetDescriptor.Builder because it checks consistency DatasetDescriptor descriptor = new DatasetDescriptor.Builder() .schemaLiteral(rawSchema) .partitionStrategy(partitionStrategy) .build(); return new AvroKeySchema( descriptor.getSchema(), descriptor.getPartitionStrategy()); }
@Override public AvroKeySchema parseKeySchema(String rawSchema, PartitionStrategy partitionStrategy) { // use DatasetDescriptor.Builder because it checks consistency DatasetDescriptor descriptor = new DatasetDescriptor.Builder() .schemaLiteral(rawSchema) .partitionStrategy(partitionStrategy) .build(); return new AvroKeySchema( descriptor.getSchema(), descriptor.getPartitionStrategy()); }
@Test
public void testLoad() {
  ensureCreated();

  // Loading an existing dataset should round-trip schema, partition
  // strategy, and format exactly.
  DatasetDescriptor actual = provider.load(NAMESPACE, NAME);
  Assert.assertNotNull("DatasetDescriptor should be returned", actual);
  Assert.assertEquals("Schema should match",
      testDescriptor.getSchema(), actual.getSchema());
  Assert.assertEquals("PartitionStrategy should match",
      testDescriptor.getPartitionStrategy(), actual.getPartitionStrategy());
  Assert.assertEquals("Format should match",
      testDescriptor.getFormat(), actual.getFormat());
}
@Test
public void testBasic() {
  // Creating a dataset from the test entity schema should derive both a
  // column mapping and a partition strategy.
  DatasetDescriptor descriptor = provider.create("default",
      tableName + ".TestEntity",
      new DatasetDescriptor.Builder().schemaLiteral(testEntity).build());
  ColumnMapping mapping = descriptor.getColumnMapping();
  PartitionStrategy strategy = descriptor.getPartitionStrategy();
  assertEquals(9, mapping.getFieldMappings().size());
  assertEquals(2, Accessor.getDefault().getFieldPartitioners(strategy).size());
}
@Test
public void testDescriptorValidationPasses() {
  // A schema/strategy pair known to be consistent should build without
  // throwing, and the descriptor should expose both unchanged.
  DatasetDescriptor built = new DatasetDescriptor.Builder()
      .schema(schema)
      .partitionStrategy(strategy)
      .build();
  Assert.assertEquals("Descriptor should have correct schema",
      schema, built.getSchema());
  Assert.assertEquals("Descriptor should have correct strategy",
      strategy, built.getPartitionStrategy());
}