protected String writeNewMetadata(TableMetadata metadata, int version) { if (baseLocation == null) { baseLocation = metadata.location(); } String newTableMetadataFilePath = newTableMetadataFilePath(baseLocation, version); OutputFile newMetadataLocation = fileIo.newOutputFile(newTableMetadataFilePath); // write the new metadata TableMetadataParser.write(metadata, newMetadataLocation); return newTableMetadataFilePath; }
/**
 * Builds a {@link PartitionSpec} from its JSON field representation.
 *
 * @param schema schema that the spec's source columns refer to
 * @param specId ID to assign to the resulting spec
 * @param json JSON node containing the partition field definitions
 * @return the parsed partition spec
 */
static PartitionSpec fromJsonFields(Schema schema, int specId, JsonNode json) {
  PartitionSpec.Builder specBuilder = PartitionSpec.builderFor(schema)
      .withSpecId(specId);
  buildFromJsonFields(specBuilder, json);
  return specBuilder.build();
}
@Override public void commit() { // rollback does not refresh or retry. it only operates on the state of the table when rollback // was called to create the transaction. ops.commit(base, base.rollbackTo(apply())); } }
/**
 * Copies partition values from a {@link StructLike} into a {@link PartitionData},
 * reusing the supplied container when one is given.
 *
 * @param spec partition spec describing the fields' Java classes
 * @param partitionData source of partition values
 * @param reuse container to reuse, or null to allocate a fresh one
 * @return a PartitionData holding a copy of the source values
 */
private static PartitionData copyPartitionData(PartitionSpec spec, StructLike partitionData,
                                               PartitionData reuse) {
  // allocate only when no reusable container was provided
  PartitionData copied = (reuse != null) ? reuse : newPartitionData(spec);

  Class<?>[] fieldClasses = spec.javaClasses();
  int numFields = spec.fields().size();
  for (int pos = 0; pos < numFields; pos += 1) {
    copied.set(pos, partitionData.get(pos, fieldClasses[pos]));
  }

  return copied;
}
/**
 * Creates a new table with the given schema, partition spec, and properties.
 *
 * @param schema table schema
 * @param spec partition spec
 * @param properties table properties
 * @param database database name
 * @param table table name
 * @return the newly created table
 * @throws AlreadyExistsException if the table already exists
 */
public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
                    String database, String table) {
  String fullName = database + "." + table;

  TableOperations tableOps = newTableOps(conf, database, table);
  if (tableOps.current() != null) {
    throw new AlreadyExistsException("Table already exists: " + fullName);
  }

  String tableLocation = defaultWarehouseLocation(conf, database, table);
  TableMetadata initialMetadata = newTableMetadata(tableOps, schema, spec, tableLocation, properties);

  // base is null: this is the table's first metadata commit
  tableOps.commit(null, initialMetadata);

  return new BaseTable(tableOps, fullName);
}
/**
 * Starts a transaction that will create a new table when committed.
 *
 * @param schema table schema
 * @param spec partition spec
 * @param properties table properties
 * @param database database name
 * @param table table name
 * @return a create-table transaction
 * @throws AlreadyExistsException if the table already exists
 */
public Transaction beginCreate(Schema schema, PartitionSpec spec, Map<String, String> properties,
                               String database, String table) {
  String fullName = database + "." + table;

  TableOperations tableOps = newTableOps(conf, database, table);
  if (tableOps.current() != null) {
    throw new AlreadyExistsException("Table already exists: " + fullName);
  }

  String tableLocation = defaultWarehouseLocation(conf, database, table);
  TableMetadata initialMetadata = newTableMetadata(tableOps, schema, spec, tableLocation, properties);

  // nothing is committed here; the transaction commits the metadata later
  return BaseTransaction.createTableTransaction(tableOps, initialMetadata);
}
/**
 * Creates a manifest writer that appends entries for the given snapshot to the
 * given output file as Avro.
 *
 * @param spec partition spec for entries written to this manifest
 * @param file destination output file
 * @param snapshotId snapshot that new entries are attributed to
 */
ManifestWriter(PartitionSpec spec, OutputFile file, long snapshotId) {
  this.file = file;
  this.location = file.location();
  this.snapshotId = snapshotId;
  this.specId = spec.specId();
  // manifests are always written in Avro format
  this.writer = newAppender(FileFormat.AVRO, spec, file);
  // single reusable entry wrapper to avoid per-entry allocation
  this.reused = new ManifestEntry(spec.partitionType());
  this.stats = new PartitionSummary(spec);
}
/**
 * Delete a file tracked by a {@link DataFile} from the underlying table.
 *
 * @param file a DataFile to remove from the table
 * @return this for method chaining
 */
default DeleteFiles deleteFile(DataFile file) {
  // delegate to the path-based overload
  CharSequence path = file.path();
  deleteFile(path);
  return this;
}
SchemaUpdate(TableOperations ops) { this.ops = ops; this.base = ops.current(); this.schema = base.schema(); this.lastColumnId = base.lastColumnId(); }
MergingSnapshotUpdate(TableOperations ops) { super(ops); this.ops = ops; this.spec = ops.current().spec(); this.manifestTargetSizeBytes = ops.current() .propertyAsLong(MANIFEST_TARGET_SIZE_BYTES, MANIFEST_TARGET_SIZE_BYTES_DEFAULT); this.minManifestsCountToMerge = ops.current() .propertyAsInt(MANIFEST_MIN_MERGE_COUNT, MANIFEST_MIN_MERGE_COUNT_DEFAULT); }
/**
 * Commits the pending schema changes by writing an updated table metadata version.
 */
@Override
public void commit() {
  TableMetadata updated = base.updateSchema(apply(), lastColumnId);
  ops.commit(base, updated);
}
/**
 * Returns the current snapshot ID of the given metadata, or null if the metadata
 * is null or has no current snapshot.
 *
 * @param meta table metadata, may be null
 * @return current snapshot ID, or null
 */
private static Long currentId(TableMetadata meta) {
  if (meta != null && meta.currentSnapshot() != null) {
    return meta.currentSnapshot().snapshotId();
  }
  return null;
}
public void delete(ManifestEntry entry) { // Use the current Snapshot ID for the delete. It is safe to delete the data file from disk // when this Snapshot has been removed or when there are no Snapshots older than this one. add(reused.wrapDelete(snapshotId, entry.file())); }
// Deletes the given file path through the table's FileIO. NOTE(review): this is the tail of
// an anonymous class whose declaration starts before this view; the trailing "};" closes it.
@Override public void accept(String file) { ops.io().deleteFile(file); } };
/**
 * Adds a data file, replacing the contents of the partition it belongs to.
 *
 * @param file data file to add
 * @return this for method chaining
 */
@Override
public ReplacePartitions addFile(DataFile file) {
  // drop whatever the file's partition currently holds before appending it
  dropPartition(file.partition());
  add(file);
  return this;
}
/**
 * Adds each non-deleted entry from the given entries as an existing entry.
 *
 * @param entries manifest entries to carry forward
 */
public void addExisting(Iterable<ManifestEntry> entries) {
  for (ManifestEntry entry : entries) {
    if (entry.status() == DELETED) {
      // deleted entries are not carried forward
      continue;
    }
    addExisting(entry);
  }
}
@Override public void add(DataFile file) { // TODO: this assumes that file is a GenericDataFile that can be written directly to Avro // Eventually, this should check in case there are other DataFile implementations. add(reused.wrapAppend(snapshotId, file)); }