// Serializes a TableReference to its JSON string form; enclosing SerializableFunction class
// is outside this view (trailing brace closes it).
@Override public String apply(TableReference from) { return toJsonString(from); } }
// Serializes a TableSchema to its JSON string form; enclosing SerializableFunction class
// is outside this view (trailing brace closes it).
@Override public String apply(TableSchema from) { return toJsonString(from); } }
// Serializes a TimePartitioning spec to its JSON string form; enclosing SerializableFunction
// class is outside this view (trailing brace closes it).
@Override public String apply(TimePartitioning partitioning) { return toJsonString(partitioning); } }
/**
 * Creates a destination from a table spec string, serializing the optional
 * {@link TimePartitioning} to JSON before delegating to the JSON-based constructor.
 *
 * @param tableSpec table spec in {@code [project:]dataset.table} form
 * @param tableDescription optional human-readable description, may be null
 * @param timePartitioning optional partitioning spec, may be null
 */
public TableDestination(
    String tableSpec, @Nullable String tableDescription, TimePartitioning timePartitioning) {
  this(
      tableSpec,
      tableDescription,
      timePartitioning == null ? null : BigQueryHelpers.toJsonString(timePartitioning));
}
/**
 * Best-effort deletion of the given temporary tables.
 *
 * <p>A failure to delete any single table is logged as a warning and does not abort
 * deletion of the remaining tables.
 *
 * @param tableService service used to issue the delete calls
 * @param tempTables tables to remove
 */
static void removeTemporaryTables(DatasetService tableService, List<TableReference> tempTables) {
  for (TableReference table : tempTables) {
    try {
      LOG.debug("Deleting table {}", BigQueryHelpers.toJsonString(table));
      tableService.deleteTable(table);
    } catch (Exception e) {
      // Deliberately swallow: temp-table cleanup must not fail the pipeline.
      LOG.warn("Failed to delete the table {}", BigQueryHelpers.toJsonString(table), e);
    }
  }
}
/**
 * Creates a destination from a {@link TableReference}, converting it to a table spec string
 * and serializing the optional {@link TimePartitioning} to JSON before delegating.
 *
 * @param tableReference reference identifying the destination table
 * @param tableDescription optional human-readable description, may be null
 * @param timePartitioning optional partitioning spec, may be null
 */
public TableDestination(
    TableReference tableReference,
    @Nullable String tableDescription,
    TimePartitioning timePartitioning) {
  this(
      BigQueryHelpers.toTableSpec(tableReference),
      tableDescription,
      timePartitioning == null ? null : BigQueryHelpers.toJsonString(timePartitioning));
}
/**
 * Uses the specified schema for rows to be written.
 *
 * <p>The schema is <i>required</i> only if writing to a table that does not already exist, and
 * {@link CreateDisposition} is set to {@link CreateDisposition#CREATE_IF_NEEDED}.
 */
public Write<T> withSchema(TableSchema schema) {
  checkArgument(schema != null, "schema can not be null");
  // Serialize once and hand off to the JSON-based variant.
  String jsonSchema = BigQueryHelpers.toJsonString(schema);
  return withJsonSchema(StaticValueProvider.of(jsonSchema));
}
/**
 * Allows newly created tables to include a {@link TimePartitioning} class. Can only be used
 * when writing to a single table. If {@link #to(SerializableFunction)} or {@link
 * #to(DynamicDestinations)} is used to write dynamic tables, time partitioning can be directly
 * set in the returned {@link TableDestination}.
 */
public Write<T> withTimePartitioning(TimePartitioning partitioning) {
  checkArgument(partitioning != null, "partitioning can not be null");
  // Serialize once and hand off to the JSON-based variant.
  String jsonPartitioning = BigQueryHelpers.toJsonString(partitioning);
  return withJsonTimePartitioning(StaticValueProvider.of(jsonPartitioning));
}
/**
 * Cause a given {@link TableRow} object to fail when it's inserted. The errors in the list
 * will be returned on subsequent retries, and the insert will succeed when the errors run out.
 */
public void failOnInsert(
    Map<TableRow, List<TableDataInsertAllResponse.InsertErrors>> insertErrors) {
  synchronized (tables) {
    for (Map.Entry<TableRow, List<TableDataInsertAllResponse.InsertErrors>> entry
        : insertErrors.entrySet()) {
      // Store both the row key and its error list in serialized (JSON) form.
      List<String> serializedErrors = Lists.newArrayList();
      entry.getValue().forEach(err -> serializedErrors.add(BigQueryHelpers.toJsonString(err)));
      this.insertErrors.put(BigQueryHelpers.toJsonString(entry.getKey()), serializedErrors);
    }
  }
}
// Runs the extract job identified by the element's job UUID: extracts the source's data to
// files, cleans up the temporary resource, emits one extracted file path per output element,
// and sends the table schema (as JSON) to the tableSchemaTag side output. Enclosing DoFn and
// ParDo.of(...) are outside this view (trailing "} })" closes them).
@ProcessElement public void processElement(ProcessContext c) throws Exception { String jobUuid = c.element(); BigQuerySourceBase<T> source = createSource(jobUuid, coder); BigQueryOptions options = c.getPipelineOptions().as(BigQueryOptions.class); ExtractResult res = source.extractFiles(options); LOG.info("Extract job produced {} files", res.extractedFiles.size()); source.cleanupTempResource(options); for (ResourceId file : res.extractedFiles) { c.output(file.toString()); } c.output(tableSchemaTag, BigQueryHelpers.toJsonString(res.schema)); } })
/** * Returns the table to write, or {@code null} if writing with {@code tableFunction}. * * <p>If the table's project is not specified, use the executing project. */ @Nullable ValueProvider<TableReference> getTableWithDefaultProject(BigQueryOptions bqOptions) { ValueProvider<TableReference> table = getTable(); if (table == null) { return table; } if (!table.isAccessible()) { LOG.info( "Using a dynamic value for table input. This must contain a project" + " in the table reference: {}", table); return table; } if (Strings.isNullOrEmpty(table.get().getProjectId())) { // If user does not specify a project we assume the table to be located in // the default project. TableReference tableRef = table.get(); tableRef.setProjectId(bqOptions.getProject()); return NestedValueProvider.of( StaticValueProvider.of(BigQueryHelpers.toJsonString(tableRef)), new JsonTableRefToTableRef()); } return table; }
@Test public void testRemoveTemporaryTables() throws Exception { FakeDatasetService datasetService = new FakeDatasetService(); String projectId = "project"; String datasetId = "dataset"; datasetService.createDataset(projectId, datasetId, "", "", null); List<TableReference> tableRefs = Lists.newArrayList( BigQueryHelpers.parseTableSpec( String.format("%s:%s.%s", projectId, datasetId, "table1")), BigQueryHelpers.parseTableSpec( String.format("%s:%s.%s", projectId, datasetId, "table2")), BigQueryHelpers.parseTableSpec( String.format("%s:%s.%s", projectId, datasetId, "table3"))); for (TableReference tableRef : tableRefs) { datasetService.createTable(new Table().setTableReference(tableRef)); } // Add one more table to delete that does not actually exist. tableRefs.add( BigQueryHelpers.parseTableSpec(String.format("%s:%s.%s", projectId, datasetId, "table4"))); WriteRename.removeTemporaryTables(datasetService, tableRefs); for (TableReference ref : tableRefs) { loggedWriteRename.verifyDebug("Deleting table " + toJsonString(ref)); checkState(datasetService.getTable(ref) == null, "Table " + ref + " was not deleted!"); } }
// Serialize the temp table once, then record it in both views keyed by destination:
// the map and the per-element KV list — presumably consumed by a later copy/rename
// step; confirm against the enclosing method (not visible here).
String tableJson = toJsonString(tempTable); tempTables.put(tableDestination, tableJson); tempTablesElement.add(KV.of(tableDestination, tableJson));
toJsonString( new TableSchema() .setFields( toJsonString( fakeDatasetService .getTable(