/**
 * Provides a {@link DatumWriter} for encoding {@link MetricValues} records.
 *
 * @param schemaGenerator generator used to derive the Avro schema for the metric record type
 * @param datumWriterFactory factory that builds a writer bound to the generated schema
 * @return a writer for {@code MetricValues}
 * @throws RuntimeException if the metric record type cannot be mapped to a schema
 */
@Provides
public DatumWriter<MetricValues> providesDatumWriter(SchemaGenerator schemaGenerator,
                                                     DatumWriterFactory datumWriterFactory) {
  try {
    return datumWriterFactory.create(METRIC_RECORD_TYPE, schemaGenerator.generate(METRIC_RECORD_TYPE.getType()));
  } catch (UnsupportedTypeException e) {
    // Guava's Throwables.propagate is deprecated. UnsupportedTypeException is a checked
    // exception, so propagate would have wrapped it in a RuntimeException anyway; do so directly.
    throw new RuntimeException(e);
  }
}
}
/**
 * Provides a {@link DatumWriter} for encoding {@link MetricValues} records.
 *
 * @param schemaGenerator generator used to derive the Avro schema for the metric record type
 * @param datumWriterFactory factory that builds a writer bound to the generated schema
 * @return a writer for {@code MetricValues}
 * @throws RuntimeException if the metric record type cannot be mapped to a schema
 */
@Provides
public DatumWriter<MetricValues> providesDatumWriter(SchemaGenerator schemaGenerator,
                                                     DatumWriterFactory datumWriterFactory) {
  try {
    return datumWriterFactory.create(METRIC_RECORD_TYPE, schemaGenerator.generate(METRIC_RECORD_TYPE.getType()));
  } catch (UnsupportedTypeException e) {
    // Guava's Throwables.propagate is deprecated. UnsupportedTypeException is a checked
    // exception, so propagate would have wrapped it in a RuntimeException anyway; do so directly.
    throw new RuntimeException(e);
  }
}
}
private SchemaCache createSchemaCache(Program program) throws Exception { ImmutableSet.Builder<Schema> schemas = ImmutableSet.builder(); for (FlowSpecification flowSpec : program.getApplicationSpecification().getFlows().values()) { for (FlowletDefinition flowletDef : flowSpec.getFlowlets().values()) { schemas.addAll(Iterables.concat(flowletDef.getInputs().values())); schemas.addAll(Iterables.concat(flowletDef.getOutputs().values())); } } // Temp fix for ENG-3949. Always add old stream event schema. // TODO: Remove it later. The right thing to do is to have schemas history being stored to support schema // evolution. By design, as long as the schema cache is populated with old schema, the type projection logic // in the decoder would handle it correctly. schemas.add(schemaGenerator.generate(StreamEventData.class)); return new SchemaCache(schemas.build(), program.getClassLoader()); }
/**
 * Sets the type of object stored in the table. The Hive table schema for an ObjectMappedTable
 * is derived from the type supplied here together with the row key explore name configured via
 * {@link #setRowKeyExploreName(String)}.
 *
 * <p>For example, a type with the three fields "id", "name" and "price" yields a Hive table
 * with four columns: "rowkey", "id", "name" and "price".
 *
 * @param type the type of object to store in the table
 * @return this builder
 * @throws UnsupportedTypeException if a schema cannot be derived from the given type
 */
public Builder setType(Type type) throws UnsupportedTypeException {
  // Record the raw type so it can be reconstructed when the dataset is read back.
  TypeRepresentation representation = new TypeRepresentation(type);
  add(OBJECT_TYPE, gson.toJson(representation));
  // Derive and record the (non-recursive) schema string for the type.
  String generatedSchema = schemaGenerator.generate(type, false).toString();
  add(OBJECT_SCHEMA, generatedSchema);
  return this;
}
// NOTE(review): fragment of a constructor — surrounding declaration is outside this view.
this.messagingService = messagingService;
try {
  // Generate the Avro schema for MetricValues once and build a matching reader for it.
  this.metricSchema = schemaGenerator.generate(MetricValues.class);
  this.metricReader = readerFactory.create(TypeToken.of(MetricValues.class), metricSchema);
} catch (UnsupportedTypeException e) {
// NOTE(review): fragment of a constructor — surrounding declaration is outside this view.
this.messagingService = messagingService;
try {
  // Generate the Avro schema for MetricValues once and build a matching reader for it.
  this.metricSchema = schemaGenerator.generate(MetricValues.class);
  this.metricReader = readerFactory.create(TypeToken.of(MetricValues.class), metricSchema);
} catch (UnsupportedTypeException e) {
// NOTE(review): fragment — the enclosing method and the rest of this condition are outside this view.
// Derive the schema of the data type and compare it against the queue spec's input schema.
Schema schema = schemaGenerator.generate(dataType.getType());
if (queueSpec.getInputSchema().equals(schema)
    && (inputNames.contains(queueSpec.getQueueName().getSimpleName())
// NOTE(review): fragment — the enclosing try block starts outside this view.
schema = schemaGenerator.generate(dataType.getType());
} catch (UnsupportedTypeException e) {
  // NOTE(review): Throwables.propagate is deprecated in recent Guava;
  // consider `throw new RuntimeException(e)` — behavior is equivalent for this checked exception.
  throw Throwables.propagate(e);
// NOTE(review): fragment — the enclosing method and the loop bodies continue outside this view.
// Collect names of flowlets consuming from this flowlet's queues.
List<String> consumerFlowlets = Lists.newLinkedList();
Node flowlet = Node.flowlet(flowletName);
// Schema of the emitted type; presumably matched against queue specifications below — confirm.
Schema schema = schemaGenerator.generate(type.getType());
for (Map.Entry<String, Set<QueueSpecification>> entry : queueSpecs.row(flowlet).entrySet()) {
  for (QueueSpecification queueSpec : entry.getValue()) {
// NOTE(review): fragment — the enclosing if/try structure starts outside this view.
throw new SerDeException("Dataset " + datasetId + " is not explorable.");
schema = schemaGenerator.generate(recordType);
} catch (UnsupportedTypeException e) {
  // Surface schema-generation failures as SerDeException, preserving the cause.
  throw new SerDeException("Dataset " + datasetId + " has an unsupported schema.", e);
// NOTE(review): fragment — the enclosing if/try structure starts outside this view.
throw new SerDeException("Dataset " + datasetId + " is not explorable.");
schema = schemaGenerator.generate(recordType);
} catch (UnsupportedTypeException e) {
  // Surface schema-generation failures as SerDeException, preserving the cause.
  throw new SerDeException("Dataset " + datasetId + " has an unsupported schema.", e);