/**
 * Parses a raw Avro schema string into a Jackson {@link JsonNode} tree.
 *
 * @param rawSchema the Avro record schema as a JSON string
 * @return the parsed schema tree
 * @throws ValidationException if the string is not valid JSON
 */
private static JsonNode rawSchemaAsJsonNode(String rawSchema) {
  try {
    // parse directly and return; no need for an intermediate variable
    return new ObjectMapper().readValue(rawSchema, JsonNode.class);
  } catch (IOException e) {
    throw new ValidationException(
        "Could not parse the avro record as JSON.", e);
  }
}
}
/**
 * Verifies that a dataset property has not changed between the existing
 * descriptor and the proposed one.
 *
 * @param what human-readable name of the property being compared
 * @param existing the current value (may be null)
 * @param test the proposed value (may be null)
 * @throws ValidationException if the two values are not equal
 */
private static void checkNotChanged(String what,
    @Nullable Object existing, @Nullable Object test) {
  // Objects.equals is the null-safe equivalent of the original
  // (a == b) || (a != null && a.equals(b)) expression
  ValidationException.check(
      java.util.Objects.equals(existing, test),
      "Dataset %s is not compatible with existing: %s != %s",
      what, String.valueOf(existing), String.valueOf(test));
}
getLogger().error(e.getMessage()); getLogger().debug("Incompatible schema error", e); session.transfer(flowFile, INCOMPATIBLE);
ValidationException.check(mappingNode.isObject(), "A column mapping must be a JSON record"); ValidationException.check(mappingNode.has(TYPE), "Column mappings must have a %s.", TYPE); String type = mappingNode.get(TYPE).asText(); ValidationException.check(family != null && !family.isEmpty(), "Column mapping %s must have a %s", source, FAMILY); ValidationException.check(qualifier != null && !qualifier.isEmpty(), "Column mapping %s must have a %s", source, QUALIFIER); return FieldMapping.column(source, family, qualifier); ValidationException.check(family != null && !family.isEmpty(), "Column mapping %s must have a %s", source, FAMILY); ValidationException.check(qualifier == null, "Key-as-column mapping %s cannot have a %s", source, QUALIFIER); if (mappingNode.has(PREFIX)) { ValidationException.check(family != null && !family.isEmpty(), "Counter mapping %s must have a %s", source, FAMILY); ValidationException.check(qualifier != null && !qualifier.isEmpty(), "Counter mapping %s must have a %s", source, QUALIFIER); return FieldMapping.counter(source, family, qualifier); throw new ValidationException("Invalid mapping type: " + type);
private static PartitionStrategy buildPartitionStrategy(JsonNode node) { ValidationException.check(node.isArray(), "A partition strategy must be a JSON array of partitioners"); for (Iterator<JsonNode> it = node.elements(); it.hasNext();) { JsonNode fieldPartitioner = it.next(); ValidationException.check(fieldPartitioner.isObject(), "A partitioner must be a JSON record"); ValidationException.check(fieldPartitioner.has(TYPE), "Partitioners must have a %s", TYPE); String type = fieldPartitioner.get(TYPE).asText(); ValidationException.check(isProvided || fieldPartitioner.has(SOURCE), "Partitioners must have a %s", SOURCE); builder.identity(source, name); } else if (type.equals("hash")) { ValidationException.check(fieldPartitioner.has(BUCKETS), "Hash partitioner %s must have attribute %s", name == null ? source : name, BUCKETS); int buckets = fieldPartitioner.get(BUCKETS).asInt(); ValidationException.check(buckets > 0, "Invalid number of buckets for hash partitioner %s: %s", name == null ? source : name, builder.hash(source, name, buckets); } else if (type.equals("range")) { ValidationException.check(fieldPartitioner.has(SIZE), "Range partitioner %s must have attribute %s",
/**
 * Converts a raw Avro schema string to its JSON tree representation.
 *
 * @param rawSchema JSON text of an Avro record schema
 * @return the schema parsed as a {@link JsonNode}
 * @throws ValidationException when the text cannot be parsed as JSON
 */
private static JsonNode rawSchemaAsJsonNode(String rawSchema) {
  final ObjectMapper parser = new ObjectMapper();
  JsonNode schemaTree;
  try {
    schemaTree = parser.readValue(rawSchema, JsonNode.class);
  } catch (IOException e) {
    throw new ValidationException(
        "Could not parse the avro record as JSON.", e);
  }
  return schemaTree;
}
}
private static void checkPartitionStrategy( Schema schema, @Nullable PartitionStrategy strategy) { if (strategy == null) { return; } for (FieldPartitioner fp : strategy.getFieldPartitioners()) { if (fp instanceof ProvidedFieldPartitioner) { // provided partitioners are not based on the entity fields continue; } // check the entity is a record if there is a non-provided partitioner ValidationException.check(schema.getType() == Schema.Type.RECORD, "Cannot partition non-records: %s", schema); // the source name should be a field in the schema, but not necessarily // the record. Schema fieldSchema; try { fieldSchema = SchemaUtil.fieldSchema(schema, fp.getSourceName()); } catch (IllegalArgumentException e) { throw new ValidationException( "Cannot partition on " + fp.getSourceName(), e); } ValidationException.check( SchemaUtil.isConsistentWithExpectedType( fieldSchema.getType(), fp.getSourceType()), "Field type %s does not match partitioner %s", fieldSchema.getType(), fp); } } }
/**
 * Parses the FieldMapping from an annotated schema field.
 *
 * @param mappingNode
 *          The value of the "mapping" node
 * @return FieldMapping
 * @throws ValidationException if the node is not a JSON record or is
 *           missing its source attribute
 */
public static FieldMapping parseFieldMapping(JsonNode mappingNode) {
  ValidationException.check(mappingNode.isObject(),
      "A column mapping must be a JSON record");
  // fixed copy-paste error: this message previously said "Partitioners"
  // even though it validates a column mapping
  ValidationException.check(mappingNode.has(SOURCE),
      "Column mappings must have a %s.", SOURCE);
  // use the SOURCE constant instead of the "source" literal for consistency
  // with the has(SOURCE) check above
  String source = mappingNode.get(SOURCE).asText();
  return parseFieldMapping(source, mappingNode);
}
console.error("Validation error", e); } else { console.error("Validation error: {}", e.getMessage());
/**
 * Returns the string type name for a supported provided-values class.
 *
 * @param type the value class; must be String, Integer, or Long
 *     (or a subtype)
 * @return the corresponding type-name constant
 * @throws ValidationException if the class is not a supported type
 */
public static String valuesString(Class<? extends Comparable> type) {
  // guard-clause style: check each supported class in turn
  if (String.class.isAssignableFrom(type)) {
    return STRING_TYPE;
  }
  if (Integer.class.isAssignableFrom(type)) {
    return INT_TYPE;
  }
  if (Long.class.isAssignableFrom(type)) {
    return LONG_TYPE;
  }
  throw new ValidationException("Not a valid provided type: " + type);
}
}
return; ValidationException.check(schema.getType() == Schema.Type.RECORD, "Cannot map non-records: %s", schema); Set<String> keyMappedFields = Sets.newHashSet(); for (FieldMapping fm : mappings.getFieldMappings()) { Schema fieldSchema = SchemaUtil.fieldSchema(schema, fm.getFieldName()); ValidationException.check( SchemaUtil.isConsistentWithMappingType( fieldSchema.getType(), fm.getMappingType()), throw new ValidationException( "Fields are key-mapped without identity partitioners: " + Joiner.on(", ").join(keyMappedFields));
/** * Validate that a {@link FieldMapping} is compatible with this builder's * current set of mappings and add it to the set of mappings. * * A mapping is not compatible if it results in: * <pre> * 1. Multiple occVersion mappings in the mapping set * 2. Both a counter and an occVersion mapping in the mapping set * </pre> * * @param fm a {@code FieldMapping} to add to this builder */ private void addField(FieldMapping fm) { // validate! if (fm.getMappingType() == FieldMapping.MappingType.OCC_VERSION) { ValidationException.check(!hasOCCVersion, "Cannot use multiple occVersion fields"); ValidationException.check(!hasCounter, "Cannot use both counter and occVersion fields"); hasOCCVersion = true; } else if (fm.getMappingType() == FieldMapping.MappingType.COUNTER) { ValidationException.check(!hasOCCVersion, "Cannot use both counter and occVersion fields"); hasCounter = true; } fieldMappings.add(fm); } }
getLogger().error(e.getMessage()); getLogger().debug("Incompatible schema error", e); session.transfer(flowFile, INCOMPATIBLE);
/**
 * Parses a JSON file into an instance of the given type.
 *
 * @param file the JSON file to read
 * @param returnType the class to deserialize into
 * @param <T> the deserialized type
 * @return the parsed instance
 * @throws ValidationException if the file content is not valid JSON or
 *     cannot be mapped to {@code returnType}
 * @throws DatasetIOException if the file cannot be read
 */
public static <T> T parse(File file, Class<T> returnType) {
  ObjectMapper mapper = new ObjectMapper();
  try {
    return mapper.readValue(file, returnType);
  } catch (JsonParseException | JsonMappingException e) {
    // multi-catch: both exceptions indicate malformed or mis-shaped JSON
    // and were previously handled by two identical catch blocks
    throw new ValidationException("Invalid JSON", e);
  } catch (IOException e) {
    throw new DatasetIOException("Cannot initialize JSON parser", e);
  }
}
if (m.matches()) { String fieldName = m.group(1); ValidationException.check(fieldName != null && !fieldName.isEmpty(), "Invalid field name: %s", String.valueOf(fieldName)); String partitionerType = m.group(2); if ("hash".equals(partitionerType)) { String width = m.group(3); ValidationException.check(width != null, "Missing number of hash partitions: %s:hash[?]", fieldName); strategyBuilder.hash(fieldName, Integer.parseInt(width)); strategyBuilder.provided(fieldName); } else { throw new ValidationException( "Unknown partitioner type: " + partitionerType); throw new ValidationException( "Invalid partition <field:type>: " + partition);
/**
 * Registers a field partitioner, ensuring its name does not collide with
 * any existing field or partition name.
 *
 * @param fp the partitioner to add
 * @throws ValidationException if the partitioner's name is already in use
 */
private void add(FieldPartitioner fp) {
  String partitionName = fp.getName();
  ValidationException.check(!names.contains(partitionName),
      "Partition name %s conflicts with an existing field or partition name",
      partitionName);
  // record the partitioner and reserve its name
  fieldPartitioners.add(fp);
  names.add(partitionName);
}
}
/**
 * Parses a JSON input stream into an instance of the given type.
 *
 * @param in the input stream containing JSON
 * @param returnType the class to deserialize into
 * @param <T> the deserialized type
 * @return the parsed instance
 * @throws ValidationException if the stream content is not valid JSON or
 *     cannot be mapped to {@code returnType}
 * @throws DatasetIOException if the stream cannot be read
 */
public static <T> T parse(InputStream in, Class<T> returnType) {
  ObjectMapper mapper = new ObjectMapper();
  try {
    return mapper.readValue(in, returnType);
  } catch (JsonParseException | JsonMappingException e) {
    // multi-catch: both exceptions indicate malformed or mis-shaped JSON
    // and were previously handled by two identical catch blocks
    throw new ValidationException("Invalid JSON", e);
  } catch (IOException e) {
    throw new DatasetIOException("Cannot initialize JSON parser", e);
  }
}
if (matcher.matches()) { String source = matcher.group(1); ValidationException.check(schema.getField(source) != null, "Not a schema field: %s", source); mappingBuilder.occ(source); if (matcher.matches()) { String source = matcher.group(1); ValidationException.check(schema.getField(source) != null, "Not a schema field: %s", source); mappingBuilder.key(source); String qualOrPrefix = matcher.group(3); Schema.Field field = schema.getField(source); ValidationException.check(field != null, "Not a schema field: %s", source); ValidationException.check(family != null && !family.isEmpty(), "Missing column family: %s:?", source); Schema.Type type = schema.getField(source).schema().getType(); throw new ValidationException("Unknown mapping: " + mapping);
/**
 * Validates that the given compression type is supported by the format.
 * A null compression type means "not specified" and always passes.
 *
 * @param format the storage format
 * @param compressionType the requested compression type, or null
 * @throws ValidationException if the format does not support the type
 */
private static void checkCompressionType(Format format,
    @Nullable CompressionType compressionType) {
  if (compressionType == null) {
    // nothing to validate when no compression was requested
    return;
  }
  boolean supported = format.getSupportedCompressionTypes()
      .contains(compressionType);
  ValidationException.check(supported,
      "Format %s doesn't support compression format %s",
      format.getName(), compressionType.getName());
}
/**
 * Resolves a provided-values type name to its value class.
 * A null type name defaults to {@code String.class}.
 *
 * @param type the type name, or null for the default
 * @return the class for values of the given type
 * @throws ValidationException if the type name is not recognized
 */
public static Class<? extends Comparable> valuesType(@Nullable String type) {
  // null and the explicit string type both resolve to String
  if (type == null || STRING_TYPE.equals(type)) {
    return String.class;
  }
  if (INT_TYPE.equals(type)) {
    return Integer.class;
  }
  if (LONG_TYPE.equals(type)) {
    return Long.class;
  }
  throw new ValidationException("Not a valid provided type: " + type);
}