private static void checkNotChanged(String what,
                                    @Nullable Object existing,
                                    @Nullable Object test) {
  ValidationException.check(
      (existing == test) || (existing != null && existing.equals(test)),
      "Dataset %s is not compatible with existing: %s != %s",
      what, String.valueOf(existing), String.valueOf(test));
}
/**
 * Parses a {@link FieldMapping} from a JSON mapping node.
 *
 * @param mappingNode
 *          The value of the "mapping" node
 * @return FieldMapping
 */
public static FieldMapping parseFieldMapping(JsonNode mappingNode) {
  ValidationException.check(mappingNode.isObject(),
      "A column mapping must be a JSON record");
  ValidationException.check(mappingNode.has(SOURCE),
      "Column mappings must have a %s.", SOURCE);
  String source = mappingNode.get(SOURCE).asText();
  return parseFieldMapping(source, mappingNode);
}
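// Illustrative sketch of the JSON shape this parser expects. SOURCE is assumed
// to be "source" (as the original lookup suggests); the "type", "family", and
// "qualifier" keys and the field names are assumptions for the example, and
// ObjectMapper is Jackson's databind entry point.
static FieldMapping exampleParseFieldMapping() throws java.io.IOException {
  JsonNode mappingNode = new ObjectMapper().readTree(
      "{\"source\": \"username\", \"type\": \"column\","
      + " \"family\": \"u\", \"qualifier\": \"username\"}");
  return parseFieldMapping(mappingNode);
}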
/**
 * Validate that a {@link FieldMapping} is compatible with this builder's
 * current set of mappings and add it to the set of mappings.
 *
 * A mapping is not compatible if it results in:
 * <pre>
 * 1. Multiple occVersion mappings in the mapping set
 * 2. Both a counter and an occVersion mapping in the mapping set
 * </pre>
 *
 * @param fm a {@code FieldMapping} to add to this builder
 */
private void addField(FieldMapping fm) {
  // validate!
  if (fm.getMappingType() == FieldMapping.MappingType.OCC_VERSION) {
    ValidationException.check(!hasOCCVersion,
        "Cannot use multiple occVersion fields");
    ValidationException.check(!hasCounter,
        "Cannot use both counter and occVersion fields");
    hasOCCVersion = true;
  } else if (fm.getMappingType() == FieldMapping.MappingType.COUNTER) {
    ValidationException.check(!hasOCCVersion,
        "Cannot use both counter and occVersion fields");
    hasCounter = true;
  }
  fieldMappings.add(fm);
}
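// A minimal sketch of the rule above, assuming ColumnMapping.Builder routes its
// occ(...) calls through addField (the field names are invented). The second
// occVersion mapping is rejected; ValidationException is unchecked, so no
// throws clause is needed.
static void exampleOccVersionConflict() {
  ColumnMapping.Builder builder = new ColumnMapping.Builder();
  builder.occ("version");          // first occVersion mapping is accepted
  try {
    builder.occ("otherVersion");   // second occVersion mapping fails the check
  } catch (ValidationException e) {
    // "Cannot use multiple occVersion fields"
  }
}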
private void add(FieldPartitioner fp) {
  ValidationException.check(!names.contains(fp.getName()),
      "Partition name %s conflicts with an existing field or partition name",
      fp.getName());
  fieldPartitioners.add(fp);
  names.add(fp.getName());
}
private static void checkCompressionType(Format format,
                                         @Nullable CompressionType compressionType) {
  if (compressionType == null) {
    return;
  }
  ValidationException.check(
      format.getSupportedCompressionTypes().contains(compressionType),
      "Format %s doesn't support compression format %s",
      format.getName(), compressionType.getName());
}
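// Illustrative only: a compression type the format supports passes, an
// unsupported one throws ValidationException, and null is skipped entirely.
// Formats.AVRO and CompressionType.Snappy are assumed constant names, and
// Snappy is assumed to be among Avro's supported codecs here.
static void exampleCompressionCheck() {
  checkCompressionType(Formats.AVRO, CompressionType.Snappy);  // supported
  checkCompressionType(Formats.AVRO, null);                    // null: no check
}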
private static ColumnMapping buildColumnMapping(JsonNode node) {
  ValidationException.check(node.isArray(),
      "Must be a JSON array of column mappings");
  ColumnMapping.Builder builder = new ColumnMapping.Builder();
  for (Iterator<JsonNode> it = node.elements(); it.hasNext(); ) {
    builder.fieldMapping(parseFieldMapping(it.next()));
  }
  return builder.build();
}
/**
 * Precondition-style validation that a dataset name is compatible.
 *
 * @param namespace a String namespace
 * @param name a String name
 */
public static void checkDatasetName(String namespace, String name) {
  Preconditions.checkNotNull(namespace, "Namespace cannot be null");
  Preconditions.checkNotNull(name, "Dataset name cannot be null");
  ValidationException.check(Compatibility.isCompatibleName(namespace),
      "Namespace %s is not alphanumeric (plus '_')", namespace);
  ValidationException.check(Compatibility.isCompatibleName(name),
      "Dataset name %s is not alphanumeric (plus '_')", name);
}
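// Illustrative only (the namespace and dataset names are invented):
// alphanumeric-plus-underscore names pass, anything else throws
// ValidationException with the messages above.
static void exampleDatasetNameCheck() {
  checkDatasetName("default", "user_events");      // passes
  try {
    checkDatasetName("default", "user-events");    // '-' is not allowed
  } catch (ValidationException e) {
    // "Dataset name user-events is not alphanumeric (plus '_')"
  }
}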
/**
 * Precondition-style validation that a {@link Schema} is compatible.
 *
 * @param schema an Avro {@code Schema}
 */
public static void checkSchema(Schema schema) {
  Preconditions.checkNotNull(schema, "Schema cannot be null");
  List<String> incompatible = getIncompatibleNames(schema);
  ValidationException.check(incompatible.isEmpty(),
      "Field names are not alphanumeric (plus '_'): %s",
      Joiner.on(", ").join(incompatible));
}
/**
 * Returns the nested {@link Schema} for the given field name.
 *
 * @param schema a record Schema
 * @param name a String field name
 * @return the nested Schema for the field
 */
public static Schema fieldSchema(Schema schema, String name) {
  Schema nested = unwrapNullable(schema);
  List<String> levels = Lists.newArrayList();
  for (String level : NAME_SPLITTER.split(name)) {
    levels.add(level);
    ValidationException.check(Schema.Type.RECORD == nested.getType(),
        "Cannot get schema for %s: %s is not a record schema: %s",
        name, NAME_JOINER.join(levels), nested.toString(true));
    Schema.Field field = nested.getField(level);
    ValidationException.check(field != null,
        "Cannot get schema for %s: %s is not a field",
        name, NAME_JOINER.join(levels));
    nested = unwrapNullable(field.schema());
  }
  return nested;
}
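// A sketch of the dotted-name lookup, assuming NAME_SPLITTER splits on '.'
// (the record and field names are invented). Avro's SchemaBuilder is used to
// build a record with a nested record field.
static void exampleNestedFieldSchema() {
  Schema user = SchemaBuilder.record("User").fields()
      .name("address").type().record("Address").fields()
          .requiredString("city")
          .endRecord().noDefault()
      .endRecord();
  Schema city = SchemaUtil.fieldSchema(user, "address.city");
  // city.getType() == Schema.Type.STRING
}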
public static void checkStrategyUpdate(PartitionStrategy existing,
                                       PartitionStrategy other,
                                       Schema schema) {
  List<FieldPartitioner> existingFields = Accessor.getDefault()
      .getFieldPartitioners(existing);
  List<FieldPartitioner> otherFields = Accessor.getDefault()
      .getFieldPartitioners(other);
  ValidationException.check(existingFields.size() == otherFields.size(),
      "Not compatible: cannot replace %s partitioners with %s partitioners",
      existingFields.size(), otherFields.size());
  for (int i = 0; i < existingFields.size(); i += 1) {
    FieldPartitioner fp = existingFields.get(i);
    FieldPartitioner replacement = otherFields.get(i);
    if (fp.equals(replacement)) {
      continue;
    }
    ValidationException.check(fp instanceof ProvidedFieldPartitioner,
        "Cannot replace partition %s: not a provided partitioner",
        fp.getName());
    ValidationException.check(fp.getName().equals(replacement.getName()),
        "Cannot change the name of partition %s (to %s)",
        fp.getName(), replacement.getName());
    Class<?> outputType = SchemaUtil.getPartitionType(replacement, schema);
    ValidationException.check(
        isCompatibleWithProvidedType(fp.getType(), outputType),
        "Cannot change the data type of partition %s", fp.getName());
  }
}
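// A minimal sketch of the replacement rule above (field name invented;
// hash(...) is the same builder call used elsewhere in this file): only
// provided partitioners may be replaced, so swapping the hash width of a
// non-provided partitioner throws ValidationException.
static void exampleStrategyUpdate(Schema schema) {
  PartitionStrategy existing = new PartitionStrategy.Builder()
      .hash("username", 16).build();
  PartitionStrategy replacement = new PartitionStrategy.Builder()
      .hash("username", 32).build();
  try {
    checkStrategyUpdate(existing, replacement, schema);
  } catch (ValidationException e) {
    // "Cannot replace partition ...: not a provided partitioner"
  }
}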
private static void checkPartitionStrategy(
    Schema schema, @Nullable PartitionStrategy strategy) {
  if (strategy == null) {
    return;
  }
  for (FieldPartitioner fp : strategy.getFieldPartitioners()) {
    if (fp instanceof ProvidedFieldPartitioner) {
      // provided partitioners are not based on the entity fields
      continue;
    }
    // check that the entity is a record if there is a non-provided partitioner
    ValidationException.check(schema.getType() == Schema.Type.RECORD,
        "Cannot partition non-records: %s", schema);
    // the source name should be a field in the schema, but not necessarily
    // a field of the top-level record
    Schema fieldSchema;
    try {
      fieldSchema = SchemaUtil.fieldSchema(schema, fp.getSourceName());
    } catch (IllegalArgumentException e) {
      throw new ValidationException(
          "Cannot partition on " + fp.getSourceName(), e);
    }
    ValidationException.check(
        SchemaUtil.isConsistentWithExpectedType(
            fieldSchema.getType(), fp.getSourceType()),
        "Field type %s does not match partitioner %s",
        fieldSchema.getType(), fp);
  }
}
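// Illustrative only (record and field names invented): a partitioner whose
// source field is missing from the schema fails the fieldSchema lookup above
// with a ValidationException.
static void examplePartitionStrategyCheck() {
  Schema schema = SchemaBuilder.record("Event").fields()
      .requiredString("id")
      .endRecord();
  PartitionStrategy byUser = new PartitionStrategy.Builder()
      .hash("user_id", 16)    // "user_id" is not a field of Event
      .build();
  try {
    checkPartitionStrategy(schema, byUser);
  } catch (ValidationException e) {
    // "Cannot get schema for user_id: user_id is not a field"
  }
}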
@Override
public Object extractField(E entity, String fieldName) {
  // make sure the field is a direct child of the schema
  ValidationException.check(
      accessor.getReadSchema().getField(fieldName) != null,
      "No field named %s in schema %s",
      fieldName, accessor.getReadSchema());
  return accessor.get(entity, fieldName);
}
  if (mappings == null) {
    return;
  }
  ValidationException.check(schema.getType() == Schema.Type.RECORD,
      "Cannot map non-records: %s", schema);
  Set<String> keyMappedFields = Sets.newHashSet();
  for (FieldMapping fm : mappings.getFieldMappings()) {
    Schema fieldSchema = SchemaUtil.fieldSchema(schema, fm.getFieldName());
    ValidationException.check(
        SchemaUtil.isConsistentWithMappingType(
            fieldSchema.getType(), fm.getMappingType()),
if (m.matches()) {
  String fieldName = m.group(1);
  ValidationException.check(fieldName != null && !fieldName.isEmpty(),
      "Invalid field name: %s", String.valueOf(fieldName));
  String partitionerType = m.group(2);
  if ("hash".equals(partitionerType)) {
    String width = m.group(3);
    ValidationException.check(width != null,
        "Missing number of hash partitions: %s:hash[?]", fieldName);
    strategyBuilder.hash(fieldName, Integer.parseInt(width));
if (matcher.matches()) {
  String source = matcher.group(1);
  ValidationException.check(schema.getField(source) != null,
      "Not a schema field: %s", source);
  mappingBuilder.occ(source);

if (matcher.matches()) {
  String source = matcher.group(1);
  ValidationException.check(schema.getField(source) != null,
      "Not a schema field: %s", source);
  mappingBuilder.key(source);

  String qualOrPrefix = matcher.group(3);
  Schema.Field field = schema.getField(source);
  ValidationException.check(field != null,
      "Not a schema field: %s", source);
  ValidationException.check(family != null && !family.isEmpty(),
      "Missing column family: %s:?", source);
  Schema.Type type = schema.getField(source).schema().getType();
"Unable to open a writer from state:%s", state); ValidationException.check(isSupportedFormat(descriptor), "Not a supported format: %s", descriptor.getFormat());
@Override
public void initialize() {
  Preconditions.checkState(state.equals(ReaderWriterState.NEW),
      "Unable to open a writer from state:%s", state);
  DatasetDescriptor descriptor = view.getDataset().getDescriptor();
  ValidationException.check(
      FileSystemWriter.isSupportedFormat(descriptor),
      "Not a supported format: %s", descriptor.getFormat());
  LOG.debug("Opening partitioned dataset writer w/strategy:{}",
      partitionStrategy);
  cachedWriters = CacheBuilder.newBuilder().maximumSize(maxWriters)
      .removalListener(new DatasetWriterCloser<E>())
      .build(createCacheLoader());
  state = ReaderWriterState.OPEN;
}
ValidationException.check(schema != null, "Descriptor schema is required and cannot be null");