/**
 * Converts an Avro-serialized scope into the repository {@link Scope} enum.
 *
 * @param scope the Avro scope, may be null
 * @return the corresponding {@code Scope}, or null when the input is null
 */
public Scope convert(AvroScope scope) {
    // Map by constant name rather than by ordinal: ordinal-based mapping breaks
    // silently if the declaration order of AvroScope and Scope ever diverges,
    // while valueOf(name) fails fast on a genuine mismatch.
    return scope == null ? null : Scope.valueOf(scope.name());
}
public static Map<Scope, Set<FieldType>> getFieldTypeAndScope(Set<SchemaId> fieldIds, FieldFilter fieldFilter, TypeManager typeManager) throws RepositoryException, InterruptedException { // Could be written more elegantly using Multimaps.index, but we want to limit dependencies Map<Scope, Set<FieldType>> result = new EnumMap<Scope, Set<FieldType>>(Scope.class); for (Scope scope : Scope.values()) { result.put(scope, new HashSet<FieldType>()); } for (SchemaId fieldId : fieldIds) { FieldType fieldType; try { fieldType = typeManager.getFieldTypeById(fieldId); } catch (FieldTypeNotFoundException e) { // A field whose field type does not exist: skip it continue; } if (fieldFilter.accept(fieldType)) { result.get(fieldType.getScope()).add(fieldType); } } return result; }
private void setRecordTypesAfterUpdate(Record record, Record originalRecord, Set<Scope> changedScopes) { // The returned record object after an update should always contain complete record type information for // all the scopes for (Scope scope : Scope.values()) { // For any unchanged or non-existing scope, we reset the record type information to the one of the // original record, so that the returned record object corresponds to the repository state (= same // as when one would do a fresh read) // // Copy over the original record type of a scope if: // - the scope was unchanged. If it was changed, the record type will already have been filled in // by calculateRecordChanges. // - for the non-versioned scope, only copy it over if none of the scopes changed, because the // record type of the non-versioned scope is always brought up to date in case any scope is changed if (!changedScopes.contains(scope) && (scope != Scope.NON_VERSIONED || changedScopes.isEmpty())) { record.setRecordType(scope, originalRecord.getRecordTypeName(scope), originalRecord.getRecordTypeVersion(scope)); } } }
Map<QName, Object> undeterminedFields = new TreeMap<QName, Object>(QNAME_COMP); for (Scope scope : Scope.values()) { fieldsByScope.put(scope, new TreeMap<QName, Object>(QNAME_COMP));
/**
 * Serializes an {@link IdRecord}: first the plain record data, then the field
 * id-to-name mapping (size-prefixed), then the record type id for each scope
 * (null marker for scopes without one).
 */
public static final void writeIdRecord(IdRecord record, DataOutput output, LRepository repository)
        throws RepositoryException, InterruptedException {
    write(record, output, repository);
    Map<SchemaId, QName> idToName = record.getFieldIdToNameMapping();
    output.writeVInt(idToName.size());
    for (Map.Entry<SchemaId, QName> entry : idToName.entrySet()) {
        writeBytes(entry.getKey().getBytes(), output);
        writeQName(entry.getValue(), output);
    }
    for (Scope scope : Scope.values()) {
        SchemaId recordTypeId = record.getRecordTypeId(scope);
        // A scope may have no record type; serialize that as a null marker.
        writeNullOrBytes(recordTypeId == null ? null : recordTypeId.getBytes(), output);
    }
}
/**
 * Reads a record and wraps it as an {@link IdRecord}, attaching the field
 * id-to-name mapping and the record type id per scope.
 */
@Override
public IdRecord readWithIds(RecordId recordId, Long aLong, List<SchemaId> schemaIds)
        throws RepositoryException, InterruptedException {
    // NOTE(review): the 'aLong' (version?) and 'schemaIds' parameters are never
    // used — this implementation always reads the full, latest record. Confirm
    // whether callers rely on version/field filtering here.
    Record record = getRecord(recordId);
    TypeManager typeManager = this.getTypeManager();
    // Build the field-id -> field-name mapping for every field present on the record.
    Map<SchemaId, QName> map = Maps.newHashMap();
    for (QName qname : record.getFields().keySet()) {
        map.put(typeManager.getFieldTypeByName(qname).getId(), qname);
    }
    // Resolve the record type id for each scope.
    // NOTE(review): record.getVersion() is passed as the record *type* version
    // argument — record versions and record type versions are distinct counters;
    // verify this lookup is intentional.
    Map<Scope,SchemaId> recordTypeIds = Maps.newHashMap();
    for (Scope scope : Scope.values()) {
        RecordType recordType = typeManager.getRecordTypeByName(record.getRecordTypeName(scope), record.getVersion());
        if (recordType != null) {
            recordTypeIds.put(scope, recordType.getId());
        }
    }
    IdRecord idRecord = new IdRecordImpl(record, map, recordTypeIds);
    return idRecord;
}
/**
 * Deserializes an {@link IdRecord} previously written by {@code writeIdRecord}:
 * the plain record data, the size-prefixed field id-to-name mapping, and the
 * record type id per scope (null markers for scopes without a record type).
 */
public static final IdRecord readIdRecord(DataInput input, LRepository repository)
        throws RepositoryException, InterruptedException {
    Record record = read(input, repository);
    IdGenerator idGenerator = repository.getIdGenerator();
    int size = input.readVInt();
    // Presize with the serialized entry count to avoid rehashing.
    Map<SchemaId, QName> idToQNameMapping = new HashMap<SchemaId, QName>(size);
    for (int i = 0; i < size; i++) {
        byte[] schemaIdBytes = readBytes(input);
        QName name = readQName(input);
        SchemaId schemaId = idGenerator.getSchemaId(schemaIdBytes);
        idToQNameMapping.put(schemaId, name);
    }
    // Fix: the original used a raw EnumMap (unchecked warning); parameterize it.
    Map<Scope, SchemaId> recordTypeIds = new EnumMap<Scope, SchemaId>(Scope.class);
    for (Scope scope : Scope.values()) {
        byte[] schemaIdBytes = readNullOrBytes(input);
        if (schemaIdBytes != null) {
            SchemaId schemaId = idGenerator.getSchemaId(schemaIdBytes);
            recordTypeIds.put(scope, schemaId);
        }
    }
    return new IdRecordImpl(record, idToQNameMapping, recordTypeIds);
}
for (Scope scope : Scope.values()) { fieldTypeEntriesByScope.put(scope, new ArrayList<Pair<FieldTypeEntry, FieldType>>());
for (Scope scope : Scope.values()) { writeNullOrQName(record.getRecordTypeName(scope), output); writeNullOrVLong(record.getRecordTypeVersion(scope), output);
for (Scope scope : Scope.values()) { Pair<SchemaId, Long> recordTypePair = requestedVersion == null ? extractLatestRecordType(scope, result) : extractVersionRecordType(scope, result, requestedVersion);
for (Scope scope : Scope.values()) { QName recordType = readNullOrQName(input); Long rtVersion = readNullOrVLong(input);