/**
 * Converts an Avro SchemaTableEntry into a SchemaEntry.
 *
 * @param avroEntry Avro SchemaTableEntry
 * @return an equivalent SchemaEntry
 */
public static SchemaEntry fromAvroEntry(final SchemaTableEntry avroEntry) {
  // Parse the JSON schema string carried by the Avro record, then assemble
  // the SchemaEntry directly from the entry's ID, hash, and parsed schema.
  return new SchemaEntry(
      avroEntry.getId(),
      new BytesKey(avroEntry.getHash().bytes()),
      new Schema.Parser().parse(avroEntry.getAvroSchema()));
}
/**
 * Converts an Avro SchemaTableEntry into a SchemaEntry.
 *
 * @param avroEntry Avro SchemaTableEntry
 * @return an equivalent SchemaEntry
 */
public static SchemaEntry fromAvroEntry(final SchemaTableEntry avroEntry) {
  // Rehydrate the Avro schema from its JSON serialization.
  final Schema parsedSchema = new Schema.Parser().parse(avroEntry.getAvroSchema());
  // Wrap the raw hash bytes in the key type expected by SchemaEntry.
  final BytesKey hashKey = new BytesKey(avroEntry.getHash().bytes());
  return new SchemaEntry(avroEntry.getId(), hashKey, parsedSchema);
}
// Persists the serialized schema entry into the hash table, keyed by the
// schema's hash bytes and stamped with the supplied write timestamp.
// NOTE(review): this statement's enclosing method is outside the visible
// chunk — `avroEntry`, `timestamp`, and `entryBytes` come from that scope.
// NOTE(review): assumes mPreparedStatementWriteHashTable binds its
// parameters in the order (hash, time, entry) — confirm against the
// prepared statement's definition.
mAdmin.execute(
    mPreparedStatementWriteHashTable.bind(
        ByteBuffer.wrap(avroEntry.getHash().bytes()),
        new Date(timestamp),
        ByteBuffer.wrap(entryBytes)));
/** * Writes the given schema entry to the ID and hash tables. * * This is not protected from concurrent writes. Caller must ensure consistency. * * @param avroEntry Schema entry to write. * @param timestamp Write entries with this timestamp. * @param flush Whether to flush tables synchronously. * @throws IOException on I/O error. */ private void storeInTable(final SchemaTableEntry avroEntry, long timestamp, boolean flush) throws IOException { final byte[] entryBytes = encodeSchemaEntry(avroEntry); // Writes the ID mapping first: if the hash table write fails, we just lost one schema ID. // The hash table write must not happen before the ID table write has been persisted. // Otherwise, another client may see the hash entry, write cells with the schema ID that cannot // be decoded (since the ID mapping has not been written yet). final Put putId = new Put(longToVarInt64(avroEntry.getId())) .add(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES, timestamp, entryBytes); mSchemaIdTable.put(putId); if (flush) { mSchemaIdTable.flushCommits(); } final Put putHash = new Put(avroEntry.getHash().bytes()) .add(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES, timestamp, entryBytes); mSchemaHashTable.put(putHash); if (flush) { mSchemaHashTable.flushCommits(); } }