/** * Serializes a long integer into bytes using the zig-zag variable-length encoding scheme. * * @param number Long integer to encode. * @return Zig-zag encoded long, as an array of up to 10 bytes. */ public static byte[] longToZigZagVarInt64(long number) { // Zig-zag encode: move sign to low-order bit, and flip others if negative number = (number << 1) ^ (number >> 63); return longToVarInt64(number); }
/** {@inheritDoc} */
@Override
public void encode(final Schema writerSchema) throws IOException {
  // Resolve (or allocate) the numeric ID for this writer schema, then emit it
  // as a variable-length long.
  mByteArrayEncoder.writeFixed(
      ByteStreamArray.longToVarInt64(
          mCellSpec.getSchemaTable().getOrCreateSchemaId(writerSchema)));
}
}
/**
 * Fetches a schema entry from the tables given a schema ID.
 *
 * @param schemaId schema ID
 * @return Avro schema entry, or null if the schema ID does not exist in the table
 * @throws IOException on I/O error.
 */
private SchemaTableEntry loadFromIdTable(long schemaId) throws IOException {
  // Row keys in the ID table are variable-length encoded schema IDs.
  final Result result = mSchemaIdTable.get(new Get(longToVarInt64(schemaId)));
  if (result.isEmpty()) {
    return null;
  }
  return decodeSchemaEntry(result.value());
}
/** * Writes the given schema entry to the ID and hash tables. * * This is not protected from concurrent writes. Caller must ensure consistency. * * @param avroEntry Schema entry to write. * @param timestamp Write entries with this timestamp. * @param flush Whether to flush tables synchronously. * @throws IOException on I/O error. */ private void storeInTable(final SchemaTableEntry avroEntry, long timestamp, boolean flush) throws IOException { final byte[] entryBytes = encodeSchemaEntry(avroEntry); // Writes the ID mapping first: if the hash table write fails, we just lost one schema ID. // The hash table write must not happen before the ID table write has been persisted. // Otherwise, another client may see the hash entry, write cells with the schema ID that cannot // be decoded (since the ID mapping has not been written yet). final Put putId = new Put(longToVarInt64(avroEntry.getId())) .add(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES, timestamp, entryBytes); mSchemaIdTable.put(putId); if (flush) { mSchemaIdTable.flushCommits(); } final Put putHash = new Put(avroEntry.getHash().bytes()) .add(SCHEMA_COLUMN_FAMILY_BYTES, SCHEMA_COLUMN_QUALIFIER_BYTES, timestamp, entryBytes); mSchemaHashTable.put(putHash); if (flush) { mSchemaHashTable.flushCommits(); } }