/**
 * Returns the factory to be used to create {@link HFile} writers.
 *
 * @param conf configuration the hfile format version is read from
 * @param cacheConf cache configuration handed to the writer factory
 * @return a {@link WriterFactory} that produces version-3 hfiles
 * @throws IllegalArgumentException if the configured format version cannot be written
 *   (v2 is read-only in this software version; anything else is unknown)
 */
public static final WriterFactory getWriterFactory(Configuration conf, CacheConfig cacheConf) {
  final int version = getFormatVersion(conf);
  // Only v3 is writable; v2 gets a targeted message since it is still readable.
  if (version == 3) {
    return new HFile.WriterFactory(conf, cacheConf);
  }
  if (version == 2) {
    throw new IllegalArgumentException("This should never happen. "
        + "Did you change hfile.format.version to read v2? This version of the software writes v3"
        + " hfiles only (but it can read v2 files without having to update hfile.format.version "
        + "in hbase-site.xml)");
  }
  throw new IllegalArgumentException("Cannot create writer for HFile "
      + "format version " + version);
}
conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create();
conf.setInt(HFileBlockIndex.MIN_INDEX_NUM_ENTRIES_KEY, minNumEntries); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create();
.withIncludesTags(useTags) .withCompression(compressAlgo).build(); HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context)
conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create();
conf.setInt(HFileBlockIndex.MIN_INDEX_NUM_ENTRIES_KEY, minNumEntries); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create();
/** * Write {@code entryCount} random keyvalues to a new HFile at {@code path}. Returns the row * bytes of the KeyValues written, in the order they were written. */ private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path, HFileContext cxt, int entryCount) throws IOException { HFile.Writer writer = new HFile.WriterFactory(conf, cc) .withPath(fs, path) .withFileContext(cxt) .create(); // write a bunch of random kv's Random rand = new Random(9713312); // some seed. final byte[] family = Bytes.toBytes("f"); final byte[] qualifier = Bytes.toBytes("q"); for (int i = 0; i < entryCount; i++) { byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] valueBytes = RandomKeyValueUtil.randomValue(rand); // make a real keyvalue so that hfile tool can examine it writer.append(new KeyValue(keyBytes, family, qualifier, valueBytes)); } writer.close(); }
.withIncludesTags(useTags) .withCompression(compressAlgo).build(); HFile.Writer writer = new HFile.WriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context)
/** * Write {@code entryCount} random keyvalues to a new HFile at {@code path}. Returns the row * bytes of the KeyValues written, in the order they were written. */ private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path, HFileContext cxt, int entryCount) throws IOException { HFile.Writer writer = new HFile.WriterFactory(conf, cc) .withPath(fs, path) .withFileContext(cxt) .create(); // write a bunch of random kv's Random rand = new Random(9713312); // some seed. final byte[] family = Bytes.toBytes("f"); final byte[] qualifier = Bytes.toBytes("q"); for (int i = 0; i < entryCount; i++) { byte[] keyBytes = RandomKeyValueUtil.randomOrderedKey(rand, i); byte[] valueBytes = RandomKeyValueUtil.randomValue(rand); // make a real keyvalue so that hfile tool can examine it writer.append(new KeyValue(keyBytes, family, qualifier, valueBytes)); } writer.close(); }