@Override public void bulkHFile(ColumnFamilyDescriptorBuilder builder, FileStatus hfileStatus) throws IOException { Path hfile = hfileStatus.getPath(); try (HFile.Reader reader = HFile.createReader(fs, hfile, CacheConfig.DISABLED, true, getConf())) { if (builder.getCompressionType() != reader.getFileContext().getCompression()) { builder.setCompressionType(reader.getFileContext().getCompression()); LOG.info("Setting compression " + reader.getFileContext().getCompression().name() + " for family " + builder.getNameAsString()); } reader.loadFileInfo(); byte[] first = reader.getFirstRowKey().get(); byte[] last = reader.getLastRowKey().get(); LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.containsKey(first) ? map.get(first) : 0; map.put(first, value + 1); value = map.containsKey(last) ? map.get(last) : 0; map.put(last, value - 1); } } });
@Override public void bulkHFile(final HColumnDescriptor hcd, final FileStatus hfileStatus) throws IOException { Path hfile = hfileStatus.getPath(); HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(getConf()), getConf()); try { if (hcd.getCompressionType() != reader.getFileContext().getCompression()) { hcd.setCompressionType(reader.getFileContext().getCompression()); LOG.info("Setting compression " + hcd.getCompressionType().name() + " for family " + hcd.toString()); } reader.loadFileInfo(); byte[] first = reader.getFirstRowKey(); byte[] last = reader.getLastRowKey(); LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); // To eventually infer start key-end key boundaries Integer value = map.containsKey(first)? map.get(first):0; map.put(first, value+1); value = map.containsKey(last)? map.get(last):0; map.put(last, value-1); } finally { reader.close(); } } });
// Copy the family's cache/compression/encoding settings into the attribute
// map. Enum-valued settings are recorded via name(); a missing setting is
// recorded as null.
columnFamily.setAttribute(ATTR_CF_CACHED_DATA_ON_WRITE, hColumnDescriptor.isCacheDataOnWrite());
columnFamily.setAttribute(ATTR_CF_CACHED_INDEXES_ON_WRITE, hColumnDescriptor.isCacheIndexesOnWrite());
columnFamily.setAttribute(ATTR_CF_COMPACTION_COMPRESSION_TYPE,
    hColumnDescriptor.getCompactionCompressionType() == null
        ? null : hColumnDescriptor.getCompactionCompressionType().name());
columnFamily.setAttribute(ATTR_CF_COMPRESSION_TYPE,
    hColumnDescriptor.getCompressionType() == null
        ? null : hColumnDescriptor.getCompressionType().name());
columnFamily.setAttribute(ATTR_CF_DATA_BLOCK_ENCODING,
    hColumnDescriptor.getDataBlockEncoding() == null
        ? null : hColumnDescriptor.getDataBlockEncoding().name());
columnFamily.setAttribute(ATTR_CF_ENCRYPTION_TYPE, hColumnDescriptor.getEncryptionType());
// Copy the family's cache/compression/encoding settings into the attribute
// map. Enum-valued settings are recorded via name(); a missing setting is
// recorded as null.
columnFamily.setAttribute(ATTR_CF_CACHED_DATA_ON_WRITE, hColumnDescriptor.isCacheDataOnWrite());
columnFamily.setAttribute(ATTR_CF_CACHED_INDEXES_ON_WRITE, hColumnDescriptor.isCacheIndexesOnWrite());
columnFamily.setAttribute(ATTR_CF_COMPACTION_COMPRESSION_TYPE,
    hColumnDescriptor.getCompactionCompressionType() == null
        ? null : hColumnDescriptor.getCompactionCompressionType().name());
columnFamily.setAttribute(ATTR_CF_COMPRESSION_TYPE,
    hColumnDescriptor.getCompressionType() == null
        ? null : hColumnDescriptor.getCompressionType().name());
columnFamily.setAttribute(ATTR_CF_DATA_BLOCK_ENCODING,
    hColumnDescriptor.getDataBlockEncoding() == null
        ? null : hColumnDescriptor.getDataBlockEncoding().name());
columnFamily.setAttribute(ATTR_CF_ENCRYPTION_TYPE, hColumnDescriptor.getEncryptionType());
// Copy the family's cache/compression/encoding settings into the attribute
// map. Enum-valued settings are recorded via name(); a missing setting is
// recorded as null.
ret.setAttribute(ATTR_CF_CACHED_DATA_ON_WRITE, hcdt.isCacheDataOnWrite());
ret.setAttribute(ATTR_CF_CACHED_INDEXES_ON_WRITE, hcdt.isCacheIndexesOnWrite());
ret.setAttribute(ATTR_CF_COMPACTION_COMPRESSION_TYPE,
    hcdt.getCompactionCompressionType() == null
        ? null : hcdt.getCompactionCompressionType().name());
ret.setAttribute(ATTR_CF_COMPRESSION_TYPE,
    hcdt.getCompressionType() == null
        ? null : hcdt.getCompressionType().name());
ret.setAttribute(ATTR_CF_DATA_BLOCK_ENCODING,
    hcdt.getDataBlockEncoding() == null
        ? null : hcdt.getDataBlockEncoding().name());
ret.setAttribute(ATTR_CF_ENCRYPTION_TYPE, hcdt.getEncryptionType());
// Copy the family's cache/compression/encoding settings into the attribute
// map. Enum-valued settings are recorded via name(); a missing setting is
// recorded as null.
ret.setAttribute(ATTR_CF_CACHED_DATA_ON_WRITE, hcdt.isCacheDataOnWrite());
ret.setAttribute(ATTR_CF_CACHED_INDEXES_ON_WRITE, hcdt.isCacheIndexesOnWrite());
ret.setAttribute(ATTR_CF_COMPACTION_COMPRESSION_TYPE,
    hcdt.getCompactionCompressionType() == null
        ? null : hcdt.getCompactionCompressionType().name());
ret.setAttribute(ATTR_CF_COMPRESSION_TYPE,
    hcdt.getCompressionType() == null
        ? null : hcdt.getCompressionType().name());
ret.setAttribute(ATTR_CF_DATA_BLOCK_ENCODING,
    hcdt.getDataBlockEncoding() == null
        ? null : hcdt.getDataBlockEncoding().name());
ret.setAttribute(ATTR_CF_ENCRYPTION_TYPE, hcdt.getEncryptionType());
/**
 * Sets the compression algorithm for this family's compaction output.
 * The value is stored under {@code COMPRESSION_COMPACT_BYTES}, distinct
 * from the {@code COMPRESSION_BYTES} key used by {@code setCompressionType}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompactionCompressionType(
    Compression.Algorithm type) {
  return setValue(COMPRESSION_COMPACT_BYTES, type.name());
}
/**
 * Sets the compression algorithm for this family's store files. The value
 * is stored under {@code COMPRESSION_BYTES}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) {
  return setValue(COMPRESSION_BYTES, type.name());
}
/**
 * Sets the compression algorithm for this family's compaction output.
 * The value is stored under {@code COMPRESSION_COMPACT_BYTES}, distinct
 * from the {@code COMPRESSION_BYTES} key used by {@code setCompressionType}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompactionCompressionType(
    Compression.Algorithm type) {
  return setValue(COMPRESSION_COMPACT_BYTES, type.name());
}
/**
 * Sets the compression algorithm for this family's store files. The value
 * is stored under {@code COMPRESSION_BYTES}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) {
  return setValue(COMPRESSION_BYTES, type.name());
}
/**
 * Sets the compression algorithm for this family's compaction output.
 * The value is stored under {@code COMPRESSION_COMPACT_BYTES}, distinct
 * from the {@code COMPRESSION_BYTES} key used by {@code setCompressionType}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompactionCompressionType(
    Compression.Algorithm type) {
  return setValue(COMPRESSION_COMPACT_BYTES, type.name());
}
/**
 * Sets the compression algorithm for this family's store files. The value
 * is stored under {@code COMPRESSION_BYTES}.
 * <p>
 * Compression types supported in hbase. LZO is not bundled as part of the
 * hbase distribution. See
 * <a href="http://wiki.apache.org/hadoop/UsingLzoCompression">LZO
 * Compression</a>
 * for how to enable it.
 *
 * @param type Compression type setting; must not be null — {@code type.name()}
 *     is stored, so a null argument throws {@link NullPointerException}.
 * @return this (for chained invocation)
 */
public ModifyableColumnFamilyDescriptor setCompressionType(Compression.Algorithm type) {
  return setValue(COMPRESSION_BYTES, type.name());
}