/**
 * Creates the cache config.
 * @param family The current column family.
 */
protected void createCacheConf(final ColumnFamilyDescriptor family) {
  this.cacheConf = new CacheConfig(conf, family, region.getBlockCache());
}
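A hedged usage sketch of the three-argument constructor above, not taken from any snippet here: per-family descriptor settings override the global Configuration. The conf and region fields and the family name "cf" are assumed for illustration.

// Usage sketch: the family descriptor's setting wins over the global default.
// "conf" and "region" are assumed to come from the enclosing class, as above.
ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
  .newBuilder(Bytes.toBytes("cf"))
  .setInMemory(true) // family-level override of the global default
  .build();
CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache());
assertTrue(cacheConf.isInMemory());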
@Override
void setUp() throws Exception {
  reader = HFile.createReader(this.fs, this.mf, new CacheConfig(this.conf), true, this.conf);
  this.reader.loadFileInfo();
}
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  // This test requires the most recent HFile format.
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  fs = FileSystem.get(conf);
  blockCache = BlockCacheFactory.createBlockCache(conf);
  cacheConf = new CacheConfig(conf, blockCache);
}
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
    cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
    cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cacheConf = new CacheConfig(conf, blockCache);
  fs = HFileSystem.get(conf);
}
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
  fs = HFileSystem.get(conf);
  blockCache = BlockCacheFactory.createBlockCache(conf);
  cacheConf = new CacheConfig(conf, blockCache);
}
@BeforeClass
public static void setUp() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  cacheConf = new CacheConfig(conf);
  fs = TEST_UTIL.getTestFileSystem();
}
@Test
public void testPrefetchSetInHCDWorks() {
  ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("f")).setPrefetchBlocksOnOpen(true).build();
  Configuration c = HBaseConfiguration.create();
  assertFalse(c.getBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false));
  CacheConfig cc = new CacheConfig(c, columnFamilyDescriptor, blockCache);
  assertTrue(cc.shouldPrefetchOnOpen());
}
private void addStoreFile() throws IOException {
  HStoreFile f = this.store.getStorefiles().iterator().next();
  Path storedir = f.getPath().getParent();
  long seqid = this.store.getMaxSequenceId().orElse(0L);
  Configuration c = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(c);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
  StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
    .withOutputDir(storedir)
    .withFileContext(fileContext)
    .build();
  w.appendMetadata(seqid + 1, false);
  w.close();
  LOG.info("Added store file:" + w.getPath());
}
/**
 * Creates an HFile at the given path containing a single KeyValue whose value is the given
 * dummy data.
 */
private void createHFile(Path path, int rowIdx, byte[] dummyData) throws IOException {
  HFileContext meta = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
    .withFileContext(meta).create();
  long now = System.currentTimeMillis();
  try {
    KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY,
      Bytes.toBytes("colX"), now, dummyData);
    writer.append(kv);
  } finally {
    writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis()));
    writer.close();
  }
}
private long countMobCellsInMetadata() throws IOException {
  long mobCellsCount = 0;
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, htd.getTableName(), hcd.getNameAsString());
  Configuration copyOfConf = new Configuration(conf);
  // Disable the block cache for this reader; only the file metadata is needed.
  copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  CacheConfig cacheConfig = new CacheConfig(copyOfConf);
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
    for (FileStatus file : files) {
      HStoreFile sf = new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true);
      sf.initReader();
      Map<byte[], byte[]> fileInfo = sf.getReader().loadFileInfo();
      byte[] count = fileInfo.get(MOB_CELLS_COUNT);
      assertNotNull(count);
      mobCellsCount += Bytes.toLong(count);
    }
  }
  return mobCellsCount;
}
private static void createHFile(Configuration conf, FileSystem fs, Path path, byte[] family,
    byte[] qualifier) throws IOException {
  HFileContext context = new HFileContextBuilder().build();
  HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path)
    .withFileContext(context).create();
  long now = System.currentTimeMillis();
  try {
    for (int i = 1; i <= 9; i++) {
      KeyValue kv =
        new KeyValue(Bytes.toBytes(i + ""), family, qualifier, now, Bytes.toBytes(i + ""));
      writer.append(kv);
    }
  } finally {
    writer.close();
  }
}
@Test
public void testLruBlockCache() throws IOException {
  CacheConfig cc = new CacheConfig(this.conf);
  assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
  BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
  assertTrue(blockCache instanceof LruBlockCache);
  logPerBlock(blockCache);
  addDataAndHits(blockCache, 3);
  // The below has no asserts. It is just exercising toString and toJSON code.
  LOG.info("count=" + blockCache.getBlockCount() + ", currentSize=" + blockCache.getCurrentSize()
    + ", freeSize=" + blockCache.getFreeSize());
  LOG.info(Objects.toString(blockCache.getStats()));
  BlockCacheUtil.CachedBlocksByFile cbsbf = logPerBlock(blockCache);
  LOG.info(Objects.toString(cbsbf));
  logPerFile(cbsbf);
  bucketCacheReport(blockCache);
  LOG.info(BlockCacheUtil.toJSON(cbsbf));
}
@Test
public void testCacheConfigDefaultLRUBlockCache() {
  CacheConfig cc = new CacheConfig(this.conf);
  assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
  BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
  basicBlockCacheOps(blockCache, cc, false, true);
  assertTrue(blockCache instanceof LruBlockCache);
}
private int verifyHFile(Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader =
    HFile.createReader(p.getFileSystem(conf), p, new CacheConfig(conf), true, conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  assertTrue(count > 0);
  reader.close();
  return count;
}
private byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader =
    HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    assertNotNull("Crypto context has no key", key);
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader =
    HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    assertNotNull("Crypto context has no key", key);
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
private static byte[] extractHFileKey(Path path) throws Exception {
  HFile.Reader reader =
    HFile.createReader(TEST_UTIL.getTestFileSystem(), path, new CacheConfig(conf), true, conf);
  try {
    reader.loadFileInfo();
    Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();
    assertNotNull("Reader has a null crypto context", cryptoContext);
    Key key = cryptoContext.getKey();
    if (key == null) {
      return null;
    }
    return key.getEncoded();
  } finally {
    reader.close();
  }
}
MockHStoreFile(HBaseTestingUtility testUtil, Path testPath, long length, long ageInDisk,
    boolean isRef, long sequenceid) throws IOException {
  super(testUtil.getTestFileSystem(), testPath, testUtil.getConfiguration(),
    new CacheConfig(testUtil.getConfiguration()), BloomType.NONE, true);
  this.length = length;
  this.isRef = isRef;
  this.ageInDisk = ageInDisk;
  this.sequenceid = sequenceid;
  this.isMajor = false;
  hdfsBlocksDistribution = new HDFSBlocksDistribution();
  hdfsBlocksDistribution.addHostsAndBlockWeight(
    new String[] { RSRpcServices.getHostname(testUtil.getConfiguration(), false) }, 1);
  modificationTime = EnvironmentEdgeManager.currentTime();
}
/**
 * Returns the total number of KVs in the given HFile.
 * @param fs the file system
 * @param p the HFile path
 * @return the KV count in the given HFile
 * @throws IOException if the file cannot be read
 */
private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException {
  Configuration conf = util.getConfiguration();
  HFile.Reader reader = HFile.createReader(fs, p, new CacheConfig(conf), true, conf);
  reader.loadFileInfo();
  HFileScanner scanner = reader.getScanner(false, false);
  scanner.seekTo();
  int count = 0;
  do {
    count++;
  } while (scanner.next());
  reader.close();
  return count;
}
private void doBucketCacheConfigTest() {
  final int bcSize = 100;
  this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
  CacheConfig cc = new CacheConfig(this.conf);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(this.conf);
  basicBlockCacheOps(blockCache, cc, false, false);
  assertTrue(blockCache instanceof CombinedBlockCache);
  // TODO: Assert sizes allocated are right and proportions.
  CombinedBlockCache cbc = (CombinedBlockCache) blockCache;
  BlockCache[] bcs = cbc.getBlockCaches();
  assertTrue(bcs[0] instanceof LruBlockCache);
  LruBlockCache lbc = (LruBlockCache) bcs[0];
  assertEquals(MemorySizeUtil.getOnHeapCacheSize(this.conf), lbc.getMaxSize());
  assertTrue(bcs[1] instanceof BucketCache);
  BucketCache bc = (BucketCache) bcs[1];
  // getMaxSize comes back in bytes but we specified size in MB.
  assertEquals(bcSize, bc.getMaxSize() / (1024 * 1024));
}
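Note that BlockCacheFactory only builds the CombinedBlockCache asserted above when a bucket cache IO engine is also configured; in the HBase test suite the caller sets it before invoking this method. A minimal sketch, assuming the standard HBase 2.x configuration keys:

// Minimal sketch (assumed setup; not part of the test method above).
Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); // or e.g. a file-backed engine
conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);        // interpreted as MB
BlockCache cache = BlockCacheFactory.createBlockCache(conf);
// cache is now a CombinedBlockCache: an on-heap LRU tier for index/bloom
// blocks backed by a BucketCache tier for data blocks.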