@Test public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("f")); htd.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); Table table = TEST_UTIL.createTable(htd, null); for(int i = 0; i < 10; i++) { Put p = new Put(Bytes.toBytes("row"+i)); byte[] q1 = Bytes.toBytes("q1"); byte[] v1 = Bytes.toBytes("v1"); p.addColumn(Bytes.toBytes("f"), q1, v1); table.put(p); } this.admin.flush(tableName); try { this.admin.split(tableName, Bytes.toBytes("row5")); Threads.sleep(10000); } catch (Exception e) { // Nothing to do. } // Split should not happen. List<RegionInfo> allRegions = MetaTableAccessor.getTableRegions( this.admin.getConnection(), tableName, true); assertEquals(1, allRegions.size()); }
/**
 * Builds an {@link HTableDescriptor} that reflects the supplied {@link TestOptions}:
 * one column family per {@code opts.families}, each configured with the requested
 * encoding, compression, bloom filter, block size and in-memory settings, plus
 * optional region replication and a custom split policy.
 */
protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(opts.tableName));
  for (int i = 0; i < opts.families; i++) {
    HColumnDescriptor familyDesc = new HColumnDescriptor(Bytes.toBytes(FAMILY_NAME_BASE + i));
    familyDesc.setDataBlockEncoding(opts.blockEncoding);
    familyDesc.setCompressionType(opts.compression);
    familyDesc.setBloomFilterType(opts.bloomType);
    familyDesc.setBlocksize(opts.blockSize);
    if (opts.inMemoryCF) {
      familyDesc.setInMemory(true);
    }
    familyDesc.setInMemoryCompaction(opts.inMemoryCompaction);
    tableDesc.addFamily(familyDesc);
  }
  // Only override cluster defaults when the option differs from the default set.
  if (opts.replicas != DEFAULT_OPTS.replicas) {
    tableDesc.setRegionReplication(opts.replicas);
  }
  if (opts.splitPolicy != null && !opts.splitPolicy.equals(DEFAULT_OPTS.splitPolicy)) {
    tableDesc.setRegionSplitPolicyClassName(opts.splitPolicy);
  }
  return tableDesc;
}
// A split-policy class name that cannot be resolved must render the descriptor
// illegal, while clearing the policy restores validity.
checkTableIsLegal(htd);
checkTableIsIllegal(htd);
htd.setRegionSplitPolicyClassName("nonexisting.foo.class"); // not on the classpath
checkTableIsIllegal(htd);
htd.setRegionSplitPolicyClassName(null); // null falls back to the default policy
checkTableIsLegal(htd);
public void createTable(Admin admin, Configuration hbaseConf) throws IOException { TableName table = getTableName(hbaseConf); if (admin.tableExists(table)) { // do not disable / delete existing table // similar to the approach taken by map-reduce jobs when // output directory exists throw new IOException("Table " + table.getNameAsString() + " already exists."); } HTableDescriptor domainTableDescp = new HTableDescriptor(table); HColumnDescriptor mappCF = new HColumnDescriptor(DomainColumnFamily.INFO.getBytes()); mappCF.setBloomFilterType(BloomType.ROWCOL); domainTableDescp.addFamily(mappCF); domainTableDescp .setRegionSplitPolicyClassName( "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy"); domainTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length", TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH); admin.createTable(domainTableDescp, TimelineHBaseSchemaConstants.getUsernameSplits()); LOG.info("Status of table creation for " + table.getNameAsString() + "=" + admin.tableExists(table)); } }
public void createTable(Admin admin, Configuration hbaseConf) throws IOException { TableName table = getTableName(hbaseConf); if (admin.tableExists(table)) { // do not disable / delete existing table // similar to the approach taken by map-reduce jobs when // output directory exists throw new IOException("Table " + table.getNameAsString() + " already exists."); } HTableDescriptor appToFlowTableDescp = new HTableDescriptor(table); HColumnDescriptor mappCF = new HColumnDescriptor(AppToFlowColumnFamily.MAPPING.getBytes()); mappCF.setBloomFilterType(BloomType.ROWCOL); appToFlowTableDescp.addFamily(mappCF); appToFlowTableDescp .setRegionSplitPolicyClassName( "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy"); appToFlowTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length", TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH); admin.createTable(appToFlowTableDescp, TimelineHBaseSchemaConstants.getUsernameSplits()); LOG.info("Status of table creation for " + table.getNameAsString() + "=" + admin.tableExists(table)); } }
// Metrics cells expire after the configured TTL (falls back to the default).
metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME, DEFAULT_METRICS_TTL));
// Split the entity table only at fixed-length row-key prefix boundaries so
// rows sharing a prefix stay in the same region.
entityTableDescp.setRegionSplitPolicyClassName(
    "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
entityTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
// Metrics cells expire after the configured TTL (falls back to the default).
metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME, DEFAULT_METRICS_TTL));
// Split the application table only at fixed-length row-key prefix boundaries
// so rows sharing a prefix stay in the same region.
applicationTableDescp.setRegionSplitPolicyClassName(
    "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
applicationTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
// Metrics cells expire after the configured TTL (falls back to the default).
metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME, DEFAULT_METRICS_TTL));
// Split the sub-application table only at fixed-length row-key prefix
// boundaries so rows sharing a prefix stay in the same region.
subAppTableDescp.setRegionSplitPolicyClassName(
    "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
subAppTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
// Verifies that a table using DisabledRegionSplitPolicy stays at exactly one
// region even after an explicit split request.
@Test public void testSplitShouldNotHappenIfSplitIsDisabledForTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  htd.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
  Table table = TEST_UTIL.createTable(htd, null);
  // Write a few rows so the region has content a split could act on.
  for(int i = 0; i < 10; i++) {
    Put p = new Put(Bytes.toBytes("row"+i));
    byte[] q1 = Bytes.toBytes("q1");
    byte[] v1 = Bytes.toBytes("v1");
    p.addColumn(Bytes.toBytes("f"), q1, v1);
    table.put(p);
  }
  this.admin.flush(tableName);
  try {
    this.admin.split(tableName, Bytes.toBytes("row5"));
    // Give any (erroneous) split ample time to surface before asserting.
    Threads.sleep(10000);
  } catch (Exception e) {
    // Nothing to do.
    // (The split request may be rejected for a disabled-split table; the
    // region count assertion below is the actual verification.)
  }
  // Split should not happen.
  List<RegionInfo> allRegions = MetaTableAccessor.getTableRegions(
      this.admin.getConnection(), tableName, true);
  assertEquals(1, allRegions.size());
}
/**
 * Builds an {@link HTableDescriptor} for the single benchmark column family,
 * applying the encoding, compression, bloom filter, block size and in-memory
 * settings from the supplied {@link TestOptions}, plus optional region
 * replication and a custom split policy.
 */
protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
  HColumnDescriptor cf = new HColumnDescriptor(FAMILY_NAME);
  cf.setDataBlockEncoding(opts.blockEncoding);
  cf.setCompressionType(opts.compression);
  cf.setBloomFilterType(opts.bloomType);
  cf.setBlocksize(opts.blockSize);
  if (opts.inMemoryCF) {
    cf.setInMemory(true);
  }
  cf.setInMemoryCompaction(opts.inMemoryCompaction);
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(opts.tableName));
  tableDescriptor.addFamily(cf);
  // Only override cluster defaults when the option differs from the default set.
  if (opts.replicas != DEFAULT_OPTS.replicas) {
    tableDescriptor.setRegionReplication(opts.replicas);
  }
  if (opts.splitPolicy != null && !opts.splitPolicy.equals(DEFAULT_OPTS.splitPolicy)) {
    tableDescriptor.setRegionSplitPolicyClassName(opts.splitPolicy);
  }
  return tableDescriptor;
}
/**
 * Create an HTableDescriptor from provided TestOptions.
 *
 * Builds one column family per {@code opts.families}, each configured from the
 * options, and applies region replication / split policy only when they differ
 * from the defaults.
 */
protected static HTableDescriptor getTableDescriptor(TestOptions opts) {
  HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(opts.tableName));
  for (int family = 0; family < opts.families; family++) {
    byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family);
    HColumnDescriptor familyDesc = new HColumnDescriptor(familyName);
    familyDesc.setDataBlockEncoding(opts.blockEncoding);
    familyDesc.setCompressionType(opts.compression);
    familyDesc.setBloomFilterType(opts.bloomType);
    familyDesc.setBlocksize(opts.blockSize);
    if (opts.inMemoryCF) {
      familyDesc.setInMemory(true);
    }
    familyDesc.setInMemoryCompaction(opts.inMemoryCompaction);
    tableDesc.addFamily(familyDesc);
  }
  // Avoid overriding cluster defaults unless explicitly requested.
  if (opts.replicas != DEFAULT_OPTS.replicas) {
    tableDesc.setRegionReplication(opts.replicas);
  }
  if (opts.splitPolicy != null && !opts.splitPolicy.equals(DEFAULT_OPTS.splitPolicy)) {
    tableDesc.setRegionSplitPolicyClassName(opts.splitPolicy);
  }
  return tableDesc;
}
// Two families plus a custom split policy; the test then waits for the
// table's regions to come online before exercising the policy.
htd.addFamily(new HColumnDescriptor("f"));
htd.addFamily(new HColumnDescriptor("i_f"));
htd.setRegionSplitPolicyClassName(CustomSplitPolicy.class.getName());
admin.createTable(htd);
List<HRegion> regions = awaitTableRegions(tableName);
// Setting a split-policy class that does not exist must be rejected; setting
// it back to null must make the descriptor legal again.
checkTableIsLegal(htd);
htd.setRegionSplitPolicyClassName("nonexisting.foo.class"); // unresolvable class
checkTableIsIllegal(htd);
htd.setRegionSplitPolicyClassName(null); // reverts to the default split policy
checkTableIsLegal(htd);