@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  this.keyValues = ImmutableMap.of("k1", "v1", "k2", "v2");
  this.key = "my_conf_key";
}
@Test
public void testInvalidExtraFreeFactorConfig() throws IOException {
  float[] configValues = { -1f, 0f, 0.2f, 1.05f };
  // throws due to < 0, in expected range, in expected range, config can be > 1.0
  boolean[] expectedOutcomes = { false, true, true, true };
  Map<String, float[]> configMappings =
      ImmutableMap.of(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, configValues);
  Configuration conf = HBaseConfiguration.create();
  checkConfigValues(conf, configMappings, expectedOutcomes);
}
@Test
public void testInvalidAcceptFactorConfig() throws IOException {
  float[] configValues = { -1f, 0.2f, 0.86f, 1.05f };
  boolean[] expectedOutcomes = { false, false, true, false };
  Map<String, float[]> configMappings =
      ImmutableMap.of(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, configValues);
  Configuration conf = HBaseConfiguration.create();
  checkConfigValues(conf, configMappings, expectedOutcomes);
}
@Test
public void testInvalidMinFactorConfig() throws IOException {
  float[] configValues = { -1f, 0f, 0.96f, 1.05f };
  // throws due to < 0, in expected range, minFactor > acceptableFactor, > 1.0
  boolean[] expectedOutcomes = { false, true, false, false };
  Map<String, float[]> configMappings =
      ImmutableMap.of(BucketCache.MIN_FACTOR_CONFIG_NAME, configValues);
  Configuration conf = HBaseConfiguration.create();
  checkConfigValues(conf, configMappings, expectedOutcomes);
}
@Test
public void testInvalidCacheSplitFactorConfig() throws IOException {
  float[] singleFactorConfigValues = { 0.2f, 0f, -0.2f, 1f };
  float[] multiFactorConfigValues = { 0.4f, 0f, 1f, .05f };
  float[] memoryFactorConfigValues = { 0.4f, 0f, 0.2f, .5f };
  // All configs add up to 1.0 and are between 0 and 1.0, configs don't add to 1.0,
  // configs can't be negative, configs don't add to 1.0
  boolean[] expectedOutcomes = { true, false, false, false };
  Map<String, float[]> configMappings =
      ImmutableMap.of(BucketCache.SINGLE_FACTOR_CONFIG_NAME, singleFactorConfigValues,
        BucketCache.MULTI_FACTOR_CONFIG_NAME, multiFactorConfigValues,
        BucketCache.MEMORY_FACTOR_CONFIG_NAME, memoryFactorConfigValues);
  Configuration conf = HBaseConfiguration.create();
  checkConfigValues(conf, configMappings, expectedOutcomes);
}
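// The four factor-config tests above all delegate to a checkConfigValues helper that is not
// included in this excerpt. A minimal sketch of what such a helper might look like, assuming
// test-level BucketCache constructor arguments (ioEngineName, capacitySize,
// constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen, persistencePath)
// like those used elsewhere in the test class; it is illustrative only.
private void checkConfigValues(Configuration conf, Map<String, float[]> configMap,
    boolean[] expectSuccess) throws IOException {
  Set<String> configNames = configMap.keySet();
  for (int i = 0; i < expectSuccess.length; i++) {
    try {
      // Apply the i-th candidate value for every config under test.
      for (String configName : configNames) {
        conf.setFloat(configName, configMap.get(configName)[i]);
      }
      // Constructing the cache validates the factor configs and throws on invalid values.
      new BucketCache(ioEngineName, capacitySize, constructedBlockSize, constructedBlockSizes,
          writeThreads, writerQLen, persistencePath, 100, conf);
      assertTrue("Expected configuration set " + i + " to be rejected", expectSuccess[i]);
    } catch (IllegalArgumentException e) {
      assertFalse("Expected configuration set " + i + " to be accepted", expectSuccess[i]);
    }
  }
}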
/**
 * List lock queues.
 * @return the locks
 */
List<LockedResource> getLocks() {
  List<LockedResource> lockedResources = new ArrayList<>();
  addToLockedResources(lockedResources, serverLocks, sn -> sn.getServerName(),
    LockedResourceType.SERVER);
  addToLockedResources(lockedResources, namespaceLocks, Function.identity(),
    LockedResourceType.NAMESPACE);
  addToLockedResources(lockedResources, tableLocks, tn -> tn.getNameAsString(),
    LockedResourceType.TABLE);
  addToLockedResources(lockedResources, regionLocks, Function.identity(),
    LockedResourceType.REGION);
  addToLockedResources(lockedResources, peerLocks, Function.identity(),
    LockedResourceType.PEER);
  addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock),
    tn -> tn.getNameAsString(), LockedResourceType.META);
  return lockedResources;
}
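// getLocks() relies on a generic addToLockedResources helper that is not part of this excerpt.
// A minimal sketch, assuming the lock maps hold LockAndQueue values with an isLocked() check
// and that a createLockedResource factory exists to build the report entries:
private <T> void addToLockedResources(List<LockedResource> lockedResources,
    Map<T, LockAndQueue> locks, Function<T, String> keyTransformer,
    LockedResourceType resourceType) {
  // Only report queues that currently hold a lock; convert each key to its display name.
  locks.entrySet().stream().filter(e -> e.getValue().isLocked())
      .map(e -> createLockedResource(resourceType, keyTransformer.apply(e.getKey()),
        e.getValue()))
      .forEachOrdered(lockedResources::add);
}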
@Before
public void setUp() throws Exception {
  this.subject = Mockito.spy(new MultiTableSnapshotInputFormatImpl());

  // mock out restoreSnapshot
  // TODO: this is kind of meh; it'd be much nicer to just inject the RestoreSnapshotHelper
  // dependency into the input format. However, we need a new RestoreSnapshotHelper per
  // snapshot in the current design, and it *also* feels weird to introduce a
  // RestoreSnapshotHelperFactory and inject that, which would probably be the more "pure"
  // way of doing things. This is the lesser of two evils, perhaps?
  doNothing().when(this.subject).restoreSnapshot(any(), any(), any(), any(), any());

  this.conf = new Configuration();
  this.rootDir = new Path("file:///test-root-dir");
  FSUtils.setRootDir(conf, rootDir);
  this.snapshotScans = ImmutableMap.<String, Collection<Scan>>of(
    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))),
    "snapshot2", ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")),
      new Scan(Bytes.toBytes("5"), Bytes.toBytes("6"))));
  this.restoreDir = new Path(FSUtils.getRootDir(conf), "restore-dir");
}
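// A minimal sketch (not part of the original fixture) of how the setup above might be
// exercised, assuming MultiTableSnapshotInputFormatImpl exposes setInput(Configuration,
// Map<String, Collection<Scan>>, Path) and getSnapshotsToScans(Configuration) with the
// behavior their names suggest:
@Test
public void testSetInputRoundTripsSnapshotScans() throws Exception {
  subject.setInput(this.conf, snapshotScans, restoreDir);
  Map<String, Collection<Scan>> actual = subject.getSnapshotsToScans(conf);
  // Scan does not implement equals(), so only compare the snapshot names here.
  assertEquals(snapshotScans.keySet(), actual.keySet());
}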
@Override
public String toString() {
  return "serverLocks=" + filterUnlocked(this.serverLocks) +
    ", namespaceLocks=" + filterUnlocked(this.namespaceLocks) +
    ", tableLocks=" + filterUnlocked(this.tableLocks) +
    ", regionLocks=" + filterUnlocked(this.regionLocks) +
    ", peerLocks=" + filterUnlocked(this.peerLocks) +
    ", metaLocks=" + filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock));
}
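// toString() above depends on a filterUnlocked helper that is not shown. A minimal sketch,
// assuming the intent is to print only the queues that currently hold a lock or have waiting
// procedures, and that LockAndQueue exposes isLocked() and isWaitingQueueEmpty():
private String filterUnlocked(Map<?, LockAndQueue> locks) {
  StringBuilder sb = new StringBuilder("{");
  for (Map.Entry<?, LockAndQueue> entry : locks.entrySet()) {
    LockAndQueue lq = entry.getValue();
    // Skip idle queues so the string stays readable on large clusters.
    if (lq.isLocked() || !lq.isWaitingQueueEmpty()) {
      if (sb.length() > 1) {
        sb.append(", ");
      }
      sb.append(entry.getKey()).append("=").append(lq);
    }
  }
  return sb.append("}").toString();
}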
@Test
public void testSubset() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // subset is used in TableMapReduceUtil#initCredentials to support different security
  // configurations between source and destination clusters, so we'll use that as an example
  String prefix = "hbase.mapred.output.";
  conf.set("hbase.security.authentication", "kerberos");
  conf.set("hbase.regionserver.kerberos.principal", "hbasesource");
  HBaseConfiguration.setWithPrefix(conf, prefix,
    ImmutableMap.of("hbase.regionserver.kerberos.principal", "hbasedest", "", "shouldbemissing")
      .entrySet());

  Configuration subsetConf = HBaseConfiguration.subset(conf, prefix);
  assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal"));
  assertEquals("hbasedest", subsetConf.get("hbase.regionserver.kerberos.principal"));
  assertNull(subsetConf.get("hbase.security.authentication"));
  assertNull(subsetConf.get(""));

  Configuration mergedConf = HBaseConfiguration.create(conf);
  HBaseConfiguration.merge(mergedConf, subsetConf);
  assertEquals("hbasedest", mergedConf.get("hbase.regionserver.kerberos.principal"));
  assertEquals("kerberos", mergedConf.get("hbase.security.authentication"));
  assertEquals("shouldbemissing", mergedConf.get(prefix));
}
private static void appendRegionEvent(Writer w, String region) throws IOException {
  WALProtos.RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor(
    WALProtos.RegionEventDescriptor.EventType.REGION_OPEN, TABLE_NAME.toBytes(),
    Bytes.toBytes(region), Bytes.toBytes(String.valueOf(region.hashCode())), 1,
    ServerName.parseServerName("ServerName:9099"), ImmutableMap.<byte[], List<Path>>of());
  final long time = EnvironmentEdgeManager.currentTime();
  KeyValue kv = new KeyValue(Bytes.toBytes(region), WALEdit.METAFAMILY, WALEdit.REGION_EVENT,
    time, regionOpenDesc.toByteArray());
  final WALKeyImpl walKey =
    new WALKeyImpl(Bytes.toBytes(region), TABLE_NAME, 1, time, HConstants.DEFAULT_CLUSTER_ID);
  w.append(new Entry(walKey, new WALEdit().add(kv)));
  w.sync(false);
}
private void updatePushedSeqId(RegionInfo region, long seqId) throws ReplicationException {
  QUEUE_STORAGE.setWALPosition(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(),
    PEER_1, WAL_FILE_NAME, 10, ImmutableMap.of(region.getEncodedName(), seqId));
  QUEUE_STORAGE.setWALPosition(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(),
    PEER_2, WAL_FILE_NAME, 10, ImmutableMap.of(region.getEncodedName(), seqId));
}
private void updatePushedSeqId(RegionInfo region, long seqId) throws ReplicationException {
  QUEUE_STORAGE.setWALPosition(UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(),
    PEER_ID, WAL_FILE_NAME, 10, ImmutableMap.of(region.getEncodedName(), seqId));
}
@Test
public void testRemoveAllLastPushedSeqIdsForPeer() throws Exception {
  String peerId = "1";
  String peerIdToDelete = "2";
  for (int i = 0; i < 100; i++) {
    String encodedRegionName = MD5Hash.getMD5AsHex(Bytes.toBytes(i));
    STORAGE.setLastSequenceIds(peerId, ImmutableMap.of(encodedRegionName, (long) i));
    STORAGE.setLastSequenceIds(peerIdToDelete, ImmutableMap.of(encodedRegionName, (long) i));
  }
  for (int i = 0; i < 100; i++) {
    String encodedRegionName = MD5Hash.getMD5AsHex(Bytes.toBytes(i));
    assertEquals(i, STORAGE.getLastSequenceId(encodedRegionName, peerId));
    assertEquals(i, STORAGE.getLastSequenceId(encodedRegionName, peerIdToDelete));
  }
  STORAGE.removeLastSequenceIds(peerIdToDelete);
  for (int i = 0; i < 100; i++) {
    String encodedRegionName = MD5Hash.getMD5AsHex(Bytes.toBytes(i));
    assertEquals(i, STORAGE.getLastSequenceId(encodedRegionName, peerId));
    assertEquals(HConstants.NO_SEQNUM,
      STORAGE.getLastSequenceId(encodedRegionName, peerIdToDelete));
  }
}
@Before
public void setUpBase() throws Exception {
  if (!peerExist(PEER_ID2)) {
    ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder()
      .setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer());
    if (isSyncPeer()) {
      FileSystem fs2 = utility2.getTestFileSystem();
      // The remote wal dir is not important as we do not use it in DA state, here we only
      // need to confirm that a sync peer in DA state can still replicate data to remote
      // cluster asynchronously.
      builder.setReplicateAllUserTables(false)
        .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of()))
        .setRemoteWALDir(new Path("/RemoteWAL")
          .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString());
    }
    hbaseAdmin.addReplicationPeer(PEER_ID2, builder.build());
  }
}
@Test
public void testSetWALPositionBadVersion() throws IOException, ReplicationException {
  ZKReplicationQueueStorage storage = createWithUnstableVersion();
  ServerName serverName1 = ServerName.valueOf("128.0.0.1", 8000, 10000);
  assertTrue(storage.getAllQueues(serverName1).isEmpty());
  String queue1 = "1";
  String fileName = getFileName("file1", 0);
  String encodedRegionName = "31d9792f4435b99d9fb1016f6fbc8dc6";
  storage.addWAL(serverName1, queue1, fileName);

  List<String> wals1 = storage.getWALsInQueue(serverName1, queue1);
  assertEquals(1, wals1.size());
  assertEquals(0, storage.getWALPosition(serverName1, queue1, fileName));

  // This should return eventually when data version stabilizes
  storage.setWALPosition(serverName1, queue1, fileName, 100,
    ImmutableMap.of(encodedRegionName, 120L));

  assertEquals(100, storage.getWALPosition(serverName1, queue1, fileName));
  assertEquals(120L, storage.getLastSequenceId(encodedRegionName, queue1));
}
@Test
public void testRemoveTable() throws Exception {
  TableName tableName = createTable();
  ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
    .setClusterKey("127.0.0.1:2181:/hbase")
    .setReplicationEndpointImpl(LocalReplicationEndpoint.class.getName())
    .setReplicateAllUserTables(false)
    .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).setSerial(true)
    .build();
  UTIL.getAdmin().addReplicationPeer(PEER_ID, peerConfig, true);
  try (Table table = UTIL.getConnection().getTable(tableName)) {
    for (int i = 0; i < 100; i++) {
      table.put(new Put(Bytes.toBytes(i)).addColumn(CF, CQ, Bytes.toBytes(i)));
    }
  }
  RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getRegionInfo();
  waitUntilHasLastPushedSequenceId(region);
  UTIL.getAdmin().updateReplicationPeerConfig(PEER_ID,
    ReplicationPeerConfig.newBuilder(peerConfig).setTableCFsMap(Collections.emptyMap()).build());
  ReplicationQueueStorage queueStorage =
    UTIL.getMiniHBaseCluster().getMaster().getReplicationPeerManager().getQueueStorage();
  assertEquals(HConstants.NO_SEQNUM,
    queueStorage.getLastSequenceId(region.getEncodedName(), PEER_ID));
}
// Opening call assumed: the first peer config is registered on UTIL1's admin, mirroring the
// UTIL2 registration that follows.
UTIL1.getAdmin().addReplicationPeer(PEER_ID,
  ReplicationPeerConfig.newBuilder().setClusterKey(UTIL2.getClusterKey())
    .setReplicateAllUserTables(false)
    .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>()))
    .setRemoteWALDir(REMOTE_WAL_DIR2.toUri().toString()).build());
UTIL2.getAdmin().addReplicationPeer(PEER_ID,
  ReplicationPeerConfig.newBuilder().setClusterKey(UTIL1.getClusterKey())
    .setReplicateAllUserTables(false)
    .setTableCFsMap(ImmutableMap.of(TABLE_NAME, new ArrayList<>()))
    .setRemoteWALDir(REMOTE_WAL_DIR1.toUri().toString()).build());
UTIL.getAdmin().updateReplicationPeerConfig(PEER_ID,
  ReplicationPeerConfig.newBuilder(peerConfig)
    .setTableCFsMap(ImmutableMap.of(tableName, Collections.emptyList())).build());
UTIL.getAdmin().enableReplicationPeer(PEER_ID);
try (Table table = UTIL.getConnection().getTable(tableName)) {
// Builder head assumed: the chained calls configure the peerConfig registered just below.
ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
  .setClusterKey(getZKClusterKey()).setReplicationEndpointImpl(TestEndpoint.class.getName())
  .setReplicateAllUserTables(false).setSerial(isSerial)
  .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of())).build();
admin.addReplicationPeer(peerId, peerConfig);