/**
 * Builds a sample "TestDB" ZNRecord in CUSTOMIZED rebalance mode with one
 * partition ("TestDB_0") that has both a state map and a preference list.
 */
private ZNRecord getExampleZNRecord() {
  ZNRecord sample = new ZNRecord("TestDB");
  sample.setSimpleField(IdealStateProperty.REBALANCE_MODE.toString(),
      RebalanceMode.CUSTOMIZED.toString());

  // Partition state map: instance -> state.
  Map<String, String> stateMap = new HashMap<>();
  stateMap.put("localhost_12918", "MASTER");
  stateMap.put("localhost_12919", "SLAVE");
  sample.setMapField("TestDB_0", stateMap);

  // Preference list for the same partition.
  List<String> preferenceList = new ArrayList<>();
  preferenceList.add("localhost_12918");
  preferenceList.add("localhost_12919");
  sample.setListField("TestDB_0", preferenceList);
  return sample;
}
}
ZNRecord recordNew = new ZNRecord(record); recordNew.setSimpleField(IdealStateProperty.REBALANCE_MODE.toString(), RebalanceMode.SEMI_AUTO.toString()); arg1 = new ZnodeOpArg(pathChild2, ZnodePropertyType.ZNODE, "+", recordNew); command1 = new TestCommand(CommandType.MODIFY, new TestTrigger(0, 500, record), arg1);
ZNRecord recordNew = new ZNRecord(record); recordNew.setSimpleField(IdealStateProperty.REBALANCE_MODE.toString(), RebalanceMode.SEMI_AUTO.toString()); ZnodeOpArg arg1 = new ZnodeOpArg(pathChild1, ZnodePropertyType.ZNODE, "+", recordNew); TestCommand command1 =
RebalanceMode.SEMI_AUTO.toString()); admin.rebalance(clusterName, resourceName, 1);
_setupTool.addCluster(_clusterName, true); _setupTool.addResourceToCluster(_clusterName, RESOURCES[0], NUM_PARTITIONS, BuiltInStateModelDefinitions.MasterSlave.name(), RebalanceMode.SEMI_AUTO.toString()); _setupTool.addResourceToCluster(_clusterName, RESOURCES[1], NUM_PARTITIONS, BuiltInStateModelDefinitions.OnlineOffline.name(), RebalanceMode.SEMI_AUTO.toString()); BuiltInStateModelDefinitions.MasterSlave.name(), RebalanceMode.FULL_AUTO.toString()); _setupTool.addResourceToCluster(_clusterName, RESOURCES[3], NUM_PARTITIONS, BuiltInStateModelDefinitions.OnlineOffline.name(), RebalanceMode.FULL_AUTO.toString());
ZNRecord recordNew = new ZNRecord(record); recordNew.setSimpleField(IdealStateProperty.REBALANCE_MODE.toString(), RebalanceMode.SEMI_AUTO.toString()); arg = new ZnodeOpArg(pathChild2, ZnodePropertyType.ZNODE, "+", recordNew); command = new TestCommand(CommandType.MODIFY, new TestTrigger(0, 3000, record), arg);
String testDB = "resource-testDB"; _setupTool.addResourceToCluster(_clusterName, testDB, 1, BuiltInStateModelDefinitions.MasterSlave.name(), RebalanceMode.SEMI_AUTO.toString());
/**
 * Verifies that a USER_DEFINED rebalancer class registered on a resource is
 * loaded and invoked by the controller, and that the resulting external view
 * assigns exactly one replica per partition while the ideal state itself stays
 * empty (the custom rebalancer computes the mapping, not the ideal state).
 */
@Test
public void testCustomizedIdealStateRebalancer() throws InterruptedException {
  _gSetupTool.addResourceToCluster(CLUSTER_NAME, db2, 60, "MasterSlave");
  // Register the custom rebalancer class and switch the resource to USER_DEFINED mode.
  _gSetupTool.addResourceProperty(CLUSTER_NAME, db2,
      IdealStateProperty.REBALANCER_CLASS_NAME.toString(),
      TestCustomizedIdealStateRebalancer.TestRebalancer.class.getName());
  _gSetupTool.addResourceProperty(CLUSTER_NAME, db2, IdealStateProperty.REBALANCE_MODE.toString(),
      RebalanceMode.USER_DEFINED.toString());
  _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, db2, 3);
  boolean result = ClusterStateVerifier
      .verifyByZkCallback(new ExternalViewBalancedVerifier(_gZkClient, CLUSTER_NAME, db2));
  Assert.assertTrue(result);
  // Let the external view settle before reading it back.
  Thread.sleep(1000);
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  ExternalView ev = accessor.getProperty(keyBuilder.externalView(db2));
  Assert.assertEquals(ev.getPartitionSet().size(), 60);
  // The custom rebalancer assigns exactly one replica per partition.
  for (String partition : ev.getPartitionSet()) {
    Assert.assertEquals(ev.getStateMap(partition).size(), 1);
  }
  // USER_DEFINED mode: the persisted ideal state carries no assignments.
  IdealState is = accessor.getProperty(keyBuilder.idealStates(db2));
  for (String partition : is.getPartitionSet()) {
    Assert.assertEquals(is.getPreferenceList(partition).size(), 0);
    Assert.assertEquals(is.getInstanceStateMap(partition).size(), 0);
  }
  // Flags set by the custom rebalancer prove it was instantiated and called.
  Assert.assertTrue(testRebalancerCreated);
  Assert.assertTrue(testRebalancerInvoked);
}
RebalanceMode.FULL_AUTO.toString());
protected void setupDBs(ClusterSetup clusterSetup) { // Set up target db if (_numDbs > 1) { for (int i = 0; i < _numDbs; i++) { int varyNum = _partitionVary == true ? 10 * i : 0; String db = WorkflowGenerator.DEFAULT_TGT_DB + i; clusterSetup .addResourceToCluster(CLUSTER_NAME, db, _numPartitions + varyNum, MASTER_SLAVE_STATE_MODEL, IdealState.RebalanceMode.FULL_AUTO.toString()); clusterSetup.rebalanceStorageCluster(CLUSTER_NAME, db, _numReplicas); _testDbs.add(db); } } else { if (_instanceGroupTag) { clusterSetup .addResourceToCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, _numPartitions, "OnlineOffline", IdealState.RebalanceMode.FULL_AUTO.name()); IdealState idealState = clusterSetup.getClusterManagementTool().getResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB); idealState.setInstanceGroupTag("TESTTAG0"); clusterSetup.getClusterManagementTool().setResourceIdealState(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, idealState); } else { clusterSetup.addResourceToCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, _numPartitions, MASTER_SLAVE_STATE_MODEL, IdealState.RebalanceMode.FULL_AUTO.name()); } clusterSetup.rebalanceStorageCluster(CLUSTER_NAME, WorkflowGenerator.DEFAULT_TGT_DB, _numReplicas); } }
/**
 * Creates a fresh test cluster: deletes any pre-existing cluster with the same
 * name, adds {@code nodesNb} participants and {@code resourceNb} resources,
 * and optionally triggers an initial rebalance on each resource.
 *
 * @param clusterName cluster to (re)create
 * @param zkAddr ZooKeeper connect string
 * @param startPort first participant port; participant i uses startPort + i
 * @param participantNamePrefix instance name prefix ("prefix_port")
 * @param resourceNamePrefix resource name prefix ("prefixN")
 * @param resourceNb number of resources to create
 * @param partitionNb partitions per resource
 * @param nodesNb number of participant instances
 * @param replica replica count used when rebalancing
 * @param stateModelDef state model definition name
 * @param mode rebalance mode for every resource
 * @param doRebalance whether to run the initial rebalance
 * @throws Exception on any ZK or admin failure
 */
public static void setupCluster(String clusterName, String zkAddr, int startPort,
    String participantNamePrefix, String resourceNamePrefix, int resourceNb, int partitionNb,
    int nodesNb, int replica, String stateModelDef, RebalanceMode mode, boolean doRebalance)
    throws Exception {
  HelixZkClient zkClient = SharedZkClientFactory.getInstance()
      .buildZkClient(new HelixZkClient.ZkConnectionConfig(zkAddr));
  // Fix: close the ZK client even if a setup step throws (the original leaked
  // the connection on any exception before the final close()).
  try {
    if (zkClient.exists("/" + clusterName)) {
      LOG.warn("Cluster already exists:" + clusterName + ". Deleting it");
      zkClient.deleteRecursively("/" + clusterName);
    }
    ClusterSetup setupTool = new ClusterSetup(zkAddr);
    setupTool.addCluster(clusterName, true);
    for (int i = 0; i < nodesNb; i++) {
      int port = startPort + i;
      setupTool.addInstanceToCluster(clusterName, participantNamePrefix + "_" + port);
    }
    for (int i = 0; i < resourceNb; i++) {
      String resourceName = resourceNamePrefix + i;
      setupTool.addResourceToCluster(clusterName, resourceName, partitionNb, stateModelDef,
          mode.toString());
      if (doRebalance) {
        setupTool.rebalanceStorageCluster(clusterName, resourceName, replica);
      }
    }
  } finally {
    zkClient.close();
  }
}
/**
 * Test-class setup: creates the cluster, registers {@code _numDbs} SEMI_AUTO
 * MasterSlave resources (one partition each), starts participants and the
 * controller, then creates the Helix managers.
 */
@BeforeClass
public void beforeClass() throws Exception {
  _participants = new MockParticipantManager[_numNodes];
  _numPartitions = 1;
  _gSetupTool.addCluster(CLUSTER_NAME, true);
  _accessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
  _keyBuilder = _accessor.keyBuilder();
  setupParticipants();
  // Register and rebalance one DB per index before participants start serving.
  for (int i = 0; i < _numDbs; i++) {
    String db = WorkflowGenerator.DEFAULT_TGT_DB + i;
    _gSetupTool.addResourceToCluster(CLUSTER_NAME, db, _numPartitions, MASTER_SLAVE_STATE_MODEL,
        IdealState.RebalanceMode.SEMI_AUTO.toString());
    _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, db, _numReplicas);
    _testDbs.add(db);
  }
  startParticipants();
  // start controller
  String controllerName = CONTROLLER_PREFIX + "_0";
  _controller = new ClusterControllerManager(ZK_ADDR, CLUSTER_NAME, controllerName);
  _controller.syncStart();
  // NOTE(review): fixed sleep to let the controller converge before managers are
  // created — presumably a cluster verifier could replace this; confirm before changing.
  Thread.sleep(2000L);
  createManagers();
}
/**
 * Creates a deliberately unbalanced DB: all 50 partitions are pinned to the
 * blocked participant via preference lists, then the resource is switched to
 * SEMI_AUTO so those hand-written lists are honored.
 */
private void setupUnbalancedDB() throws InterruptedException {
  // Start with Full-Auto mode to create the partitions, Semi-Auto won't create partitions.
  _gSetupTool.addResourceToCluster(CLUSTER_NAME, UNBALANCED_DB_NAME, 50,
      MASTER_SLAVE_STATE_MODEL, IdealState.RebalanceMode.FULL_AUTO.toString());
  _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, UNBALANCED_DB_NAME, 1);

  // Set preference list to put all partitions to one instance.
  IdealState idealState = _gSetupTool.getClusterManagementTool()
      .getResourceIdealState(CLUSTER_NAME, UNBALANCED_DB_NAME);
  Set<String> partitions = idealState.getPartitionSet();
  for (String partition : partitions) {
    idealState.setPreferenceList(partition,
        Lists.newArrayList(_blockedParticipant.getInstanceName()));
  }
  // SEMI_AUTO makes the controller follow the preference lists set above.
  idealState.setRebalanceMode(IdealState.RebalanceMode.SEMI_AUTO);
  _gSetupTool.getClusterManagementTool().setResourceIdealState(CLUSTER_NAME, UNBALANCED_DB_NAME,
      idealState);
  Assert.assertTrue(_clusterVerifier.verifyByPolling(10000, 100));
}
}
/**
 * Writes an ideal-state rule onto the cluster config property: the simple
 * field "IdealStateRule!&lt;specId&gt;" maps to
 * "REBALANCE_MODE=&lt;mode&gt;,STATE_MODEL_DEF_REF=&lt;stateModelDefRef&gt;".
 * A placeholder config record is created when none exists yet.
 */
private void createISSpec(HelixDataAccessor accessor, String specId, String stateModelDefRef,
    RebalanceMode rebalanceMode) {
  PropertyKey configKey = accessor.keyBuilder().clusterConfig();
  HelixProperty clusterConfig = accessor.getProperty(configKey);
  if (clusterConfig == null) {
    clusterConfig = new HelixProperty("sampleClusterConfig");
  }
  String ruleKey = "IdealStateRule!" + specId;
  String ruleValue = IdealStateProperty.REBALANCE_MODE.toString() + "=" + rebalanceMode.toString()
      + "," + IdealStateProperty.STATE_MODEL_DEF_REF.toString() + "=" + stateModelDefRef;
  clusterConfig.getRecord().setSimpleField(ruleKey, ruleValue);
  accessor.setProperty(configKey, clusterConfig);
}
/**
 * Adds a resource to the cluster using the default SEMI_AUTO rebalance mode.
 * Convenience overload that delegates to the full-argument variant.
 *
 * @param clusterName cluster to add the resource to
 * @param resourceName name of the new resource
 * @param numPartitions number of partitions
 * @param stateModelRef state model definition name
 */
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
    String stateModelRef) {
  addResourceToCluster(clusterName, resourceName, numPartitions, stateModelRef,
      RebalanceMode.SEMI_AUTO.toString());
}
/**
 * Adds a resource using the default SEMI_AUTO rebalance mode and bucket size 0.
 * Convenience overload that delegates to the full-argument variant.
 *
 * @param clusterName cluster to add the resource to
 * @param resourceName name of the new resource
 * @param partitions number of partitions
 * @param stateModelRef state model definition name
 */
@Override
public void addResource(String clusterName, String resourceName, int partitions,
    String stateModelRef) {
  addResource(clusterName, resourceName, partitions, stateModelRef,
      RebalanceMode.SEMI_AUTO.toString(), 0);
}
/**
 * Creates a SEMI_AUTO resource, rebalances it, and pins every partition to the
 * given preference list.
 *
 * @param clusterSetup setup tool used for all admin operations
 * @param clusterName target cluster
 * @param dbName resource to create
 * @param preferenceList instance preference list applied to every partition
 * @param stateModelDef state model definition name
 * @param numPartition number of partitions
 * @param replica replica count for the rebalance
 */
protected void createDBInSemiAuto(ClusterSetup clusterSetup, String clusterName, String dbName,
    List<String> preferenceList, String stateModelDef, int numPartition, int replica) {
  clusterSetup.addResourceToCluster(clusterName, dbName, numPartition, stateModelDef,
      IdealState.RebalanceMode.SEMI_AUTO.toString());
  clusterSetup.rebalanceStorageCluster(clusterName, dbName, replica);

  // Consistency fix: the original fetched the ideal state through the
  // _gSetupTool field while every other call here uses the clusterSetup
  // parameter; use the parameter throughout so the helper works against
  // whichever cluster the caller targets.
  IdealState is =
      clusterSetup.getClusterManagementTool().getResourceIdealState(clusterName, dbName);
  for (String p : is.getPartitionSet()) {
    is.setPreferenceList(p, preferenceList);
  }
  clusterSetup.getClusterManagementTool().setResourceIdealState(clusterName, dbName, is);
}
/**
 * Adds a resource to the cluster using the default SEMI_AUTO rebalance mode.
 * Convenience overload that delegates to the full-argument variant.
 *
 * @param clusterName cluster to add the resource to
 * @param resourceName name of the new resource
 * @param numPartitions number of partitions
 * @param stateModelRef state model definition name
 */
public void addResourceToCluster(String clusterName, String resourceName, int numPartitions,
    String stateModelRef) {
  addResourceToCluster(clusterName, resourceName, numPartitions, stateModelRef,
      RebalanceMode.SEMI_AUTO.toString());
}
/**
 * Adds a resource using the default SEMI_AUTO rebalance mode and bucket size 0.
 * Convenience overload that delegates to the full-argument variant.
 *
 * @param clusterName cluster to add the resource to
 * @param resourceName name of the new resource
 * @param partitions number of partitions
 * @param stateModelRef state model definition name
 */
@Override
public void addResource(String clusterName, String resourceName, int partitions,
    String stateModelRef) {
  addResource(clusterName, resourceName, partitions, stateModelRef,
      RebalanceMode.SEMI_AUTO.toString(), 0);
}