PropertyKey liveInstanceKey = _keyBuilder.liveInstance(instanceName); LiveInstance liveInstance = _helixDataAccessor.getProperty(liveInstanceKey); if (liveInstance == null) {
/**
 * Logs per-table segment loading progress during server bootstrap by comparing the
 * number of segments already loaded locally against the partitions listed in this
 * instance's Helix CURRENTSTATE for the current session.
 *
 * No-op when the instance data manager is not yet available or when this instance has
 * no live-instance znode (e.g. during a session transition).
 */
private void logSegmentsLoadingInfo() {
  InstanceDataManager instanceDataManager = _serverInstance.getInstanceDataManager();
  if (instanceDataManager == null) {
    return;
  }
  HelixDataAccessor helixDataAccessor = _helixManager.getHelixDataAccessor();
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  LiveInstance liveInstance = helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceId));
  // Bug fix: the original dereferenced liveInstance unconditionally and would NPE when the
  // live-instance znode is absent (session expiry / startup race). Skip logging instead.
  if (liveInstance == null) {
    return;
  }
  String sessionId = liveInstance.getSessionId();
  List<String> tableNames = _helixAdmin.getResourcesInCluster(_helixClusterName);
  for (String tableName : tableNames) {
    PropertyKey currentStateKey = keyBuilder.currentState(_instanceId, sessionId, tableName);
    CurrentState currentState = helixDataAccessor.getProperty(currentStateKey);
    int numSegmentsLoaded = instanceDataManager.getAllSegmentsMetadata(tableName).size();
    if (currentState != null && currentState.isValid()) {
      // Partition map size = number of segments Helix expects this server to host.
      int numSegmentsToLoad = currentState.getPartitionStateMap().size();
      LOGGER.info(
          "Segments are not fully loaded during server bootstrap, current progress: table: {}, segments loading progress [ {} / {} ]",
          tableName, numSegmentsLoaded, numSegmentsToLoad);
    }
  }
}
/**
 * Computes the total number of segments Helix expects this server to host, summed over
 * all resources, by counting partitions in each per-table CURRENTSTATE for the current
 * session.
 *
 * @return total segment count, or -1 when the instance data manager is not yet available
 *         or this instance has no live-instance znode
 */
private int getNumSegmentsToLoad() {
  InstanceDataManager instanceDataManager = _serverInstance.getInstanceDataManager();
  if (instanceDataManager == null) {
    return -1;
  }
  HelixDataAccessor helixDataAccessor = _helixManager.getHelixDataAccessor();
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  // Fix: the live-instance lookup is loop-invariant — the original re-read the znode for
  // every table. Hoist it and guard against null (would previously NPE on getSessionId()).
  LiveInstance liveInstance = helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceId));
  if (liveInstance == null) {
    return -1;
  }
  String sessionId = liveInstance.getSessionId();
  int numSegmentsToLoad = 0;
  List<String> tableNames = _helixAdmin.getResourcesInCluster(_helixClusterName);
  for (String tableName : tableNames) {
    PropertyKey currentStateKey = keyBuilder.currentState(_instanceId, sessionId, tableName);
    CurrentState currentState = helixDataAccessor.getProperty(currentStateKey);
    if (currentState != null && currentState.isValid()) {
      numSegmentsToLoad += currentState.getPartitionStateMap().size();
    }
  }
  return numSegmentsToLoad;
}
accessor.keyBuilder().liveInstance(instance2))); accessor.keyBuilder().liveInstance(instance2))) { Thread.sleep(100); } else accessor.keyBuilder().liveInstance(instance2)));
// Read the live-instance znode and write it straight back. Presumably a "touch" to bump
// the znode version / trigger a LIVE_INSTANCE change callback — TODO confirm intent with
// the enclosing method (not visible in this chunk).
HelixProperty liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance)); accessor.setProperty(keyBuilder.liveInstance(instance), liveInstance);
// Fabricate a live-instance record for `node` with a fixed session id and publish it,
// simulating a participant coming online (test/setup helper — enclosing method not visible).
LiveInstance liveInstance = new LiveInstance(node); liveInstance.setSessionId("testSession"); accessor.setProperty(keyBuilder.liveInstance(node), liveInstance);
// NOTE(review): first statement starts mid-expression — the accessor assignment's LHS is
// outside this chunk. Asserts that after some teardown (not visible here) neither the
// participant's live-instance znode nor the controller leader znode remains.
new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient)); PropertyKey.Builder keyBuilder = accessor.keyBuilder(); Assert.assertNull(accessor.getProperty(keyBuilder.liveInstance("localhost_12918"))); Assert.assertNull(accessor.getProperty(keyBuilder.controllerLeader()));
public void dropInstanceFromCluster(String clusterName, String instanceId) { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); InstanceConfig instanceConfig = InstanceConfig.toInstanceConfig(instanceId); instanceId = instanceConfig.getInstanceName(); // ensure node is not live LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceId)); if (liveInstance != null) { throw new HelixException(String .format("Cannot drop instance %s as it is still live. Please stop it first", instanceId)); } InstanceConfig config = accessor.getProperty(keyBuilder.instanceConfig(instanceId)); if (config == null) { String error = "Node " + instanceId + " does not exist, cannot drop"; _logger.warn(error); throw new HelixException(error); } ClusterConfig clusterConfig = accessor.getProperty(keyBuilder.clusterConfig()); // ensure node is disabled, otherwise fail if (config.getInstanceEnabled() && (clusterConfig.getDisabledInstances() == null || !clusterConfig.getDisabledInstances().containsKey(instanceId))) { String error = "Node " + instanceId + " is enabled, cannot drop"; _logger.warn(error); throw new HelixException(error); } _admin.dropInstance(clusterName, config); }
/**
 * Drops an instance from the given cluster after verifying it is not live, exists, and
 * is disabled (either in its own config or in the cluster config's disabled-instances map).
 *
 * NOTE(review): this method is byte-identical to another dropInstanceFromCluster in this
 * chunk — likely copies in two classes; consider consolidating into a shared helper.
 *
 * @throws HelixException if the instance is still live, does not exist, or is enabled
 */
public void dropInstanceFromCluster(String clusterName, String instanceId) { ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient)); Builder keyBuilder = accessor.keyBuilder(); InstanceConfig instanceConfig = InstanceConfig.toInstanceConfig(instanceId); instanceId = instanceConfig.getInstanceName(); // ensure node is not live
LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceId)); if (liveInstance != null) { throw new HelixException(String .format("Cannot drop instance %s as it is still live. Please stop it first", instanceId)); } InstanceConfig config = accessor.getProperty(keyBuilder.instanceConfig(instanceId)); if (config == null) { String error = "Node " + instanceId + " does not exist, cannot drop"; _logger.warn(error); throw new HelixException(error); } ClusterConfig clusterConfig = accessor.getProperty(keyBuilder.clusterConfig()); // ensure node is disabled, otherwise fail
if (config.getInstanceEnabled() && (clusterConfig.getDisabledInstances() == null || !clusterConfig.getDisabledInstances().containsKey(instanceId))) { String error = "Node " + instanceId + " is enabled, cannot drop"; _logger.warn(error); throw new HelixException(error); } _admin.dropInstance(clusterName, config); }
/**
 * REST endpoint returning a JSON view of one instance: its id, its instance config, and
 * (when the instance is live) its live-instance record.
 *
 * @return 404 when the instance config does not exist; otherwise a JSON map with
 *         {@code id}, {@code config}, and {@code liveInstance} (null when not live)
 */
@GET
@Path("{instanceName}")
public Response getInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  InstanceConfig instanceConfig =
      accessor.getProperty(accessor.keyBuilder().instanceConfig(instanceName));
  // Instance config is the existence marker: no config means the instance is unknown.
  if (instanceConfig == null) {
    return notFound();
  }
  Map<String, Object> instanceMap = new HashMap<>();
  instanceMap.put(Properties.id.name(), JsonNodeFactory.instance.textNode(instanceName));
  instanceMap.put(InstanceProperties.config.name(), instanceConfig.getRecord());
  // Default to null so the field is always present even when the instance is offline.
  instanceMap.put(InstanceProperties.liveInstance.name(), null);
  LiveInstance liveInstance =
      accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName));
  if (liveInstance != null) {
    instanceMap.put(InstanceProperties.liveInstance.name(), liveInstance.getRecord());
  }
  return JSONRepresentation(instanceMap);
}
/**
 * Verifies that the extra timing/history fields written into CURRENTSTATE are populated:
 * start time is after the test started, end time is >= start time, and the recorded
 * previous state for the MASTER partition is SLAVE.
 */
@Test public void testAddedFieldsInCurrentState() { String instanceName = PARTICIPANT_PREFIX + "_" + _startPort; HelixDataAccessor accessor = _manager.getHelixDataAccessor(); // NOTE(review): assumes the participant is live; liveInstance would be null (NPE below)
// if the session expired before this test runs.
LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName)); CurrentState currentState = accessor.getProperty(accessor.keyBuilder() .currentState(instanceName, liveInstance.getSessionId(), WorkflowGenerator.DEFAULT_TGT_DB)); // Test start time should happen after test start time
Assert.assertTrue( currentState.getStartTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0") >= _testStartTime); // Test end time is always larger than start time
Assert.assertTrue( currentState.getEndTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0") >= currentState .getStartTime(WorkflowGenerator.DEFAULT_TGT_DB + "_0")); // Final state is MASTER, so SLAVE will be the previous state
Assert.assertEquals(currentState.getPreviousState(WorkflowGenerator.DEFAULT_TGT_DB + "_0"), "SLAVE"); } }
/**
 * Builds the REST representation for scheduler tasks of one instance.
 *
 * NOTE(review): currently returns an empty representation — the JSON serialization is
 * commented out. The locals {@code instances} and {@code sessionId} are computed but never
 * used (dead code), and {@code liveInstance.getSessionId()} will NPE when the instance is
 * not live. Presumably leftover scaffolding for the commented-out body — TODO confirm and
 * either finish or prune.
 */
StringRepresentation getSchedulerTasksRepresentation() throws JsonGenerationException, JsonMappingException, IOException { String clusterName = (String) getRequest().getAttributes().get("clusterName"); String instanceName = (String) getRequest().getAttributes().get("instanceName"); ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT); ClusterSetup setupTool = new ClusterSetup(zkClient); List<String> instances = setupTool.getClusterManagementTool().getInstancesInCluster(clusterName); HelixDataAccessor accessor = ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName); LiveInstance liveInstance = accessor.getProperty(accessor.keyBuilder().liveInstance(instanceName)); String sessionId = liveInstance.getSessionId(); StringRepresentation representation = new StringRepresentation("");// (ClusterRepresentationUtil.ObjectToJson(instanceConfigs), // MediaType.APPLICATION_JSON); return representation; }
/**
 * Creates {@code numLiveInstances} fake live-instance znodes (HOSTNAME_PREFIX + i with
 * session SESSION_PREFIX + i) and returns their instance names in creation order.
 *
 * @param numLiveInstances number of live instances to publish
 * @return the instance names that were registered
 */
protected List<String> setupLiveInstances(int numLiveInstances) {
  // Fix: keyBuilder was rebuilt on every loop iteration; it is loop-invariant.
  Builder keyBuilder = accessor.keyBuilder();
  List<String> instances = new ArrayList<>(numLiveInstances);
  for (int i = 0; i < numLiveInstances; i++) {
    LiveInstance liveInstance = new LiveInstance(HOSTNAME_PREFIX + i);
    liveInstance.setSessionId(SESSION_PREFIX + i);
    accessor.setProperty(keyBuilder.liveInstance(HOSTNAME_PREFIX + i), liveInstance);
    instances.add(liveInstance.getInstanceName());
  }
  return instances;
}
/**
 * Returns the Helix session id of the given instance, read from its live-instance znode.
 *
 * @return the session id, or {@code null} when the instance has no live-instance znode
 *         (previously this threw an unconditional NullPointerException on the chained
 *         {@code getProperty(...).getRecord()} call)
 */
public static String getInstanceSessionId(ZkClient zkClient, String clusterName,
    String instanceName) {
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  Builder keyBuilder = accessor.keyBuilder();
  HelixProperty liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName));
  if (liveInstance == null) {
    return null;
  }
  return liveInstance.getRecord().getSimpleField(LiveInstanceProperty.SESSION_ID.toString());
}
/**
 * Publishes a fake live-instance znode ("localhost_" + port, session "session_" + port,
 * Helix version 0.0.0) for each port in {@code liveInstances}.
 */
protected void setupLiveInstances(String clusterName, int[] liveInstances) {
  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  for (int port : liveInstances) {
    String instanceName = "localhost_" + port;
    LiveInstance liveInstance = new LiveInstance(instanceName);
    liveInstance.setSessionId("session_" + port);
    liveInstance.setHelixVersion("0.0.0");
    accessor.setProperty(keyBuilder.liveInstance(instanceName), liveInstance);
  }
}
// Fault-injection hook: deletes the live-instance znode and scrubs the cached state so the
// controller sees the instance as offline with the given last-offline timestamp.
// NOTE(review): removes the znode for `downInstance` but edits the cache for the literal
// "localhost_0" — presumably downInstance == "localhost_0"; confirm, or use downInstance
// for the cache keys too. (Anonymous class continues past this chunk: trailing `}, 1, 0,`.)
@Override public void doInject(ClusterDataCache cache) { accessor.removeProperty(accessor.keyBuilder().liveInstance(downInstance)); cache.getLiveInstances().remove("localhost_0"); cache.getInstanceOfflineTimeMap().put("localhost_0", lastOfflineTime); cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE); } }, 1, 0,
private void clearStatusUpdate(String clusterName, String instance, String resource, String partition) { // clear status update for error partition so verify() will not fail on old // errors ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient)); Builder keyBuilder = accessor.keyBuilder(); LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance)); accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition)); } // TODO: throw exception in reset()
// Verifier predicate: succeeds once the instance's live-instance znode is gone, i.e. the
// instance has fully shut down. (Anonymous class continues past this chunk; the trailing
// `}, 3 * 1000);` is presumably a 3-second wait/poll timeout passed to the verifier helper.)
@Override public boolean verify() throws Exception { LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instanceName)); return liveInstance == null; } }, 3 * 1000);
// Guard: refuse the operation while the instance still has a live-instance znode.
// (Fragment — the closing brace of this if is outside the visible chunk.)
if (_helixDataAccessor.getProperty(_keyBuilder.liveInstance(instanceName)) != null) { return PinotResourceManagerResponse.failure("Instance " + instanceName + " is still live");
/**
 * Fetches this instance's CURRENTSTATE for the given resource under its current session.
 *
 * @return the current state, or {@code null} when the instance is not live or the
 *         current-state znode does not exist (previously a missing live-instance znode
 *         caused a NullPointerException on {@code getSessionId()})
 */
@Override
protected CurrentState getState(String resourceName) {
  PropertyKey.Builder keyBuilder = _helixDataAccessor.keyBuilder();
  LiveInstance liveInstance =
      _helixDataAccessor.getProperty(keyBuilder.liveInstance(_instanceName));
  if (liveInstance == null) {
    // Null is consistent with getProperty's own missing-znode contract.
    return null;
  }
  return _helixDataAccessor.getProperty(
      keyBuilder.currentState(_instanceName, liveInstance.getSessionId(), resourceName));
}