/**
 * Reconstructs a {@code SegmentMergeLineage} from its ZNRecord representation.
 * The record id is the table name with type, the list fields hold the segment
 * group lineage, and each map field keyed by {@code LEVEL_KEY_PREFIX<level>}
 * maps group ids to a delimiter-joined segment list.
 *
 * @param record ZNRecord previously produced for a segment merge lineage
 * @return the deserialized SegmentMergeLineage
 */
public static SegmentMergeLineage fromZNRecord(ZNRecord record) {
  String tableNameWithType = record.getId();
  Map<String, List<String>> segmentGroupLineageMap = record.getListFields();

  Map<Integer, Map<String, List<String>>> groupToSegmentsMap = new HashMap<>();
  for (Map.Entry<String, Map<String, String>> levelEntry : record.getMapFields().entrySet()) {
    // Map field keys look like "<LEVEL_KEY_PREFIX><level>"; strip the prefix to get the level.
    Integer level = Integer.parseInt(levelEntry.getKey().substring(LEVEL_KEY_PREFIX.length()));
    Map<String, List<String>> segmentsForLevel = new HashMap<>();
    for (Map.Entry<String, String> groupEntry : levelEntry.getValue().entrySet()) {
      // Segments are stored as a single delimited string; split into a mutable list.
      List<String> segments = new ArrayList<>(Arrays.asList(groupEntry.getValue().split(SEGMENT_DELIMITER)));
      segmentsForLevel.put(groupEntry.getKey(), segments);
    }
    groupToSegmentsMap.put(level, segmentsForLevel);
  }
  return new SegmentMergeLineage(tableNameWithType, segmentGroupLineageMap, groupToSegmentsMap);
}
public InstanceZKMetadata(ZNRecord record) { _id = record.getId(); setInstanceConfigFromId(_id); _groupIdMap.putAll(record.getMapField(KAFKA_HIGH_LEVEL_CONSUMER_GROUP_MAP)); _partitionMap.putAll(record.getMapField(KAFKA_HIGH_LEVEL_CONSUMER_PARTITION_MAP)); }
/**
 * Locate the controller leader so that we can send LLC segment completion requests to it.
 * Checks the {@code _cachedControllerLeaderInvalid} flag and fetches the leader from Helix
 * if the cached value is invalid.
 *
 * @return the (host, port) pair of the current controller leader, or {@code null} if the
 *         leader could not be located
 */
public synchronized Pair<String, Integer> getControllerLeader() {
  if (!_cachedControllerLeaderInvalid) {
    return _controllerLeaderHostPort;
  }

  BaseDataAccessor<ZNRecord> dataAccessor = _helixManager.getHelixDataAccessor().getBaseDataAccessor();
  Stat stat = new Stat();
  try {
    ZNRecord znRecord =
        dataAccessor.get("/" + _clusterName + "/CONTROLLER/LEADER", stat, AccessOption.THROW_EXCEPTION_IFNOTEXIST);
    // Leader id has the form "<host>_<port>"; the host may itself contain '_', so split on the last one.
    String leader = znRecord.getId();
    int index = leader.lastIndexOf('_');
    String leaderHost = leader.substring(0, index);
    // parseInt avoids the needless Integer boxing of Integer.valueOf for a primitive target.
    int leaderPort = Integer.parseInt(leader.substring(index + 1));
    _controllerLeaderHostPort = new Pair<>(leaderHost, leaderPort);
    _cachedControllerLeaderInvalid = false;
    LOGGER.info("Setting controller leader to be {}:{} as per znode version {}, mtime {}", leaderHost, leaderPort,
        stat.getVersion(), stat.getMtime());
    return _controllerLeaderHostPort;
  } catch (Exception e) {
    // Keep the cache marked invalid so the next call retries the lookup.
    LOGGER.warn("Could not locate controller leader, exception", e);
    _cachedControllerLeaderInvalid = true;
    return null;
  }
}
/**
 * Rebalances all tables that belong to the given tenant.
 *
 * @param tenantName tenant name, possibly carrying an "_OFFLINE" or "_REALTIME" suffix
 * @throws Exception if rebalancing any matching table fails
 */
public void rebalanceTenantTables(String tenantName) throws Exception {
  String tableConfigPath = "/CONFIGS/TABLE";
  List<Stat> stats = new ArrayList<>();
  List<ZNRecord> tableConfigs = propertyStore.getChildren(tableConfigPath, stats, 0);
  // Strip the table-type suffix. Use replace() (literal) for both suffixes; the original mixed
  // replaceAll() (regex) and replace(), which is inconsistent and fragile.
  String rawTenantName = tenantName.replace("_OFFLINE", "").replace("_REALTIME", "");

  int nRebalances = 0;
  for (ZNRecord znRecord : tableConfigs) {
    TableConfig tableConfig;
    try {
      tableConfig = TableConfig.fromZnRecord(znRecord);
    } catch (Exception e) {
      // Include the cause so the parse failure is diagnosable.
      LOGGER.warn("Failed to parse table configuration for ZnRecord id: {}. Skipping", znRecord.getId(), e);
      continue;
    }
    if (tableConfig.getTenantConfig().getServer().equals(rawTenantName)) {
      // Parameterized logging instead of string concatenation.
      LOGGER.info("{}:{}", tableConfig.getTableName(), tableConfig.getTenantConfig().getServer());
      nRebalances++;
      rebalanceTable(tableConfig.getTableName(), tenantName);
    }
  }
  if (nRebalances == 0) {
    LOGGER.info("No tables found for tenant {}", tenantName);
  }
}
@Test public void testControllerLeaderExists() { HelixManager helixManager = mock(HelixManager.class); HelixDataAccessor helixDataAccessor = mock(HelixDataAccessor.class); BaseDataAccessor<ZNRecord> baseDataAccessor = mock(BaseDataAccessor.class); ZNRecord znRecord = mock(ZNRecord.class); final String leaderHost = "host"; final int leaderPort = 12345; when(helixManager.getHelixDataAccessor()).thenReturn(helixDataAccessor); when(helixDataAccessor.getBaseDataAccessor()).thenReturn(baseDataAccessor); when(znRecord.getId()).thenReturn(leaderHost + "_" + leaderPort); when(baseDataAccessor.get(anyString(), (Stat) any(), anyInt())).thenReturn(znRecord); when(helixManager.getClusterName()).thenReturn("myCluster"); // Create Controller Leader Locator FakeControllerLeaderLocator.create(helixManager); ControllerLeaderLocator controllerLeaderLocator = FakeControllerLeaderLocator.getInstance(); Pair<String, Integer> expectedLeaderLocation = new Pair<>(leaderHost, leaderPort); Assert.assertEquals(controllerLeaderLocator.getControllerLeader().getFirst(), expectedLeaderLocation.getFirst()); Assert.assertEquals(controllerLeaderLocator.getControllerLeader().getSecond(), expectedLeaderLocation.getSecond()); }
// NOTE(review): incomplete fragment — the enclosing method, the try/catch that defines `e`,
// and the closing braces are outside this view; code left byte-identical.
String znRecordId = tableConfigZnRecord.getId();
// Only realtime tables are of interest here (type derived from the table name suffix).
if (TableNameBuilder.getTableTypeFromTableName(znRecordId) == TableType.REALTIME) {
  TableConfig tableConfig = TableConfig.fromZnRecord(tableConfigZnRecord);
} else {
  // NOTE(review): `e` is presumably the exception from an enclosing catch block — confirm;
  // this else arm logs and skips the node so watch setup can continue.
  LOGGER.error("Caught exception while processing ZNRecord id: {}. Skipping node to continue setting watches",
      tableConfigZnRecord.getId(), e);
// NOTE(review): incomplete fragment of a Mockito stubbing sequence — the enclosing test
// method and the mock declarations are outside this view; code left byte-identical.
// Stub the leader znode id as "<host>_<port>" and route the base accessor read to it.
when(znRecord.getId()).thenReturn(leaderHost + "_" + leaderPort);
when(baseDataAccessor.get(anyString(), any(), anyInt())).thenReturn(znRecord);
when(helixManager.getClusterName()).thenReturn("testCluster");
// NOTE(review): incomplete fragment of a test body — the enclosing method, the loop's
// closing brace, and surrounding setup are outside this view; code left byte-identical.
String latestSegment = newZnRec.getId();
Map<String, String> instanceStateMap = idealState.getInstanceStateMap(latestSegment);
for (Map.Entry<String, String> entry : instanceStateMap.entrySet()) {
  // NOTE(review): the second setStatus immediately overwrites the first (DONE -> IN_PROGRESS);
  // presumably intentional test staging — confirm against the full test.
  segmentManager._metadataMap.get(newZnRec.getId()).setStatus(CommonConstants.Segment.Realtime.Status.DONE);
  segmentManager._metadataMap.get(newZnRec.getId()).setStatus(CommonConstants.Segment.Realtime.Status.IN_PROGRESS);
  String deleteSegment = newZnRec.getId();
  Map<String, String> instanceStateMap1 = idealState.getInstanceStateMap(deleteSegment);
  // Remove the new segment and put the old one back into CONSUMING state.
  idealState = idealStateBuilder.removeSegment(deleteSegment).setSegmentState(oldZnRec.getId(), "CONSUMING").build();
  nextOffset = 5000;
  committingSegmentMetadata = new LLCRealtimeSegmentZKMetadata(oldZnRec);
/**
 * Copy constructor: initializes this record from a pre-populated ZNRecord,
 * reusing the source record's id. Delegates to the (record, id) constructor.
 *
 * @param record the record to copy from
 */
public ZNRecord(ZNRecord record) {
  this(record, record.getId());
}
/**
 * Get the instance for which these stats have been collected.
 * The instance name is stored as the id of the underlying ZNRecord.
 *
 * @return instance name
 */
public String getInstanceName() {
  return _record.getId();
}
/**
 * Get the name of this cluster.
 * The cluster name is stored as the id of the underlying ZNRecord.
 *
 * @return the cluster name
 */
public String getClusterName() {
  return _record.getId();
}
}
/**
 * Get the name of this instance.
 * The instance name is stored as the id of the underlying ZNRecord.
 *
 * @return the instance name
 */
public String getInstanceName() {
  return _record.getId();
}
/**
 * Get the instance for which these stats have been collected.
 * The instance name is stored as the id of the underlying ZNRecord.
 *
 * @return instance name
 */
public String getInstanceName() {
  return _record.getId();
}
public static void dropChildren(ZkClient client, String parentPath, ZNRecord nodeRecord) { // TODO: check if parentPath exists String id = nodeRecord.getId(); String temp = parentPath + "/" + id; client.deleteRecursively(temp); }
public StateTransitionTimeoutConfig(ZNRecord record) { _resource = record.getId(); if (record.getMapFields().containsKey(StateTransitionTimeoutProperty.TIMEOUT.name())) { _timeoutMap = record.getMapField(StateTransitionTimeoutProperty.TIMEOUT.name()); } else { _timeoutMap = new HashMap<String, String>(); } }
/**
 * Persists the given health report under this instance's health-report path in ZK,
 * keyed by the record's id; logs an error if the write fails.
 */
@Override
public void reportHealthReportMessage(ZNRecord healthCheckInfoUpdate) {
  HelixDataAccessor dataAccessor = _helixManager.getHelixDataAccessor();
  Builder keyBuilder = dataAccessor.keyBuilder();
  boolean persisted = dataAccessor.setProperty(
      keyBuilder.healthReport(_instanceName, healthCheckInfoUpdate.getId()),
      new HealthStat(healthCheckInfoUpdate));
  if (!persisted) {
    LOG.error("Failed to persist health report to zk!");
  }
}
/**
 * Writes the health report for this instance to ZK (path keyed by the record id)
 * and logs an error when persisting fails.
 */
@Override
public void reportHealthReportMessage(ZNRecord healthCheckInfoUpdate) {
  HelixDataAccessor accessor = _helixManager.getHelixDataAccessor();
  HealthStat stat = new HealthStat(healthCheckInfoUpdate);
  if (!accessor.setProperty(
      accessor.keyBuilder().healthReport(_instanceName, healthCheckInfoUpdate.getId()), stat)) {
    LOG.error("Failed to persist health report to zk!");
  }
}
/**
 * Verifies that createOrReplace first creates a node and then replaces its
 * contents on a second call to the same path.
 */
@Test()
public void testCreateOrReplace() {
  String path = PropertyPathBuilder.instanceConfig(clusterName, "id8");

  // First call creates the node.
  ZNRecord original = new ZNRecord("id8");
  ZKUtil.createOrReplace(_gZkClient, path, original, true);
  ZNRecord stored = _gZkClient.readData(path);
  AssertJUnit.assertEquals("id8", stored.getId());

  // Second call replaces the existing node's content.
  ZNRecord replacement = new ZNRecord("id9");
  ZKUtil.createOrReplace(_gZkClient, path, replacement, true);
  stored = _gZkClient.readData(path);
  AssertJUnit.assertEquals("id9", stored.getId());
}