/**
 * Serializes this assignment into a {@link ZNRecord} keyed by the table name with type.
 * Parent/child group relations go into the list fields; each level's group-to-segments
 * mapping is flattened into the map fields with the segments joined into one delimited
 * string per group, keyed under {@code LEVEL_KEY_PREFIX + level}.
 */
public ZNRecord toZNRecord() {
  ZNRecord znRecord = new ZNRecord(_tableNameWithType);
  znRecord.setListFields(_parentGroupToChildrenGroupsMap);

  Map<String, Map<String, String>> flattenedLevels = new HashMap<>();
  _levelToGroupToSegmentsMap.forEach((level, groupToSegments) -> {
    Map<String, String> joinedSegmentsByGroup = new HashMap<>();
    groupToSegments.forEach((groupId, segments) ->
        joinedSegmentsByGroup.put(groupId, String.join(SEGMENT_DELIMITER, segments)));
    flattenedLevels.put(LEVEL_KEY_PREFIX + level, joinedSegmentsByGroup);
  });
  znRecord.setMapFields(flattenedLevels);
  return znRecord;
}
/**
 * Bootstraps the cluster's Helix property store layout by creating the fixed set of
 * top-level persistent ZK nodes, each holding an empty {@link ZNRecord}.
 *
 * @param helixClusterName Helix cluster whose property-store root path is derived
 * @param zkPath ZooKeeper connect string for the property store client
 */
private static void initPropertyStorePath(String helixClusterName, String zkPath) {
  String propertyStorePath = PropertyPathConfig.getPath(PropertyType.PROPERTYSTORE, helixClusterName);
  ZkHelixPropertyStore<ZNRecord> propertyStore =
      new ZkHelixPropertyStore<ZNRecord>(zkPath, new ZNRecordSerializer(), propertyStorePath);
  // Ordered so parents ("/CONFIGS") are created before their children.
  String[] initialPaths = {
      "/CONFIGS", "/CONFIGS/CLUSTER", "/CONFIGS/TABLE", "/CONFIGS/INSTANCE", "/SCHEMAS", "/SEGMENTS"
  };
  for (String path : initialPaths) {
    propertyStore.create(path, new ZNRecord(""), AccessOption.PERSISTENT);
  }
}
/**
 * Wraps the given {@link Schema} into a {@link ZNRecord} keyed by the schema name,
 * storing the full JSON representation under the "schemaJSON" simple field.
 */
public static ZNRecord toZNRecord(@Nonnull Schema schema) {
  ZNRecord schemaRecord = new ZNRecord(schema.getSchemaName());
  schemaRecord.setSimpleField("schemaJSON", schema.getJSONSchema());
  return schemaRecord;
}
@Override
public ZNRecord toZNRecord() {
  // Persist both high-level-consumer maps under this instance's id.
  ZNRecord record = new ZNRecord(getId());
  record.setMapField(KAFKA_HIGH_LEVEL_CONSUMER_GROUP_MAP, _groupIdMap);
  record.setMapField(KAFKA_HIGH_LEVEL_CONSUMER_PARTITION_MAP, _partitionMap);
  return record;
}
/**
 * Serializes the given {@link TableConfig} into a {@link ZNRecord} keyed by the table name.
 * Mandatory sub-configs are always written as JSON simple fields; quota, task and routing
 * configs are written only when present.
 *
 * @throws RuntimeException wrapping any {@link IOException} raised during JSON serialization
 */
@Nonnull
public static ZNRecord toZnRecord(@Nonnull TableConfig tableConfig) {
  Map<String, String> fields = new HashMap<>();
  fields.put(TABLE_NAME_KEY, tableConfig._tableName);
  fields.put(TABLE_TYPE_KEY, tableConfig._tableType.toString());
  try {
    // Mandatory sub-configs.
    fields.put(VALIDATION_CONFIG_KEY, JsonUtils.objectToString(tableConfig._validationConfig));
    fields.put(TENANT_CONFIG_KEY, JsonUtils.objectToString(tableConfig._tenantConfig));
    fields.put(INDEXING_CONFIG_KEY, JsonUtils.objectToString(tableConfig._indexingConfig));
    fields.put(CUSTOM_CONFIG_KEY, JsonUtils.objectToString(tableConfig._customConfig));
    // Optional sub-configs are skipped when absent.
    if (tableConfig._quotaConfig != null) {
      fields.put(QUOTA_CONFIG_KEY, JsonUtils.objectToString(tableConfig._quotaConfig));
    }
    if (tableConfig._taskConfig != null) {
      fields.put(TASK_CONFIG_KEY, JsonUtils.objectToString(tableConfig._taskConfig));
    }
    if (tableConfig._routingConfig != null) {
      fields.put(ROUTING_CONFIG_KEY, JsonUtils.objectToString(tableConfig._routingConfig));
    }
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  ZNRecord znRecord = new ZNRecord(tableConfig.getTableName());
  znRecord.setSimpleFields(fields);
  return znRecord;
}
/**
 * Ensures the Helix entries needed by a high-level-consumer (HLC) realtime table exist:
 * the Helix resource with its initial ideal state (when missing) and the property-store
 * node whose creation triggers watchers to start creating segments.
 *
 * @param config table config for the realtime table
 * @param realtimeTableName realtime table name with type suffix
 * @param idealState existing ideal state, or {@code null} if the resource must be created
 */
private void createHelixEntriesForHighLevelConsumer(TableConfig config, String realtimeTableName, IdealState idealState) {
  if (idealState == null) {
    // No resource yet: build the initial HLC ideal state and register the resource with Helix.
    idealState = PinotTableIdealStateBuilder
        .buildInitialHighLevelRealtimeIdealStateFor(realtimeTableName, config, _helixZkManager, _propertyStore,
            _enableBatchMessageMode);
    LOGGER.info("Adding helix resource with empty HLC IdealState for {}", realtimeTableName);
    _helixAdmin.addResource(_helixClusterName, realtimeTableName, idealState);
  } else {
    // TODO jfim: We get in this block if we're trying to add a HLC or it already exists. If it doesn't already exist, we need to set instance configs properly (which is done in buildInitialHighLevelRealtimeIdealState, surprisingly enough). For now, do nothing.
    LOGGER.info("Not reconfiguring HLC for table {}", realtimeTableName);
  }
  // NOTE(review): logged on both branches, including the no-op else branch above.
  LOGGER.info("Successfully created empty ideal state for high level consumer for {} ", realtimeTableName);
  // Finally, create the propertystore entry that will trigger watchers to create segments
  String tablePropertyStorePath = ZKMetadataProvider.constructPropertyStorePathForResource(realtimeTableName);
  if (!_propertyStore.exists(tablePropertyStorePath, AccessOption.PERSISTENT)) {
    _propertyStore.create(tablePropertyStorePath, new ZNRecord(realtimeTableName), AccessOption.PERSISTENT);
  }
}
/**
 * Builds a test instance {@link ZNRecord} for "Server_localhost_1234" carrying ten
 * HLC group-id and partition entries ("testRes0_REALTIME" .. "testRes9_REALTIME").
 */
private ZNRecord getTestInstanceZNRecord() {
  ZNRecord instanceRecord = new ZNRecord("Server_localhost_1234");
  Map<String, String> groupIds = new HashMap<>();
  Map<String, String> partitions = new HashMap<>();
  for (int idx = 0; idx < 10; idx++) {
    String resourceName = "testRes" + idx + "_REALTIME";
    groupIds.put(resourceName, "groupId" + idx);
    partitions.put(resourceName, "part" + idx);
  }
  instanceRecord.setMapField("KAFKA_HLC_GROUP_MAP", groupIds);
  instanceRecord.setMapField("KAFKA_HLC_PARTITION_MAP", partitions);
  return instanceRecord;
}
/**
 * Adds or updates a simple field on a {@link ZNRecord}, creating a fresh record
 * (named after the current wall-clock millis) when {@code oldRecord} is null.
 * The field is only set when both key and value are non-null.
 *
 * @param oldRecord record to update, or {@code null} to create a new one
 * @param key simple-field key; ignored when null
 * @param value simple-field value; ignored when null
 * @return the updated (or newly created) record
 */
private ZNRecord makeZNRecordWithSimpleField(ZNRecord oldRecord, String key, String value) {
  ZNRecord record;
  if (oldRecord == null) {
    record = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  } else {
    record = oldRecord;
  }
  if (key != null && value != null) {
    record.setSimpleField(key, value);
  }
  return record;
}
/**
 * Builds test {@link InstanceZKMetadata} for "Server_localhost_1234" with a single
 * time-stamped HLC group-id ("mirror") and one partition mapping ("testTable_R" -> "0").
 */
private static InstanceZKMetadata getInstanceZKMetadata() {
  Map<String, String> groupIds = new HashMap<>();
  groupIds.put("mirror", "groupId_testTable_" + System.currentTimeMillis());
  Map<String, String> partitions = new HashMap<>();
  partitions.put("testTable_R", "0");

  ZNRecord instanceRecord = new ZNRecord("Server_localhost_1234");
  instanceRecord.setMapField("KAFKA_HLC_GROUP_MAP", groupIds);
  instanceRecord.setMapField("KAFKA_HLC_PARTITION_MAP", partitions);
  return new InstanceZKMetadata(instanceRecord);
}
/**
 * Adds or updates a map field on a {@link ZNRecord}, creating a fresh record
 * (named after the current wall-clock millis) when {@code oldRecord} is null.
 * The field is only set when both the key and the map value are non-null.
 *
 * @param oldRecord record to update, or {@code null} to create a new one
 * @param mapKey map-field key; ignored when null
 * @param mapValue map-field value; ignored when null
 * @return the updated (or newly created) record
 */
private ZNRecord makeZNRecordWithMapField(ZNRecord oldRecord, String mapKey, Map<String, String> mapValue) {
  ZNRecord record;
  if (oldRecord == null) {
    record = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  } else {
    record = oldRecord;
  }
  if (mapKey != null && mapValue != null) {
    record.setMapField(mapKey, mapValue);
  }
  return record;
}
/**
 * Persists a replica-group partition assignment into the property store as a
 * {@link ZNRecord} (list fields hold the partition-to-instances mapping), stored
 * under the instance-partitions path for the assignment's table.
 *
 * @param partitionAssignment the replica-group partition assignment to persist
 */
public void writeReplicaGroupPartitionAssignment(ReplicaGroupPartitionAssignment partitionAssignment) {
  String tableNameWithType = partitionAssignment.getTableName();
  String propertyStorePath =
      ZKMetadataProvider.constructPropertyStorePathForInstancePartitions(tableNameWithType);
  ZNRecord assignmentRecord = new ZNRecord(tableNameWithType);
  assignmentRecord.setListFields(partitionAssignment.getPartitionToInstances());
  _propertyStore.set(propertyStorePath, assignmentRecord, AccessOption.PERSISTENT);
}
/**
 * Persists the cluster-level tenant-isolation flag in the property store, reusing the
 * existing controller-config record when present or creating a fresh one otherwise.
 *
 * @param propertyStore property store to write to
 * @param isSingleTenantCluster whether the cluster runs in single-tenant (isolation) mode
 */
public static void setClusterTenantIsolationEnabled(ZkHelixPropertyStore<ZNRecord> propertyStore,
    boolean isSingleTenantCluster) {
  String path = constructPropertyStorePathForControllerConfig(CLUSTER_TENANT_ISOLATION_ENABLED_KEY);
  ZNRecord znRecord = null;
  if (propertyStore.exists(path, AccessOption.PERSISTENT)) {
    znRecord = propertyStore.get(path, null, AccessOption.PERSISTENT);
  }
  // Guard against the node disappearing between exists() and get() (get() would return
  // null and the setBooleanField() below would NPE); fall back to a fresh record.
  if (znRecord == null) {
    znRecord = new ZNRecord(CLUSTER_TENANT_ISOLATION_ENABLED_KEY);
  }
  znRecord.setBooleanField(CLUSTER_TENANT_ISOLATION_ENABLED_KEY, isSingleTenantCluster);
  propertyStore.set(path, znRecord, AccessOption.PERSISTENT);
}
/**
 * Tests reading a {@link ZNRecord} from {@link HelixPropertyStore} whose account map mixes
 * one valid account record with one invalid record. Such a {@link ZNRecord} is malformed,
 * so fetch/update operations must fail and none of the records may be read.
 * @throws Exception Any unexpected exception.
 */
@Test
public void testReadBadZNRecordCase6() throws Exception {
  // One well-formed account plus one corrupted entry under the next account id.
  Map<String, String> mixedAccounts = new HashMap<>();
  mixedAccounts.put(String.valueOf(refAccount.getId()), refAccount.toJson(true).toString());
  mixedAccounts.put(String.valueOf(refAccount.getId() + 1), BAD_ACCOUNT_METADATA_STRING);

  ZNRecord badRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  badRecord.setMapField(ACCOUNT_METADATA_MAP_KEY, mixedAccounts);
  updateAndWriteZNRecord(badRecord, false);
}
private void addAndRemoveNewInstanceConfig(ZkClient zkClient) throws Exception { int biggerRandomNumber = NUM_INSTANCES + new Random().nextInt(NUM_INSTANCES); String instanceName = "Server_localhost_" + String.valueOf(biggerRandomNumber); String instanceConfigPath = PropertyPathBuilder.instanceConfig(_helixClusterName, instanceName); Assert.assertFalse(zkClient.exists(instanceConfigPath)); List<String> instances = _helixResourceManager.getAllInstances(); Assert.assertFalse(instances.contains(instanceName)); // Add new ZNode. ZNRecord znRecord = new ZNRecord(instanceName); zkClient.createPersistent(instanceConfigPath, znRecord); List<String> latestAllInstances = _helixResourceManager.getAllInstances(); long maxTime = System.currentTimeMillis() + MAX_TIMEOUT_IN_MILLISECOND; while (!latestAllInstances.contains(instanceName) && System.currentTimeMillis() < maxTime) { Thread.sleep(100L); latestAllInstances = _helixResourceManager.getAllInstances(); } Assert.assertTrue(System.currentTimeMillis() < maxTime, "Timeout when waiting for adding instance config"); // Remove new ZNode. zkClient.delete(instanceConfigPath); latestAllInstances = _helixResourceManager.getAllInstances(); maxTime = System.currentTimeMillis() + MAX_TIMEOUT_IN_MILLISECOND; while (latestAllInstances.contains(instanceName) && System.currentTimeMillis() < maxTime) { Thread.sleep(100L); latestAllInstances = _helixResourceManager.getAllInstances(); } Assert.assertTrue(System.currentTimeMillis() < maxTime, "Timeout when waiting for removing instance config"); }
/** * Pre-populates a collection of {@link Account}s to the underlying {@link org.apache.helix.store.HelixPropertyStore} * using {@link com.github.ambry.clustermap.HelixStoreOperator} (not through the {@link HelixAccountService}). This method * does not check any conflict among the {@link Account}s to write. * @throws Exception Any unexpected exception. */ private void writeAccountsToHelixPropertyStore(Collection<Account> accounts, boolean shouldNotify) throws Exception { HelixStoreOperator storeOperator = new HelixStoreOperator(mockHelixAccountServiceFactory.getHelixStore(ZK_CONNECT_STRING, storeConfig)); ZNRecord zNRecord = new ZNRecord(String.valueOf(System.currentTimeMillis())); Map<String, String> accountMap = new HashMap<>(); for (Account account : accounts) { accountMap.put(String.valueOf(account.getId()), account.toJson(true).toString()); } zNRecord.setMapField(ACCOUNT_METADATA_MAP_KEY, accountMap); // Write account metadata into HelixPropertyStore. storeOperator.write(HelixAccountService.FULL_ACCOUNT_METADATA_PATH, zNRecord); if (shouldNotify) { notifier.publish(ACCOUNT_METADATA_CHANGE_TOPIC, FULL_ACCOUNT_METADATA_CHANGE_MESSAGE); } }
externalView.setState("myTable_1", "pinot2", "ONLINE");
// Build segment metadata for segment "myTable_0"; note the table-name field uses the
// type-suffixed name "myTable_OFFLINE" while the record id is the bare segment name.
ZNRecord znrecord = new ZNRecord("myTable_0");
znrecord.setSimpleField(CommonConstants.Segment.SEGMENT_NAME, "myTable_0");
znrecord.setSimpleField(CommonConstants.Segment.TABLE_NAME, "myTable_OFFLINE");
@Override
public ZNRecord toZNRecord() {
  // Serialize this segment's metadata into a ZNRecord keyed by the segment name.
  // NOTE(review): method continues beyond this view — remaining fields not shown here.
  ZNRecord znRecord = new ZNRecord(_segmentName);
  znRecord.setSimpleField(CommonConstants.Segment.SEGMENT_NAME, _segmentName);
  znRecord.setSimpleField(CommonConstants.Segment.TABLE_NAME, _tableName);
/**
 * Builds a {@link ZNRecord} describing a completed (DONE) realtime segment covering
 * hours 1000-2000 with fixed test values for docs, CRC, creation time and flush thresholds.
 */
private ZNRecord getTestDoneRealtimeSegmentZNRecord() {
  String doneSegmentName = "testTable_R_1000_2000_groupId0_part0";
  ZNRecord segmentRecord = new ZNRecord(doneSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.SEGMENT_NAME, doneSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.TABLE_NAME, "testTable");
  segmentRecord.setSimpleField(CommonConstants.Segment.INDEX_VERSION, "v1");
  segmentRecord.setEnumField(CommonConstants.Segment.SEGMENT_TYPE, CommonConstants.Segment.SegmentType.REALTIME);
  segmentRecord.setEnumField(CommonConstants.Segment.Realtime.STATUS, CommonConstants.Segment.Realtime.Status.DONE);
  // Completed segment: both time bounds and all stats are populated.
  segmentRecord.setLongField(CommonConstants.Segment.START_TIME, 1000);
  segmentRecord.setLongField(CommonConstants.Segment.END_TIME, 2000);
  segmentRecord.setSimpleField(CommonConstants.Segment.TIME_UNIT, TimeUnit.HOURS.toString());
  segmentRecord.setLongField(CommonConstants.Segment.TOTAL_DOCS, 10000);
  segmentRecord.setLongField(CommonConstants.Segment.CRC, 1234);
  segmentRecord.setLongField(CommonConstants.Segment.CREATION_TIME, 3000);
  segmentRecord.setIntField(CommonConstants.Segment.FLUSH_THRESHOLD_SIZE, 1234);
  segmentRecord.setSimpleField(CommonConstants.Segment.FLUSH_THRESHOLD_TIME, "6h");
  return segmentRecord;
}
/**
 * Builds a {@link ZNRecord} describing an IN_PROGRESS realtime segment: the start time is
 * known (1000 hours) while end time, total docs and CRC are sentinel -1 values since the
 * segment has not been committed yet.
 */
private ZNRecord getTestInProgressRealtimeSegmentZNRecord() {
  String inProgressSegmentName = "testTable_R_1000_groupId0_part0";
  ZNRecord segmentRecord = new ZNRecord(inProgressSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.SEGMENT_NAME, inProgressSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.TABLE_NAME, "testTable");
  segmentRecord.setSimpleField(CommonConstants.Segment.INDEX_VERSION, "v1");
  segmentRecord.setEnumField(CommonConstants.Segment.SEGMENT_TYPE, CommonConstants.Segment.SegmentType.REALTIME);
  segmentRecord.setEnumField(CommonConstants.Segment.Realtime.STATUS,
      CommonConstants.Segment.Realtime.Status.IN_PROGRESS);
  segmentRecord.setLongField(CommonConstants.Segment.START_TIME, 1000);
  // -1 sentinels: unknown until the segment completes.
  segmentRecord.setLongField(CommonConstants.Segment.END_TIME, -1);
  segmentRecord.setSimpleField(CommonConstants.Segment.TIME_UNIT, TimeUnit.HOURS.toString());
  segmentRecord.setLongField(CommonConstants.Segment.TOTAL_DOCS, -1);
  segmentRecord.setLongField(CommonConstants.Segment.CRC, -1);
  segmentRecord.setLongField(CommonConstants.Segment.CREATION_TIME, 1000);
  segmentRecord.setIntField(CommonConstants.Segment.FLUSH_THRESHOLD_SIZE, 1234);
  segmentRecord.setSimpleField(CommonConstants.Segment.FLUSH_THRESHOLD_TIME, "6h");
  return segmentRecord;
}
/**
 * Builds a {@link ZNRecord} describing an OFFLINE segment with fixed test values,
 * including offline-only fields: crypter name, download URL, push time and refresh time.
 */
private ZNRecord getTestOfflineSegmentZNRecord() {
  String offlineSegmentName = "testTable_O_3000_4000";
  ZNRecord segmentRecord = new ZNRecord(offlineSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.SEGMENT_NAME, offlineSegmentName);
  segmentRecord.setSimpleField(CommonConstants.Segment.TABLE_NAME, "testTable");
  segmentRecord.setSimpleField(CommonConstants.Segment.CRYPTER_NAME, "testCrypter");
  segmentRecord.setSimpleField(CommonConstants.Segment.INDEX_VERSION, "v1");
  segmentRecord.setEnumField(CommonConstants.Segment.SEGMENT_TYPE, CommonConstants.Segment.SegmentType.OFFLINE);
  segmentRecord.setLongField(CommonConstants.Segment.START_TIME, 1000);
  segmentRecord.setLongField(CommonConstants.Segment.END_TIME, 2000);
  segmentRecord.setSimpleField(CommonConstants.Segment.TIME_UNIT, TimeUnit.HOURS.toString());
  segmentRecord.setLongField(CommonConstants.Segment.TOTAL_DOCS, 50000);
  segmentRecord.setLongField(CommonConstants.Segment.CRC, 54321);
  segmentRecord.setLongField(CommonConstants.Segment.CREATION_TIME, 1000);
  // Offline-specific download/push/refresh metadata.
  segmentRecord.setSimpleField(CommonConstants.Segment.Offline.DOWNLOAD_URL,
      "http://localhost:8000/testTable_O_3000_4000");
  segmentRecord.setLongField(CommonConstants.Segment.Offline.PUSH_TIME, 4000);
  segmentRecord.setLongField(CommonConstants.Segment.Offline.REFRESH_TIME, 8000);
  return segmentRecord;
}