public InstanceZKMetadata(ZNRecord record) { _id = record.getId(); setInstanceConfigFromId(_id); _groupIdMap.putAll(record.getMapField(KAFKA_HIGH_LEVEL_CONSUMER_GROUP_MAP)); _partitionMap.putAll(record.getMapField(KAFKA_HIGH_LEVEL_CONSUMER_PARTITION_MAP)); }
public StateTransitionTimeoutConfig(ZNRecord record) { _resource = record.getId(); if (record.getMapFields().containsKey(StateTransitionTimeoutProperty.TIMEOUT.name())) { _timeoutMap = record.getMapField(StateTransitionTimeoutProperty.TIMEOUT.name()); } else { _timeoutMap = new HashMap<String, String>(); } }
// NOTE(review): fragment of a larger method (start/end not visible here) — reads the
// account-metadata map field from a ZNRecord fetched from ZooKeeper. A missing record
// or a missing ACCOUNT_METADATA_MAP_KEY map field is logged at debug level rather than
// thrown; presumably the caller treats either case as "no remote accounts" — TODO confirm.
logger.debug("The ZNRecord to read does not exist on path={}", pathToFullAccountMetadata); } else { Map<String, String> remoteAccountMap = zNRecord.getMapField(ACCOUNT_METADATA_MAP_KEY); if (remoteAccountMap == null) { logger.debug("ZNRecord={} to read on path={} does not have a simple map with key={}", zNRecord,
/**
 * No-downtime rebalance where the target replica set shares no hosts with the
 * current assignment: after one update step the assignment must be target-sized
 * and every assigned instance must come from either the old or the new set.
 */
@Test
public void noDowntimeUpdateWithNoCommonElements() {
  Map<String, String> desiredState = new HashMap<>();
  desiredState.put("host4", "ONLINE");
  desiredState.put("host3", "ONLINE");
  Map<String, String> currentState = current.getRecord().getMapField(segmentId);
  TableRebalancer rebalancer = new TableRebalancer(null, null, null);
  rebalancer.updateSegmentIfNeeded(segmentId, currentState, desiredState, current, noDowntime);
  Map<String, String> resultState = current.getRecord().getMapField(segmentId);
  Set<String> targetHosts = new HashSet<String>(Arrays.asList("host3", "host4"));
  Set<String> srcHosts = new HashSet<String>(Arrays.asList("host1", "host2"));
  Assert.assertEquals(resultState.size(), targetHosts.size());
  for (String instance : resultState.keySet()) {
    Assert.assertTrue(targetHosts.contains(instance) || srcHosts.contains(instance));
  }
}
/**
 * Downtime rebalance where target and current replica sets share a host:
 * the segment assignment is replaced with the target map in a single step.
 */
@Test
public void downtimeUpdateWithCommonElements() {
  Map<String, String> desiredState = new HashMap<>();
  desiredState.put("host1", "ONLINE");
  desiredState.put("host3", "ONLINE");
  Map<String, String> currentState = current.getRecord().getMapField(segmentId);
  Assert.assertEquals(currentState.size(), 2);
  TableRebalancer rebalancer = new TableRebalancer(null, null, null);
  rebalancer.updateSegmentIfNeeded(segmentId, currentState, desiredState, current, downtime);
  Map<String, String> resultState = current.getRecord().getMapField(segmentId);
  Assert.assertTrue(EqualityUtils.isEqual(resultState, desiredState));
}
/**
 * No-downtime rebalance where target and current replica sets share a host:
 * a single update step is sufficient, so the assignment must already equal
 * the target map afterwards.
 */
@Test
public void noDowntimeUpdateWithCommonElements() {
  Map<String, String> desiredState = new HashMap<>();
  desiredState.put("host1", "ONLINE");
  desiredState.put("host3", "ONLINE");
  Map<String, String> currentState = current.getRecord().getMapField(segmentId);
  Assert.assertEquals(currentState.size(), 2);
  TableRebalancer rebalancer = new TableRebalancer(null, null, null);
  rebalancer.updateSegmentIfNeeded(segmentId, currentState, desiredState, current, noDowntime);
  Map<String, String> resultState = current.getRecord().getMapField(segmentId);
  Assert.assertTrue(EqualityUtils.isEqual(resultState, desiredState));
}
/**
 * Downtime rebalance where the target replica set shares no hosts with the
 * current assignment: the assignment is still replaced with the target map
 * in one step, since downtime permits dropping all current replicas.
 */
@Test
public void downtimeUpdateWithNoCommonElements() {
  Map<String, String> desiredState = new HashMap<>();
  desiredState.put("host4", "ONLINE");
  desiredState.put("host3", "ONLINE");
  Map<String, String> currentState = current.getRecord().getMapField(segmentId);
  TableRebalancer rebalancer = new TableRebalancer(null, null, null);
  rebalancer.updateSegmentIfNeeded(segmentId, currentState, desiredState, current, downtime);
  Map<String, String> resultState = current.getRecord().getMapField(segmentId);
  Assert.assertTrue(EqualityUtils.isEqual(resultState, desiredState));
}
}
// NOTE(review): fragment of a larger update method (start/end not visible here) — takes
// the ZNRecord read from ZooKeeper as the record to mutate, then pulls its account map
// field; a null map (field absent) is logged and, presumably, a fresh map is created by
// the code that follows this fragment — TODO confirm against the full method.
recordToUpdate = recordFromZk; Map<String, String> accountMap = recordToUpdate.getMapField(ACCOUNT_METADATA_MAP_KEY); if (accountMap == null) { logger.debug("AccountMap does not exist in ZNRecord when updating accounts. Creating a new accountMap");
/**
 * Deserializes segment metadata from its ZNRecord: simple fields (name, table, crypter,
 * index version), enum fields with defaults (segment type OFFLINE, time unit DAYS — the
 * latter only when TIME_UNIT is present and not the NULL sentinel), long fields defaulting
 * to -1 when absent (start/end time, total docs, CRC, creation time, upload start time),
 * the optional JSON-encoded partition metadata (a parse failure is logged and the partition
 * info dropped rather than failing construction), and the custom map field.
 */
public SegmentZKMetadata(ZNRecord znRecord) { _segmentName = znRecord.getSimpleField(CommonConstants.Segment.SEGMENT_NAME); _tableName = znRecord.getSimpleField(CommonConstants.Segment.TABLE_NAME); _crypterName = znRecord.getSimpleField(CommonConstants.Segment.CRYPTER_NAME); _segmentType = znRecord.getEnumField(CommonConstants.Segment.SEGMENT_TYPE, SegmentType.class, SegmentType.OFFLINE); _startTime = znRecord.getLongField(CommonConstants.Segment.START_TIME, -1); _endTime = znRecord.getLongField(CommonConstants.Segment.END_TIME, -1); if (znRecord.getSimpleFields().containsKey(CommonConstants.Segment.TIME_UNIT) && !znRecord .getSimpleField(CommonConstants.Segment.TIME_UNIT).equals(NULL)) { setTimeUnit(znRecord.getEnumField(CommonConstants.Segment.TIME_UNIT, TimeUnit.class, TimeUnit.DAYS)); } _indexVersion = znRecord.getSimpleField(CommonConstants.Segment.INDEX_VERSION); _totalRawDocs = znRecord.getLongField(CommonConstants.Segment.TOTAL_DOCS, -1); _crc = znRecord.getLongField(CommonConstants.Segment.CRC, -1); _creationTime = znRecord.getLongField(CommonConstants.Segment.CREATION_TIME, -1); try { String partitionMetadataJson = znRecord.getSimpleField(CommonConstants.Segment.PARTITION_METADATA); if (partitionMetadataJson != null) { _partitionMetadata = SegmentPartitionMetadata.fromJsonString(partitionMetadataJson); } } catch (IOException e) { LOGGER.error( "Exception caught while reading partition info from zk metadata for segment '{}', partition info dropped.", _segmentName, e); } _segmentUploadStartTime = znRecord.getLongField(CommonConstants.Segment.SEGMENT_UPLOAD_START_TIME, -1); _customMap = znRecord.getMapField(CommonConstants.Segment.CUSTOM_MAP); }
/**
 * Looks up a single statistic by name.
 *
 * @param statName the statistic to look up
 * @return map of (stat attribute, value); may be null when the stat is absent
 *         from the record — TODO confirm against ZNRecord.getMapField contract
 */
public Map<String, String> getStatFields(String statName) {
  Map<String, String> statAttributes = _record.getMapField(statName);
  return statAttributes;
}
/**
 * Fetches the statistics recorded under the given group name.
 *
 * @param statName the name of the statistic group
 * @return property→value mapping for the statistic; may be null when the
 *         group is absent — TODO confirm against ZNRecord.getMapField contract
 */
public Map<String, String> getStatFields(String statName) {
  Map<String, String> properties = _record.getMapField(statName);
  return properties;
}
/**
 * Updater that lazily creates the map field for {@code task} and records
 * the {@code key}→{@code value} pair in it. Fetches the map field once
 * instead of the original's two lookups; ZNRecord map fields are stored by
 * reference, so mutating the local after setMapField updates the record.
 */
@Override
public ZNRecord update(ZNRecord znRecord) {
  Map<String, String> taskMap = znRecord.getMapField(task);
  if (taskMap == null) {
    taskMap = new HashMap<String, String>();
    znRecord.setMapField(task, taskMap);
  }
  taskMap.put(key, value);
  return znRecord;
}
}, AccessOption.PERSISTENT);
/**
 * Remove task quota with the given quota type.
 *
 * @param quotaType quota type to delete from the QUOTA_TYPES map field;
 *                  a no-op when that map field is absent
 */
public void removeTaskQuotaRatio(String quotaType) {
  Map<String, String> quotaTypes = _record.getMapField(ClusterConfigProperty.QUOTA_TYPES.name());
  if (quotaTypes != null) {
    quotaTypes.remove(quotaType);
  }
}
/**
 * Get the resource quota map of this live instance. "Resource" here means
 * compute / storage / network capacity the instance has — e.g. thread count,
 * CPU cores, heap size — not a Helix resource.
 *
 * @return resource quota map (key=resourceName, value=quota); may be null
 *         when the RESOURCE_CAPACITY field is not set
 */
public Map<String, String> getResourceCapacityMap() {
  Map<String, String> capacityMap =
      _record.getMapField(LiveInstanceProperty.RESOURCE_CAPACITY.name());
  return capacityMap;
}
/**
 * Returns the configured capacity for a participant, falling back to the
 * default capacity when the participant has no map field or no CAPACITY entry.
 *
 * @param participant participant name whose map field holds the CAPACITY value
 * @return the participant's capacity, or the default when not configured
 */
int getCapacity(String participant) {
  Map<String, String> participantMap = _record.getMapField(participant);
  if (participantMap == null || !participantMap.containsKey(CAPACITY)) {
    return getDefaultCapacity();
  }
  return Integer.parseInt(participantMap.get(CAPACITY));
}
/**
 * Returns the participant's map field, creating and registering an empty
 * one on the record when it does not exist yet.
 *
 * @param participant participant name used as the map-field key
 * @return the existing or newly created (and stored) participant map
 */
private Map<String, String> getOrAddParticipantMap(String participant) {
  Map<String, String> existing = _record.getMapField(participant);
  if (existing != null) {
    return existing;
  }
  Map<String, String> created = new HashMap<>();
  _record.setMapField(participant, created);
  return created;
}
/**
 * Returns the recorded job start times, parsed from the StartTime map field.
 *
 * @return job→start-time map; empty (never null) when no start times are recorded
 */
public Map<String, Long> getJobStartTimes() {
  Map<String, Long> startTimes = new HashMap<String, Long>();
  Map<String, String> rawTimes = _record.getMapField(WorkflowContextProperties.StartTime.name());
  if (rawTimes != null) {
    for (Map.Entry<String, String> entry : rawTimes.entrySet()) {
      startTimes.put(entry.getKey(), Long.valueOf(entry.getValue()));
    }
  }
  return startTimes;
}
/**
 * Sets the weight for one partition of a resource, creating the resource's
 * weight map field on the record if it does not exist yet.
 *
 * @param resource resource whose map field stores per-partition weights
 * @param partition partition name used as the map key
 * @param weight weight value, stored as its decimal string form
 */
void setPartitionWeight(String resource, String partition, int weight) {
  Map<String, String> partitionWeightMap = _record.getMapField(resource);
  if (partitionWeightMap == null) {
    partitionWeightMap = new HashMap<>();
    _record.setMapField(resource, partitionWeightMap);
  }
  // Integer.toString avoids the deprecated Integer(int) boxing constructor.
  partitionWeightMap.put(partition, Integer.toString(weight));
}
/**
 * Extracts the reply payload from the message's MESSAGE_RESULT map field,
 * dropping the framework-internal "origin" and "type" entries.
 *
 * <p>Uses a plain HashMap instead of the original double-brace initialization:
 * the anonymous HashMap subclass retains a hidden reference to the enclosing
 * instance (leak hazard) and breaks equals/serialization expectations.
 *
 * @param message reply message whose record carries the MESSAGE_RESULT map
 * @return mutable copy of the result map without the "origin"/"type" keys
 */
private Map<String, String> getMessageContentFromReply(final Message message) {
  Map<String, String> content = new HashMap<String, String>();
  Map<String, String> result =
      message.getRecord().getMapField(Message.Attributes.MESSAGE_RESULT.toString());
  for (Map.Entry<String, String> field : result.entrySet()) {
    String key = field.getKey();
    if (!key.equals("origin") && !key.equals("type")) {
      content.put(key, field.getValue());
    }
  }
  return content;
}
/**
 * Returns the recorded state of a job from the JOB_STATES map field.
 *
 * @param job job name to look up
 * @return the job's TaskState, or null when no states are recorded or the
 *         job has no entry
 */
public TaskState getJobState(String job) {
  Map<String, String> jobStates = _record.getMapField(WorkflowContextProperties.JOB_STATES.name());
  if (jobStates == null) {
    return null;
  }
  String stateName = jobStates.get(job);
  return stateName == null ? null : TaskState.valueOf(stateName);
}