final Map<Integer, Long> sourcePartitionOffsetStart = src.getSourcePartitionOffsetStart(); final Map<Integer, Long> sourcePartitionOffsetEnd = src.getSourcePartitionOffsetEnd(); if (endOffset <= 0 || startOffset >= endOffset) {
/**
 * Creates a copy of the given {@link SourcePartition}.
 *
 * <p>The TS range and segment range references are shared with the origin,
 * while the per-partition offset maps (when present) are duplicated into
 * fresh {@code HashMap}s so the copy's maps can be mutated independently.
 *
 * @param origin the source partition to copy
 * @return a new {@code SourcePartition} mirroring {@code origin}
 */
public static SourcePartition getCopyOf(SourcePartition origin) {
    final SourcePartition copy = new SourcePartition();
    copy.setTSRange(origin.getTSRange());
    copy.setSegRange(origin.getSegRange());

    final Map<Integer, Long> startOffsets = origin.getSourcePartitionOffsetStart();
    if (startOffsets != null) {
        copy.setSourcePartitionOffsetStart(new HashMap<>(startOffsets));
    }

    final Map<Integer, Long> endOffsets = origin.getSourcePartitionOffsetEnd();
    if (endOffsets != null) {
        copy.setSourcePartitionOffsetEnd(new HashMap<>(endOffsets));
    }

    return copy;
}
}
/**
 * Appends a new segment to {@code cube} covering the range described by the
 * given source partition.
 *
 * <p>Convenience overload: unpacks the time range, segment range and the
 * per-partition start/end offset maps from {@code src} and delegates to the
 * full {@code appendSegment} overload.
 *
 * @param cube the cube instance to append a segment to
 * @param src  source partition describing the new segment's boundaries
 * @return the newly created segment
 * @throws IOException if persisting the new segment fails
 */
public CubeSegment appendSegment(CubeInstance cube, SourcePartition src) throws IOException {
    return appendSegment(cube, src.getTSRange(), src.getSegRange(), src.getSourcePartitionOffsetStart(),
            src.getSourcePartitionOffsetEnd());
}
logger.info("Get {} partitions for topic {} ", partitionInfos.size(), topic); for (PartitionInfo partitionInfo : partitionInfos) { if (result.getSourcePartitionOffsetStart().containsKey(partitionInfo.partition()) == false) { long earliest = KafkaClient.getEarliestOffset(consumer, topic, partitionInfo.partition()); logger.debug("New partition {} added, with start offset {}", partitionInfo.partition(), earliest); result.getSourcePartitionOffsetStart().put(partitionInfo.partition(), earliest); if (result.getSourcePartitionOffsetStart().containsKey(partitionId)) { if (result.getSourcePartitionOffsetStart().get(partitionId) > latestOffsets.get(partitionId)) { throw new IllegalArgumentException("Partition " + partitionId + " end offset (" + latestOffsets.get(partitionId) + ") is smaller than start offset ( " + result.getSourcePartitionOffsetStart().get(partitionId) + ")"); for (Long v : result.getSourcePartitionOffsetStart().values()) { totalStartOffset += v;
final Map<Integer, Long> sourcePartitionOffsetStart = src.getSourcePartitionOffsetStart(); final Map<Integer, Long> sourcePartitionOffsetEnd = src.getSourcePartitionOffsetEnd(); if (endOffset <= 0 || startOffset >= endOffset) {
/**
 * Creates a copy of the given {@link SourcePartition}.
 *
 * <p>The TS range and segment range references are shared with the origin,
 * while the per-partition offset maps (when present) are duplicated into
 * fresh {@code HashMap}s so the copy's maps can be mutated independently.
 *
 * @param origin the source partition to copy
 * @return a new {@code SourcePartition} mirroring {@code origin}
 */
public static SourcePartition getCopyOf(SourcePartition origin) {
    final SourcePartition copy = new SourcePartition();
    copy.setTSRange(origin.getTSRange());
    copy.setSegRange(origin.getSegRange());

    final Map<Integer, Long> startOffsets = origin.getSourcePartitionOffsetStart();
    if (startOffsets != null) {
        copy.setSourcePartitionOffsetStart(new HashMap<>(startOffsets));
    }

    final Map<Integer, Long> endOffsets = origin.getSourcePartitionOffsetEnd();
    if (endOffsets != null) {
        copy.setSourcePartitionOffsetEnd(new HashMap<>(endOffsets));
    }

    return copy;
}
}
/**
 * Appends a new segment to {@code cube} covering the range described by the
 * given source partition.
 *
 * <p>Convenience overload: unpacks the time range, segment range and the
 * per-partition start/end offset maps from {@code src} and delegates to the
 * full {@code appendSegment} overload.
 *
 * @param cube the cube instance to append a segment to
 * @param src  source partition describing the new segment's boundaries
 * @return the newly created segment
 * @throws IOException if persisting the new segment fails
 */
public CubeSegment appendSegment(CubeInstance cube, SourcePartition src) throws IOException {
    return appendSegment(cube, src.getTSRange(), src.getSegRange(), src.getSourcePartitionOffsetStart(),
            src.getSourcePartitionOffsetEnd());
}
logger.info("Get {} partitions for topic {} ", partitionInfos.size(), topic); for (PartitionInfo partitionInfo : partitionInfos) { if (result.getSourcePartitionOffsetStart().containsKey(partitionInfo.partition()) == false) { long earliest = KafkaClient.getEarliestOffset(consumer, topic, partitionInfo.partition()); logger.debug("New partition {} added, with start offset {}", partitionInfo.partition(), earliest); result.getSourcePartitionOffsetStart().put(partitionInfo.partition(), earliest); if (result.getSourcePartitionOffsetStart().containsKey(partitionId)) { if (result.getSourcePartitionOffsetStart().get(partitionId) > latestOffsets.get(partitionId)) { throw new IllegalArgumentException("Partition " + partitionId + " end offset (" + latestOffsets.get(partitionId) + ") is smaller than start offset ( " + result.getSourcePartitionOffsetStart().get(partitionId) + ")"); for (Long v : result.getSourcePartitionOffsetStart().values()) { totalStartOffset += v;