private boolean isInIsr(Integer leader, Cluster cluster, TopicPartition tp) {
    return Arrays.stream(cluster.partition(tp).inSyncReplicas())
                 .anyMatch(node -> node.id() == leader);
}
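// A minimal sketch (not from the source) of the same leader-in-ISR check written without
// streams, which avoids stream allocation on a hot path. The helper name isLeaderInIsr
// is hypothetical.
private boolean isLeaderInIsr(Integer leader, Cluster cluster, TopicPartition tp) {
    for (Node node : cluster.partition(tp).inSyncReplicas()) {
        if (node.id() == leader) {
            return true;
        }
    }
    return false;
}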
PartitionInfo currPartInfo =
    curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition()));
if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) {
    return true;
new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(100), replicas, isr, offlineReplicas));
metadata.update(metadataResponse, 10L);
assertNotNull(metadata.fetch().partition(tp));
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

assertNotNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().partitionCountForTopic("topic-1").longValue(), 5);
assertFalse(metadata.partitionInfoIfCurrent(tp).isPresent());

assertNotNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().partitionCountForTopic("topic-1").longValue(), 5);
assertFalse(metadata.partitionInfoIfCurrent(tp).isPresent());

new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(101), replicas, isr, offlineReplicas));
metadata.update(metadataResponse, 30L);
assertNotNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().partitionCountForTopic("topic-1").longValue(), 5);
assertTrue(metadata.partitionInfoIfCurrent(tp).isPresent());

assertNull(metadata.fetch().partition(tp));
assertNull(metadata.fetch().partitionCountForTopic("topic-1"));
assertFalse(metadata.partitionInfoIfCurrent(tp).isPresent());
/**
 * Check if the partition is currently under-replicated.
 *
 * @param cluster The current cluster state.
 * @param tp The topic partition to check.
 * @return True if the partition is currently under-replicated.
 */
public static boolean isPartitionUnderReplicated(Cluster cluster, TopicPartition tp) {
    PartitionInfo partitionInfo = cluster.partition(tp);
    return partitionInfo.inSyncReplicas().length != partitionInfo.replicas().length;
}
}
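// A self-contained sketch (an assumption, not from the source) showing how
// isPartitionUnderReplicated could be exercised against a hand-built Cluster, assuming the
// five-argument Cluster constructor available in recent Kafka clients. A partition with two
// assigned replicas but only one in-sync replica is under-replicated.
Node n0 = new Node(0, "localhost", 9092);
Node n1 = new Node(1, "localhost", 9093);
PartitionInfo info = new PartitionInfo("topic-1", 0, n0,
                                       new Node[]{n0, n1},  // assigned replicas
                                       new Node[]{n0});     // in-sync replicas
Cluster cluster = new Cluster("test-cluster",
                              Arrays.asList(n0, n1),
                              Collections.singleton(info),
                              Collections.emptySet(),
                              Collections.emptySet());
// Expected: true, because |ISR| = 1 != |replicas| = 2.
boolean underReplicated = isPartitionUnderReplicated(cluster, new TopicPartition("topic-1", 0));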
} else {
    PartitionInfo previousInfo = cache.cluster().partition(tp);
    if (previousInfo != null) {
        partitionInfoConsumer.accept(previousInfo);
/**
 * For each proposal, create a replica action task if replica(s) need to be moved to reach the
 * expected final proposal state.
 *
 * @param proposals Execution proposals.
 * @param cluster Kafka cluster state.
 */
private void maybeAddReplicaMovementTasks(Collection<ExecutionProposal> proposals, Cluster cluster) {
    for (ExecutionProposal proposal : proposals) {
        TopicPartition tp = proposal.topicPartition();
        PartitionInfo partitionInfo = cluster.partition(tp);
        if (partitionInfo == null) {
            LOG.trace("Ignored the attempt to move non-existing partition for topic partition: {}", tp);
            continue;
        }
        if (!proposal.isCompletedSuccessfully(partitionInfo.replicas())) {
            long replicaActionExecutionId = _executionId++;
            ExecutionTask executionTask = new ExecutionTask(replicaActionExecutionId, proposal, REPLICA_ACTION);
            _remainingReplicaMovements.add(executionTask);
            _remainingDataToMove += proposal.dataToMoveInMB();
            LOG.trace("Added action {} as replica proposal {}", replicaActionExecutionId, proposal);
        }
    }
    _partMoveTaskByBrokerId = _replicaMovementTaskStrategy.applyStrategy(_remainingReplicaMovements, cluster);
}
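// A hedged sketch (not in the source) of the same completion filter in isolation: it counts
// proposals that still require replica movement, using only calls that appear above
// (cluster.partition, isCompletedSuccessfully). The variable names are illustrative.
int pendingMovements = 0;
for (ExecutionProposal proposal : proposals) {
    PartitionInfo partitionInfo = cluster.partition(proposal.topicPartition());
    // Skip partitions that no longer exist, mirroring the null check above.
    if (partitionInfo != null && !proposal.isCompletedSuccessfully(partitionInfo.replicas())) {
        pendingMovements++;
    }
}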
for (ExecutionTask task : _executionTaskManager.inExecutionTasks()) {
    TopicPartition tp = task.proposal().topicPartition();
    if (cluster.partition(tp) == null) {
/**
 * For a replica action, the completion depends on the task state:
 * IN_PROGRESS: done when the current replica list is the same as the new replica list.
 * ABORTING: done when the current replica list is the same as the old replica list. Due to a race
 *           condition, we also consider it done if the current replica list is the same as the new
 *           replica list.
 * DEAD: always considered done because we neither move forward nor roll back.
 *
 * No other task state should be seen here.
 */
private boolean isReplicaActionDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
    Node[] currentOrderedReplicas = cluster.partition(tp).replicas();
    switch (task.state()) {
        case IN_PROGRESS:
            return task.proposal().isCompletedSuccessfully(currentOrderedReplicas);
        case ABORTING:
            return task.proposal().isAborted(currentOrderedReplicas);
        case DEAD:
            return true;
        default:
            throw new IllegalStateException("Should never be here. State " + task.state());
    }
}
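// A hypothetical illustration (not the library's implementation) of the replica-list comparison
// the Javadoc above describes: a task is "done" when the current ordered replica list matches the
// expected broker ids, in order. sameReplicas is an assumed helper name.
private static boolean sameReplicas(Node[] currentOrderedReplicas, List<Integer> expectedBrokerIds) {
    if (currentOrderedReplicas.length != expectedBrokerIds.size()) {
        return false;
    }
    for (int i = 0; i < currentOrderedReplicas.length; i++) {
        if (currentOrderedReplicas[i].id() != expectedBrokerIds.get(i)) {
            return false;
        }
    }
    return true;
}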
new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(100), replicas, isr, offlineReplicas));
metadata.update(metadataResponse, 10L);
assertNotNull(metadata.fetch().partition(tp));
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(99), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 1);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(100), replicas, Collections.emptyList(), offlineReplicas));
metadata.update(metadataResponse, 20L);
assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 0);
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

"dummy", 1, Collections.emptyMap(), Collections.emptyMap(), MetadataResponse.PartitionMetadata::new);
metadata.update(metadataResponse, 20L);
assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);

new MetadataResponse.PartitionMetadata(error, partition, leader, Optional.of(99), replicas, isr, offlineReplicas));
metadata.update(metadataResponse, 10L);
assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100);
@Override
public Samples getSamples(Cluster cluster,
                          Set<TopicPartition> assignedPartitions,
                          long startTime,
                          long endTime,
                          SamplingMode mode,
                          MetricDef metricDef) throws MetricSamplingException {
    if (_exceptionsLeft > 0) {
        _exceptionsLeft--;
        throw new MetricSamplingException("Error");
    }
    Set<PartitionMetricSample> partitionMetricSamples = new HashSet<>(assignedPartitions.size());
    for (TopicPartition tp : assignedPartitions) {
        PartitionMetricSample sample = new PartitionMetricSample(cluster.partition(tp).leader().id(), tp);
        long now = TIME.milliseconds();
        for (Resource resource : Resource.cachedValues()) {
            for (MetricInfo metricInfo : KafkaMetricDef.resourceToMetricInfo(resource)) {
                sample.record(metricInfo, now);
            }
        }
        sample.close(now);
        partitionMetricSamples.add(sample);
    }
    return new Samples(partitionMetricSamples, Collections.emptySet());
}
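// A hedged usage sketch (assumed, not from the source): because this mock sampler throws
// MetricSamplingException for the first _exceptionsLeft calls and succeeds afterwards, a caller
// that retries will eventually obtain samples. The variables here (sampler, maxRetries, and the
// getSamples arguments) are illustrative.
Samples samples = null;
for (int attempt = 0; attempt < maxRetries && samples == null; attempt++) {
    try {
        samples = sampler.getSamples(cluster, assignedPartitions, startTime, endTime, mode, metricDef);
    } catch (MetricSamplingException e) {
        // Expected until the sampler's exception budget is exhausted; retry.
    }
}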
public void restartLeader(TopicPartition partition) {
    failedNodes.remove(cluster.partition(partition).leader());
}

public void failLeader(TopicPartition partition) {
    failedNodes.add(cluster.partition(partition).leader());
}

public boolean leaderAvailable(TopicPartition partition) {
    return logs.containsKey(partition) && nodeAvailable(cluster.partition(partition).leader());
}
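// A hypothetical sequence (not in the source) showing how these harness methods compose: fail a
// partition's leader, observe availability flip, then restore it. "harness" and the partition
// name are illustrative.
TopicPartition tp = new TopicPartition("topic-1", 0);
harness.failLeader(tp);              // the partition's leader node joins failedNodes
assert !harness.leaderAvailable(tp); // unavailable while its leader is failed
harness.restartLeader(tp);           // the leader node leaves failedNodes
assert harness.leaderAvailable(tp);  // available again, provided the partition's log exists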