/**
 * Records the job/stage mappings when a Spark job starts: every stage id of the
 * job is mapped back to the job id, and the job id is mapped to its stage ids.
 * Synchronized because listener callbacks may race with readers of the maps.
 *
 * @param jobStart the Spark job-start event carrying the job id and stage ids
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
  final int jobId = jobStart.jobId();
  final int stageCount = jobStart.stageIds().size();
  final int[] stages = new int[stageCount];
  for (int idx = 0; idx < stageCount; idx++) {
    // Scala Seq elements come back as Object; unbox via Integer.
    final Integer stageId = (Integer) jobStart.stageIds().apply(idx);
    stages[idx] = stageId;
    stageIdToJobId.put(stageId, jobId);
  }
  jobIdToStageId.put(jobId, stages);
}
// Fragment: serializes one topic/partition reassignment entry as JSON into bldr.
// NOTE(review): each replica id is appended with a trailing "," — presumably the
// caller trims the final comma before closing the "replicas" array; confirm downstream.
bldr.append(" {\"topic\":\"").append(topic).append("\",\"partition\":").append(partition).append(",\"replicas\":["); scala.collection.Seq<Object> replicas = partitionsToBeReassigned.apply(partition); for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) { Object replica = replicas.apply(replicaIndex); bldr.append(replica).append(",");
/**
 * Captures the bidirectional job/stage bookkeeping for a starting Spark job:
 * stage id -> job id entries plus a single job id -> stage-id-array entry.
 * Guarded by {@code synchronized} since listener events arrive asynchronously.
 *
 * @param jobStart event describing the started job and its stages
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
  final int jobId = jobStart.jobId();
  final int n = jobStart.stageIds().size();
  final int[] stageIds = new int[n];
  int i = 0;
  while (i < n) {
    // apply(i) returns Object from the Scala Seq; cast to Integer to unbox.
    final Integer sid = (Integer) jobStart.stageIds().apply(i);
    stageIds[i] = sid;
    stageIdToJobId.put(sid, jobId);
    i++;
  }
  jobIdToStageId.put(jobId, stageIds);
}
// Fragment: clamps the broker-sample retention to the configured minimum, then
// refuses to proceed when the cluster has at most one broker (the exception
// message is cut off in this view).
brokerSampleRetentionMs = Math.max(_minBrokerSampleStoreTopicRetentionTimeMs, brokerSampleRetentionMs); int numberOfBrokersInCluster = zkUtils.getAllBrokersInCluster().size(); if (numberOfBrokersInCluster <= 1) { throw new IllegalStateException(
/**
 * Counts the brokers currently registered in the cluster behind the given
 * ZooKeeper endpoint. Opens a short-lived ZK session and always closes it.
 *
 * @param zkUrl zookeeper connection url
 * @return number of brokers in this cluster
 */
public static int getBrokerCount(String zkUrl) {
  final ZkUtils zkUtils = ZkUtils.apply(
      zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
  try {
    return zkUtils.getAllBrokersInCluster().size();
  } finally {
    // Release the ZooKeeper session even if the broker listing throws.
    zkUtils.close();
  }
}
// Fragment: fetches metadata for a single topic from ZooKeeper and returns its
// partition count; fails fast with IllegalStateException when no metadata exists.
.fetchTopicMetadataFromZk(JavaConversions.asScalaSet(Collections.singleton(topicName)), zkClient); if (topicMetadatas != null && topicMetadatas.size() > 0) { return JavaConversions.asJavaSet(topicMetadatas).iterator().next().partitionsMetadata().size(); } else { throw new IllegalStateException("Failed to get metadata for topic " + topicName);
/**
 * Reads the number of partitions for the given topic from ZooKeeper.
 * Opens a dedicated ZK session and closes it unconditionally.
 *
 * @param zkUrl zookeeper connection url
 * @param topic topic name
 * @return the number of partitions of the given topic, or 0 when the topic
 *         has no partition assignment (NoSuchElementException from Scala map)
 */
public static int getPartitionNumForTopic(String zkUrl, String topic) {
  final ZkUtils zkUtils = ZkUtils.apply(
      zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
  try {
    final Seq<String> topicSeq =
        scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(topic));
    return zkUtils.getPartitionsForTopics(topicSeq).apply(topic).size();
  } catch (NoSuchElementException e) {
    // Scala Map.apply throws when the topic is absent; treat as "no partitions".
    return 0;
  } finally {
    zkUtils.close();
  }
}
// NOTE(review): the two statements after the return are unreachable as shown —
// this looks like an extraction artifact joining code from two different methods
// (a delegation plus a partition-count computation); verify against the full file.
return getPartitionNumForTopic(zkUrl, topic); int brokerCount = zkUtils.getAllBrokersInCluster().size(); int partitionCount = Math.max((int) Math.ceil(brokerCount * partitionToBrokerRatio), minPartitionNum);
// Verifies the observed partition count matches the configured one.
// NOTE(review): TestNG's Assert.assertEquals takes (actual, expected) while
// JUnit's takes (expected, actual) — presumably TestNG here; confirm the import.
Assert.assertEquals(metaData.partitionsMetadata().size(), Integer.parseInt(topicPartitionCount));
/**
 * Builds a {@link PartitionInfo} for every partition of {@code topic} from the
 * ZooKeeper-stored assignment. The leader is resolved per partition; a partition
 * without a leader yields a {@code null} leader node. Host/port of the synthesized
 * {@link Node}s are placeholders ("" / -1) since only broker ids are known here.
 *
 * @param zkClient client used to read assignment and leadership from ZooKeeper
 * @param topic topic whose partitions are described
 * @return one PartitionInfo per partition of the topic
 */
private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
  scala.collection.immutable.Set<String> singleTopic =
      new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> assignments =
      zkClient.getPartitionAssignmentForTopics(singleTopic).apply(topic);
  List<PartitionInfo> result = new ArrayList<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> entries =
      assignments.iterator();
  while (entries.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> entry = entries.next();
    Integer partitionId = (Integer) entry._1();
    scala.Option<Object> leaderId =
        zkClient.getLeaderForPartition(new TopicPartition(topic, partitionId));
    Node leaderNode = leaderId.isEmpty() ? null : new Node((Integer) leaderId.get(), "", -1);
    scala.collection.Seq<Object> replicaIds = entry._2();
    Node[] replicaNodes = new Node[replicaIds.size()];
    for (int r = 0; r < replicaNodes.length; r++) {
      replicaNodes[r] = new Node((Integer) replicaIds.apply(r), "", -1);
    }
    // In-sync replica set and offline replicas are unknown here, hence null.
    result.add(new PartitionInfo(topic, partitionId, leaderNode, replicaNodes, null));
  }
  return result;
}
// Checks the topic's partition count against the expected value.
// NOTE(review): "partionCount" (sic) is a variable declared elsewhere in the file;
// the typo can only be fixed at its declaration and all uses together.
Assert.assertEquals(metaData.partitionsMetadata().size(), partionCount);
/**
 * Executes {@code proposalsToExecute} through a freshly built Executor and then
 * verifies, for each proposal in {@code proposalsToCheck}: the replication factor
 * is unchanged from before execution, every new replica landed on its broker
 * (when the proposal has a replica action), and leadership moved to the proposed
 * leader. Blocks until the executor reports completion.
 *
 * @param zkUtils ZooKeeper accessor used to read cluster state
 * @param proposalsToExecute proposals handed to the executor
 * @param proposalsToCheck proposals whose outcome is asserted
 */
private void executeAndVerifyProposals(ZkUtils zkUtils,
                                       Collection<ExecutionProposal> proposalsToExecute,
                                       Collection<ExecutionProposal> proposalsToCheck) {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getExecutorProperties());
  Executor executor =
      new Executor(config, new SystemTime(), new MetricRegistry(), 86400000L, 43200000L);
  executor.setExecutionMode(false);
  executor.executeProposals(proposalsToExecute, Collections.emptySet(), null,
      EasyMock.mock(LoadMonitor.class), null, null, null);
  // Snapshot replication factors before execution mutates the cluster state.
  Map<TopicPartition, Integer> rfBefore = new HashMap<>();
  for (ExecutionProposal proposal : proposalsToCheck) {
    TopicPartition tp = new TopicPartition(proposal.topic(), proposal.partitionId());
    rfBefore.put(tp,
        zkUtils.getReplicasForPartition(proposal.topic(), proposal.partitionId()).size());
  }
  waitUntilExecutionFinishes(executor);
  for (ExecutionProposal proposal : proposalsToCheck) {
    TopicPartition tp = new TopicPartition(proposal.topic(), proposal.partitionId());
    int expectedReplicationFactor = rfBefore.get(tp);
    assertEquals("Replication factor for partition " + tp + " should be " + expectedReplicationFactor,
        expectedReplicationFactor,
        zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).size());
    if (proposal.hasReplicaAction()) {
      for (int brokerId : proposal.newReplicas()) {
        assertTrue("The partition should have moved for " + tp,
            zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).contains(brokerId));
      }
    }
    assertEquals("The leader should have moved for " + tp,
        proposal.newLeader(), zkUtils.getLeaderForPartition(tp.topic(), tp.partition()).get());
  }
}
/**
 * On job start, registers each stage id of the job under the job id and stores
 * the full stage-id array for the job. Synchronized to protect the shared maps
 * against concurrent listener callbacks and readers.
 *
 * @param jobStart the job-start event from the Spark listener bus
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
  final int jobId = jobStart.jobId();
  final scala.collection.Seq<Object> ids = jobStart.stageIds();
  final int[] stageIdArray = new int[ids.size()];
  for (int pos = 0; pos < stageIdArray.length; pos++) {
    // Elements of the Scala Seq are boxed; cast to Integer then auto-unbox.
    final Integer stageId = (Integer) ids.apply(pos);
    stageIdArray[pos] = stageId;
    stageIdToJobId.put(stageId, jobId);
  }
  jobIdToStageId.put(jobId, stageIdArray);
}
/**
 * Picks the production reference that captures terms to the right: when
 * {@code inner} sits in the last slot of {@code outer}'s production and that
 * production ends with a bare nonterminal, the previous right capture is kept;
 * otherwise {@code outer} becomes the new right capture.
 *
 * @param previousRightCapture the capture inherited from the surrounding context
 * @param outer the enclosing production reference
 * @param inner the child whose position within {@code outer} is inspected
 * @return the production reference acting as right capture for {@code inner}
 */
ProductionReference getRightCapture(ProductionReference previousRightCapture,
                                    ProductionReference outer,
                                    ProductionReference inner) {
  EnumSet<Fixity> outerFixity = getFixity(outer);
  int innerPosition = getPosition(inner, outer);
  boolean innerIsLastItem = innerPosition == outer.production().items().size() - 1;
  return (innerIsLastItem && outerFixity.contains(Fixity.BARE_RIGHT))
      ? previousRightCapture
      : outer;
}
/**
 * Lists all brokers of the cluster as a comma-separated connection string for
 * the listener matching the given security protocol.
 *
 * @param zkUrl zookeeper connection url used to look up the brokers
 * @param securityProtocol protocol selecting which listener endpoint to report
 * @return comma-joined "host:port"-style connection strings, or {@code null}
 *         when the cluster reports no brokers (matches the original reduce)
 */
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
  Broker[] brokerArray = new Broker[brokerSeq.size()];
  brokerSeq.copyToArray(brokerArray);
  ListenerName listener = ListenerName.forSecurityProtocol(securityProtocol);
  String joined = null;  // stays null for an empty cluster, like reduce(null, ...)
  for (Broker broker : brokerArray) {
    String endpoint = broker.brokerEndPoint(listener).connectionString();
    joined = (joined == null) ? endpoint : joined + "," + endpoint;
  }
  return joined;
}
/**
 * Builds the broker connection-string list for the cluster, one entry per
 * broker, joined with commas, using the endpoint bound to the listener of the
 * requested security protocol.
 *
 * @param zkUrl zookeeper connection url
 * @param securityProtocol selects the listener whose endpoint is reported
 * @return comma-separated connection strings, or {@code null} if no brokers
 *         are registered (preserves the original null-identity reduce)
 */
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> allBrokers = zkUtils.getAllBrokersInCluster();
  Broker[] copied = new Broker[allBrokers.size()];
  allBrokers.copyToArray(copied);
  ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol);
  String result = null;
  for (int i = 0; i < copied.length; i++) {
    String connection = copied[i].brokerEndPoint(listenerName).connectionString();
    result = (result == null) ? connection : result + "," + connection;
  }
  return result;
}
/**
 * Determines which edges of the given production reference are "bare", i.e.
 * start or end with a nonterminal. Constants have no items and yield the
 * empty set.
 *
 * @param t the production reference to classify
 * @return set containing BARE_LEFT and/or BARE_RIGHT as applicable
 */
private EnumSet<Fixity> getFixity(ProductionReference t) {
  Production production = t.production();
  EnumSet<Fixity> fixity = EnumSet.noneOf(Fixity.class);
  if (t instanceof Constant) {
    return fixity;  // constants contribute no bare edges
  }
  int lastIdx = production.items().size() - 1;
  if (production.items().apply(0) instanceof NonTerminal) {
    fixity.add(Fixity.BARE_LEFT);
  }
  if (production.items().apply(lastIdx) instanceof NonTerminal) {
    fixity.add(Fixity.BARE_RIGHT);
  }
  return fixity;
}
/**
 * Reports whether the production has a terminal-like item at the given index.
 *
 * @param p the production to inspect
 * @param position zero-based item index; out-of-range positions yield false
 * @return true iff {@code position} is in range and the item there is a
 *         {@code TerminalLike}
 */
boolean hasTerminalAtIdx(Production p, int position) {
  boolean inBounds = position >= 0 && position < p.items().size();
  return inBounds && p.items().apply(position) instanceof TerminalLike;
}
/**
 * Rejects a term whose production ends in a nonterminal from appearing as the
 * immediate child of a cast (the cast would capture the wrong scope); all other
 * terms pass through unchanged.
 *
 * @param tc the term being checked against the enclosing cast
 * @return {@code Right(tc)} when allowed, otherwise {@code Left} with a
 *         {@link PriorityException} describing the scoping problem
 */
public Either<java.util.Set<ParseFailedException>, Term> apply(TermCons tc) {
  int lastIdx = tc.production().items().size() - 1;
  if (!(tc.production().items().apply(lastIdx) instanceof NonTerminal)) {
    return Right.apply(tc);
  }
  String msg = parent.production().klabel().get()
      + " is not allowed to be an immediate child of cast."
      + " Use parentheses: (x):Sort to set the proper scope of the operations.";
  KException kex = new KException(KException.ExceptionType.ERROR,
      KException.KExceptionGroup.CRITICAL, msg, tc.source().get(), tc.location().get());
  return Left.apply(Sets.newHashSet(new PriorityException(kex)));
}
}
/**
 * Converts a {@code WebInputCommand} into a {@code WebInput} bean, mapping each
 * command parameter to a {@code DataField} and copying the name.
 *
 * @param wi the command to convert
 * @return a freshly populated WebInput
 */
public static WebInput webInput(WebInputCommand wi) {
  WebInput result = new WebInput();
  int paramCount = wi.params().size();
  DataField[] fields = new DataField[paramCount];
  for (int idx = 0; idx < paramCount; idx++) {
    fields[idx] = dataField(wi.params().apply(idx));
  }
  result.setParameters(fields);
  result.setName(wi.name());
  return result;
}