/**
 * Shuts down the given Kafka server and removes its on-disk log state.
 *
 * <p>Fix: the previous version deleted only the first configured log
 * directory ({@code logDirs().apply(0)}); a broker may be configured with
 * several {@code log.dirs}, so every directory is deleted to avoid leaking
 * test data on disk.
 *
 * @param serverStartable the running server to stop; must not be null
 */
public static void stopServer(KafkaServerStartable serverStartable) {
    serverStartable.shutdown();
    scala.collection.Seq<String> logDirs = serverStartable.serverConfig().logDirs();
    for (int i = 0; i < logDirs.size(); i++) {
        // Best-effort cleanup: deleteQuietly swallows IO errors by design.
        FileUtils.deleteQuietly(new File(logDirs.apply(i)));
    }
}
scala.collection.Seq<Object> replicas = partitionsToBeReassigned.apply(partition); for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) { Object replica = replicas.apply(replicaIndex); bldr.append(replica).append(",");
/**
 * Records the job-to-stage mapping when a Spark job starts.
 *
 * <p>Declared {@code synchronized} so concurrent listener callbacks cannot
 * interleave updates to the shared maps.
 *
 * @param jobStart event carrying the job id and its stage ids
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
    final int jobId = jobStart.jobId();
    final int stageCount = jobStart.stageIds().size();
    final int[] stages = new int[stageCount];
    for (int idx = 0; idx < stageCount; idx++) {
        final Integer stage = (Integer) jobStart.stageIds().apply(idx);
        stages[idx] = stage;
        stageIdToJobId.put(stage, jobId);
    }
    jobIdToStageId.put(jobId, stages);
}
/**
 * Captures which stages belong to a newly started Spark job.
 *
 * <p>Synchronized: listener callbacks may arrive on different threads, and
 * both lookup maps must be updated atomically with respect to readers.
 *
 * @param jobStart start event with the job id and its stage ids
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
    int jobId = jobStart.jobId();
    int n = jobStart.stageIds().size();
    int[] stageIdArray = new int[n];
    for (int pos = 0; pos < n; pos++) {
        Integer sid = (Integer) jobStart.stageIds().apply(pos);
        stageIdToJobId.put(sid, jobId);
        stageIdArray[pos] = sid;
    }
    jobIdToStageId.put(jobId, stageIdArray);
}
/**
 * Maps every stage of a starting job to that job's id.
 *
 * <p>Guarded by {@code stageToJobId}'s monitor since listener callbacks can
 * race with readers of the map.
 *
 * @param jobStart event carrying the job id and its stage ids
 */
@Override
public void onJobStart(SparkListenerJobStart jobStart) {
    synchronized (stageToJobId) {
        final int jobId = jobStart.jobId();
        final int stageCount = jobStart.stageIds().length();
        for (int idx = 0; idx < stageCount; idx++) {
            stageToJobId.put((Integer) jobStart.stageIds().apply(idx), jobId);
        }
    }
}
/**
 * This broker's {@code metadata.broker.list} value. Example: {@code 127.0.0.1:9092}.
 *
 * <p>You can use this to tell Kafka producers and consumers how to connect to
 * this instance.
 *
 * <p>This version returns the port of the first advertised listener.
 *
 * @return the broker list in {@code host:port} form
 */
String brokerList() {
    final ListenerName firstListener =
        kafka.config().advertisedListeners().apply(0).listenerName();
    final int port = kafka.boundPort(firstListener);
    return kafka.config().hostName() + ":" + port;
}
/**
 * Builds Kafka {@link PartitionInfo} entries for every partition of
 * {@code topic} by reading partition assignments and per-partition leaders
 * from ZooKeeper.
 *
 * <p>Broker endpoints are NOT resolved here: leader and replica {@code Node}s
 * carry only the broker id, with an empty host and port {@code -1}. The
 * in-sync-replica array is likewise left {@code null}.
 *
 * @param zkClient connected ZooKeeper client used for the metadata lookups
 * @param topic    topic whose partition metadata is wanted
 * @return one PartitionInfo per partition; order follows the Scala map's iterator
 */
private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
    // Scala interop: wrap the single topic name in an immutable Set1.
    scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
    // Map of partition id -> replica broker ids, for this topic only.
    scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
        zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
    List<PartitionInfo> partitionInfoList = new ArrayList<>();
    scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it =
        partitionAssignments.iterator();
    while (it.hasNext()) {
        scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
        Integer partition = (Integer) scalaTuple._1();
        // Leader may be absent (e.g. during election); represent that as null.
        scala.Option<Object> leaderOption =
            zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
        Node leader = leaderOption.isEmpty() ? null : new Node((Integer) leaderOption.get(), "", -1);
        // Replicas are broker ids only; host/port are intentionally unset.
        Node[] replicas = new Node[scalaTuple._2().size()];
        for (int i = 0; i < replicas.length; i++) {
            Integer brokerId = (Integer) scalaTuple._2().apply(i);
            replicas[i] = new Node(brokerId, "", -1);
        }
        partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
    }
    return partitionInfoList;
}
/**
 * Shuts down the given Kafka server and removes its on-disk log state.
 *
 * <p>Fix: the previous version deleted only the first configured log
 * directory ({@code logDirs().apply(0)}); a broker may be configured with
 * several {@code log.dirs}, so every directory is deleted to avoid leaking
 * test data on disk.
 *
 * @param serverStartable the running server to stop; must not be null
 */
public static void stopServer(KafkaServerStartable serverStartable) {
    serverStartable.shutdown();
    scala.collection.Seq<String> logDirs = serverStartable.serverConfig().logDirs();
    for (int i = 0; i < logDirs.size(); i++) {
        // Best-effort cleanup: deleteQuietly swallows IO errors by design.
        FileUtils.deleteQuietly(new File(logDirs.apply(i)));
    }
}
/**
 * Returns the element at the current cursor position and advances the cursor.
 *
 * <p>NOTE(review): no bounds check — presumably callers use {@code hasNext()}
 * first, per the Iterator contract; confirm against the enclosing class.
 *
 * @return the next element of the underlying Scala sequence
 */
@Override
public Long next() {
    final Long value = (Long) seq.apply(index);
    index += 1;
    return value;
}
/**
 * Parses an {@code application/x-www-form-urlencoded} body into a map,
 * keeping only the first value for each parameter name.
 *
 * @param body   the raw url-encoded request body
 * @param encode character encoding used when decoding the body
 * @return parameter name mapped to its first value
 * @throws IOException declared for compatibility with callers
 */
public static Map<String, String> urlDecode(String body, String encode) throws IOException {
    Map<String, String> postData = Maps.newHashMap();
    scala.collection.immutable.Map<String, Seq<String>> formData =
        FormUrlEncodedParser.parse(body, encode);
    for (Map.Entry<String, Seq<String>> entry : JavaConversions.mapAsJavaMap(formData).entrySet()) {
        // Multi-valued parameters collapse to their first occurrence.
        postData.put(entry.getKey(), entry.getValue().apply(0));
    }
    return postData;
}
}
/**
 * Determines on which sides the production backing {@code t} is "bare",
 * i.e. starts and/or ends with a non-terminal.
 *
 * @param t the parsed term whose production is inspected
 * @return a set possibly containing {@code BARE_LEFT} and/or {@code BARE_RIGHT};
 *     always empty for constants
 */
private EnumSet<Fixity> getFixity(ProductionReference t) {
    Production p = t.production();
    EnumSet<Fixity> fixity = EnumSet.noneOf(Fixity.class);
    if (t instanceof Constant) {
        return fixity;
    }
    if (p.items().apply(0) instanceof NonTerminal) {
        fixity.add(Fixity.BARE_LEFT);
    }
    if (p.items().apply(p.items().size() - 1) instanceof NonTerminal) {
        fixity.add(Fixity.BARE_RIGHT);
    }
    return fixity;
}
/**
 * Records the job-to-stage mapping when a Spark job starts.
 *
 * <p>Declared {@code synchronized} so concurrent listener callbacks cannot
 * interleave updates to the shared maps.
 *
 * @param jobStart event carrying the job id and its stage ids
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
    final int jobId = jobStart.jobId();
    final int stageCount = jobStart.stageIds().size();
    final int[] stages = new int[stageCount];
    for (int idx = 0; idx < stageCount; idx++) {
        final Integer stage = (Integer) jobStart.stageIds().apply(idx);
        stages[idx] = stage;
        stageIdToJobId.put(stage, jobId);
    }
    jobIdToStageId.put(jobId, stages);
}
/**
 * Captures which stages belong to a newly started Spark job.
 *
 * <p>Synchronized: listener callbacks may arrive on different threads, and
 * both lookup maps must be updated atomically with respect to readers.
 *
 * @param jobStart start event with the job id and its stage ids
 */
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
    int jobId = jobStart.jobId();
    int n = jobStart.stageIds().size();
    int[] stageIdArray = new int[n];
    for (int pos = 0; pos < n; pos++) {
        Integer sid = (Integer) jobStart.stageIds().apply(pos);
        stageIdToJobId.put(sid, jobId);
        stageIdArray[pos] = sid;
    }
    jobIdToStageId.put(jobId, stageIdArray);
}
/**
 * Maps every stage of a starting job to that job's id.
 *
 * <p>Guarded by {@code stageToJobId}'s monitor since listener callbacks can
 * race with readers of the map.
 *
 * @param jobStart event carrying the job id and its stage ids
 */
@Override
public void onJobStart(SparkListenerJobStart jobStart) {
    synchronized (stageToJobId) {
        final int jobId = jobStart.jobId();
        final int stageCount = jobStart.stageIds().length();
        for (int idx = 0; idx < stageCount; idx++) {
            stageToJobId.put((Integer) jobStart.stageIds().apply(idx), jobId);
        }
    }
}
/**
 * Reports whether the production item at {@code position} is a terminal.
 *
 * @param p        production whose items are inspected
 * @param position zero-based index into the production's items
 * @return true iff the index is in range and the item there is a {@code TerminalLike}
 */
boolean hasTerminalAtIdx(Production p, int position) {
    // Short-circuits: out-of-range indices never reach apply(), so no
    // IndexOutOfBounds is possible here.
    return position >= 0
        && position < p.items().size()
        && p.items().apply(position) instanceof TerminalLike;
}
Assert.assertEquals(bean.getB().length, result.length()); for (int i = 0; i < result.length(); i++) { Assert.assertEquals(bean.getB()[i], result.apply(i)); Assert.assertEquals(bean.getD().size(), d.length()); for (int i = 0; i < d.length(); i++) { Assert.assertEquals(bean.getD().get(i), d.apply(i));
Assert.assertEquals(bean.getB().length, result.length()); for (int i = 0; i < result.length(); i++) { Assert.assertEquals(bean.getB()[i], result.apply(i)); Assert.assertEquals(bean.getD().size(), d.length()); for (int i = 0; i < d.length(); i++) { Assert.assertEquals(bean.getD().get(i), d.apply(i));
Assert.assertEquals(bean.getB().length, result.length()); for (int i = 0; i < result.length(); i++) { Assert.assertEquals(bean.getB()[i], result.apply(i)); Assert.assertEquals(bean.getD().size(), d.length()); for (int i = 0; i < d.length(); i++) { Assert.assertEquals(bean.getD().get(i), d.apply(i));
/**
 * Rejects a term whose production ends in a non-terminal when it appears
 * directly under a cast, since the cast would then bind more than intended.
 *
 * @param tc the candidate child term
 * @return {@code Right(tc)} when allowed; otherwise {@code Left} carrying a
 *     {@code PriorityException} that tells the user to add parentheses
 */
public Either<java.util.Set<ParseFailedException>, Term> apply(TermCons tc) {
    final int lastIdx = tc.production().items().size() - 1;
    // Guard clause: productions that do not end in a non-terminal are fine.
    if (!(tc.production().items().apply(lastIdx) instanceof NonTerminal)) {
        return Right.apply(tc);
    }
    String msg = parent.production().klabel().get()
        + " is not allowed to be an immediate child of cast."
        + " Use parentheses: (x):Sort to set the proper scope of the operations.";
    KException kex = new KException(KException.ExceptionType.ERROR,
        KException.KExceptionGroup.CRITICAL, msg, tc.source().get(), tc.location().get());
    return Left.apply(Sets.newHashSet(new PriorityException(kex)));
}
}
/**
 * Converts a {@code WebInputCommand} into a {@code WebInput}, copying the
 * name and translating each Scala-side parameter into a {@code DataField}.
 *
 * @param wi the command to convert
 * @return a populated {@code WebInput}
 */
public static WebInput webInput(WebInputCommand wi) {
    final int paramCount = wi.params().size();
    final DataField[] fields = new DataField[paramCount];
    for (int idx = 0; idx < paramCount; idx++) {
        fields[idx] = dataField(wi.params().apply(idx));
    }
    WebInput result = new WebInput();
    result.setParameters(fields);
    result.setName(wi.name());
    return result;
}