private static Seq<String> gaugeName(String name) {
    return scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(name)).toList();
}
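// A minimal sketch of the same conversion written against scala.collection.JavaConverters,
// the non-deprecated replacement for JavaConversions (the method name gaugeNameConverters
// is illustrative, not from the source):
private static Seq<String> gaugeNameConverters(String name) {
    return scala.collection.JavaConverters.asScalaBufferConverter(Arrays.asList(name))
            .asScala()
            .toList();
}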
@Override
public Iterable<Notification> getNotifications() {
    return JavaConversions.asJavaIterable(inner.notifications());
}
@Override
public void consume(Iterator<KeyMessage<String, U>> updateIterator, Configuration hadoopConf) {
    scalaManager.consume(JavaConversions.asScalaIterator(updateIterator), hadoopConf);
}
/**
 * Reads the number of partitions for the given topic from the specified ZooKeeper.
 *
 * @param zkUrl ZooKeeper connection url
 * @param topic topic name
 * @return the number of partitions of the given topic, or 0 if the topic does not exist
 */
public static int getPartitionNumForTopic(String zkUrl, String topic) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS,
            JaasUtils.isZkSecurityEnabled());
    try {
        Seq<String> topics = scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(topic));
        return zkUtils.getPartitionsForTopics(topics).apply(topic).size();
    } catch (NoSuchElementException e) {
        return 0;
    } finally {
        zkUtils.close();
    }
}
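// A hypothetical call site for the helper above (the ZooKeeper address and topic name
// are illustrative, not from the source):
int partitionCount = getPartitionNumForTopic("localhost:2181", "test-topic");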
/**
 * This convenience method should only be called in test code.
 */
@VisibleForTesting
public void write(Iterator<Product2<K, V>> records) throws IOException {
    write(JavaConverters.asScalaIteratorConverter(records).asScala());
}
StringBuilder bldr = new StringBuilder();
bldr.append("{\"version\":1,\"partitions\":[\n");
for (int partition = 0; partition < partitionsToBeReassigned.size(); partition++) {
    bldr.append(" {\"topic\":\"").append(topic)
        .append("\",\"partition\":").append(partition)
        .append(",\"replicas\":[");
    scala.collection.Seq<Object> replicas = partitionsToBeReassigned.apply(partition);
    for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) {
        Object replica = replicas.apply(replicaIndex);
        bldr.append(replica).append(",");
    }
    // Drop the trailing comma so the replica list stays valid JSON.
    bldr.setLength(bldr.length() - 1);
    bldr.append("]},\n");
}
// Drop the trailing ",\n" after the last partition entry and close the document.
bldr.setLength(bldr.length() - 2);
bldr.append("\n]}");
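// For illustration, with a hypothetical topic "t" whose two partitions are assigned
// replicas [1,2] and [2,3], the builder above produces:
// {"version":1,"partitions":[
//  {"topic":"t","partition":0,"replicas":[1,2]},
//  {"topic":"t","partition":1,"replicas":[2,3]}
// ]}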
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
    int jobId = jobStart.jobId();
    int size = jobStart.stageIds().size();
    int[] intStageIds = new int[size];
    for (int i = 0; i < size; i++) {
        Integer stageId = (Integer) jobStart.stageIds().apply(i);
        intStageIds[i] = stageId;
        stageIdToJobId.put(stageId, jobId);
    }
    jobIdToStageId.put(jobId, intStageIds);
}
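// A minimal sketch of registering a listener like the one above (JobStageListener is a
// hypothetical name for the enclosing class; addSparkListener is the standard SparkContext
// hook, and JavaSparkContext/SparkConf come from org.apache.spark):
JavaSparkContext jsc = new JavaSparkContext(new SparkConf().setAppName("example").setMaster("local[2]"));
jsc.sc().addSparkListener(new JobStageListener());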
/**
 * @param zkUrl ZooKeeper connection url
 * @return number of brokers in this cluster
 */
public static int getBrokerCount(String zkUrl) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS,
            JaasUtils.isZkSecurityEnabled());
    try {
        return zkUtils.getAllBrokersInCluster().size();
    } finally {
        zkUtils.close();
    }
}
/**
 * This broker's `metadata.broker.list` value. Example: `127.0.0.1:9092`.
 *
 * <p>You can use this to tell Kafka producers and consumers how to connect to this instance.
 *
 * <p>This version returns the port of the first advertised listener.
 *
 * @return the broker list
 */
String brokerList() {
    final ListenerName listenerName = kafka.config().advertisedListeners().apply(0).listenerName();
    return kafka.config().hostName() + ":" + kafka.boundPort(listenerName);
}
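// A minimal sketch of the use the Javadoc describes: pointing a producer at this embedded
// broker. ProducerConfig/KafkaProducer are from org.apache.kafka.clients.producer and
// StringSerializer from org.apache.kafka.common.serialization; the serializer choice is
// an assumption, not from the source.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList());
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(props);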
@Override
public int getExecutorCount() {
    return sc.sc().getExecutorMemoryStatus().size();
}
public Iterable<LogSegment> getSegments() {
    return JavaConversions.asJavaIterable(kafkaLog.logSegments());
}
/**
 * Returns the first valid offset in the entire journal.
 *
 * @return first offset
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> logSegments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment segment = Iterables.getFirst(logSegments, null);
    if (segment == null) {
        return 0;
    }
    return segment.baseOffset();
}
public void toString(PrintWriter writer) {
    inner.dumpToString(writer); // legacy method - don't convert exceptions...
    for (Notification notification : JavaConversions.asJavaIterable(inner.notifications())) {
        writer.println(notification.getDescription());
    }
}
@Override
public Iterable<U> buildUpdates(JavaPairRDD<K, M> newData) {
    return JavaConversions.asJavaIterable(scalaManager.buildUpdates(newData.rdd()));
}