/**
 * Adapts a Java iterator of record pairs to a Scala iterator and delegates to
 * the Scala-based {@code write} overload.
 *
 * <p>This convenience method should only be called in test code.
 *
 * @param records the key/value records to write
 * @throws IOException if the underlying write fails
 */
@VisibleForTesting
public void write(Iterator<Product2<K, V>> records) throws IOException {
  final scala.collection.Iterator<Product2<K, V>> scalaRecords =
      JavaConverters.asScalaIteratorConverter(records).asScala();
  write(scalaRecords);
}
/**
 * Test-only convenience overload: wraps the supplied Java {@link Iterator} as
 * a Scala iterator and forwards it to the primary {@code write} method.
 *
 * @param records Java iterator over the record pairs to be written
 * @throws IOException propagated from the delegated write
 */
@VisibleForTesting
public void write(Iterator<Product2<K, V>> records) throws IOException {
  write(JavaConverters.asScalaIteratorConverter(records).asScala());
}
/**
 * Writes records supplied as a Java iterator by converting them to the Scala
 * iterator form expected by the main {@code write} implementation.
 *
 * <p>Intended strictly for test code; production callers use the Scala
 * overload directly.
 *
 * @param records the records to write
 * @throws IOException if writing fails
 */
@VisibleForTesting
public void write(Iterator<Product2<K, V>> records) throws IOException {
  final scala.collection.JavaConverters.AsScala<scala.collection.Iterator<Product2<K, V>>>
      converter = JavaConverters.asScalaIteratorConverter(records);
  write(converter.asScala());
}
JavaConverters.asScalaIteratorConverter(newAssignedReplica.iterator()).asScala().toSeq());
/**
 * Populates {@code schema} from the full column list of the target table,
 * i.e. before any column pruning has been applied.
 *
 * <p>Opens a short-lived Phoenix JDBC connection (closed via
 * try-with-resources) to fetch the column metadata, converts it to the Scala
 * {@code Seq} form expected by Spark, and derives the Catalyst schema. Any
 * {@link SQLException} is rethrown as an unchecked exception.
 */
private void setSchema() {
  try (Connection conn = DriverManager.getConnection("jdbc:phoenix:" + zkUrl)) {
    final List<ColumnInfo> columns = PhoenixRuntime.generateColumnInfo(conn, tableName, null);
    final Seq<ColumnInfo> columnSeq =
        JavaConverters.asScalaIteratorConverter(columns.iterator()).asScala().toSeq();
    schema = SparkSchemaUtil.phoenixSchemaToCatalystSchema(columnSeq, dateAsTimestamp);
  } catch (SQLException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Resolves the partitions of the given topic via ZooKeeper.
 *
 * @param t the topic name to look up
 * @return one {@code TopicAndPartition} per known partition of {@code t}, or
 *     an empty list when the topic is unknown
 */
private List<TopicAndPartition> getTopicPartitions(String t) {
  List<TopicAndPartition> tpList = new ArrayList<>();
  List<String> l = Arrays.asList(t);
  java.util.Map<String, Seq<Object>> tpMap =
      JavaConverters.mapAsJavaMapConverter(
              zkUtils.getPartitionsForTopics(
                  JavaConverters.asScalaIteratorConverter(l.iterator()).asScala().toSeq()))
          .asJava();
  // FIX: the original only null-checked the map itself; if the topic was
  // absent from the returned map, tpMap.get(t) was null and the converter
  // call below threw an NPE. Guard the per-topic entry instead.
  Seq<Object> partitions = (tpMap != null) ? tpMap.get(t) : null;
  if (partitions != null) {
    // Stream the converted list view directly — no need for the defensive
    // ArrayList copy the original made before mapping.
    tpList =
        JavaConverters.seqAsJavaListConverter(partitions).asJava().stream()
            .map(p -> new TopicAndPartition(t, (Integer) p))
            .collect(toList());
  }
  return tpList;
}