/**
 * Converts a Java {@link Collection} into an immutable Scala {@code Seq}.
 *
 * @param javaCollection the Java collection to convert
 * @param <A> the element type of the resulting Seq
 * @return an immutable Scala Seq (a List) holding the collection's elements
 */
public static <A> scala.collection.immutable.Seq<A> asScala(Collection<A> javaCollection) {
    scala.collection.Iterable<A> asIterable =
        scala.collection.JavaConverters.collectionAsScalaIterableConverter(javaCollection).asScala();
    return asIterable.toList();
}
@Test
public void combineByKey() {
    JavaRDD<Integer> input = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
    Function<Integer, Integer> modThree = n -> n % 3;
    Function<Integer, Integer> identity = n -> n;
    Function2<Integer, Integer, Integer> add = Integer::sum;

    // Group values by (value mod 3) and sum each group.
    JavaPairRDD<Integer, Integer> combined =
        input.keyBy(modThree).combineByKey(identity, add, add);
    Map<Integer, Integer> actual = combined.collectAsMap();
    ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
    assertEquals(expected, actual);

    // Same aggregation via the explicit overload: default partitioner,
    // map-side combine disabled, Kryo serialization.
    Partitioner partitioner = Partitioner.defaultPartitioner(
        combined.rdd(),
        JavaConverters.collectionAsScalaIterableConverter(
            Collections.<RDD<?>>emptyList()).asScala().toSeq());
    combined = input.keyBy(modThree)
        .combineByKey(identity, add, add, partitioner, false,
            new KryoSerializer(new SparkConf()));
    actual = combined.collectAsMap();
    assertEquals(expected, actual);
}
/**
 * Builds a swagger {@link Model} for the (alternate-resolved) type carried by
 * {@code modelContext}, or absent when the type is a container, an enum, or a
 * base type — none of which are described as standalone models.
 */
@Override
public com.google.common.base.Optional<Model> modelFor(ModelContext modelContext) {
    ResolvedType propertiesHost = alternateTypeProvider.alternateFor(modelContext.resolvedType(resolver));
    if (isContainerType(propertiesHost)
        || propertiesHost.getErasedType().isEnum()
        || Types.isBaseType(Types.typeNameFor(propertiesHost.getErasedType()))) {
        return Optional.absent();
    }
    Map<String, ModelProperty> properties = newLinkedHashMap();
    int index = 0;
    for (com.mangofactory.swagger.models.property.ModelProperty each : properties(modelContext, propertiesHost)) {
        properties.put(each.getName(),
            new ModelProperty(each.typeName(modelContext),
                each.qualifiedTypeName(),
                index,
                each.isRequired(),
                each.propertyDescription(),
                each.allowableValues(),
                itemModelRef(each.getType())
            ));
        // BUG FIX: index was declared but never incremented, so every property
        // reported position 0; advance it so properties keep declaration order.
        index++;
    }
    return Optional.of(new Model(typeName(propertiesHost),
        typeName(propertiesHost),
        simpleQualifiedTypeName(propertiesHost),
        toScalaLinkedHashMap(properties),
        modelDescription(propertiesHost),
        Option.apply(""),
        Option.<String>empty(),
        collectionAsScalaIterable(new ArrayList<String>()).toList()));
}
/**
 * If {@code schema} carries a class property naming a Scala collection type,
 * converts {@code array} to the matching concrete Scala collection
 * (List, Buffer, or Set — otherwise the Iterable itself); if the class
 * property is missing or is not a Scala Iterable, returns {@code array}
 * unchanged.
 *
 * @param array  the value to (possibly) convert
 * @param schema the Avro schema whose class property identifies the target type
 * @return the converted Scala collection, or {@code array} unchanged
 */
public static Object scalaIterableCheck(Object array, Schema schema) {
    // Use Class<?> / Iterable<?> instead of raw types; behavior is unchanged.
    Class<?> collectionClass = ScalaSafeReflectData.getClassProp(schema, ScalaSafeReflectData.CLASS_PROP);
    if (collectionClass != null && scala.collection.Iterable.class.isAssignableFrom(collectionClass)) {
        scala.collection.Iterable<?> it = toIter(array);
        if (scala.collection.immutable.List.class.isAssignableFrom(collectionClass)) {
            return it.toList();
        }
        if (scala.collection.mutable.Buffer.class.isAssignableFrom(collectionClass)) {
            return it.toBuffer();
        }
        if (scala.collection.immutable.Set.class.isAssignableFrom(collectionClass)) {
            return it.toSet();
        }
        return it;
    }
    return array;
}
// Count the task instances matching the predicate below; the predicate body
// continues beyond this view — presumably it selects async tasks (TODO confirm).
int asyncTaskCount = taskInstances.values().count(new AbstractFunction1<TaskInstance, Object>() {
    @Override
    public Boolean apply(TaskInstance t) {
/**
 * Wraps a {@code scala.collection.Iterable} as a sequential Java {@link Stream}.
 * <p>
 * Only sequential operations will be efficient. For efficient parallel
 * operation use streamAccumulated instead, noting that it copies the
 * Iterable's contents.
 *
 * @param coll the scala.collection.Iterable to traverse
 * @param <T>  the element type
 * @return a Stream view of the collection, sequential by default
 */
public static <T> Stream<T> stream(scala.collection.Iterable<T> coll) {
    StepsAnyIterator<T> steps = new StepsAnyIterator<T>(coll.iterator());
    return StreamSupport.stream(steps, false);
}
/**
 * Converts a Java {@link Iterable} of {@code T} to a Scala {@code Seq} of the
 * subtype {@code U}, materialized as a Vector. The narrowing cast is
 * unchecked; callers are responsible for the elements actually being {@code U}.
 */
@SuppressWarnings("unchecked")
public static <T, U extends T> Seq<U> convertIterable(Iterable<T> els) {
    Iterable<U> narrowed = (Iterable<U>) els;
    return scala.collection.JavaConverters.iterableAsScalaIterableConverter(narrowed).asScala().toVector();
}

public static <T, U extends T> Seq<U> convertArray(T[] els) {
// Count the task instances accepted by the predicate below; its body runs past
// this view — NOTE(review): likely filters for async tasks, confirm upstream.
int asyncTaskCount = taskInstances.values().count(new AbstractFunction1<TaskInstance, Object>() {
    @Override
    public Boolean apply(TaskInstance t) {
/**
 * Exposes a {@code scala.collection.Iterable} as a sequential Java
 * {@link Stream}.
 * <p>
 * Only sequential operations will be efficient; for efficient parallel
 * operation use streamAccumulated (which copies the Iterable's contents).
 *
 * @param coll the scala.collection.Iterable to traverse
 * @param <T>  the element type
 * @return a sequential Stream view of {@code coll}
 */
public static <T> Stream<T> stream(scala.collection.Iterable<T> coll) {
    return StreamSupport.stream(
        new StepsAnyIterator<T>(coll.iterator()),
        /* parallel= */ false);
}
/**
 * Turns a Java {@link Iterable} of {@code T} into a Scala {@code Seq} of the
 * narrower type {@code U}, backed by a Vector. Unchecked cast: the caller
 * must guarantee every element is a {@code U}.
 */
@SuppressWarnings("unchecked")
public static <T, U extends T> Seq<U> convertIterable(Iterable<T> els) {
    scala.collection.Iterable<U> scalaIterable =
        scala.collection.JavaConverters.iterableAsScalaIterableConverter((Iterable<U>) els).asScala();
    return scalaIterable.toVector();
}

public static <T, U extends T> Seq<U> convertArray(T[] els) {
@Test
public void combineByKey() {
    JavaRDD<Integer> numbers = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
    Function<Integer, Integer> keyOf = value -> value % 3;
    Function<Integer, Integer> createCombiner = value -> value;
    Function2<Integer, Integer, Integer> merge = (a, b) -> a + b;

    // Sum all values that share the same key (value mod 3).
    JavaPairRDD<Integer, Integer> byKey =
        numbers.keyBy(keyOf).combineByKey(createCombiner, merge, merge);
    Map<Integer, Integer> results = byKey.collectAsMap();
    ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
    assertEquals(expected, results);

    // Exercise the fully-specified overload: explicit partitioner, map-side
    // combine off, Kryo serializer — expecting the identical result.
    Partitioner defaultPartitioner = Partitioner.defaultPartitioner(
        byKey.rdd(),
        JavaConverters.collectionAsScalaIterableConverter(
            Collections.<RDD<?>>emptyList()).asScala().toSeq());
    byKey = numbers.keyBy(keyOf)
        .combineByKey(createCombiner, merge, merge, defaultPartitioner, false,
            new KryoSerializer(new SparkConf()));
    results = byKey.collectAsMap();
    assertEquals(expected, results);
}
/**
 * Converts a Java {@link Collection} to an immutable Scala {@code Seq}.
 *
 * @param javaCollection the collection to convert
 * @param <A> the Seq element type
 * @return an immutable Scala Seq (List) with the same elements
 */
public static <A> scala.collection.immutable.Seq<A> asScala(Collection<A> javaCollection) {
    return scala.collection.JavaConverters
        .collectionAsScalaIterableConverter(javaCollection)
        .asScala()
        .toList();
}
// Tally task instances satisfying the predicate below; the predicate body is
// truncated here — presumably it tests for async execution (verify in context).
int asyncTaskCount = taskInstances.values().count(new AbstractFunction1<TaskInstance, Object>() {
    @Override
    public Boolean apply(TaskInstance t) {
/**
 * Creates a sequential Java {@link Stream} over a
 * {@code scala.collection.Iterable}.
 * <p>
 * Only sequential operations will be efficient. Prefer streamAccumulated for
 * parallel work, keeping in mind that it copies the Iterable's contents.
 *
 * @param coll the scala.collection.Iterable to traverse
 * @param <T>  the element type
 * @return a Stream over the collection, sequential by default
 */
public static <T> Stream<T> stream(scala.collection.Iterable<T> coll) {
    scala.collection.Iterator<T> it = coll.iterator();
    return StreamSupport.stream(new StepsAnyIterator<T>(it), false);
}
/**
 * Builds a Scala {@code Seq} (Vector-backed) of type {@code U} from a Java
 * {@link Iterable} of the supertype {@code T}. The element-type narrowing is
 * an unchecked cast; correctness is the caller's responsibility.
 */
@SuppressWarnings("unchecked")
public static <T, U extends T> Seq<U> convertIterable(Iterable<T> els) {
    Iterable<U> asU = (Iterable<U>) els;
    return scala.collection.JavaConverters
        .iterableAsScalaIterableConverter(asU)
        .asScala()
        .toVector();
}

public static <T, U extends T> Seq<U> convertArray(T[] els) {
@Test
public void combineByKey() {
    JavaRDD<Integer> source = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
    Function<Integer, Integer> keyFn = x -> x % 3;
    Function<Integer, Integer> initCombiner = x -> x;
    Function2<Integer, Integer, Integer> plus = (x, y) -> x + y;

    // Aggregate by key (value mod 3), summing values within each key.
    JavaPairRDD<Integer, Integer> aggregated =
        source.keyBy(keyFn).combineByKey(initCombiner, plus, plus);
    Map<Integer, Integer> observed = aggregated.collectAsMap();
    ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
    assertEquals(expected, observed);

    // The verbose overload must agree: default partitioner, no map-side
    // combine, Kryo serializer.
    Partitioner partitioner = Partitioner.defaultPartitioner(
        aggregated.rdd(),
        JavaConverters.collectionAsScalaIterableConverter(
            Collections.<RDD<?>>emptyList()).asScala().toSeq());
    aggregated = source.keyBy(keyFn)
        .combineByKey(initCombiner, plus, plus, partitioner, false,
            new KryoSerializer(new SparkConf()));
    observed = aggregated.collectAsMap();
    assertEquals(expected, observed);
}
/**
 * Produces an immutable Scala {@code Seq} from a Java {@link Collection}.
 *
 * @param javaCollection the source collection
 * @param <A> the element type
 * @return an immutable Scala Seq (List) of the same elements
 */
public static <A> scala.collection.immutable.Seq<A> asScala(Collection<A> javaCollection) {
    scala.collection.Iterable<A> converted =
        scala.collection.JavaConverters.collectionAsScalaIterableConverter(javaCollection).asScala();
    return converted.toList();
}
// Count entries of taskInstances.values() matching the predicate below; the
// predicate's body lies outside this view — NOTE(review): confirm it selects
// async tasks as the variable name suggests.
int asyncTaskCount = taskInstances.values().count(new AbstractFunction1<TaskInstance, Object>() {
    @Override
    public Boolean apply(TaskInstance t) {
/**
 * Generates a LongStream that traverses a long-valued scala.collection.Iterable.
 * <p>
 * Only sequential operations will be efficient.
 * For efficient parallel operation, use the longStreamAccumulated method instead,
 * but note that this creates a copy of the contents of the Iterable.
 *
 * @param coll The scala.collection.Iterable to traverse
 * @return A LongStream view of the collection which, by default, executes sequentially.
 */
public static LongStream longStream(scala.collection.Iterable<Long> coll) {
    // Raw-typed cast: StepsLongIterator presumably takes an unparameterized
    // scala.collection.Iterator — TODO confirm against its constructor before
    // tightening the generics here.
    scala.collection.Iterator iter = (scala.collection.Iterator)coll.iterator();
    return StreamSupport.longStream(new StepsLongIterator(iter), false);
}
/**
 * Converts a Java {@link Iterable} of {@code T} into a Vector-backed Scala
 * {@code Seq} of the subtype {@code U}. The narrowing is an unchecked cast;
 * the caller guarantees the element types.
 */
@SuppressWarnings("unchecked")
public static <T, U extends T> Seq<U> convertIterable(Iterable<T> els) {
    Iterable<U> typed = (Iterable<U>) els;
    return scala.collection.JavaConverters.iterableAsScalaIterableConverter(typed).asScala().toVector();
}

@SuppressWarnings("unchecked")