/**
 * Returns the type information describing the elements of this operator's input
 * data set. Equivalent to calling {@code getInput().getType()}.
 *
 * @return Type information of the input data set.
 */
public TypeInformation<IN> getInputType() {
    final TypeInformation<IN> inputType = this.input.getType();
    return inputType;
}
/**
 * Returns the type information describing the elements of this operator's first
 * input data set. Equivalent to calling {@code getInput1().getType()}.
 *
 * @return Type information of the first input data set.
 */
public TypeInformation<IN1> getInput1Type() {
    final TypeInformation<IN1> firstInputType = this.input1.getType();
    return firstInputType;
}
/**
 * Returns the type information describing the elements of this operator's second
 * input data set. Equivalent to calling {@code getInput2().getType()}.
 *
 * @return Type information of the second input data set.
 */
public TypeInformation<IN2> getInput2Type() {
    final TypeInformation<IN2> secondInputType = this.input2.getType();
    return secondInputType;
}
/**
 * Creates an operator that produces the union of the two given data sets.
 *
 * @param input1 The first data set to be unioned.
 * @param input2 The second data set to be unioned.
 */
public UnionOperator(DataSet<T> input1, DataSet<T> input2) {
    // Both inputs carry the same element type T, so the union result
    // simply reuses the first input's type information.
    super(input1, input2, input1.getType());
}
/**
 * Creates a distinct operator over the given input, using the given keys to
 * determine which elements are considered equal.
 *
 * @param input The data set from which duplicates are removed.
 * @param keys The keys on which elements are compared for equality.
 * @throws NullPointerException If {@code keys} is null.
 */
public DistinctOperator(DataSet<T> input, Keys<T> keys) {
    super(input, input.getType());

    // Fail with a descriptive message rather than a bare NullPointerException.
    if (keys == null) {
        throw new NullPointerException("The distinct keys must not be null.");
    }

    this.keys = keys;
}
/**
 * Creates a default cross that pairs each element of the first input with each
 * element of the second input into a {@code Tuple2}.
 *
 * @param input1 The first input data set.
 * @param input2 The second input data set.
 * @throws NullPointerException If either input is null.
 */
public DefaultCross(DataSet<I1> input1, DataSet<I2> input2) {
    // Fail fast with a clear message. The original performed its null check AFTER
    // the super(...) call, but that call already dereferences input1/input2 via
    // getType(), so the explicit check could never execute for a null input.
    super(java.util.Objects.requireNonNull(input1, "input1 must not be null"),
        java.util.Objects.requireNonNull(input2, "input2 must not be null"),
        (CrossFunction<I1, I2, Tuple2<I1, I2>>) new DefaultCrossFunction<I1, I2>(),
        new TupleTypeInfo<Tuple2<I1, I2>>(input1.getType(), input2.getType()));

    this.input1 = input1;
    this.input2 = input2;
}
/**
 * Creates a new delta iteration over the given initial solution set and workset.
 *
 * @param context The execution environment in which the iteration runs.
 * @param type The type information of the solution set elements.
 *        NOTE(review): this parameter is currently unused — the type is taken
 *        from {@code solutionSet.getType()} instead; confirm whether it can be relied on.
 * @param solutionSet The initial solution set.
 * @param workset The initial workset (feedback data set).
 * @param keys The keys on which the solution set is indexed.
 * @param maxIterations The maximum number of iterations to run.
 */
DeltaIteration(ExecutionEnvironment context, TypeInformation<ST> type, DataSet<ST> solutionSet, DataSet<WT> workset, Keys<ST> keys, int maxIterations) {
    this.keys = keys;
    this.maxIterations = maxIterations;
    this.initialSolutionSet = solutionSet;
    this.initialWorkset = workset;

    // The placeholders stand in for the iteration's evolving state when the
    // user composes the step function.
    this.solutionSetPlaceholder = new SolutionSetPlaceHolder<ST>(context, solutionSet.getType(), this);
    this.worksetPlaceholder = new WorksetPlaceHolder<WT>(context, workset.getType());
}
/**
 * Creates a default join that pairs each pair of joining elements into a
 * {@code Tuple2}.
 *
 * @param input1 The first input data set.
 * @param input2 The second input data set.
 * @param keys1 The join keys of the first input.
 * @param keys2 The join keys of the second input.
 * @param hint A hint telling the system how to execute the join.
 * @throws NullPointerException If either input is null.
 */
protected DefaultJoin(DataSet<I1> input1, DataSet<I2> input2, Keys<I1> keys1, Keys<I2> keys2, JoinHint hint) {
    // Validate the inputs inline so a null input fails with a clear message
    // instead of an unexplained NPE from getType() inside the super arguments.
    super(java.util.Objects.requireNonNull(input1, "input1 must not be null"),
        java.util.Objects.requireNonNull(input2, "input2 must not be null"),
        keys1, keys2,
        (JoinFunction<I1, I2, Tuple2<I1, I2>>) new DefaultJoinFunction<I1, I2>(),
        new TupleTypeInfo<Tuple2<I1, I2>>(input1.getType(), input2.getType()),
        hint);
}
/**
 * Creates a filter operator applying the given predicate to the input.
 *
 * @param input The data set to be filtered.
 * @param function The user-defined filter function.
 */
public FilterOperator(DataSet<T> input, FilterFunction<T> function) {
    // A filter never changes the element type, so the output type equals the input type.
    super(input, input.getType());

    this.function = function;
    // Pick up any semantic annotations declared on the UDF class.
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Creates a reduce operator that reduces the entire (non-grouped) input data set.
 * This is the case for a reduce-all case (in contrast to the reduce-per-group case).
 *
 * @param input The input data set to be reduced.
 * @param function The user-defined reduce function applied to the whole input.
 */
public ReduceOperator(DataSet<IN> input, ReduceFunction<IN> function) {
    // A reduce preserves the element type, so the output type is the input type.
    super(input, input.getType());

    this.function = function;
    // null grouper marks this as the reduce-all case.
    this.grouper = null;
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Closes the delta iteration; this marks the end of the iteration's step function.
 *
 * @param solutionSetDelta The delta for the solution set, merged into the solution
 *        set at the end of each iteration.
 * @param newWorkset The new workset (feedback data set) fed back into the next iteration.
 * @return The DataSet representing the iteration's result once it has terminated.
 *
 * @see DataSet#iterateDelta(DataSet, int, int...)
 */
public DataSet<ST> closeWith(DataSet<ST> solutionSetDelta, DataSet<WT> newWorkset) {
    final ExecutionEnvironment env = initialSolutionSet.getExecutionEnvironment();
    final TypeInformation<ST> solutionType = initialSolutionSet.getType();
    final TypeInformation<WT> worksetType = initialWorkset.getType();

    return new DeltaIterationResultSet<ST, WT>(env, solutionType, worksetType,
        this, solutionSetDelta, newWorkset, keys, maxIterations);
}
/**
 * Creates a sorted grouping that additionally sorts the elements within each
 * group on the given tuple field.
 *
 * @param set The underlying data set.
 * @param keys The grouping keys.
 * @param field The tuple field position on which groups are sorted.
 * @param order The sort order for that field.
 * @throws InvalidProgramException If the data set is not of a tuple type.
 * @throws IllegalArgumentException If the field position is out of tuple bounds.
 */
public SortedGrouping(DataSet<T> set, Keys<T> keys, int field, Order order) {
    super(set, keys);

    if (!dataSet.getType().isTupleType()) {
        throw new InvalidProgramException("Specifying order keys via field positions is only valid for tuple data types");
    }
    // Reject negative positions as well; the original only checked the upper bound,
    // letting a negative field slip past validation.
    if (field < 0 || field >= dataSet.getType().getArity()) {
        throw new IllegalArgumentException("Order key out of tuple bounds.");
    }

    this.groupSortKeyPositions = new int[] { field };
    this.groupSortOrders = new Order[] { order };
}
/**
 * Creates a flatMap operator applying the given function to the input.
 *
 * @param input The data set to be transformed.
 * @param function The user-defined flatMap function.
 */
public FlatMapOperator(DataSet<IN> input, FlatMapFunction<IN, OUT> function) {
    // The output element type is extracted from the UDF's generic signature.
    super(input, TypeExtractor.getFlatMapReturnTypes(function, input.getType()));

    this.function = function;
    // Pick up any semantic annotations declared on the UDF class.
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Creates a map operator applying the given function to the input.
 *
 * @param input The data set to be transformed.
 * @param function The user-defined map function.
 */
public MapOperator(DataSet<IN> input, MapFunction<IN, OUT> function) {
    // The output element type is extracted from the UDF's generic signature.
    super(input, TypeExtractor.getMapReturnTypes(function, input.getType()));

    this.function = function;
    // Pick up any semantic annotations declared on the UDF class.
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Constructor for a non-grouped reduce (all reduce).
 *
 * @param input The input data set to the groupReduce function.
 * @param function The user-defined GroupReduce function.
 */
public ReduceGroupOperator(DataSet<IN> input, GroupReduceFunction<IN, OUT> function) {
    super(input, TypeExtractor.getGroupReduceReturnTypes(function, input.getType()));

    this.function = function;
    // null grouper marks this as the reduce-all case.
    this.grouper = null;

    checkCombinability();
    // Consistency fix: the grouped constructor (and the other UDF operators) extract
    // semantic annotations from the UDF class; this constructor previously did not,
    // silently dropping them for the all-reduce case.
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Creates a reduce operator that reduces the input group-wise.
 *
 * @param input The grouped input to be reduced per group.
 * @param function The user-defined reduce function.
 */
public ReduceOperator(Grouping<IN> input, ReduceFunction<IN> function) {
    // A reduce preserves the element type, so the output type is the input type.
    super(input.getDataSet(), input.getDataSet().getType());

    this.grouper = input;
    this.function = function;
    // Pick up any semantic annotations declared on the UDF class.
    extractSemanticAnnotationsFromUdf(function.getClass());
}
/**
 * Continues a CoGroup transformation and defines a {@link KeySelector} function for the first co-grouped {@link DataSet}.<br/>
 * The KeySelector function is called for each element of the first DataSet and extracts a single
 * key value on which the DataSet is grouped.<br/>
 *
 * @param keyExtractor The KeySelector function which extracts the key values from the DataSet on which it is grouped.
 * @return An incomplete CoGroup transformation.
 *         Call {@link CoGroupOperatorSetsPredicate#equalTo()} to continue the CoGroup.
 *
 * @see KeySelector
 * @see DataSet
 */
public <K> CoGroupOperatorSetsPredicate where(KeySelector<I1, K> keyExtractor) {
    return new CoGroupOperatorSetsPredicate(new Keys.SelectorFunctionKeys<I1, K>(keyExtractor, input1.getType()));
}
/**
 * Continues a CoGroup transformation.<br/>
 * Defines the {@link Tuple} fields of the first co-grouped {@link DataSet} that should be used as grouping keys.<br/>
 * <b>Note: Fields can only be selected as grouping keys on Tuple DataSets.</b><br/>
 *
 * @param fields The indexes of the Tuple fields of the first co-grouped DataSets that should be used as keys.
 * @return An incomplete CoGroup transformation.
 *         Call {@link CoGroupOperatorSetsPredicate#equalTo()} to continue the CoGroup.
 *
 * @see Tuple
 * @see DataSet
 */
public CoGroupOperatorSetsPredicate where(int... fields) {
    final Keys.FieldPositionKeys<I1> groupingKeys =
        new Keys.FieldPositionKeys<I1>(fields, input1.getType());
    return new CoGroupOperatorSetsPredicate(groupingKeys);
}
/**
 * Continues a Join transformation and defines a {@link KeySelector} function for the first join {@link DataSet}.<br/>
 * The KeySelector function is called for each element of the first DataSet and extracts a single
 * key value on which the DataSet is joined.<br/>
 *
 * @param keySelector The KeySelector function which extracts the key values from the DataSet on which it is joined.
 * @return An incomplete Join transformation.
 *         Call {@link JoinOperatorSetsPredicate#equalTo(int...)} or {@link JoinOperatorSetsPredicate#equalTo(KeySelector)}
 *         to continue the Join.
 *
 * @see KeySelector
 * @see DataSet
 */
public <K extends Comparable<K>> JoinOperatorSetsPredicate where(KeySelector<I1, K> keySelector) {
    final Keys.SelectorFunctionKeys<I1, K> joinKeys =
        new Keys.SelectorFunctionKeys<I1, K>(keySelector, input1.getType());
    return new JoinOperatorSetsPredicate(joinKeys);
}
/**
 * Constructor for a grouped reduce.
 *
 * @param input The grouped input to be processed group-wise by the groupReduce function.
 * @param function The user-defined GroupReduce function.
 * @throws NullPointerException If the grouping is null.
 */
public ReduceGroupOperator(Grouping<IN> input, GroupReduceFunction<IN, OUT> function) {
    // The original guarded the first super argument with 'input != null ? ... : null',
    // but then dereferenced 'input' unconditionally in the second argument, so a null
    // grouping still failed with an unexplained NPE. Fail fast with a clear message.
    super(java.util.Objects.requireNonNull(input, "The grouping must not be null.").getDataSet(),
        TypeExtractor.getGroupReduceReturnTypes(function, input.getDataSet().getType()));

    this.function = function;
    this.grouper = input;

    checkCombinability();
    extractSemanticAnnotationsFromUdf(function.getClass());
}