/**
 * Combines multiple input streams into one output stream by applying the given
 * {@link MultiReducer} across them.
 *
 * @param inputFields the fields consumed from each corresponding input stream
 * @param streams the streams to combine
 * @param function the multi-reducer applied across the streams
 * @param outputFields the fields emitted by the resulting stream
 * @return the combined stream
 */
public Stream multiReduce(List<Fields> inputFields, List<Stream> streams, MultiReducer function, Fields outputFields) {
    // Build a readable node name out of every named parent stream.
    List<String> parentNames = new ArrayList<>();
    for (Stream parent : streams) {
        if (parent._name != null) {
            parentNames.add(parent._name);
        }
    }
    String nodeName = Utils.join(parentNames, "-");
    Node reducerNode = new ProcessorNode(
            getUniqueStreamId(),
            nodeName,
            outputFields,
            outputFields,
            new MultiReducerProcessor(inputFields, function));
    return addSourcedNode(streams, reducerNode);
}
/**
 * Queries the given {@link TridentState} for each tuple in this stream, appending
 * the query results as {@code functionFields} to the stream's output fields.
 *
 * @param state the state to query; must have been created on this topology
 * @param inputFields the fields passed to the query function
 * @param function the query function executed against the state
 * @param functionFields the fields emitted by the query function
 * @return a stream whose output is this stream's fields plus {@code functionFields}
 * @throws IllegalStateException if the state is not registered with this topology
 */
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = state._node.stateInfo.id;
    Node n = new ProcessorNode(_topology.getUniqueStreamId(),
                               _name,
                               TridentUtils.fieldsConcat(getOutputFields(), functionFields),
                               functionFields,
                               new StateQueryProcessor(stateId, inputFields, function));
    // Fail with a clear message instead of a bare NPE when the state was created on a
    // different topology (and so was never registered in _colocate).
    if (_topology._colocate.get(stateId) == null) {
        throw new IllegalStateException("Cannot query state '" + stateId + "': it is not part of this topology");
    }
    // Colocate the query node with the state it reads.
    _topology._colocate.get(stateId).add(n);
    return _topology.addSourcedNode(this, n);
}
/**
 * Runs the given aggregator independently over each partition of this stream.
 * The resulting stream contains exactly the aggregator's output fields.
 *
 * @param inputFields fields selected from each tuple and fed to the aggregator
 * @param agg the aggregator to run per partition
 * @param functionFields fields emitted by the aggregator
 * @return the aggregated stream
 */
@Override
public Stream partitionAggregate(Fields inputFields, Aggregator agg, Fields functionFields) {
    projectionValidation(inputFields);
    ProcessorNode aggregateNode = new ProcessorNode(
            _topology.getUniqueStreamId(),
            _name,
            functionFields,
            functionFields,
            new AggregateProcessor(inputFields, agg));
    return _topology.addSourcedNode(this, aggregateNode);
}
/**
 * Persists tuples of this stream into the state described by {@code stateSpec} and
 * returns a {@link TridentState} handle for later querying.
 *
 * @param stateSpec specification of the state implementation to persist into
 * @param inputFields fields handed to the updater for each tuple
 * @param updater the state updater invoked per partition
 * @param functionFields fields emitted by the persist node
 * @return a handle to the persisted state
 */
public TridentState partitionPersist(StateSpec stateSpec, Fields inputFields, StateUpdater updater, Fields functionFields) {
    projectionValidation(inputFields);
    String stateId = _topology.getUniqueStateId();
    ProcessorNode persistNode = new ProcessorNode(
            _topology.getUniqueStreamId(),
            _name,
            functionFields,
            functionFields,
            new PartitionPersistProcessor(stateId, inputFields, updater));
    // Persist nodes take part in batch commit ordering.
    persistNode.committer = true;
    persistNode.stateInfo = new NodeStateInfo(stateId, stateSpec);
    return _topology.addSourcedStateNode(this, persistNode);
}
/**
 * Applies the given function to every tuple, appending the function's output
 * fields to the stream's existing fields.
 *
 * @param inputFields fields selected from each tuple and passed to the function
 * @param function the function run once per tuple
 * @param functionFields fields the function appends to each tuple
 * @return the new stream carrying the original fields plus {@code functionFields}
 */
@Override
public Stream each(Fields inputFields, Function function, Fields functionFields) {
    projectionValidation(inputFields);
    Fields allOutputFields = TridentUtils.fieldsConcat(getOutputFields(), functionFields);
    return _topology.addSourcedNode(this,
            new ProcessorNode(
                    _topology.getUniqueStreamId(),
                    _name,
                    allOutputFields,
                    functionFields,
                    new EachProcessor(inputFields, function)));
}
/**
 * Filters out fields from a stream, resulting in a Stream containing only the fields
 * specified by {@code keepFields}.
 *
 * For example, if you had a Stream {@code mystream} containing the fields
 * {@code ["a", "b", "c", "d"]}, calling
 *
 * ```java
 * mystream.project(new Fields("b", "d"))
 * ```
 *
 * would produce a stream containing only the fields {@code ["b", "d"]}.
 *
 * @param keepFields The fields in the Stream to keep
 * @return a new Stream containing only the kept fields
 */
public Stream project(Fields keepFields) {
    projectionValidation(keepFields);
    return _topology.addSourcedNode(this, new ProcessorNode(_topology.getUniqueStreamId(), _name, keepFields, new Fields(), new ProjectedProcessor(keepFields)));
}
/**
 * Transforms each value of this stream with the given mapping function, replacing the
 * old output fields with {@code outputFields} (a T -> V conversion).
 *
 * @param function a mapping function to be applied to each value in this stream.
 * @param outputFields new output fields
 * @return the new stream
 */
public Stream map(MapFunction function, Fields outputFields) {
    projectionValidation(getOutputFields());
    MapProcessor processor = new MapProcessor(getOutputFields(), new MapFunctionExecutor(function));
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, outputFields, outputFields, processor));
}
/**
 * Replaces each value of this stream with the values produced by applying the provided
 * mapping function to it — a one-to-many transformation whose results are flattened into
 * a new stream. The old output fields are replaced with {@code outputFields}
 * (a T -> V conversion).
 *
 * @param function a mapping function applied to each value, producing new values.
 * @param outputFields new output fields
 * @return the new stream
 */
public Stream flatMap(FlatMapFunction function, Fields outputFields) {
    projectionValidation(getOutputFields());
    MapProcessor processor = new MapProcessor(getOutputFields(), new FlatMapFunctionExecutor(function));
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, outputFields, outputFields, processor));
}
/**
 * Transforms each value of this stream with the given mapping function; the output
 * fields are unchanged.
 *
 * @param function a mapping function to be applied to each value in this stream.
 * @return the new stream
 */
public Stream map(MapFunction function) {
    projectionValidation(getOutputFields());
    Fields currentFields = getOutputFields();
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, currentFields, currentFields,
                    new MapProcessor(currentFields, new MapFunctionExecutor(function))));
}
/**
 * Replaces each value of this stream with the values produced by applying the provided
 * mapping function to it — a one-to-many transformation whose results are flattened into
 * a new stream. The output fields are unchanged.
 *
 * @param function a mapping function applied to each value, producing new values.
 * @return the new stream
 */
public Stream flatMap(FlatMapFunction function) {
    projectionValidation(getOutputFields());
    Fields currentFields = getOutputFields();
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, currentFields, currentFields,
                    new MapProcessor(currentFields, new FlatMapFunctionExecutor(function))));
}
/**
 * Performs the given action on every trident tuple as it flows past, leaving the
 * stream's tuples and fields untouched. Mostly useful for debugging — inspecting
 * tuples at a given point in a pipeline.
 *
 * @param action the action to perform on each trident tuple as it is consumed
 * @return the new stream
 */
public Stream peek(Consumer action) {
    projectionValidation(getOutputFields());
    Fields currentFields = getOutputFields();
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, currentFields, currentFields,
                    new MapProcessor(currentFields, new ConsumerExecutor(action))));
}
/**
 * Builds a pass-through node: an "each" wrapping a true-filter that keeps every
 * tuple, emitting {@code allOutputFields} unchanged.
 */
private Node makeIdentityNode(Fields allOutputFields) {
    FilterExecutor keepEverything = new FilterExecutor(new TrueFilter());
    return new ProcessorNode(getUniqueStreamId(), null, allOutputFields, new Fields(),
            new EachProcessor(new Fields(), keepEverything));
}
/**
 * Internal helper that applies a windowed aggregation to this stream.
 *
 * Wires three pieces: (1) a window processor node that runs {@code aggregator} over
 * {@code windowConfig}-defined windows, emitting {@code functionFields} plus a trigger
 * field; (2) a projection that strips the trigger field from the stream returned to
 * the caller; (3) a partitionPersist of the trigger field so triggered results are
 * cleaned out of the window store after the batch commits.
 *
 * @param windowConfig the window definition; validated before use
 * @param windowStoreFactory store for triggers (and for tuples too when
 *        {@code storeTuplesInStore} is true)
 * @param inputFields fields fed to the aggregator
 * @param aggregator the aggregation run per window
 * @param functionFields fields emitted by the aggregator
 * @param storeTuplesInStore whether incoming tuples are also kept in the store
 * @return the windowed stream, projected down to {@code functionFields}
 */
private Stream window(WindowConfig windowConfig, WindowsStoreFactory windowStoreFactory, Fields inputFields, Aggregator aggregator, Fields functionFields, boolean storeTuplesInStore) {
    projectionValidation(inputFields);
    windowConfig.validate();
    Fields fields = addTriggerField(functionFields);
    // when storeTuplesInStore is false then the given windowStoreFactory is only used to store triggers and
    // that store is passed to WindowStateUpdater to remove them after committing the batch.
    Stream stream = _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(),
                              _name,
                              fields,
                              fields,
                              new WindowTridentProcessor(windowConfig, _topology.getUniqueWindowId(), windowStoreFactory,
                                                         inputFields, aggregator, storeTuplesInStore)));
    Stream effectiveStream = stream.project(functionFields);
    // create StateUpdater with the given windowStoreFactory to remove triggered aggregation results from store
    // when they are successfully processed.
    StateFactory stateFactory = new WindowsStateFactory();
    StateUpdater stateUpdater = new WindowsStateUpdater(windowStoreFactory);
    stream.partitionPersist(stateFactory, new Fields(WindowTridentProcessor.TRIGGER_FIELD_NAME), stateUpdater, new Fields());
    return effectiveStream;
}
/**
 * Joins several streams into a single stream by running the supplied
 * {@link MultiReducer} over them.
 *
 * @param inputFields the fields consumed from each corresponding input stream
 * @param streams the streams to combine
 * @param function the multi-reducer applied across the streams
 * @param outputFields the fields emitted by the resulting stream
 * @return the combined stream
 */
public Stream multiReduce(List<Fields> inputFields, List<Stream> streams, MultiReducer function, Fields outputFields) {
    // Collect the names of all named parents to label the new node.
    List<String> labels = new ArrayList<>();
    for (int i = 0; i < streams.size(); i++) {
        String label = streams.get(i)._name;
        if (label != null) {
            labels.add(label);
        }
    }
    return addSourcedNode(streams,
            new ProcessorNode(getUniqueStreamId(), Utils.join(labels, "-"), outputFields, outputFields,
                    new MultiReducerProcessor(inputFields, function)));
}
/**
 * Queries the supplied state for every tuple in this stream and appends the query's
 * result fields to the stream's output.
 *
 * @param state the state to query
 * @param inputFields fields passed to the query function
 * @param function the query function executed against the state
 * @param functionFields fields emitted by the query function
 * @return a stream carrying this stream's fields plus {@code functionFields}
 */
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    projectionValidation(inputFields);
    String queriedStateId = state._node.stateInfo.id;
    Fields combinedFields = TridentUtils.fieldsConcat(getOutputFields(), functionFields);
    Node queryNode = new ProcessorNode(_topology.getUniqueStreamId(), _name, combinedFields, functionFields,
            new StateQueryProcessor(queriedStateId, inputFields, function));
    // Colocate the query node with the state it reads.
    _topology._colocate.get(queriedStateId).add(queryNode);
    return _topology.addSourcedNode(this, queryNode);
}
/**
 * Aggregates each partition of this stream independently with the given aggregator;
 * the resulting stream contains only the aggregator's output fields.
 *
 * @param inputFields fields fed to the aggregator
 * @param agg the aggregator run per partition
 * @param functionFields fields emitted by the aggregator
 * @return the aggregated stream
 */
@Override
public Stream partitionAggregate(Fields inputFields, Aggregator agg, Fields functionFields) {
    projectionValidation(inputFields);
    AggregateProcessor processor = new AggregateProcessor(inputFields, agg);
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, functionFields, functionFields, processor));
}
/**
 * Applies {@code function} to each tuple using {@code inputFields} as its input,
 * appending the emitted {@code functionFields} to the stream's output fields.
 *
 * @param inputFields fields selected from each tuple and passed to the function
 * @param function the function run once per tuple
 * @param functionFields fields the function appends to each tuple
 * @return the new stream carrying the original fields plus {@code functionFields}
 */
@Override
public Stream each(Fields inputFields, Function function, Fields functionFields) {
    projectionValidation(inputFields);
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name,
                    TridentUtils.fieldsConcat(getOutputFields(), functionFields), functionFields,
                    new EachProcessor(inputFields, function)));
}
// creates brand new tuples with brand new fields
/**
 * Persists tuples of this stream into the state described by {@code stateSpec} and
 * returns a {@link TridentState} handle for later querying.
 *
 * @param stateSpec specification of the state implementation to persist into
 * @param inputFields fields handed to the updater for each tuple
 * @param updater the state updater invoked per partition
 * @param functionFields fields emitted by the persist node
 * @return a handle to the persisted state
 */
public TridentState partitionPersist(StateSpec stateSpec, Fields inputFields, StateUpdater updater, Fields functionFields) {
    projectionValidation(inputFields);
    String id = _topology.getUniqueStateId();
    ProcessorNode n = new ProcessorNode(_topology.getUniqueStreamId(), _name, functionFields, functionFields,
            new PartitionPersistProcessor(id, inputFields, updater));
    // Persist nodes take part in batch commit ordering.
    n.committer = true;
    n.stateInfo = new NodeStateInfo(id, stateSpec);
    return _topology.addSourcedStateNode(this, n);
}
/**
 * Returns a stream consisting of the results of replacing each value of this stream
 * with the contents produced by applying the provided mapping function to each value.
 * This has the effect of applying a one-to-many transformation to the values of the
 * stream, and then flattening the resulting elements into a new stream. The output
 * fields are unchanged.
 *
 * @param function a mapping function to be applied to each value in this stream which produces new values.
 * @return the new stream
 */
public Stream flatMap(FlatMapFunction function) {
    projectionValidation(getOutputFields());
    return _topology.addSourcedNode(this,
            new ProcessorNode(_topology.getUniqueStreamId(), _name, getOutputFields(), getOutputFields(),
                    new MapProcessor(getOutputFields(), new FlatMapFunctionExecutor(function))));
}
// Builds an identity pass-through node: an EachProcessor over no input fields with a
// TrueFilter, so every tuple flows through unchanged carrying allOutputFields.
private Node makeIdentityNode(Fields allOutputFields) {
    return new ProcessorNode(getUniqueStreamId(), null, allOutputFields, new Fields(),
            new EachProcessor(new Fields(), new FilterExecutor(new TrueFilter())));
}