/**
 * Repartitions the given grouped stream by this stream's grouping fields so that
 * every tuple with the same group key ends up on the same partition, then rewraps
 * the result as a {@code GroupedStream} keyed on those same fields.
 */
@Override
public IAggregatableStream aggPartition(GroupedStream s) {
    Stream partitioned = s._stream.partitionBy(_groupFields);
    return new GroupedStream(partitioned, _groupFields);
}
/**
 * Queries {@code state} for each tuple. The stream is first partitioned by the
 * group fields so each query is routed to the partition owning its group key.
 */
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    Stream keyed = _stream.partitionBy(_groupFields);
    return keyed.stateQuery(state, inputFields, function, functionFields);
}
/**
 * Combines several grouped streams through a single grouped multi-reducer.
 * Each input stream is repartitioned by its own group fields, and the reducer's
 * per-stream input is the union of those group fields with the caller-supplied
 * input fields for that stream.
 */
public Stream multiReduce(List<Fields> inputFields, List<GroupedStream> groupedStreams, GroupedMultiReducer function, Fields outputFields) {
    int count = groupedStreams.size();
    List<Fields> combinedInputs = new ArrayList<>(count);
    List<Stream> partitioned = new ArrayList<>(count);
    List<Fields> groupFieldsPerStream = new ArrayList<>(count);
    for (int idx = 0; idx < count; idx++) {
        GroupedStream grouped = groupedStreams.get(idx);
        Fields keys = grouped.getGroupFields();
        groupFieldsPerStream.add(keys);
        partitioned.add(grouped.toStream().partitionBy(keys));
        combinedInputs.add(TridentUtils.fieldsUnion(keys, inputFields.get(idx)));
    }
    GroupedMultiReducerExecutor executor =
            new GroupedMultiReducerExecutor(function, groupFieldsPerStream, inputFields);
    return multiReduce(combinedInputs, partitioned, executor, outputFields);
}
/**
 * Persists a per-group reduction into the given state. The stream is partitioned
 * by the group fields, the updater receives the group fields plus the input
 * fields, and the resulting state emits the group fields followed by the
 * function's output fields.
 */
public TridentState persistentAggregate(StateSpec spec, Fields inputFields, ReducerAggregator agg, Fields functionFields) {
    Fields keyedInputs = TridentUtils.fieldsUnion(_groupFields, inputFields);
    Fields emitted = TridentUtils.fieldsConcat(_groupFields, functionFields);
    MapReducerAggStateUpdater updater = new MapReducerAggStateUpdater(agg, _groupFields, inputFields);
    return _stream.partitionBy(_groupFields).partitionPersist(spec, keyedInputs, updater, emitted);
}
/**
 * Runs a state query against {@code state} after routing tuples to the partition
 * that owns their group key (via {@code partitionBy} on the group fields).
 */
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    return _stream
            .partitionBy(_groupFields)
            .stateQuery(state, inputFields, function, functionFields);
}
/**
 * Produces a grouped stream whose underlying stream has been repartitioned on
 * this instance's group fields, keeping the grouping key unchanged.
 */
@Override
public IAggregatableStream aggPartition(GroupedStream s) {
    return new GroupedStream(
            s._stream.partitionBy(_groupFields),
            _groupFields);
}
/**
 * Rewraps {@code s} as a grouped stream after partitioning its wrapped stream by
 * the group fields, so aggregation sees all tuples of a key in one partition.
 */
@Override
public IAggregatableStream aggPartition(GroupedStream s) {
    Stream rerouted = s._stream.partitionBy(_groupFields);
    GroupedStream regrouped = new GroupedStream(rerouted, _groupFields);
    return regrouped;
}
/**
 * Issues a grouped state query: tuples are first partitioned by the group fields
 * and then used to query {@code state} with the supplied query function.
 */
public Stream stateQuery(TridentState state, Fields inputFields, QueryFunction function, Fields functionFields) {
    // Route each tuple to the partition responsible for its group key before querying.
    Stream routed = _stream.partitionBy(_groupFields);
    return routed.stateQuery(state, inputFields, function, functionFields);
}
/**
 * Combines several grouped streams through a single grouped multi-reducer.
 * Each input stream is repartitioned by its own group fields, and the reducer's
 * per-stream input is the union of those group fields with the caller-supplied
 * input fields for that stream.
 */
public Stream multiReduce(List<Fields> inputFields, List<GroupedStream> groupedStreams, GroupedMultiReducer function, Fields outputFields) {
    // Diamond operator for consistency with the other multiReduce variants in this file.
    List<Fields> fullInputFields = new ArrayList<>();
    List<Stream> streams = new ArrayList<>();
    List<Fields> fullGroupFields = new ArrayList<>();
    for (int i = 0; i < groupedStreams.size(); i++) {
        GroupedStream gs = groupedStreams.get(i);
        Fields groupFields = gs.getGroupFields();
        fullGroupFields.add(groupFields);
        // Partition each stream by its own group fields so each key is co-located.
        streams.add(gs.toStream().partitionBy(groupFields));
        fullInputFields.add(TridentUtils.fieldsUnion(groupFields, inputFields.get(i)));
    }
    return multiReduce(fullInputFields, streams,
            new GroupedMultiReducerExecutor(function, fullGroupFields, inputFields), outputFields);
}
/**
 * Multi-reduces a set of grouped streams with a shared grouped reducer. For each
 * stream: record its group fields, repartition on them, and widen its input
 * field list to include the group fields; then delegate to the stream-based
 * multiReduce overload with a {@code GroupedMultiReducerExecutor}.
 */
public Stream multiReduce(List<Fields> inputFields, List<GroupedStream> groupedStreams, GroupedMultiReducer function, Fields outputFields) {
    List<Fields> unionFields = new ArrayList<>();
    List<Stream> repartitioned = new ArrayList<>();
    List<Fields> allGroupFields = new ArrayList<>();
    int position = 0;
    for (GroupedStream grouped : groupedStreams) {
        Fields groupKeys = grouped.getGroupFields();
        allGroupFields.add(groupKeys);
        repartitioned.add(grouped.toStream().partitionBy(groupKeys));
        unionFields.add(TridentUtils.fieldsUnion(groupKeys, inputFields.get(position)));
        position++;
    }
    return multiReduce(
            unionFields,
            repartitioned,
            new GroupedMultiReducerExecutor(function, allGroupFields, inputFields),
            outputFields);
}
/**
 * Persists the result of a reducer aggregation keyed by the group fields. Input
 * to the state updater is group fields + input fields; the state's output is
 * group fields + function fields.
 */
public TridentState persistentAggregate(StateSpec spec, Fields inputFields, ReducerAggregator agg, Fields functionFields) {
    return _stream
            .partitionBy(_groupFields)
            .partitionPersist(
                    spec,
                    TridentUtils.fieldsUnion(_groupFields, inputFields),
                    new MapReducerAggStateUpdater(agg, _groupFields, inputFields),
                    TridentUtils.fieldsConcat(_groupFields, functionFields));
}
/**
 * Partition-persists a per-group reducer aggregation into the given state spec.
 * Tuples are partitioned on the group fields; the updater consumes the group
 * fields together with the input fields and the state emits the group fields
 * followed by the function's output fields.
 */
public TridentState persistentAggregate(StateSpec spec, Fields inputFields, ReducerAggregator agg, Fields functionFields) {
    Stream keyed = _stream.partitionBy(_groupFields);
    Fields updaterInputs = TridentUtils.fieldsUnion(_groupFields, inputFields);
    Fields stateOutputs = TridentUtils.fieldsConcat(_groupFields, functionFields);
    return keyed.partitionPersist(
            spec,
            updaterInputs,
            new MapReducerAggStateUpdater(agg, _groupFields, inputFields),
            stateOutputs);
}
public static StormTopology buildTopology(LocalDRPC drpc) throws IOException { FakeTweetsBatchSpout spout = new FakeTweetsBatchSpout(); TridentTopology topology = new TridentTopology(); topology.newStream("spout", spout) .parallelismHint(2) .partitionBy(new Fields("actor")) // .shuffle() .each(new Fields("actor", "text"), new PerActorTweetsFilter("dave")).parallelismHint(5) .each(new Fields("actor", "text"), new Utils.PrintFilter()); return topology.build(); }
// Repartition the stream on the fields named by the connector's grouping reference.
// NOTE(review): assumes parseFieldsList turns the grouping ref into a field-name
// list accepted by Fields — TODO confirm parseFieldsList's contract.
stream = stream.partitionBy( new Fields(parseFieldsList(connector.getGroupingRef())));
// NOTE(review): continuation of a stream pipeline that begins above this chunk.
// Two-stage aggregation: partition by "location" and aggregate per partition,
// then repartition and aggregate again into "count_map", finally filtering with
// HasSpain. Presumably StringCounter counts occurrences and HasSpain checks the
// map for a Spain entry — TODO confirm against those classes.
.partitionBy(new Fields("location")) .partitionAggregate(new Fields("location"), new StringCounter(), new Fields("aggregated_result")) .parallelismHint(3) .partitionBy(new Fields("location")) .partitionAggregate(new Fields("location"), new StringCounter(), new Fields("count_map")) .each(new Fields("count_map"), new HasSpain())
// NOTE(review): fragment — the call these arguments belong to starts above this
// chunk, and the partitionPersist argument list continues below it.
// Emits a "domain" field via ExtractDomain, partitions on (domain, user), and
// persists into an in-memory map state.
new ExtractDomain(), new Fields("domain")) .partitionBy(new Fields("domain", "user")) .partitionPersist( new MemoryMapState.Factory(),
// Build a stream partitioned by "actor" so each actor's tweets are handled by a
// fixed PereTweetsFilter task (parallelism 5), then print the surviving tuples.
// NOTE(review): assumes "topology" and "spout" are defined in the enclosing
// method, which is outside this chunk.
topology.newStream("parallel_and_partitioned", spout).partitionBy(new Fields("actor")) .each(new Fields("text", "actor"), new PereTweetsFilter()).parallelismHint(5) .each(new Fields("text", "actor"), new Utils.PrintFilter());