/**
 * Wires up this windowing processor before any batch is processed: resolves the
 * single parent tuple factory, creates the window store and the window manager.
 * Order matters below: {@code windowTaskId} must be built before it is used to
 * derive the trigger-inprocess id and to construct the window manager.
 */
@Override public void prepare(Map<String, Object> topoConf, TopologyContext context, TridentContext tridentContext) {
    this.topologyContext = context;
    List<TridentTuple.Factory> parents = tridentContext.getParentTupleFactories();
    // Windowed aggregation is strictly single-input; fan-in is not supported.
    if (parents.size() != 1) {
        throw new RuntimeException("Aggregation related operation can only have one parent");
    }

    // Cache-size limit only applies to the store-backed window manager below.
    Long maxTuplesCacheSize = getWindowTuplesCacheSize(topoConf);

    this.tridentContext = tridentContext;
    collector = new FreshCollector(tridentContext);
    projection = new TridentTupleView.ProjectionFactory(parents.get(0), inputFields);

    windowStore = windowStoreFactory.create(topoConf);
    // Keys written by this task are namespaced by window id + task id so that
    // multiple tasks can share one backing store without colliding.
    windowTaskId = windowId + WindowsStore.KEY_SEPARATOR + topologyContext.getThisTaskId() + WindowsStore.KEY_SEPARATOR;
    windowTriggerInprocessId = getWindowTriggerInprocessIdPrefix(windowTaskId);

    // Store-backed manager persists tuples (bounded by maxTuplesCacheSize);
    // the in-memory variant keeps everything on-heap.
    tridentWindowManager = storeTuplesInStore
            ? new StoreBasedTridentWindowManager(windowConfig, windowTaskId, windowStore, aggregator, tridentContext.getDelegateCollector(), maxTuplesCacheSize, inputFields)
            : new InMemoryTridentWindowManager(windowConfig, windowTaskId, windowStore, aggregator, tridentContext.getDelegateCollector());

    tridentWindowManager.prepare();
}
/**
 * Projects the incoming tuple into this node's output shape and fans the
 * result out to every downstream receiver on this node's output stream.
 */
@Override
public void execute(ProcessorContext processorContext, String streamId, TridentTuple tuple) {
    TridentTuple projected = _factory.create(tuple);
    String outStream = _context.getOutStreamId();
    for (TupleReceiver receiver : _context.getReceivers()) {
        receiver.execute(processorContext, outStream, projected);
    }
}
/**
 * Prepares the multi-reducer: indexes parent streams, builds one projection
 * factory per parent, and hands all projections to the reducer.
 */
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, TridentContext tridentContext) {
    _context = tridentContext;

    // Map each parent stream name to its positional index so execute() can
    // pick the projection matching the stream a tuple arrived on.
    _streamToIndex = new HashMap<>();
    List<String> parentStreams = tridentContext.getParentStreams();
    for (int idx = 0; idx < parentStreams.size(); idx++) {
        _streamToIndex.put(parentStreams.get(idx), idx);
    }

    // One projection per parent, selecting that parent's configured fields.
    List<Factory> parents = tridentContext.getParentTupleFactories();
    _projectionFactories = new ProjectionFactory[_projectFields.size()];
    for (int idx = 0; idx < _projectionFactories.length; idx++) {
        _projectionFactories[idx] = new ProjectionFactory(parents.get(idx), _projectFields.get(idx));
    }

    _collector = new FreshCollector(tridentContext);
    _reducer.prepare(conf, new TridentMultiReducerContext((List) Arrays.asList(_projectionFactories)));
}
/**
 * Collector whose emitted tuples append this operation's output fields onto
 * the (single) parent tuple, preserving the upstream fields.
 */
public AppendCollector(TridentContext context) {
    _factory = new OperationOutputFactory(context.getParentTupleFactories().get(0), context.getSelfOutputFields());
    _triContext = context;
}
@Override public void startBatch(ProcessorContext processorContext) { // initialize state for batch processorContext.state[tridentContext.getStateIndex()] = new ArrayList<TridentTuple>(); }
/**
 * Prepares the projection processor. Projection is strictly single-input, so
 * exactly one upstream tuple factory must be present.
 */
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, TridentContext tridentContext) {
    if (tridentContext.getParentTupleFactories().size() != 1) {
        throw new RuntimeException("Projection processor can only have one parent");
    }
    _context = tridentContext;
    // The factory selects only _projectFields out of the parent's tuple.
    _factory = new ProjectionFactory(tridentContext.getParentTupleFactories().get(0), _projectFields);
}
/**
 * Forwards the error to the underlying delegate collector so the framework
 * can surface it; no local handling is done here.
 */
@Override
public void reportError(Throwable t) {
    _triContext.getDelegateCollector().reportError(t);
}
/** Propagates the flush signal downstream to every registered receiver. */
@Override
public void flush() {
    _context.getReceivers().forEach(TupleReceiver::flush);
}
/**
 * Collector whose emitted tuples are built fresh from this node's own output
 * fields only — no parent fields are carried through.
 */
public FreshCollector(TridentContext context) {
    _factory = new FreshOutputFactory(context.getSelfOutputFields());
    _triContext = context;
}
TridentContext triContext = new TridentContext( pn.selfOutFields, parentFactories,
/**
 * Initializes this processor's per-batch state slot with an empty tuple
 * buffer at the start of each batch.
 */
@Override
public void startBatch(ProcessorContext processorContext) {
    ArrayList<TridentTuple> collected = new ArrayList<>();
    processorContext.state[_context.getStateIndex()] = collected;
}
/**
 * Prepares the map-style processor: resolves the single parent factory,
 * builds the input projection, and initializes the wrapped function.
 */
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, TridentContext tridentContext) {
    List<TridentTuple.Factory> parentFactories = tridentContext.getParentTupleFactories();
    // Map operations consume exactly one upstream.
    if (parentFactories.size() != 1) {
        throw new RuntimeException("Map operation can only have one parent");
    }
    _context = tridentContext;
    // Projection narrows incoming tuples down to the function's input fields.
    _projection = new TridentTupleView.ProjectionFactory(parentFactories.get(0), _inputFields);
    _collector = new FreshCollector(tridentContext);
    _function.prepare(conf, new TridentOperationContext(context, _projection));
}
public AppendCollector(TridentContext context) { _triContext = context; _factory = new OperationOutputFactory(context.getParentTupleFactories().get(0), context.getSelfOutputFields()); }
/** Delegates error reporting to the underlying framework collector. */
@Override
public void reportError(Throwable t) {
    _triContext.getDelegateCollector().reportError(t);
}
/** Passes the flush signal on to every downstream receiver. */
@Override
public void flush() {
    _triContext.getReceivers().forEach(TupleReceiver::flush);
}
/**
 * Collector producing tuples composed solely of this node's declared output
 * fields (nothing inherited from the parent tuple).
 */
public FreshCollector(TridentContext context) {
    _triContext = context;
    _factory = new FreshOutputFactory(context.getSelfOutputFields());
}
TridentContext triContext = new TridentContext( pn.selfOutFields, parentFactories,
/**
 * Buffers the raw tuple and its projected view into this batch's state; the
 * actual aggregation over the buffered tuples happens when the batch ends.
 */
@Override
public void execute(ProcessorContext processorContext, String streamId, TridentTuple tuple) {
    BatchState batch = (BatchState) processorContext.state[_context.getStateIndex()];
    TridentTuple projected = _projection.create(tuple);
    batch.tuples.add(tuple);
    batch.args.add(projected);
}
/**
 * Wraps the emitted values in an output tuple and delivers it to every
 * downstream receiver on this node's output stream.
 */
@Override
public void emit(List<Object> values) {
    TridentTuple outTuple = _factory.create(values);
    String outStream = _triContext.getOutStreamId();
    for (TupleReceiver receiver : _triContext.getReceivers()) {
        receiver.execute(context, outStream, outTuple);
    }
}
/**
 * Prepares the each-style processor: validates the single parent, builds the
 * input projection and the append collector, and initializes the function.
 */
@Override
public void prepare(Map<String, Object> conf, TopologyContext context, TridentContext tridentContext) {
    List<Factory> parentFactories = tridentContext.getParentTupleFactories();
    // Each operations consume exactly one upstream.
    if (parentFactories.size() != 1) {
        throw new RuntimeException("Each operation can only have one parent");
    }
    _context = tridentContext;
    // Projection narrows incoming tuples to the function's declared inputs.
    _projection = new ProjectionFactory(parentFactories.get(0), _inputFields);
    // AppendCollector: emitted fields are appended onto the input tuple.
    _collector = new AppendCollector(tridentContext);
    _function.prepare(conf, new TridentOperationContext(context, _projection));
}