/**
 * Decides whether the caller should process {@code tuple} as a data tuple.
 *
 * <p>A tick tuple is never handled directly; instead it marks the current
 * batch for a forced flush and is filtered out.
 *
 * @param tuple the incoming tuple.
 * @return {@code false} for tick tuples (flush requested), {@code true} otherwise.
 */
public boolean shouldHandle(Tuple tuple) {
    boolean isTick = TupleUtils.isTick(tuple);
    if (isTick) {
        LOG.debug("TICK received! current batch status [{}/{}]", tupleBatch.size(), batchSize);
        forceFlush = true;
    }
    return !isTick;
}
/**
 * Processes one tuple: tick tuples trigger an emit of the current window
 * counts, any other tuple is counted and acknowledged.
 */
@Override
public void execute(Tuple tuple) {
    if (!TupleUtils.isTick(tuple)) {
        countObjAndAck(tuple);
        return;
    }
    LOG.debug("Received tick tuple, triggering emit of current window counts");
    emitCurrentWindowCounts();
}
/**
 * {@inheritDoc}
 *
 * <p>Routes tick tuples to {@code onTickTuple} and every other tuple to
 * {@code process}.
 *
 * @param tuple the tuple to process.
 */
@Override
public void execute(final Tuple tuple) {
    if (!TupleUtils.isTick(tuple)) {
        process(tuple);
        return;
    }
    onTickTuple(tuple);
}
/**
 * Template method (design pattern): a tick tuple triggers an emit of the
 * current rankings; any other tuple updates the rankings.
 */
@Override
public final void execute(Tuple tuple, BasicOutputCollector collector) {
    if (!TupleUtils.isTick(tuple)) {
        updateRankingsWithTuple(tuple);
        return;
    }
    getLogger().debug("Received tick tuple, triggering emit of current rankings");
    emitRankings(collector);
}
@Override public void execute(Tuple tuple) { if (TupleUtils.isTick(tuple)) { // if we have a tick tuple then lets see if enough time has passed since our last batch was processed if ((System.currentTimeMillis() / 1000 - lastBatchProcessTimeSeconds) >= batchIntervalInSec) { LOGGER.debug("Received tick tuple and reached batch interval, executing batch"); finishBatch(); } else { LOGGER.debug("Received tick tuple, but haven't reached batch interval, nothing to do"); } } else { // for a regular tuple we add it to the queue and then see if our queue size exceeds batch size this.queue.add(tuple); int queueSize = this.queue.size(); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Current queue size is " + queueSize + ", and batch size is " + batchSize); } if (queueSize >= batchSize) { LOGGER.debug("Queue Size is greater than or equal to batch size, executing batch"); finishBatch(); } } }
private void ack(Tuple tuple) throws SolrServerException, IOException { if (commitStgy == null) { collector.ack(tuple); } else { final boolean isTickTuple = TupleUtils.isTick(tuple); if (!isTickTuple) { // Don't ack tick tuples toCommitTuples.add(tuple); commitStgy.update(); } if (isTickTuple || commitStgy.commit()) { solrClient.commit(solrMapper.getCollection()); ackCommittedTuples(); } } }
/**
 * Test bolt: verifies tick-tuple delivery under simulated time.
 *
 * <p>Fails fast if simulated time already advanced past the tick interval
 * before the first tuple, or if more than three ticks arrive. Ticks are
 * counted and acked; the expected data tuple ("val") is acked, anything
 * else is captured for the test to inspect.
 */
@Override
public void execute(Tuple tuple) {
    LOG.info("GOT {} at time {}", tuple, Time.currentTimeMillis());
    boolean firstTuple = !receivedAnyTuple.get();
    if (firstTuple && Time.currentTimeSecs() > TICK_INTERVAL_SECS) {
        throw new RuntimeException("Simulated time was higher than " + TICK_INTERVAL_SECS
            + " at start of test."
            + " Increase the interval until this no longer occurs, but keep an eye on Storm's timeouts for e.g. worker heartbeat.");
    }
    receivedAnyTuple.set(true);
    if (tickTupleCount.get() > 3) {
        throw new RuntimeException("Unexpectedly many tick tuples");
    }
    if (!TupleUtils.isTick(tuple)) {
        boolean isExpectedPayload = tuple.getValues().size() == 1 && "val".equals(tuple.getValue(0));
        if (isExpectedPayload) {
            collector.ack(tuple);
        } else {
            nonTickTuple.set(tuple);
        }
        return;
    }
    tickTupleCount.incrementAndGet();
    collector.ack(tuple);
}
// Key identifying the writer for this tuple; resolved later (fragment is cut off here).
String writerKey = null;
if (TupleUtils.isTick(tuple)) {
    // Tick tuple: force a file-system flush and ack the tick itself.
    LOG.debug("TICK! forcing a file system flush");
    this.collector.ack(tuple);
@Override public void execute(Tuple tuple) { if (TupleUtils.isTick(tuple)) { return; } try { //get document Document doc = mapper.toDocument(tuple); //get query filter Bson filter = queryCreator.createFilter(tuple); mongoClient.update(filter, doc, upsert, many); this.collector.ack(tuple); } catch (Exception e) { this.collector.reportError(e); this.collector.fail(tuple); } }
@Override public void execute(Tuple tuple) { if (TupleUtils.isTick(tuple)) { return; } try { //get query filter Bson filter = queryCreator.createFilter(tuple); //find document from mongodb Document doc = mongoClient.find(filter); //get storm values and emit List<Values> valuesList = mapper.toTuple(tuple, doc); for (Values values : valuesList) { this.collector.emit(tuple, values); } this.collector.ack(tuple); } catch (Exception e) { this.collector.reportError(e); this.collector.fail(tuple); } }
@Override
public void execute(Tuple input) {
    // In non-batch mode tick tuples are ignored outright (fragment is cut off here).
    if (!batch && TupleUtils.isTick(input)) {
        return;
/**
 * Looks up an HBase row for the tuple's key (via the cache when enabled,
 * otherwise with a direct GET) and emits the mapped values anchored to the
 * input tuple.
 *
 * <p>Tick tuples are acked and skipped. On success the tuple is acked; on
 * any exception the error is reported and the tuple is failed.
 */
@Override
public void execute(Tuple tuple) {
    if (TupleUtils.isTick(tuple)) {
        collector.ack(tuple);
        return;
    }
    byte[] rowKey = this.mapper.rowKey(tuple);
    try {
        Result result;
        if (cacheEnabled) {
            result = cache.get(rowKey);
        } else {
            // Single-row batch GET; take the only result.
            Get get = hBaseClient.constructGetRequests(rowKey, projectionCriteria);
            result = hBaseClient.batchGet(Lists.newArrayList(get))[0];
        }
        for (Values values : rowToTupleMapper.toValues(tuple, result)) {
            this.collector.emit(tuple, values);
        }
        this.collector.ack(tuple);
    } catch (Exception e) {
        this.collector.reportError(e);
        this.collector.fail(tuple);
    }
}
@Override
public void execute(Tuple input) {
    if (TupleUtils.isTick(input)) {
        // On a tick, rotate the pending map; the evicted entries are the timed-out tuples.
        // (fragment is cut off here)
        Map<Object, AckObject> tmp = pending.rotate();
        LOG.debug("Number of timeout tuples:{}", tmp.size());
/**
 * Writes the tuple's record to its Hive partition, batching acknowledgements
 * through the batch helper.
 *
 * <p>Data tuples are written via a per-endpoint {@code HiveWriter} and added
 * to the current batch; when the helper signals a flush, all writers are
 * flushed and the batch is acked. Tick tuples additionally retire idle
 * writers. A serialization error acks the tuple (it can never be written);
 * any other failure fails the batch and aborts/closes all writers.
 */
@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            List<String> partitionVals = options.getMapper().mapPartitions(tuple);
            HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
            HiveWriter writer = getOrCreateWriter(endPoint);
            writer.write(options.getMapper().mapRecord(tuple));
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            flushAllWriters(true);
            LOG.info("acknowledging tuples after writers flushed ");
            batchHelper.ack();
        }
        if (TupleUtils.isTick(tuple)) {
            retireIdleWriters();
        }
    } catch (SerializationError se) {
        // BUG FIX: the original message had no '{}' placeholder, so SLF4J
        // silently dropped the tuple argument from the log output.
        LOG.info("Serialization exception occurred, tuple is acknowledged but not written to Hive [{}].", tuple);
        this.collector.reportError(se);
        collector.ack(tuple);
    } catch (Exception e) {
        batchHelper.fail(e);
        abortAndCloseWriters();
    }
}
@Override
public void execute(Tuple tuple) {
    if (TupleUtils.isTick(tuple)) {
        // On a tick, check whether the rotation timeout has elapsed
        // since the last rotation. (fragment is cut off here)
        long now = System.currentTimeMillis();
        if (now - _lastRotate > _messageTimeoutMs) {
@SuppressWarnings("unchecked")
@Override
public void execute(Tuple tuple) {
    // Tick tuples are handled by a dedicated method and short-circuit
    // normal processing. (fragment is cut off here)
    if (TupleUtils.isTick(tuple)) {
        handleTickTuple(tuple);
        return;
@SuppressWarnings("unchecked")
@Override
public void execute(Tuple tuple) {
    // Tick path; the non-bulk writer case is special-cased below.
    // (fragment is cut off here)
    if (isTick(tuple)) {
        try {
            if (!(bulkMessageWriter instanceof WriterToBulkWriter)) {
/**
 * Tells the caller whether {@code tuple} should be processed as data.
 *
 * @param tuple the incoming tuple.
 * @return {@code true} for data tuples; {@code false} for tick tuples,
 *         which instead request a flush of the in-flight batch.
 */
public boolean shouldHandle(Tuple tuple) {
    if (!TupleUtils.isTick(tuple)) {
        return true;
    }
    // Tick tuple: flag the batch for a forced flush and swallow the tuple.
    LOG.debug("TICK received! current batch status [{}/{}]", tupleBatch.size(), batchSize);
    forceFlush = true;
    return false;
}
/**
 * {@inheritDoc}
 *
 * @param tuple the tuple to process.
 */
@Override
public void execute(final Tuple tuple) {
    // Dispatch: ticks are handled separately from data tuples.
    if (TupleUtils.isTick(tuple)) {
        onTickTuple(tuple);
        return;
    }
    process(tuple);
}
/**
 * {@inheritDoc}
 *
 * <p>Data tuples go to {@code process}; tick tuples go to {@code onTickTuple}.
 *
 * @param tuple the tuple to process.
 */
@Override
public void execute(final Tuple tuple) {
    if (!TupleUtils.isTick(tuple)) {
        process(tuple);
    } else {
        onTickTuple(tuple);
    }
}