// Lifecycle hook: no processor-specific setup; defer entirely to the superclass.
@Override public void init() throws Exception { super.init(); }
// Called when no batch arrived for this source; still apply the auto-advance
// policy so interval-based offset advancement continues during idle periods.
@Override public BatchResponse onIdle(BatchSource source) { return autoAdvanceResponse(source); }
/**
 * Validates the configuration at startup.
 *
 * NoDuplication buffers each batch and flushes it only after the batch is
 * acknowledged (see onBatchProcessed), which this class only supports when
 * offsets advance every batch.
 *
 * @throws IllegalStateException if NoDuplication is enabled without
 *         AUTO_ADVANCE_EVERYBATCH mode (replaces the former raw
 *         RuntimeException; still unchecked, so existing catch blocks work)
 */
@Override public void init() throws Exception {
    if (m_config.isNoDuplication() && !isAutoAdvanceEveryBatch()) {
        throw new IllegalStateException(
                "NoDuplication only applies to AUTO_ADVANCE_EVERYBATCH mode.");
    }
    // NOTE(review): unlike the other init() override in this file, this one
    // does not call super.init() -- confirm the superclass needs no setup here.
}
protected void simplySendBatch(BatchSource source, Collection<JetstreamEvent> events) { long start = System.currentTimeMillis(); // send to batch event sinks if (!getBatchEventSinks().isEmpty()) { try { simplySendEvents(events); } catch (Throwable e) { incrementEventDroppedCounter(events.size()); } } // send to non-batch event sinks if (!getEventSinks().isEmpty()) { Iterator<JetstreamEvent> it = events.iterator(); while (it.hasNext()) { try { JetstreamEvent event = it.next(); simplySendEvent(event); } catch (Throwable e) { super.incrementEventDroppedCounter(); } } } handleReadRate(start, source, events.size()); }
/**
 * Handles one incoming batch: counts it, either caches it (NoDuplication) or
 * sends it immediately, then builds the auto-advance response, applying any
 * read-rate throttling wait computed for this partition's previous batch.
 *
 * @param source the batch source (topic/partition)
 * @param events the events of this batch
 * @return the response telling the consumer how to proceed
 * @throws EventException propagated from downstream send handling
 */
@Override public BatchResponse onNextBatch(BatchSource source,
        Collection<JetstreamEvent> events) throws EventException {
    // Seed the last-advance timestamp on first sight of this partition.
    initAutoAdvanceTime(source);
    incrementEventRecievedCounter(events.size());
    if (m_config.isNoDuplication()) {
        // NoDuplication: buffer now, flush in onBatchProcessed() after the
        // batch is acknowledged.
        cacheBatch(source, events);
    } else {
        // A non-null response from sendBatch() takes precedence over the
        // auto-advance/throttle logic below -- presumably an error or retry
        // outcome; confirm against sendBatch (not visible in this chunk).
        BatchResponse ret = sendBatch(source, events);
        if (ret != null)
            return ret;
    }
    BatchResponse ret = autoAdvanceResponse(source);
    // check if the read rate exceeds the max, if exceeds, make the next
    // batch wait for a while
    String key = getKey(source);
    Long waitMs = waitForLastBatch.get(key);
    if (waitMs != null && waitMs > 0) {
        ret.setWaitTimeInMs(waitMs);
    }
    return ret;
}
/**
 * Batch-acknowledged callback: in NoDuplication mode, flushes the events that
 * onNextBatch() buffered for this partition and empties the buffer.
 *
 * @param source the batch source whose batch completed
 */
@Override public void onBatchProcessed(BatchSource source) {
    // Nothing is buffered unless NoDuplication is enabled.
    if (!m_config.isNoDuplication()) {
        return;
    }
    List<JetstreamEvent> pending = cachedBatches.get(getKey(source));
    if (pending != null) {
        simplySendBatch(source, pending);
        pending.clear();
    }
}
/**
 * Computes how long the next batch from this partition should wait so the
 * overall read rate stays at or below the configured maximum, and records it
 * in {@code waitForLastBatch} (read back by onNextBatch).
 *
 * @param start      send start time (epoch millis)
 * @param source     the batch source (key for the per-partition wait map)
 * @param eventCount number of events just sent
 */
private void handleReadRate(long start, BatchSource source, int eventCount) {
    // Throttling is active only when a positive max read rate is configured.
    if (m_config.getMaxReadRate() > 0) {
        long end = System.currentTimeMillis();
        int tasks;
        String key = getKey(source);
        // Count this partition as a task even before its first entry exists.
        if (waitForLastBatch.containsKey(key)) {
            tasks = waitForLastBatch.size();
        } else {
            tasks = waitForLastBatch.size() + 1;
        }
        // 1000L forces long arithmetic: the original 1000 * eventCount * tasks
        // multiplied in int and could overflow before widening to long.
        long durInRate = (1000L * eventCount * tasks) / m_config.getMaxReadRate();
        long waitMs = durInRate - (end - start);
        waitForLastBatch.put(key, waitMs);
    }
}
/**
 * Decides whether the consumer should advance the offset with the next batch.
 *
 * In EVERYBATCH mode every batch advances; otherwise the offset advances only
 * when the configured interval has elapsed since the last advance for this
 * partition.
 *
 * @param source the batch source
 * @return advance-and-get-next or plain get-next response
 */
protected BatchResponse autoAdvanceResponse(BatchSource source) {
    if (isAutoAdvanceEveryBatch()) {
        return BatchResponse.advanceAndGetNextBatch();
    }
    // Use getKey(source) -- the same key initAutoAdvanceTime() writes into
    // m_lastAdvanceTimes -- instead of rebuilding "topic-partition" by hand.
    // If the hand-built key ever diverged from getKey, lastTs would always be
    // null here and the offset would never auto-advance.
    String key = getKey(source);
    Long lastTs = m_lastAdvanceTimes.get(key);
    long curTime = System.currentTimeMillis();
    boolean advance = false;
    if (lastTs != null
            && (curTime - lastTs.longValue()) >= m_config.getAutoAdvanceInterval()) {
        advance = true;
        m_lastAdvanceTimes.put(key, curTime); // reset the interval baseline
    }
    return advance ? BatchResponse.advanceAndGetNextBatch()
                   : BatchResponse.getNextBatch();
}
// Forwards a single event to the (non-batch) sinks and counts it as sent.
protected void simplySendEvent(JetstreamEvent event) { super.fireSendEvent(event); incrementEventSentCounter(); }
/**
 * Seeds the last-advance timestamp the first time a partition is seen, so the
 * auto-advance interval is measured from the arrival of its first batch.
 *
 * @param source the batch source to initialize
 */
protected void initAutoAdvanceTime(BatchSource source) {
    String partitionKey = getKey(source);
    if (!m_lastAdvanceTimes.containsKey(partitionKey)) {
        m_lastAdvanceTimes.put(partitionKey, System.currentTimeMillis());
    }
}
/** @return true when the configured mode is AUTO_ADVANCE_EVERYBATCH. */
private boolean isAutoAdvanceEveryBatch() {
    String mode = m_config.getAutoAdvanceMode();
    // Constant-first equals tolerates a null mode.
    return SimpleKafkaProcessorConfig.AUTO_ADVANCE_EVERYBATCH.equals(mode);
}
@Override public BatchResponse onNextBatch(BatchSource source, Collection<JetstreamEvent> events) throws EventException { // check if it's time to send the batch, check the delay time of the // lastEvent, if it's no time to send the batch, tell the IKC to wait // and resend this batch long start = System.currentTimeMillis(); if (getReplayConfig().getTimestampKey() != null && getReplayConfig().getDelayInMs() > 0) { JetstreamEvent[] eventArr = events .toArray(new JetstreamEvent[events.size()]); JetstreamEvent lastEvent = eventArr[eventArr.length - 1]; Long ts = (Long) lastEvent.get(getReplayConfig().getTimestampKey()); if (ts != null) { long v = ts + getReplayConfig().getDelayInMs() - start; if (v > 0) { incrementEventRecievedCounter(events.size()); incrementEventDelayedCounter(events.size()); return BatchResponse.getNextBatch() .setOffset(source.getHeadOffset()) .setWaitTimeInMs(v); } } } return super.onNextBatch(source, events); }
/**
 * Hot-reload hook: when the container replaces our configuration bean, adopt
 * the new instance.
 *
 * @param event the application event to inspect
 */
@Override protected void processApplicationEvent(ApplicationEvent event) {
    if (!(event instanceof ContextBeanChangedEvent)) {
        return;
    }
    ContextBeanChangedEvent beanChange = (ContextBeanChangedEvent) event;
    if (beanChange.isChangedBean(m_config)) {
        setConfig((SimpleKafkaProcessorConfig) beanChange.getChangedBean());
    }
}
super.fireSendEvent(event);
if (!getBatchEventSinks().isEmpty()) { try { simplySendEvents(events); } catch (Throwable e) { incrementEventDroppedCounter(events.size()); return BatchResponse.getNextBatch().setOffset( source.getHeadOffset()); if (!getEventSinks().isEmpty()) { long startOffset = source.getHeadOffset(); int index = 0; simplySendEvent(event); index++; handleReadRate(start, source, events.size());
// Fires the whole collection to the batch sinks in one call, then counts every
// event as sent.  The EventMetaInfo is passed fresh and never read afterwards,
// so anything the sinks write into it is discarded here.
protected void simplySendEvents(Collection<JetstreamEvent> events) { EventMetaInfo meta = new EventMetaInfo(); super.fireSendEvents(events, meta); incrementEventSentCounter(events.size()); }
/**
 * Appends this batch's events to the per-partition buffer (used by
 * NoDuplication mode), creating the buffer on first use.
 *
 * @param source the batch source (buffer key)
 * @param events the events to buffer
 */
protected void cacheBatch(BatchSource source, Collection<JetstreamEvent> events) {
    String partitionKey = getKey(source);
    List<JetstreamEvent> buffer = cachedBatches.get(partitionKey);
    if (buffer == null) {
        buffer = new ArrayList<JetstreamEvent>();
        cachedBatches.put(partitionKey, buffer);
    }
    buffer.addAll(events);
}