@Override
protected void processMessage(MessageAndMetadata<byte[], byte[]> message) {
    try {
        Collection<Either<JobSpec, URI>> parsedCollection = parseJobSpec(message.message());
        for (Either<JobSpec, URI> parsedMessage : parsedCollection) {
            if (parsedMessage instanceof Either.Left) {
                this.newSpecs.inc();
                this.jobCatalog.put(((Either.Left<JobSpec, URI>) parsedMessage).getLeft());
            } else if (parsedMessage instanceof Either.Right) {
                this.removedSpecs.inc();
                this.jobCatalog.remove(((Either.Right<JobSpec, URI>) parsedMessage).getRight());
            }
        }
    } catch (IOException ioe) {
        String messageStr = new String(message.message(), Charsets.UTF_8);
        log.error(String.format("Failed to parse kafka message with offset %d: %s.", message.offset(), messageStr), ioe);
    }
}
log.error(String.format("Failed to delete job/jobStateStore or parse kafka message with offset %d: %s.", message.offset(), messageStr), ioe);
GenericRecord record = (GenericRecord) deserializer.deserialize(topic, payload);
System.out.println(record.toString());
checkpoint.update(messagePlusMeta.partition(), messagePlusMeta.offset());
attributes.put("kafka.offset", String.valueOf(mam.offset())); attributes.put("kafka.partition", String.valueOf(mam.partition()));
@Override
public Message next() {
    MessageAndMetadata<byte[], byte[]> kafkaMessage;
    try {
        kafkaMessage = mIterator.next();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }
    long timestamp = 0L;
    if (mConfig.useKafkaTimestamp()) {
        timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
    }
    return new Message(kafkaMessage.topic(), kafkaMessage.partition(), kafkaMessage.offset(), kafkaMessage.key(), kafkaMessage.message(), timestamp);
}
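LegacyConsumerTimeoutException is referenced above but not shown; a minimal sketch of such a wrapper, assuming it only re-wraps the old consumer's timeout as an unchecked exception (the real class may carry extra context):

// Hypothetical wrapper assumed by the snippet above, not necessarily the project's actual class.
public class LegacyConsumerTimeoutException extends RuntimeException {
    public LegacyConsumerTimeoutException(kafka.consumer.ConsumerTimeoutException cause) {
        super(cause);
    }
}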
private void processAuditMsg(final MessageAndMetadata mm) throws Exception {
    JSONObject record = JSON.parseObject(StringUtils.toEncodedString((byte[]) mm.message(), Charset.forName("UTF-8")));
    String topicName = record.getString(AuditMsgField.TOPICNAME.getName());
    if (blacklistedTopics.contains(topicName)) {
        logger.debug("Topic={} is blacklisted", topicName);
        return;
    }
    if (deduplicator != null) {
        String uuid = record.getString(AuditMsgField.UUID.getName());
        String host = record.getString(AuditMsgField.HOSTNAME.getName());
        if (deduplicator.isDuplicated(topicName, mm.partition(), mm.offset(), host, uuid)) {
            return;
        }
    }
    if (enablePersistentStore) {
        auditReporter.submit(mm.topic(), mm.partition(), mm.offset(), record);
    }
}
@Override public long offset() { return rec.offset(); } }
public static String messageAndOffset(MessageAndMetadata<byte[], byte[]> mamd) { return asUtf8(mamd.message()) + "@offset" + mamd.offset(); }
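The asUtf8 helper used above isn't shown; a minimal sketch under the assumption that it simply decodes the payload bytes as UTF-8:

// Hypothetical helper assumed by the snippet above: decode raw message bytes as UTF-8.
private static String asUtf8(byte[] bytes) {
    return bytes == null ? null : new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
}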
public Message(MessageAndMetadata<byte[], byte[]> message) {
    this.topic = message.topic();
    this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
    this.message = new String(message.message(), Charset.forName("utf-8"));
    this.partition = message.partition();
    this.offset = message.offset();
}
}
public void run() {
    ConsumerIterator<byte[], byte[]> itr = stream.iterator();
    logger.debug("Thread {} starts consuming message...", Thread.currentThread().getName());
    while (itr.hasNext() && isAlive) {
        MessageAndMetadata<byte[], byte[]> mam = itr.next();
        try {
            kp.setPartitionId(mam.partition());
            putMessage(kp, new Message(mam.message()), mam.offset());
        } catch (InterruptedException e) {
            logger.error("Message Enqueue has been interrupted", e);
        }
    }
    logger.debug("Thread {} stops consuming message...", Thread.currentThread().getName());
}
});
@Override
protected void processMessage(MessageAndMetadata<byte[], byte[]> message) {
    try {
        Collection<Either<JobSpec, URI>> parsedCollection = parseJobSpec(message.message());
        for (Either<JobSpec, URI> parsedMessage : parsedCollection) {
            if (parsedMessage instanceof Either.Left) {
                this.newSpecs.inc();
                this.jobCatalog.put(((Either.Left<JobSpec, URI>) parsedMessage).getLeft());
            } else if (parsedMessage instanceof Either.Right) {
                this.removedSpecs.inc();
                this.jobCatalog.remove(((Either.Right<JobSpec, URI>) parsedMessage).getRight());
            }
        }
    } catch (IOException ioe) {
        String messageStr = new String(message.message(), Charsets.UTF_8);
        log.error(String.format("Failed to parse kafka message with offset %d: %s.", message.offset(), messageStr), ioe);
    }
}
@Override
public void run() {
    try {
        long start = logger.isDebugEnabled() ? System.currentTimeMillis() : 0;
        messageHandler.p2Process(message);
        if (logger.isDebugEnabled()) {
            long useTime = System.currentTimeMillis() - start;
            if (useTime > 1000) logger.debug("received_topic_useTime [{}]process topic:{} use time {} ms", processorName, topicName, useTime);
        }
        // send the consumer acknowledgement if required
        if (message.isConsumerAckRequired()) {
            consumerContext.sendConsumerAck(message.getMsgId());
        }
        consumerContext.saveOffsetsAfterProcessed(messageAndMeta.topic(), messageAndMeta.partition(), messageAndMeta.offset());
    } catch (Exception e) {
        boolean processed = messageHandler.onProcessError(message);
        if (!processed) {
            consumerContext.processErrorMessage(topicName, message);
        }
        logger.error("received_topic_process_error [" + processorName + "]processMessage error,topic:" + topicName, e);
    }
    consumerContext.updateConsumerStats(messageAndMeta.topic(), -1);
}
});
/**
 * Processes an {@link Iterable} by iteratively processing each message.
 *
 * @param stream the stream of messages to process.
 * @param topic the topic the {@code stream} belongs to.
 *
 * @see StreamProcessor#process(Iterable, String)
 */
public void process(final Iterable<MessageAndMetadata<K, V>> stream, final String topic) {
    for (final MessageAndMetadata<K, V> entry : stream) {
        // final Timer.Context context = processed.time();
        process(entry.key(), entry.message(), topic, entry.partition(), entry.offset());
        // context.stop();
    }
}
}
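A minimal usage sketch for the method above, assuming the old kafka.consumer high-level API (where KafkaStream implements Iterable<MessageAndMetadata<K, V>>); the consumer connector, processor instance, and topic name are placeholders:

// Hypothetical wiring: hand one KafkaStream for topic "events" to the processor.
Map<String, List<KafkaStream<byte[], byte[]>>> streams =
        consumerConnector.createMessageStreams(Collections.singletonMap("events", 1));
KafkaStream<byte[], byte[]> stream = streams.get("events").get(0);
processor.process(stream, "events"); // dispatches key/message/partition/offset per record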
/**
 * Consumes the messages from the partition via the specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream<byte[], byte[]> stream) {
    try {
        // by default the iterator blocks waiting for the next message, unless a consumer timeout is configured
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {
            final MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);
            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);
            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
void accept(MessageAndMetadata<byte[], byte[]> rec) {
    try {
        trace.trace("{} received rec for topic:{} partition:{} offset:{}", id(), rec.topic(), rec.partition(), rec.offset());
        T tuple;
        if (stringToTupleFn != null)
            tuple = stringToTupleFn.apply(new StringConsumerRecord(rec));
        else
            tuple = byteToTupleFn.apply(new ByteConsumerRecord(rec));
        eventSubmitter.accept(tuple);
    } catch (Exception e) {
        String tp = String.format("[%s,%d]", rec.topic(), rec.partition());
        trace.error("{} failure processing record from {}", id(), tp, e);
    }
}
public void run() {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        try {
            // read the record once; calling it.next() repeatedly would skip messages
            MessageAndMetadata<byte[], byte[]> mam = it.next();
            KafkaMsg msg = KafkaMsg.createBuilder()
                    .key(new String(mam.key()))
                    .val(mam.message())
                    .offset(mam.offset())
                    .partition(mam.partition())
                    .topic(mam.topic())
                    .build();
            while (true) { // retry put
                try {
                    queue.put(msg);
                    break;
                } catch (InterruptedException e) {
                    logger.error(e.getMessage(), e);
                    try {
                        Thread.sleep(SLEEPING_INTERVAL);
                    } catch (InterruptedException ee) {
                        logger.error(ee.getMessage(), ee);
                    }
                }
            }
        } catch (Throwable e) {
            logger.error(e.getMessage(), e);
        }
    }
}
}