// Pairs a KafkaConsumer with its current batch iteration state. Starts from
// ConsumerRecords.empty() so that recordIterator is non-null and immediately
// safe to probe (hasNext() == false) before the first poll ever happens.
// NOTE(review): the role of uuid is not visible in this block — presumably an
// identifier for logging/registry lookups; confirm against callers.
ConsumerAndRecords(KafkaConsumer<String, byte[]> consumer, String uuid) { this.consumer = consumer; this.uuid = uuid; this.records = ConsumerRecords.empty(); this.recordIterator = records.iterator(); }
return ConsumerRecords.empty();
return ConsumerRecords.empty();
ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty(); try { records = consumer.poll(task.getIOConfig().getPollTimeout());
// Pairs a KafkaConsumer with its current batch iteration state. Starts from
// ConsumerRecords.empty() so that recordIterator is non-null and immediately
// safe to probe (hasNext() == false) before the first poll ever happens.
// NOTE(review): the role of uuid is not visible in this block — presumably an
// identifier for logging/registry lookups; confirm against callers.
ConsumerAndRecords(KafkaConsumer<String, byte[]> consumer, String uuid) { this.consumer = consumer; this.uuid = uuid; this.records = ConsumerRecords.empty(); this.recordIterator = records.iterator(); }
/**
 * Asynchronously polls Kafka on the dedicated worker thread and delivers the
 * result back on the owning context via the supplied handler.
 *
 * Outcomes, all dispatched through {@code context.runOnContext} so the handler
 * runs on the caller's context rather than the worker thread:
 * - successful poll: succeeded future carrying the polled records;
 * - {@link WakeupException} (raised when another thread wakes the consumer,
 *   typically during shutdown): treated as a benign empty result, not a failure;
 * - any other exception: failed future carrying the cause.
 *
 * If {@code closed} has been set by the time the worker task runs, the task is
 * silently dropped — NOTE(review): the handler is never invoked in that case;
 * confirm callers tolerate a poll that completes with no callback.
 */
@Override public void poll(long timeout, Handler<AsyncResult<ConsumerRecords<K, V>>> handler) { this.worker.submit(() -> { if (!this.closed.get()) { try { ConsumerRecords<K, V> records = this.consumer.poll(timeout); this.context.runOnContext(v -> handler.handle(Future.succeededFuture(records))); } catch (WakeupException ignore) { this.context.runOnContext(v -> handler.handle(Future.succeededFuture(ConsumerRecords.empty()))); } catch (Exception e) { this.context.runOnContext(v -> handler.handle(Future.failedFuture(e))); } } }); } }
return ConsumerRecords.empty();
return new ConsumerRecordsAndPositions(ConsumerRecords.empty(), Collections.emptyMap());
private void consumerPollLoop() { // Read in a loop and enqueue the batch of records, if any, to availableRecordsQueue. try { ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty(); while (!closed.get()) { try { if (records.isEmpty()) { records = consumer.poll(KAFKA_POLL_TIMEOUT.getMillis()); } else if (availableRecordsQueue.offer( records, RECORDS_ENQUEUE_POLL_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS)) { records = ConsumerRecords.empty(); } KafkaCheckpointMark checkpointMark = finalizedCheckpointMark.getAndSet(null); if (checkpointMark != null) { commitCheckpointMark(checkpointMark); } } catch (InterruptedException e) { LOG.warn("{}: consumer thread is interrupted", this, e); // not expected break; } catch (WakeupException e) { break; } } LOG.info("{}: Returning from consumer pool loop", this); } catch (Exception e) { // mostly an unrecoverable KafkaException. LOG.error("{}: Exception while reading from Kafka", this, e); consumerPollException.set(e); throw e; } }
private void consumerPollLoop() { // Read in a loop and enqueue the batch of records, if any, to availableRecordsQueue. try { ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty(); while (!closed.get()) { try { if (records.isEmpty()) { records = consumer.poll(KAFKA_POLL_TIMEOUT.getMillis()); } else if (availableRecordsQueue.offer( records, RECORDS_ENQUEUE_POLL_TIMEOUT.getMillis(), TimeUnit.MILLISECONDS)) { records = ConsumerRecords.empty(); } KafkaCheckpointMark checkpointMark = finalizedCheckpointMark.getAndSet(null); if (checkpointMark != null) { commitCheckpointMark(checkpointMark); } } catch (InterruptedException e) { LOG.warn("{}: consumer thread is interrupted", this, e); // not expected break; } catch (WakeupException e) { break; } } LOG.info("{}: Returning from consumer pool loop", this); } catch (Exception e) { // mostly an unrecoverable KafkaException. LOG.error("{}: Exception while reading from Kafka", this, e); consumerPollException.set(e); throw e; } }
/**
 * Polls the underlying consumer, tracking the latency between successive polls.
 *
 * Before polling, records the elapsed time since the previous call in
 * {@code POLL_LATENCY} and warns when it exceeds the configured
 * max.poll.interval.ms (which would cause the broker to evict this consumer
 * from its group).
 *
 * The Kafka consumer throws {@link IllegalStateException} when it is not
 * subscribed to any topics; we verify that is actually the case via
 * {@code consumer.subscription().isEmpty()} and return an empty batch then,
 * otherwise the exception is rethrown — it signals a genuine misuse.
 *
 * NOTE(review): lastPollTime == -1L is treated as "never polled"; confirm the
 * field is initialized to -1L elsewhere in this class.
 */
private ConsumerRecords<K, V> pollRecords(long timeout) { try { long currentTime = System.currentTimeMillis(); if (lastPollTime != -1L) { long pollLatency = currentTime - lastPollTime; POLL_LATENCY.update(pollLatency); if (pollLatency > config.getMaxPollInterval() && LOGGER.isWarnEnabled()) { LOGGER.warn("{}ms has elapsed since last #poll(). This is greater than max.poll.interval.ms {}. If this " + "continues you may need to increase max.poll.interval.ms", pollLatency, config.getMaxPollInterval()); } } lastPollTime = currentTime; return consumer.poll(timeout); } catch (IllegalStateException e) { // The Kafka consumer will throw this exception if the consumer is not currently subscribed to any topics. Return an // empty record collection after verifying that is in fact the case, otherwise rethrow the exception. if (consumer.subscription().isEmpty()) { LOGGER.debug("Consumer with no subscriptions polled for records."); return ConsumerRecords.empty(); } else { throw e; } } }
recordIterator = records != null ? records.iterator() : ConsumerRecords.<K, V>empty().iterator();
LOG.info("Retrieved records from Kafka to iterate over."); return records != null ? records.iterator() : ConsumerRecords.<K, V>empty().iterator();
_lastAutoCommitMs = now; ConsumerRecords<byte[], byte[]> rawRecords = ConsumerRecords.empty(); try { rawRecords = _kafkaConsumer.poll(deadline - now);
/**
 * Verifies that a record produced with a zero-length (but non-null) byte[]
 * value round-trips through produce/consume: exactly one record arrives, the
 * key survives, and the value is an empty array rather than null.
 *
 * Uses raw ByteArraySerializer/ByteArrayDeserializer overrides so the value
 * bytes pass through untouched; polls for up to 30 seconds waiting for the
 * record to become consumable.
 *
 * NOTE(review): ConsumerRecords / ConsumerRecord are used as raw types here —
 * presumably because createConsumer returns a raw LiKafkaConsumer; confirm and
 * parameterize if the factory allows it. Also note the assertEquals argument
 * order differs between the two assertions (expected-first vs actual-first).
 */
@Test public void testZeroLengthValue() throws Exception { Properties producerPropertyOverrides = new Properties(); producerPropertyOverrides.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); try (LiKafkaProducer producer = createProducer(producerPropertyOverrides)) { producer.send(new ProducerRecord<>("testZeroLengthValue", "key", new byte[0])).get(); } Properties consumerProps = new Properties(); consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); try (LiKafkaConsumer consumer = createConsumer(consumerProps)) { consumer.subscribe(Collections.singleton("testZeroLengthValue")); long startMs = System.currentTimeMillis(); ConsumerRecords records = ConsumerRecords.empty(); while (records.isEmpty() && System.currentTimeMillis() < startMs + 30000) { records = consumer.poll(100); } assertEquals(1, records.count()); ConsumerRecord record = (ConsumerRecord) records.iterator().next(); assertEquals("key", record.key()); assertEquals(((byte[]) record.value()).length, 0); } }
/**
 * Verifies that a record produced with a null value round-trips through
 * produce/consume: exactly one record arrives, the key survives, and the
 * consumed value is null.
 */
@Test
public void testNullValue() {
  // Produce a single record whose value is null.
  try (LiKafkaProducer<String, String> producer = createProducer(null)) {
    producer.send(new ProducerRecord<>("testNullValue", "key", null));
  }
  Properties consumerProps = new Properties();
  consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  try (LiKafkaConsumer<String, String> consumer = createConsumer(consumerProps)) {
    consumer.subscribe(Collections.singleton("testNullValue"));
    // Poll until the record shows up, giving up after 30 seconds.
    long giveUpMs = System.currentTimeMillis() + 30000;
    ConsumerRecords<String, String> polled = ConsumerRecords.empty();
    while (polled.isEmpty() && System.currentTimeMillis() < giveUpMs) {
      polled = consumer.poll(100);
    }
    assertEquals(1, polled.count());
    ConsumerRecord<String, String> received = polled.iterator().next();
    assertEquals("key", received.key());
    assertNull(received.value());
  }
}
@Test public void testBootstrapWithLiClosest() { produceRecordsWithKafkaProducer(); Properties props = new Properties(); props.setProperty("auto.offset.reset", LiOffsetResetStrategy.LICLOSEST.name()); props.setProperty("group.id", "testBootstrapLICLOSEST"); LiKafkaConsumer<String, String> consumer = createConsumer(props); TopicPartition tp = new TopicPartition(TOPIC1, 0); try { consumer.assign(Collections.singleton(tp)); ConsumerRecords<String, String> consumerRecords = ConsumerRecords.empty(); while (consumerRecords.isEmpty()) { consumerRecords = consumer.poll(1000); } long bootstrappedReset = consumerRecords.iterator().next().offset(); consumer.seekToBeginning(Collections.singleton(tp)); consumerRecords = ConsumerRecords.empty(); while (consumerRecords.isEmpty()) { consumerRecords = consumer.poll(1000); } assertEquals(bootstrappedReset, consumerRecords.iterator().next().offset()); // because we seek to beginning above } finally { consumer.close(); } }
ConsumerRecords<String, String> consumerRecords = ConsumerRecords.empty(); consumer.assign(tpAsCollection); long giveUp = System.currentTimeMillis() + TimeUnit.MINUTES.toMillis(5);
try (LiKafkaConsumer<String, String> consumer = createConsumer(props)) { consumer.assign(Collections.singleton(tp)); ConsumerRecords<String, String> consumerRecords = ConsumerRecords.empty(); consumer.seek(tp, 0); while (consumerRecords.isEmpty()) { case EARLIEST: long expectedEarliestOffset = consumerRecords.iterator().next().offset(); consumerRecords = ConsumerRecords.empty(); while (consumerRecords.isEmpty()) { consumerRecords = consumer.poll(1000);