frb.addFetch(partition.getKafkaTopicPartition().getTopic(),
        partition.getKafkaTopicPartition().getPartition(),
        .addFetch(_topic, _partition, startOffset, 500000)
        .build());
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
    FetchRequest req = new FetchRequestBuilder()
            .clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs)
            .build();
    FetchResponse fetchResponse = null;
FetchRequest req = new FetchRequestBuilder()
        .clientId("presto-worker-" + Thread.currentThread().getName())
        .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
        .build();
final String clientName = getClientName(topicPartition);
kafka.api.FetchRequest request = new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset, MAX_MESSAGE_SIZE_BYTES)
        .build();
        .addFetch(topic, partitionId, offset, FETCH_SIZE)
        .maxWait(timeoutMs)
        .minBytes(1)
consumer = new SimpleConsumer("127.0.0.1", cluster.getKafkaServerPort(0), DEFAULT_SO_TIMEOUT, DEFAULT_BUFFER_SIZE, "foo");
FetchRequest req = new FetchRequestBuilder()
        .addFetch(sinkTopic, 0, 0, DEFAULT_BUFFER_SIZE)
        .build();
FetchResponse resp = consumer.fetch(req);
ByteBufferMessageSet ms = resp.messageSet(sinkTopic, 0);
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    FetchRequest req = new FetchRequestBuilder()
            .clientId(clientID)
            .addFetch(topic, 0, 0L, 50)
            .addFetch(topic, 1, 0L, 5000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
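The example above adds three partitions to a single FetchRequest but only reads partition 0 from the response. A minimal follow-up sketch (hypothetical, reusing the fetchResponse and topic variables from that example) showing that each partition added via addFetch() can be read from the same FetchResponse:

for (int p = 0; p < 3; p++) {
    // every partition requested with addFetch() has its own message set in the response
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, p)) {
        System.out.println("partition " + p + ", offset " + messageAndOffset.offset());
    }
}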
public void consumeMessagesWithOldApi(String topic, int nbMessageToRead) throws UnsupportedEncodingException {
    SimpleConsumer simpleConsumer = new SimpleConsumer(kafkaHostname, kafkaPort, 30000, 2, "test");
    System.out.println("Testing single fetch");
    kafka.api.FetchRequest req = new FetchRequestBuilder()
            .clientId("test")
            .addFetch(topic, 0, 0L, 100)
            .build();
    int numRead = 0; // number of fetch iterations performed so far
    while (numRead != nbMessageToRead) {
        FetchResponse fetchResponse = simpleConsumer.fetch(req);
        printMessages(fetchResponse.messageSet(topic, 0));
        numRead++;
    }
}
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
    Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
    int failureCount = 0;
    while (true) {
        SimpleConsumer consumer = getConsumer();
        FetchRequest req = new FetchRequestBuilder()
                .clientId(clientName)
                .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
                .maxWait(fetchTimeoutMs)
                .build();
        FetchResponse fetchResponse = consumer.fetch(req);
        if (!fetchResponse.hasError()) {
            return fetchResponse.messageSet(topic, partition);
        }
        short errorCode = fetchResponse.errorCode(topic, partition);
        if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
            throw new RuntimeException(String.format(
                    "Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
                    consumer.host(), consumer.port(), topic, partition, errorCode));
        }
        LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
                consumer.host(), consumer.port(), topic, partition, errorCode);
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            throw new OffsetOutOfRangeException(String.format(
                    "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
        }
        closeConsumer();
    }
}
private void initializeLastProcessingOffset() {
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(
            Sets.newHashSet((String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)),
            this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(),
                kafka.api.OffsetRequest.LatestTime(), clientName);
        FetchRequest req = new FetchRequestBuilder()
                .clientId(clientName)
                .addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000)
                .build();
        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
/**
 * Makes a call to kafka to fetch messages.
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
    FetchRequest request = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
            .maxWait(MAX_WAIT)
            .build();
    return consumer.fetch(request);
}
for (int errors = 0; errors < 2 && msgs == null; errors++) {
    FetchRequestBuilder builder = new FetchRequestBuilder();
    FetchRequest fetchRequest = builder
            .addFetch(topic, partitionId, offset, config.fetchSizeBytes)
            .clientId(config.clientId)
            .build();
    FetchResponse fetchResponse;
private SimpleConsumer assertMsgFromKafka(KafkaEmbedded kafkaEmbedded) throws IOException {
    SimpleConsumer consumer = new SimpleConsumer("localhost", kafkaEmbedded.getPort(), 100000, 64 * 1024, clientName);
    long readOffset = getLastOffset(consumer, TOPIC, 0, kafka.api.OffsetRequest.EarliestTime(), clientName);
    FetchRequest req = new FetchRequestBuilder()
            .clientId(clientName)
            .addFetch(TOPIC, 0, readOffset, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Map<Integer, Map<String, String>> resultCollector = new HashMap<>();
    int count = 1;
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(TOPIC, 0)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < readOffset) {
            System.out.println("found an old offset: " + currentOffset + " expecting: " + readOffset);
            continue;
        }
        readOffset = messageAndOffset.nextOffset();
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        String message = new String(bytes, "UTF-8");
        Map<String, String> covertedMsg = JsonUtils.mapper.readValue(message, Map.class);
        covertedMsg.remove("timestamp");
        resultCollector.put(count, covertedMsg);
        count++;
    }
    Assert.assertEquals("{1={name=heap.committed, value=175636480}, 2={name=heap.init, value=262144000}, 3={name=heap.max, value=3704094720}, 4={name=heap.usage, value=0.01570181876990446}, 5={name=heap.used, value=58491576}, 6={name=name, value=testname}, 7={name=non-heap.committed, value=36405248}, 8={name=non-heap.init, value=2555904}, 9={name=non-heap.max, value=-1}, 10={name=non-heap.usage, value=-3.5588712E7}, 11={name=non-heap.used, value=35596496}, 12={name=pools.Code-Cache.usage, value=0.020214080810546875}, 13={name=pools.Compressed-Class-Space.usage, value=0.0035556256771087646}, 14={name=pools.Metaspace.usage, value=0.9777212526244751}, 15={name=pools.PS-Eden-Space.usage, value=0.03902325058129612}, 16={name=pools.PS-Old-Gen.usage, value=0.001959359247654333}, 17={name=pools.PS-Survivor-Space.usage, value=0.0}, 18={name=total.committed, value=212107264}, 19={name=total.init, value=264699904}, 20={name=total.max, value=3704094719}, 21={name=total.used, value=94644240}, 22={name=uptime, value=testuptime}, 23={name=vendor, value=testvendor}}", resultCollector.toString());
    return consumer;
}
@Override
public void run() {
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, current offset, and fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder()
                .clientId("default_client")
                .addFetch("topic1", 1, offset, 1000000)
                .build();
        // FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);
        // get the message set from the consumer and print them out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // advance the offset past the consumed message so the next fetch does not re-read it
            offset = msg.nextOffset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), msg.offset());
            receiveCount++;
        }
    }
}
/**
 * Fetches messages from the given topic, partition and offset using the provided {@link SimpleConsumer}.
 *
 * @return A {@link ByteBufferMessageSet} of the given topic, partition for messages fetched
 *
 * @throws OffsetOutOfRangeException if the given offset is out of range.
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition.
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with error.
 */
public static ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, String topic, int partition,
                                                 int fetchSize, long requestOffset) throws KafkaException {
    FetchRequest req = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topic, partition, requestOffset, fetchSize)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
        throw Errors.forCode(fetchResponse.errorCode(topic, partition)).exception();
    }
    return fetchResponse.messageSet(topic, partition);
}
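Because the utility above maps Kafka error codes to exceptions, a caller typically advances its offset on success and resets it when the requested offset has been trimmed from the log. A minimal caller sketch, assuming a connected consumer, the topic/partition/startOffset variables, and a getLastOffset helper like the one used in the embedded-Kafka test above (all hypothetical here):

long offset = startOffset;
try {
    ByteBufferMessageSet messages = fetchMessages(consumer, topic, partition, 1024 * 1024, offset);
    for (MessageAndOffset messageAndOffset : messages) {
        offset = messageAndOffset.nextOffset(); // advance past each consumed message
    }
} catch (OffsetOutOfRangeException e) {
    // the requested offset is no longer available on the broker; restart from the earliest offset
    offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), consumer.clientId());
}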