Refine search
// NOTE(review): truncated search-result fragment — braces are unbalanced in this view.
// Issues a fetch of up to 500000 bytes from startOffset (minBytes 100000, maxWait timeoutMillis),
// then on success builds an offset-filtered iterable over [startOffset, endOffset).
// The trailing `throw exceptionForKafkaErrorCode(...)` appears to sit inside the
// no-error branch here — presumably it belongs to an else/error path that was cut
// off by the truncation. TODO confirm against the full source before editing.
FetchResponse fetchResponse = _simpleConsumer.fetch( new FetchRequestBuilder().minBytes(100000).maxWait(timeoutMillis) .addFetch(_topic, _partition, startOffset, 500000).build()); if (!fetchResponse.hasError()) { final Iterable<MessageAndOffset> messageAndOffsetIterable = buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset); throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
/**
 * Issues the given fetch request to the leader broker of the specified partition.
 *
 * @param fetchRequest the fetch request to execute
 * @param partition the partition whose leader should serve the request
 * @return the broker's fetch response (guaranteed error-free)
 * @throws RuntimeException if the response carries a Kafka error code
 */
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    // Include topic and partition in the message so the failing partition is
    // identifiable from logs (the bare "error code %d" gave no context).
    throw new RuntimeException(String.format(
        "Fetch failed for topic %s partition %d: error code %d",
        partition.getTopicName(), partition.getId(),
        fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
// NOTE(review): truncated fragment — the opening `try {` and several closing
// braces are missing in this view, and the `instanceof` condition is cut off
// mid-expression. Visible intent: fetch with clientId/fetchMaxBytes/fetchWaitMaxMs;
// on network errors (ConnectException/SocketTimeoutException/IOException) the
// consumer reference is dropped and the failure is rethrown as a KafkaException
// carrying host:port context; an OffsetOutOfRangeCode with resetOffsetIfOutOfRange
// re-resolves the start offset via getOffset(). TODO confirm against the full source.
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException { FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes) .maxWait(config.fetchWaitMaxMs).build(); FetchResponse fetchResponse = null; fetchResponse = simpleConsumer.fetch(req); } catch (Exception e) { if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException LOG.warn("Network error when fetching messages:", e); if (simpleConsumer != null) { String host = simpleConsumer.host(); int port = simpleConsumer.port(); simpleConsumer = null; throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e); if (fetchResponse.hasError()) { short code = fetchResponse.errorCode(topic, partition); if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) { long startOffset = getOffset(topic, partition, config.startOffsetTime); ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition); return msgs;
// NOTE(review): truncated integration-test fragment (surrounding try/finally is
// partially cut off). Creates a SimpleConsumer against a local test broker,
// fetches partition 0 of sinkTopic from offset 0, and asserts that at least one
// valid byte was returned; the consumer is closed in the finally block.
consumer = new SimpleConsumer("127.0.0.1", cluster.getKafkaServerPort(0), DEFAULT_SO_TIMEOUT, DEFAULT_BUFFER_SIZE, "foo"); FetchRequest req = new FetchRequestBuilder().addFetch(sinkTopic, 0, 0, DEFAULT_BUFFER_SIZE).build(); FetchResponse resp = consumer.fetch(req); ByteBufferMessageSet ms = resp.messageSet(sinkTopic, 0); assertTrue(ms.validBytes() > 0); } finally { if (consumer != null) { consumer.close();
// NOTE(review): truncated fragment with a visible syntax break — the builder
// chain reads `.minBytes(1); .build();` (stray semicolon splits the chain) and
// the `fetchRequest` variable passed to consumer.fetch() is not defined in this
// view; presumably `.addFetch(...)` and the assignment were lost in extraction.
// Intent: poll messages for topicName/partitionId until validBytes() <= 0.
// TODO reconstruct from the full source before editing.
SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 100000, ""); final FetchRequestBuilder fetchRequestBuilder = new FetchRequestBuilder() .clientId("KafDrop") .maxWait(5000) // todo: make configurable .minBytes(1); .build(); FetchResponse fetchResponse = consumer.fetch(fetchRequest); final ByteBufferMessageSet messageSet = fetchResponse.messageSet(topicName, partitionId); if (messageSet.validBytes() <= 0) break;
// NOTE(review): truncated fragment (enclosing method and closing braces not
// visible). On a fetch error it logs the Kafka error code and returns false;
// otherwise it reads the partition's message set and records the fetch duration
// into lastFetchTime. The error code is logged at info level — arguably should
// be warn/error; confirm project logging conventions before changing.
fetchResponse = simpleConsumer.fetch(fetchRequest); if (fetchResponse.hasError()) { log.info("Error encountered during a fetch request from Kafka"); log.info("Error Code generated : " + fetchResponse.errorCode(kafkaRequest.getTopic(), kafkaRequest.getPartition())); return false; } else { ByteBufferMessageSet messageBuffer = fetchResponse.messageSet( kafkaRequest.getTopic(), kafkaRequest.getPartition()); lastFetchTime = (System.currentTimeMillis() - tempTime);
// NOTE(review): truncated loop-body fragment. Logs the error code on a failed
// fetch, yet still reads the message set and `continue`s when
// sleepIfEmpty(messages) reports it empty — presumably intentional so the error
// path shares the empty-set backoff, but confirm against the full loop, since
// reading messageSet() after hasError() is unusual.
if (response.hasError()) { short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition()); LOG.info("Failed to fetch message on {}. Error: {}", topicPart, errorCode); ByteBufferMessageSet messages = response.messageSet(topicPart.getTopic(), topicPart.getPartition()); if (sleepIfEmpty(messages)) { continue;
/**
 * Extracts an iterator over the messages in a fetch response for the given partition.
 *
 * @param fetchResponse the fetch response to read from
 * @param partition the partition whose message set should be iterated
 * @return an iterator over the partition's messages, or {@code null} if the
 *         message buffer could not be retrieved (callers must handle null)
 */
private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return messageBuffer.iterator();
  } catch (Exception e) {
    // Pass the exception as the trailing argument so the full stack trace is
    // logged — the old String.format("... %s", e) captured only e.toString().
    // Also restores the space that was missing between the two concatenated
    // sentence fragments in the original message.
    LOG.warn(String.format("Failed to retrieve next message buffer for partition %s. "
        + "The remainder of this partition will be skipped.", partition), e);
    return null;
  }
}
/**
 * Builds an EasyMock FetchResponse that reports no error and serves exactly one
 * message set containing the given messages (JSON-serialized, keyed by partition id).
 *
 * @param myMessages the payload objects to wrap as Kafka messages
 * @return the configured mock, also registered in {@code mocks} for verification
 */
private FetchResponse mockFetchResponse(List<MyMessage> myMessages) {
  final FetchResponse fetchResponse = EasyMock.createMock(FetchResponse.class);
  EasyMock.expect(fetchResponse.hasError()).andReturn(false).times(1);
  final List<Message> kafkaMessages = new ArrayList<Message>();
  final String partitionKey = Integer.toString(PARTITION_1_ID);
  for (MyMessage myMessage : myMessages) {
    // Payload is the JSON form of the message; key is the partition id string.
    kafkaMessages.add(new Message(gson.toJson(myMessage).getBytes(), partitionKey.getBytes()));
  }
  EasyMock.expect(fetchResponse.messageSet(EasyMock.anyString(), EasyMock.anyInt()))
      .andReturn(new ByteBufferMessageSet(kafkaMessages)).times(1);
  mocks.add(fetchResponse);
  return fetchResponse;
}
/**
 * Fetches a message set from Kafka starting at the given offset, retrying
 * transient errors up to MAX_KAFKA_FETCH_RETRIES times (recreating the consumer
 * between attempts via closeConsumer()/getConsumer()).
 *
 * @param fetchOffset offset to start fetching from; must be non-negative
 * @return the fetched message set
 * @throws OffsetOutOfRangeException if the broker reports the offset is out of range
 * @throws RuntimeException if fetching still fails after MAX_KAFKA_FETCH_RETRIES attempts
 */
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  int failureCount = 0;
  while (true) {
    SimpleConsumer consumer = getConsumer();
    FetchRequest req = new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
        .maxWait(fetchTimeoutMs)
        .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (!fetchResponse.hasError()) {
      return fetchResponse.messageSet(topic, partition);
    }
    short errorCode = fetchResponse.errorCode(topic, partition);
    // Bug fix: test for an out-of-range offset BEFORE the retry-exhaustion check.
    // Retrying cannot fix an out-of-range offset, and callers catching the declared
    // OffsetOutOfRangeException must see it even when this error happens to occur
    // on the final attempt (previously it surfaced as a generic RuntimeException).
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
          "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(
          String.format("Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
              consumer.host(), consumer.port(), topic, partition, errorCode));
    }
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
        consumer.host(), consumer.port(), topic, partition, errorCode);
    // Drop the (possibly stale) consumer so the next iteration reconnects.
    closeConsumer();
  }
}
/**
 * Sends the fetch request to the leader broker of the given partition and
 * returns the response, failing fast when the response reports a Kafka error.
 *
 * @param fetchRequest the request to send
 * @param partition the partition whose leader serves the request
 * @return the error-free fetch response
 * @throws RuntimeException when the response carries an error code
 */
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
  final SimpleConsumer leaderConsumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  final FetchResponse response = leaderConsumer.fetch(fetchRequest);
  if (!response.hasError()) {
    return response;
  }
  final short code = response.errorCode(partition.getTopicName(), partition.getId());
  throw new RuntimeException(String.format("error code %d", code));
}
// NOTE(review): truncated loop-body fragment. Logs the error code on a failed
// fetch, yet still reads the message set and `continue`s when
// sleepIfEmpty(messages) reports it empty — presumably intentional so the error
// path shares the empty-set backoff, but confirm against the full loop, since
// reading messageSet() after hasError() is unusual.
if (response.hasError()) { short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition()); LOG.info("Failed to fetch message on {}. Error: {}", topicPart, errorCode); ByteBufferMessageSet messages = response.messageSet(topicPart.getTopic(), topicPart.getPartition()); if (sleepIfEmpty(messages)) { continue;
/**
 * Extracts an iterator of KafkaConsumerRecords from a fetch response for the
 * given partition, wrapping each raw MessageAndOffset in a Kafka08ConsumerRecord.
 *
 * @param fetchResponse the fetch response to read from
 * @param partition the partition whose message set should be iterated
 * @return a transforming iterator over the partition's messages, or {@code null}
 *         if the message buffer could not be retrieved (callers must handle null)
 */
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return Iterators.transform(messageBuffer.iterator(),
        new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
          @Override
          public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
            return new Kafka08ConsumerRecord(input);
          }
        });
  } catch (Exception e) {
    // Pass the exception as the trailing argument so the full stack trace is
    // logged — the old String.format("... %s", e) captured only e.toString().
    // Also restores the space that was missing between the two concatenated
    // sentence fragments in the original message.
    log.warn(String.format("Failed to retrieve next message buffer for partition %s. "
        + "The remainder of this partition will be skipped.", partition), e);
    return null;
  }
}
// NOTE(review): truncated fragment — the FetchRequestBuilder chain has no
// `.build()` and no assignment in this view (lost in extraction). Handles a null
// response defensively, logging the error code (UnknownCode when response is
// null) and returning either the offset-filtered/decoded messages or EMPTY_MSGS.
// The log call uses printf-style "%s" placeholders — if this logger is SLF4J,
// those are not interpolated ({} expected); confirm the logger API in the full source.
ensureConsumer(previousLeader); FetchRequest request = new FetchRequestBuilder() .clientId(clientId) .addFetch(topic, partitionId, offset, FETCH_SIZE) .maxWait(timeoutMs) .minBytes(1) response = consumer.fetch(request); if (response == null || response.hasError()) { short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode(); log.warn("fetch %s - %s with offset %s encounters error: [%s]", topic, partitionId, offset, errorCode); return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) : EMPTY_MSGS;
/**
 * Fetches a message set from Kafka starting at the given offset, retrying
 * transient errors up to MAX_KAFKA_FETCH_RETRIES times (recreating the consumer
 * between attempts via closeConsumer()/getConsumer()).
 *
 * @param fetchOffset offset to start fetching from; must be non-negative
 * @return the fetched message set
 * @throws OffsetOutOfRangeException if the broker reports the offset is out of range
 * @throws RuntimeException if fetching still fails after MAX_KAFKA_FETCH_RETRIES attempts
 */
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  int failureCount = 0;
  while (true) {
    SimpleConsumer consumer = getConsumer();
    FetchRequest req = new FetchRequestBuilder()
        .clientId(clientName)
        .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
        .maxWait(fetchTimeoutMs)
        .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (!fetchResponse.hasError()) {
      return fetchResponse.messageSet(topic, partition);
    }
    short errorCode = fetchResponse.errorCode(topic, partition);
    // Bug fix: test for an out-of-range offset BEFORE the retry-exhaustion check.
    // Retrying cannot fix an out-of-range offset, and callers catching the declared
    // OffsetOutOfRangeException must see it even when this error happens to occur
    // on the final attempt (previously it surfaced as a generic RuntimeException).
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
          "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(
          String.format("Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
              consumer.host(), consumer.port(), topic, partition, errorCode));
    }
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
        consumer.host(), consumer.port(), topic, partition, errorCode);
    // Drop the (possibly stale) consumer so the next iteration reconnects.
    closeConsumer();
  }
}
/**
 * Refreshes topic metadata (e.g. after a leader change) and retries the fetch once.
 *
 * @param fetchRequest the fetch request to retry
 * @param tempTime start timestamp used downstream to compute fetch duration
 * @return true if the retried fetch succeeded and was processed; false on a
 *         fetch error or any exception
 */
private boolean refreshTopicMetadataAndRetryFetch(FetchRequest fetchRequest, long tempTime) {
  try {
    refreshTopicMetadata();
    FetchResponse fetchResponse = simpleConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
      log.warn("Error encountered during fetch request retry from Kafka");
      log.warn("Error Code generated : "
          + fetchResponse.errorCode(kafkaRequest.getTopic(), kafkaRequest.getPartition()));
      return false;
    }
    return processFetchResponse(fetchResponse, tempTime);
  } catch (Exception e) {
    // Bug fix: pass the exception to the logger. Previously only the generic
    // message was logged and the root cause (and its stack trace) was swallowed.
    log.info("Exception generated during fetch for topic " + kafkaRequest.getTopic()
        + ". This topic will be skipped.", e);
    return false;
  }
}
// NOTE(review): truncated — only the opening of processFetchResponse is visible
// (the try block and method are never closed in this view). It reads the
// partition's message set, computes the elapsed fetch time into lastFetchTime,
// and logs it in seconds; the rest of the method is not visible here.
private boolean processFetchResponse(FetchResponse fetchResponse, long tempTime) { try { ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(kafkaRequest.getTopic(), kafkaRequest.getPartition()); lastFetchTime = (System.currentTimeMillis() - tempTime); log.debug("Time taken to fetch : " + (lastFetchTime / 1000) + " seconds");
// NOTE(review): truncated fragment — the closing brace of the error branch is
// missing before the iterator assignment. Lazily (re)fills messageAndOffsetIterator
// by fetching KAFKA_READ_BUFFER_SIZE bytes at cursorOffset for this split's
// topic/partition; a fetch error is logged and escalated as a RuntimeException.
if (messageAndOffsetIterator == null) { log.debug("Fetching %d bytes from offset %d (%d - %d). %d messages read so far", KAFKA_READ_BUFFER_SIZE, cursorOffset, split.getStart(), split.getEnd(), totalMessages); FetchRequest req = new FetchRequestBuilder() .clientId("presto-worker-" + Thread.currentThread().getName()) .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE) .build(); FetchResponse fetchResponse = consumer.fetch(req); if (fetchResponse.hasError()) { short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId()); log.warn("Fetch response has error: %d", errorCode); throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'"); messageAndOffsetIterator = fetchResponse.messageSet(split.getTopicName(), split.getPartitionId()).iterator();
// NOTE(review): truncated fragment, near-duplicate of the fetchMessages variant
// above — the opening `try {`, several closing braces, and the end of the
// `instanceof` condition are missing in this view. Visible intent: fetch with
// clientId/fetchMaxBytes/fetchWaitMaxMs; on network errors drop the consumer and
// rethrow as KafkaException with host:port context; store the Kafka error code in
// the fetchResponseCode field and, on OffsetOutOfRangeCode, still return the
// message set. TODO confirm against the full source.
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException { FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes) .maxWait(config.fetchWaitMaxMs).build(); FetchResponse fetchResponse = null; fetchResponse = simpleConsumer.fetch(req); } catch (Exception e) { if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException String host = simpleConsumer.host(); int port = simpleConsumer.port(); simpleConsumer = null; throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e); if (fetchResponse.hasError()) { fetchResponseCode = fetchResponse.errorCode(topic, partition); if (fetchResponseCode == ErrorMapping.OffsetOutOfRangeCode()) { ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition); return msgs;