@Override
protected TopicAndPartition createKafkaPartitionHandle(KafkaTopicPartition partition) {
    return new TopicAndPartition(partition.getTopic(), partition.getPartition());
}
/**
 * Check whether the topic has partitions undergoing partition reassignment and wait for the
 * reassignments to finish.
 *
 * @param zkUtils the ZkUtils instance used to check ongoing partition reassignments.
 * @param topic the topic to check for ongoing partition reassignments.
 * @return whether there are no ongoing partition reassignments.
 */
public static boolean ensureTopicNotUnderPartitionReassignment(ZkUtils zkUtils, String topic) {
    int attempt = 0;
    while (JavaConversions.asJavaCollection(zkUtils.getPartitionsBeingReassigned().keys()).stream()
            .anyMatch(tp -> tp.topic().equals(topic))) {
        try {
            // Back off exponentially: 1s, 2s, 4s, ... across at most 10 attempts.
            sleep(1000 << attempt);
        } catch (InterruptedException e) {
            // Let it go.
        }
        if (++attempt == 10) {
            return false;
        }
    }
    return true;
}
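// A hedged usage sketch (not from the original sources): a test might block on the check above
// before reading partition metadata. ZkUtils.apply(...) and close() are Kafka's real API; the
// connect string, timeouts, and topic name below are hypothetical.
ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
try {
    if (!ensureTopicNotUnderPartitionReassignment(zkUtils, "test-topic")) {
        throw new IllegalStateException("Reassignment of test-topic did not settle after 10 attempts");
    }
} finally {
    zkUtils.close();
}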
// Error path: give up on this consumer and surface the network failure to the caller.
String host = simpleConsumer.host();
int port = simpleConsumer.port();
simpleConsumer = null;
throw new KafkaException("Network error when fetching messages: " + host + ":" + port
        + " , " + e.getMessage(), e);

// Elsewhere in the fetch path: fall back to the configured start offset when the broker
// reports that the requested offset is out of range.
if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
    long startOffset = getOffset(topic, partition, config.startOffsetTime);
    offset = startOffset;
}
@Override
public FetchResponse fetch(FetchRequest request) {
    scala.collection.Traversable<Tuple2<TopicAndPartition, PartitionFetchInfo>> requestInfo =
            request.requestInfo();
    java.util.Map<TopicAndPartition, Short> errorMap = new HashMap<>();

    while (requestInfo.headOption().isDefined()) {
        // jfim: IntelliJ erroneously thinks the following line is an incompatible type error, but it's only because
        // it doesn't understand Scala covariance when called from Java (i.e. it thinks head() is of type A even though
        // it's really of type Tuple2[TopicAndPartition, PartitionFetchInfo])
        Tuple2<TopicAndPartition, PartitionFetchInfo> t2 = requestInfo.head();
        TopicAndPartition topicAndPartition = t2._1();
        PartitionFetchInfo partitionFetchInfo = t2._2();

        if (!topicAndPartition.topic().equals(topicName)) {
            errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else if (topicAndPartition.partition() >= partitionLeaderIndices.length) {
            // Partition index out of range for this mock topic.
            errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else if (partitionLeaderIndices[topicAndPartition.partition()] != index) {
            errorMap.put(topicAndPartition, Errors.NOT_LEADER_FOR_PARTITION.code());
        } else {
            // Do nothing, we'll generate a fake message
        }

        requestInfo = requestInfo.tail();
    }

    return new MockFetchResponse(errorMap);
}
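// A hedged alternative sketch (assumption, not the original author's code): converting the Scala
// collection once with JavaConversions avoids the head()/tail() loop and the IDE inference quirk
// noted above; the loop body would carry the same per-partition checks.
for (Tuple2<TopicAndPartition, PartitionFetchInfo> entry
        : JavaConversions.asJavaIterable(request.requestInfo().toIterable())) {
    TopicAndPartition topicAndPartition = entry._1();
    // ... identical topic/partition/leader checks as in the loop above ...
}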
short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
log.warn("fetch %s - %s with offset %s encounters error: [%s]", topic, partitionId, offset, errorCode);

if (errorCode == ErrorMapping.RequestTimedOutCode()) {
    log.info("kafka request timed out, response[%s]", response);
} else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
    long newOffset = getOffset(earliest);
    log.info("got [%s] offset[%s] for [%s][%s]", earliest ? "earliest" : "latest", newOffset, topic, partitionId);
}
for (KafkaTopicPartitionState<TopicAndPartition> part : partitionStates) {
    short code;
    if ((code = response.errorCode(part.getTopic(), part.getPartition())) != ErrorMapping.NoError()) {
        exception.append("\nException for topic=").append(part.getTopic())
                .append(" partition=").append(part.getPartition()).append(": ")
                .append(ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code)));
    }
}
try {
    consumerRecords = kafkaConsumer.poll(kafkaPollTimeOut);
} catch (KafkaException ke) {
    logger.error(ke.getMessage(), ke);
    throw UserException.dataReadError(ke).message(ke.getMessage()).build(logger);
}
@Override
public short errorCode(String topic, int partition) {
    TopicAndPartition key = new TopicAndPartition(topic, partition);
    if (errorMap.containsKey(key)) {
        return errorMap.get(key);
    } else {
        return Errors.NONE.code();
    }
}
/**
 * A Java transliteration of what the Scala implementation does, which unfortunately is declared as private.
 */
protected void flushDirtyLogs() {
    LOG.debug("Checking for dirty logs to flush...");
    final Set<Map.Entry<TopicAndPartition, Log>> entries =
            JavaConversions.mapAsJavaMap(logManager.logsByTopicPartition()).entrySet();
    for (final Map.Entry<TopicAndPartition, Log> topicAndPartitionLogEntry : entries) {
        final TopicAndPartition topicAndPartition = topicAndPartitionLogEntry.getKey();
        final Log kafkaLog = topicAndPartitionLogEntry.getValue();
        final long timeSinceLastFlush = JODA_TIME.milliseconds() - kafkaLog.lastFlushTime();
        try {
            // Log the same flushMs value that the comparison below actually uses.
            LOG.debug("Checking if flush is needed on {} flush interval {} last flushed {} time since last flush: {}",
                    topicAndPartition.topic(), kafkaLog.config().flushMs(), kafkaLog.lastFlushTime(), timeSinceLastFlush);
            if (timeSinceLastFlush >= kafkaLog.config().flushMs()) {
                kafkaLog.flush();
            }
        } catch (Exception e) {
            LOG.error("Error flushing topic " + topicAndPartition.topic(), e);
        }
    }
}
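// A minimal scheduling sketch (assumption: Kafka's own LogManager drives its flusher from a
// background scheduler; the 30-second period here is hypothetical, not taken from the source).
// Requires java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}.
ScheduledExecutorService flushScheduler = Executors.newSingleThreadScheduledExecutor();
flushScheduler.scheduleAtFixedRate(this::flushDirtyLogs, 30, 30, TimeUnit.SECONDS);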
@Override
public ByteBufferMessageSet messageSet(String topic, int partition) {
    if (errorMap.containsKey(new TopicAndPartition(topic, partition))) {
        throw new IllegalArgumentException();
    } else {
        // TODO Maybe generate dummy messages here?
        return new ByteBufferMessageSet(Collections.<Message>emptyList());
    }
}
private FetchRequest createFetchRequest(KafkaPartition partition, long nextOffset) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getId());
    PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(nextOffset, this.bufferSize);
    Map<TopicAndPartition, PartitionFetchInfo> fetchInfo =
            Collections.singletonMap(topicAndPartition, partitionFetchInfo);
    return new FetchRequest(this.fetchCorrelationId, this.clientName, this.fetchTimeoutMillis,
            this.fetchMinBytes, fetchInfo);
}
@Override
protected long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
            Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
                    new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
@Override
protected long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
            Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
                    new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
            Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
                    new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo =
            Collections.singletonMap(new TopicAndPartition(partition.getTopicName(), partition.getId()),
                    new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
    return getOffset(partition, offsetRequestInfo);
}
public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
        LOG.error("Consumer is null; cannot get offset for partition: " + partition);
        return -1;
    }

    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(),
            simpleConsumer.clientId());

    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);

    // The API implies that this will always return all of the offsets, so it seems a partition cannot
    // have more than Integer.MAX_VALUE - 1 segments.
    //
    // This also assumes that the lowest value returned will be the first segment available, so if
    // segments have been dropped off, this value should not be 0.
    PartitionOffsetRequestInfo partitionOffsetRequestInfo =
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo),
            kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);

    if (offsetResponse.hasError()) {
        short errorCode = offsetResponse.errorCode(topicName, partitionId);
        throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'");
    }

    return offsetResponse.offsets(topicName, partitionId);
}
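// A hedged usage sketch: the old offset API returns offsets in descending order, so the two ends
// of the array bound the retained log. The host/port/timeouts/clientId values are hypothetical.
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "offset-probe");
try {
    long[] offsets = findAllOffsets(consumer, "test-topic", 0);
    long latestOffset = offsets[0];                     // next offset to be written
    long earliestOffset = offsets[offsets.length - 1];  // start of the oldest retained segment
} finally {
    consumer.close();
}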
private long getOffset(boolean earliest) throws InterruptedException {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
            earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);

    OffsetResponse response;
    try {
        response = consumer.getOffsetsBefore(request);
    } catch (Exception e) {
        ensureNotInterrupted(e);
        log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
        return -1;
    }
    if (response.hasError()) {
        log.error("error fetching offset data from the broker [%s]. reason: [%s]",
                leaderBroker.host(), response.errorCode(topic, partitionId));
        return -1;
    }
    long[] offsets = response.offsets(topic, partitionId);
    return earliest ? offsets[0] : offsets[offsets.length - 1];
}
private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
    TopicAndPartition topicAndPartition =
            new TopicAndPartition(topicPartition.getTopic(), topicPartition.getPartition());
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));

    final String clientName = getClientName(topicPartition);
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Error fetching offset data. Reason: "
                + response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
    }
    long[] offsets = response.offsets(topicPartition.getTopic(), topicPartition.getPartition());
    // The latest offset points at the next message to be written, so the last existing message is one before it.
    return offsets[0] - 1;
}