short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition()); if (code == ErrorMapping.OffsetOutOfRangeCode()) { else if (code == ErrorMapping.NotLeaderForPartitionCode() || code == ErrorMapping.LeaderNotAvailableCode() || code == ErrorMapping.BrokerNotAvailableCode() || code == ErrorMapping.UnknownCode()) { else if (code != ErrorMapping.NoError()) { exception += "\nException for " + fp.getTopic() + ":" + fp.getPartition() + ": " + ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code));
if (item.errorCode() != ErrorMapping.NoError()) { seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());
short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode(); log.warn("fetch %s - %s with offset %s encounters error: [%s]", topic, partitionId, offset, errorCode); if (errorCode == ErrorMapping.RequestTimedOutCode()) { log.info("kafka request timed out, response[%s]", response); } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) { long newOffset = getOffset(earliest); log.info("got [%s] offset[%s] for [%s][%s]", earliest ? "earliest" : "latest", newOffset, topic, partitionId);
if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) { long startOffset = getOffset(topic, partition, config.startOffsetTime); offset = startOffset;
KafkaPartition kafkaPartition = iterator.next(); short errorCode = fetchResponse.errorCode(consumer.topic, kafkaPartition.getPartitionId()); if (fetchResponse.hasError() && errorCode != ErrorMapping.NoError()) { ErrorMapping.exceptionFor(errorCode)); if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) { long seekTo = consumer.initialOffset.toLowerCase().equals("earliest") ? OffsetRequest.EarliestTime() : OffsetRequest.LatestTime();
} else if (!createMessageDecoder(context, topicMetadata.topic())) { log.info("Discarding topic (Decoder generation failed) : " + topicMetadata.topic()); } else if (topicMetadata.errorCode() != ErrorMapping.NoError()) { log.info("Skipping the creation of ETL request for Whole Topic : " + topicMetadata.topic() + " Exception : " + ErrorMapping.exceptionFor(topicMetadata.errorCode())); } else { for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) { NUM_TRIES_PARTITION_METADATA); if (partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) { log.info("Skipping the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); reportJobFailureDueToLeaderNotAvailable = true; } else { if (partitionMetadata.errorCode() != ErrorMapping.NoError()) { log.warn("Receiving non-fatal error code, Continuing the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode()));
/**
 * Requests metadata for the given topics over the supplied blocking channel and
 * returns the successfully described topics keyed by topic name.
 *
 * <p>Topics whose metadata came back with a non-zero error code are dropped from
 * the result rather than reported.
 *
 * @param channel an already-connected broker channel to issue the request on
 * @param topics  the topic names to describe
 * @return a map from topic name to its processed {@link TopicVO}
 */
private Map<String, TopicVO> getTopicMetadata(BlockingChannel channel, String... topics) {
    // API version 0, correlation id 0 — the reply is read synchronously right below,
    // so there is no need to correlate requests.
    final TopicMetadataRequest metadataRequest =
        new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics));
    LOG.debug("Sending topic metadata request: {}", metadataRequest);
    channel.send(metadataRequest);

    final kafka.api.TopicMetadataResponse rawResponse =
        kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer());
    LOG.debug("Received topic metadata response: {}", rawResponse);

    // Wrap the scala response in the javaapi adapter and keep only error-free topics.
    final TopicMetadataResponse wrappedResponse = new TopicMetadataResponse(rawResponse);
    return wrappedResponse.topicsMetadata().stream()
        .filter(metadata -> metadata.errorCode() == ErrorMapping.NoError())
        .map(this::processTopicMetadata)
        .collect(Collectors.toMap(TopicVO::getName, topic -> topic));
}
try { short errorCode = adminUtilsOperation.errorCodeFromTopicMetadata(topicName, zkUtils); if (errorCode == ErrorMapping.NoError()) { else if (errorCode == ErrorMapping.UnknownTopicOrPartitionCode()) { ErrorMapping.exceptionFor(errorCode));
if (fetchResponseCode == ErrorMapping.OffsetOutOfRangeCode()) { this.emittingOffset = consumer.getOffset(config.topic, partition, kafka.api.OffsetRequest.LatestTime()); } else if (fetchResponseCode == ErrorMapping.NotLeaderForPartitionCode()) { consumer.setConsumer(null);
expect(partitionMetadata1.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode()); expect(partitionMetadata1.partitionId()).andReturn(0); replay(partitionMetadata1); expect(partitionMetadata2.errorCode()).andReturn(ErrorMapping.InvalidMessageCode()); expect(partitionMetadata2.partitionId()).andReturn(0); replay(partitionMetadata2); expect(mockedReturnedPartitionMetadata.errorCode()).andReturn(ErrorMapping.NoError()); expect(mockedReturnedPartitionMetadata.partitionId()).andReturn(0); replay(mockedReturnedPartitionMetadata);
ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer()); if (metadataResponse.errorCode() == ErrorMapping.NoError()) { Broker offsetManager = metadataResponse.coordinator(); if (commitResponse.hasError()) { for (final Object partitionErrorCode : commitResponse.errors().values()) { if (((int) partitionErrorCode == ErrorMapping.OffsetMetadataTooLargeCode())) { } else if ((int) partitionErrorCode == ErrorMapping.NotCoordinatorForConsumerCode() || (int) partitionErrorCode == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) { channel.disconnect();
ConsumerMetadataResponse metadataResponse = ConsumerMetadataResponse.readFrom(channel.receive().buffer()); if (metadataResponse.errorCode() == ErrorMapping.NoError()) { Broker offsetManager = metadataResponse.coordinator(); OffsetMetadataAndError result = fetchResponse.offsets().get(partitionToRead); short offsetFetchErrorCode = result.error(); if (offsetFetchErrorCode == ErrorMapping.NotCoordinatorForConsumerCode()) { channel.disconnect();
public PartitionMetadata refreshPartitionMetadataOnLeaderNotAvailable(PartitionMetadata partitionMetadata, TopicMetadata topicMetadata, JobContext context, int numTries) throws InterruptedException { int tryCounter = 0; while (tryCounter < numTries && partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) { log.info("Retry to referesh the topicMetadata on LeaderNotAvailable..."); List<TopicMetadata> topicMetadataList = if (metadataPerPartition.partitionId() == partitionMetadata.partitionId()) { partitionFound = true; if (metadataPerPartition.errorCode() != ErrorMapping.LeaderNotAvailableCode()) { return metadataPerPartition; } else { //retry again.
if (item.errorCode() != ErrorMapping.NoError()) { seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());
LOG.info("Failed to fetch message on {}. Error: {}", topicPart, errorCode); if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) { offset.set(kafka.api.OffsetRequest.EarliestTime());
KafkaPartition kafkaPartition = iterator.next(); short errorCode = fetchResponse.errorCode(consumer.topic, kafkaPartition.getPartitionId()); if (fetchResponse.hasError() && errorCode != ErrorMapping.NoError()) { ErrorMapping.exceptionFor(errorCode)); if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) { long seekTo = consumer.initialOffset.toLowerCase().equals("earliest") ? OffsetRequest.EarliestTime() : OffsetRequest.LatestTime();
} else if (!createMessageDecoder(context, topicMetadata.topic())) { log.info("Discarding topic (Decoder generation failed) : " + topicMetadata.topic()); } else if (topicMetadata.errorCode() != ErrorMapping.NoError()) { log.info("Skipping the creation of ETL request for Whole Topic : " + topicMetadata.topic() + " Exception : " + ErrorMapping.exceptionFor(topicMetadata.errorCode())); } else { for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) { NUM_TRIES_PARTITION_METADATA); if (partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) { log.info("Skipping the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode())); reportJobFailureDueToLeaderNotAvailable = true; } else { if (partitionMetadata.errorCode() != ErrorMapping.NoError()) { log.warn("Receiving non-fatal error code, Continuing the creation of ETL request for Topic : " + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : " + ErrorMapping.exceptionFor(partitionMetadata.errorCode()));
.filter(entry -> entry.getValue().error() == ErrorMapping.NoError()) .collect(Collectors.toMap(entry -> entry.getKey().partition(), entry -> entry.getValue().offset()));
/**
 * Test only refreshing the partitionMetadata when the error code is LeaderNotAvailable.
 *
 * <p>The partition mock always answers LeaderNotAvailable, so the refresh is expected to
 * re-fetch the topic metadata exactly NUM_TRIES_PARTITION_METADATA times before giving up.
 * The {@code .times(...)} expectations on the mocks are the actual assertions here,
 * checked by the trailing {@code verify(...)} calls.
 *
 * @throws Exception
 */
@Test
public void testRefreshPartitioMetadataWithThreeRetries() throws Exception {
    // No real job context is needed; the mocked getKafkaMetadata ignores it.
    JobContext dummyContext = null;
    //A partitionMetadata with errorCode LeaderNotAvailable
    PartitionMetadata partitionMetadata = createMock(PartitionMetadata.class);
    // errorCode()/partitionId() are read twice per retry iteration (loop guard + body),
    // hence NUM_TRIES_PARTITION_METADATA * 2 expected invocations each.
    expect(partitionMetadata.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode()).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
    expect(partitionMetadata.partitionId()).andReturn(0).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
    replay(partitionMetadata);
    // Topic metadata that keeps returning the same still-failing partition on every retry.
    TopicMetadata mockedTopicMetadata = createMock(TopicMetadata.class);
    expect(mockedTopicMetadata.topic()).andReturn("testTopic").times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    expect(mockedTopicMetadata.partitionsMetadata()).andReturn(Collections.singletonList(partitionMetadata)).times(
        EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    replay(mockedTopicMetadata);
    // Partial mock: only getKafkaMetadata is stubbed; refreshPartitionMetadataOnLeaderNotAvailable
    // runs its real implementation so the retry loop under test actually executes.
    EtlInputFormat etlInputFormat =
        createMock(EtlInputFormat.class,
            EtlInputFormat.class.getMethod("getKafkaMetadata", new Class[] { JobContext.class, List.class }));
    EasyMock.expect(etlInputFormat.getKafkaMetadata(dummyContext, Collections.singletonList("testTopic"))).andReturn(
        Collections.singletonList(mockedTopicMetadata)).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    etlInputFormat.setLogger(Logger.getLogger(getClass()));
    replay(etlInputFormat);
    // Exercise the retry loop; with the leader never becoming available it should
    // exhaust all NUM_TRIES_PARTITION_METADATA attempts.
    etlInputFormat.refreshPartitionMetadataOnLeaderNotAvailable(partitionMetadata, mockedTopicMetadata, dummyContext,
        EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
    // Verify the expected number of metadata re-fetches took place.
    verify(mockedTopicMetadata);
    verify(etlInputFormat);
}
short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition()); if (code == ErrorMapping.OffsetOutOfRangeCode()) { else if (code == ErrorMapping.NotLeaderForPartitionCode() || code == ErrorMapping.LeaderNotAvailableCode() || code == ErrorMapping.BrokerNotAvailableCode() || code == ErrorMapping.UnknownCode()) else if (code != ErrorMapping.NoError()) { exception += "\nException for " + fp.getTopic() +":"+ fp.getPartition() + ": " + ExceptionUtils.stringifyException(ErrorMapping.exceptionFor(code));