/**
 * Moves the position of {@code tp} according to its configured reset strategy:
 * the recorded beginning offset for EARLIEST, the recorded end offset for LATEST.
 *
 * @throws IllegalStateException if the required beginning/end offset was never supplied
 * @throws NoOffsetForPartitionException if no reset strategy is configured for the partition
 */
private void resetOffsetPosition(TopicPartition tp) {
    final Long target;
    switch (subscriptions.resetStrategy(tp)) {
        case EARLIEST:
            target = beginningOffsets.get(tp);
            if (target == null)
                throw new IllegalStateException("MockConsumer didn't have beginning offset specified, but tried to seek to beginning");
            break;
        case LATEST:
            target = getEndOffset(endOffsets.get(tp));
            if (target == null)
                throw new IllegalStateException("MockConsumer didn't have end offset specified, but tried to seek to end");
            break;
        default:
            // Neither EARLIEST nor LATEST: there is nothing sensible to reset to.
            throw new NoOffsetForPartitionException(tp);
    }
    seek(tp, target);
}
protected void handleNoOffsetForPartitionException(NoOffsetForPartitionException e, AbstractKafkaConsumer consumer) { // if initialOffset is set to EARLIST or LATEST // and the application is run as first time // then there is no existing committed offset and this error will be caught // we need to seek to either beginning or end of the partition // based on the initial offset setting AbstractKafkaInputOperator.InitialOffset io = AbstractKafkaInputOperator.InitialOffset.valueOf(ownerOperator.getInitialOffset()); if (io == AbstractKafkaInputOperator.InitialOffset.APPLICATION_OR_EARLIEST || io == AbstractKafkaInputOperator.InitialOffset.EARLIEST) { consumer.seekToBeginning(e.partitions().toArray(new TopicPartition[0])); } else { consumer.seekToEnd(e.partitions().toArray(new TopicPartition[0])); } }
/**
 * Recovers from a {@link NoOffsetForPartitionException} by seeking to the beginning
 * or the end of the affected partitions, based on the operator's initial-offset setting.
 */
protected void handleNoOffsetForPartitionException(NoOffsetForPartitionException e, AbstractKafkaConsumer consumer)
{
  // If initialOffset is set to EARLIEST or LATEST and the application is run for the
  // first time, then there is no existing committed offset and this error will be
  // caught. We need to seek to either the beginning or the end of the partition,
  // based on the initial offset setting.
  AbstractKafkaInputOperator.InitialOffset io = AbstractKafkaInputOperator.InitialOffset.valueOf(ownerOperator.getInitialOffset());
  if (io == AbstractKafkaInputOperator.InitialOffset.APPLICATION_OR_EARLIEST
      || io == AbstractKafkaInputOperator.InitialOffset.EARLIEST) {
    // "Earliest" semantics: rewind to the start of every partition in the exception.
    consumer.seekToBeginning(e.partitions().toArray(new TopicPartition[0]));
  } else {
    // Otherwise: jump to the end of every partition in the exception.
    consumer.seekToEnd(e.partitions().toArray(new TopicPartition[0]));
  }
}
/**
 * Applies the default reset strategy to every assigned partition whose position is
 * missing. Partitions that cannot be reset (default strategy NONE) are collected and
 * reported together via a single {@link NoOffsetForPartitionException}.
 */
public void resetMissingPositions() {
    final Set<TopicPartition> unresolvable = new HashSet<>();
    assignment.stream().forEach(state -> {
        final TopicPartitionState partitionState = state.value();
        if (!partitionState.isMissingPosition())
            return; // position already known; nothing to do
        if (defaultResetStrategy == OffsetResetStrategy.NONE)
            unresolvable.add(state.topicPartition());
        else
            partitionState.reset(defaultResetStrategy);
    });
    if (!unresolvable.isEmpty())
        throw new NoOffsetForPartitionException(unresolvable);
}
@Test @SuppressWarnings("unchecked") public void whenReadEventsThenNakadiRuntimeBaseException() { // ARRANGE // final ImmutableList<RuntimeException> exceptions = ImmutableList.of(new NoOffsetForPartitionException( new TopicPartition("", 0)), new KafkaException()); int numberOfNakadiRuntimeBaseExceptions = 0; for (final Exception exception : exceptions) { final KafkaConsumer<byte[], byte[]> kafkaConsumerMock = mock(KafkaConsumer.class); when(kafkaConsumerMock.poll(POLL_TIMEOUT)).thenThrow(exception); try { // ACT // final NakadiKafkaConsumer consumer = new NakadiKafkaConsumer(kafkaConsumerMock, ImmutableList.of(), createTpTimelineMap(), POLL_TIMEOUT); consumer.readEvents(); // ASSERT // fail("An Exception was expected to be be thrown"); } catch (final Exception e) { numberOfNakadiRuntimeBaseExceptions++; } } assertThat("We should get a NakadiBaseException for every call", numberOfNakadiRuntimeBaseExceptions, equalTo(exceptions.size())); }
/**
 * Seeks each given partition back to its last committed offset, clears the record
 * processor's state for it, and restores the consumer high watermark recorded in the
 * commit metadata (falling back to the committed offset when no watermark is present).
 *
 * @throws NoOffsetForPartitionException if a partition has no committed offset
 */
@Override
public void seekToCommitted(Collection<TopicPartition> partitions) {
    for (TopicPartition partition : partitions) {
        final OffsetAndMetadata committed = _kafkaConsumer.committed(partition);
        if (committed == null) {
            // Nothing was ever committed for this partition; the seek is impossible.
            throw new NoOffsetForPartitionException(partition);
        }
        _kafkaConsumer.seek(partition, committed.offset());
        _consumerRecordsProcessor.clear(partition);
        Long highWaterMark = LiKafkaClientsUtils.offsetFromWrappedMetadata(committed.metadata());
        if (highWaterMark == null) {
            highWaterMark = committed.offset();
        }
        _consumerRecordsProcessor.setPartitionConsumerHighWaterMark(partition, highWaterMark);
    }
}