/**
 * Builds a full FetchResponse covering every partition in {@code fetches}.
 * A partition whose requested count is zero gets {@link MemoryRecords#EMPTY};
 * otherwise {@code count} records ("key-i" / "value-i") are appended starting
 * at the requested fetch offset. All partitions report no error; the response
 * carries no session (INVALID_SESSION_ID) and zero throttle time.
 */
private FetchResponse<MemoryRecords> fetchResponse(Map<TopicPartition, FetchInfo> fetches) {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, FetchInfo> entry : fetches.entrySet()) {
        FetchInfo info = entry.getValue();
        final MemoryRecords records;
        if (info.count == 0) {
            records = MemoryRecords.EMPTY;
        } else {
            // Build `count` records with the batch's base offset at the fetch offset.
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                    CompressionType.NONE, TimestampType.CREATE_TIME, info.offset);
            for (int i = 0; i < info.count; i++)
                builder.append(0L, ("key-" + i).getBytes(), ("value-" + i).getBytes());
            records = builder.build();
        }
        responses.put(entry.getKey(), new FetchResponse.PartitionData<>(
                Errors.NONE, 0, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    return new FetchResponse<>(Errors.NONE, responses, 0, INVALID_SESSION_ID);
}
/**
 * Decodes a fetch response from its wire-format {@link Struct} (legacy schema):
 * iterates the per-topic array, then each topic's per-partition array, and
 * collects one {@link PartitionData} per (topic, partition) into
 * {@code responseData}.
 */
public FetchResponse(Struct struct) {
    super(struct);
    responseData = new HashMap<TopicPartition, PartitionData>();
    for (Object topicObj : struct.getArray(RESPONSES_KEY_NAME)) {
        Struct topicStruct = (Struct) topicObj;
        String topic = topicStruct.getString(TOPIC_KEY_NAME);
        for (Object partitionObj : topicStruct.getArray(PARTITIONS_KEY_NAME)) {
            Struct partitionStruct = (Struct) partitionObj;
            int partitionId = partitionStruct.getInt(PARTITION_KEY_NAME);
            short errorCode = partitionStruct.getShort(ERROR_CODE_KEY_NAME);
            long highWatermark = partitionStruct.getLong(HIGH_WATERMARK_KEY_NAME);
            ByteBuffer recordSet = partitionStruct.getBytes(RECORD_SET_KEY_NAME);
            responseData.put(new TopicPartition(topic, partitionId),
                    new PartitionData(errorCode, highWatermark, recordSet));
        }
    }
}
// NOTE(review): fragment of a larger method — `entry`, `responseMap` and
// `buildRecords` come from the enclosing (not visible) scope.
// Answers the fetch with 2 records starting at the requested offset; both the
// high watermark (offset + 2L) and last stable offset (offset + 2) point just
// past the returned batch, i.e. the consumer is fully caught up afterwards.
TopicPartition tp = entry.getKey(); long offset = entry.getValue().fetchOffset; responseMap.put(tp, new FetchResponse.PartitionData<>(Errors.NONE, offset + 2L, offset + 2, 0L, null, buildRecords(offset, 2, offset)));
// NOTE(review): fragment — `partitions1`, `partitions3`, `tp0`, `tp1`,
// `this.records`, `emptyRecords` and `this.nextRecords` are defined by the
// enclosing scope. First response: tp0 returns real records (hw=2, lso=2),
// tp1 returns empty records with no valid last stable offset. Third response:
// tp0 continues with the next record batch (hw=100, lso=4). Both responses
// reuse session id 123, presumably exercising incremental fetch sessions —
// confirm against the enclosing test.
// NOTE(review): `resp1`/`resp3` are declared with the raw FetchResponse type
// while constructed via the diamond — consider FetchResponse<MemoryRecords>.
partitions1.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 2L, 2, 0L, null, this.records)); partitions1.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, emptyRecords)); FetchResponse resp1 = new FetchResponse<>(Errors.NONE, partitions1, 0, 123); partitions3.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100L, 4, 0L, null, this.nextRecords)); FetchResponse resp3 = new FetchResponse<>(Errors.NONE, partitions3, 0, 123);
// NOTE(review): fragment — `partitions`, `tp0..tp3`, `records`, `nextRecords`,
// `partialRecords` and `client` come from the enclosing scope. Builds a mixed
// response: tp1 succeeds with records, tp0 fails with OFFSET_OUT_OF_RANGE and
// empty records, tp2 and tp3 succeed (tp3 with a partial record set). The map
// is copied into a LinkedHashMap so the response preserves insertion order.
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records)); partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)); partitions.put(tp2, new FetchResponse.PartitionData<>(Errors.NONE, 100L, 4, 0L, null, nextRecords)); partitions.put(tp3, new FetchResponse.PartitionData<>(Errors.NONE, 100L, 4, 0L, null, partialRecords)); client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
// NOTE(review): fragment of a decode loop — `error`, `highWatermark`,
// `lastStableOffset`, `logStartOffset`, `abortedTransactions`, `records`,
// `topic`, `partition` and `responseData` are all bound by the enclosing
// (not visible) scope. Assembles one partition's decoded fields into a
// PartitionData and indexes it by (topic, partition).
PartitionData<MemoryRecords> partitionData = new PartitionData<>(error, highWatermark, lastStableOffset, logStartOffset, abortedTransactions, records); responseData.put(new TopicPartition(topic, partition), partitionData);
// NOTE(review): fragment — `partitions`, `tp0`, `tp1`, `records` and `client`
// come from the enclosing scope. Prepares a response where tp1 succeeds with
// records while tp0 fails with OFFSET_OUT_OF_RANGE and empty records; the
// LinkedHashMap copy fixes the partition ordering of the response.
partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records)); partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)); client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions),
// NOTE(review): fragment — `partitions`, `tp0`, `tp1` and `records` come from
// the enclosing scope. Both partitions succeed: tp0 returns the pre-built
// records, tp1 returns a single freshly built uncompressed record ("val").
partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records)); partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));
// NOTE(review): fragment — `expectedBytes`, `record`, `fetchPartitionData`,
// `tp` and `records` come from the enclosing scope. Accumulates the serialized
// size of each record (for a later metric assertion, presumably) and registers
// a successful partition response with high watermark 15.
expectedBytes += record.sizeInBytes(); fetchPartitionData.put(tp, new FetchResponse.PartitionData<>(Errors.NONE, 15L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
/**
 * Verifies that fetch-size and records-per-request metrics only account for
 * partitions that returned data: tp0 succeeds with 3 records, tp1 fails with
 * OFFSET_OUT_OF_RANGE (empty records), and the averages must equal exactly
 * the bytes/count of tp0's records.
 */
// NOTE(review): statement order is load-bearing for the mock interactions
// (assign/seek before sendFetches; prepareResponse before poll) — left as-is.
@Test public void testFetchResponseMetricsWithOnePartitionError() { subscriptions.assignFromUser(Utils.mkSet(tp0, tp1)); subscriptions.seek(tp0, 0); subscriptions.seek(tp1, 0); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg)); KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg)); /* three records at offsets 0..2 for the successful partition */ MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); MemoryRecords records = builder.build(); Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>(); partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records)); partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY)); assertEquals(1, fetcher.sendFetches()); client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID)); consumerClient.poll(time.timer(0)); fetcher.fetchedRecords(); /* metrics must reflect only tp0's successfully returned records */ int expectedBytes = 0; for (Record record : records.records()) expectedBytes += record.sizeInBytes(); assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON); assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON); }
@Test public void testSeekBeforeException() { Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptionsNoAutoReset, new Metrics(time), 2); subscriptionsNoAutoReset.assignFromUser(Utils.mkSet(tp0)); subscriptionsNoAutoReset.seek(tp0, 1); assertEquals(1, fetcher.sendFetches()); Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = new HashMap<>(); partitions.put(tp0, new FetchResponse.PartitionData<>(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records)); client.prepareResponse(fullFetchResponse(tp0, this.records, Errors.NONE, 100L, 0)); consumerClient.poll(time.timer(0)); assertEquals(2, fetcher.fetchedRecords().get(tp0).size()); subscriptionsNoAutoReset.assignFromUser(Utils.mkSet(tp0, tp1)); subscriptionsNoAutoReset.seek(tp1, 1); assertEquals(1, fetcher.sendFetches()); partitions = new HashMap<>(); partitions.put(tp1, new FetchResponse.PartitionData<>(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY)); client.prepareResponse(new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID)); consumerClient.poll(time.timer(0)); assertEquals(1, fetcher.fetchedRecords().get(tp0).size()); subscriptionsNoAutoReset.seek(tp1, 10); // Should not throw OffsetOutOfRangeException after the seek assertEquals(0, fetcher.fetchedRecords().size()); }
/**
 * Round-trips a FetchResponse through the v4 wire schema and checks that the
 * partition data (including aborted-transaction lists, present, absent, and
 * empty) survives serialization/deserialization unchanged.
 */
@Test
public void testFetchResponseV4() {
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponse.AbortedTransaction> aborted = asList(
            new FetchResponse.AbortedTransaction(10, 100),
            new FetchResponse.AbortedTransaction(15, 50));

    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> expected = new LinkedHashMap<>();
    // One partition with aborted transactions, one with none (null), one with an empty list.
    expected.put(new TopicPartition("bar", 0), new FetchResponse.PartitionData<>(Errors.NONE, 100000,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, aborted, records));
    expected.put(new TopicPartition("bar", 1), new FetchResponse.PartitionData<>(Errors.NONE, 900000,
            5, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    expected.put(new TopicPartition("foo", 0), new FetchResponse.PartitionData<>(Errors.NONE, 70000,
            6, FetchResponse.INVALID_LOG_START_OFFSET, Collections.emptyList(), records));

    FetchResponse<MemoryRecords> response = new FetchResponse<>(Errors.NONE, expected, 10, INVALID_SESSION_ID);
    FetchResponse deserialized = FetchResponse.parse(toBuffer(response.toStruct((short) 4)), (short) 4);
    assertEquals(expected, deserialized.responseData());
}
@Override public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { // The error is indicated in two ways: by setting the same error code in all partitions, and by // setting the top-level error code. The form where we set the same error code in all partitions // is needed in order to maintain backwards compatibility with older versions of the protocol // in which there was no top-level error code. Note that for incremental fetch responses, there // may not be any partitions at all in the response. For this reason, the top-level error code // is essential for them. Errors error = Errors.forException(e); LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> responseData = new LinkedHashMap<>(); for (Map.Entry<TopicPartition, PartitionData> entry : fetchData.entrySet()) { FetchResponse.PartitionData<MemoryRecords> partitionResponse = new FetchResponse.PartitionData<>(error, FetchResponse.INVALID_HIGHWATERMARK, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY); responseData.put(entry.getKey(), partitionResponse); } return new FetchResponse<>(error, responseData, throttleTimeMs, metadata.sessionId()); }
/**
 * Checks version-specific behavior of FetchResponse: throttle time (absent in
 * v0, present from v1), per-version struct schemas, and that the partition
 * data is preserved regardless of version.
 */
@Test
public void fetchResponseVersionTest() {
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data = new LinkedHashMap<>();
    data.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));

    FetchResponse<MemoryRecords> v0Response = new FetchResponse<>(Errors.NONE, data, 0, INVALID_SESSION_ID);
    FetchResponse<MemoryRecords> v1Response = new FetchResponse<>(Errors.NONE, data, 10, INVALID_SESSION_ID);

    assertEquals("Throttle time must be zero", 0, v0Response.throttleTimeMs());
    assertEquals("Throttle time must be 10", 10, v1Response.throttleTimeMs());
    assertEquals("Should use schema version 0", ApiKeys.FETCH.responseSchema((short) 0),
            v0Response.toStruct((short) 0).schema());
    assertEquals("Should use schema version 1", ApiKeys.FETCH.responseSchema((short) 1),
            v1Response.toStruct((short) 1).schema());
    assertEquals("Response data does not match", data, v0Response.responseData());
    assertEquals("Response data does not match", data, v1Response.responseData());
}
/**
 * Builds a single-partition (tp0) fetch response carrying the given
 * aborted-transaction markers; log start offset is fixed at 0 and the
 * response is sessionless.
 */
private FetchResponse<MemoryRecords> fullFetchResponseWithAbortedTransactions(MemoryRecords records,
                                                                              List<FetchResponse.AbortedTransaction> abortedTransactions,
                                                                              Errors error,
                                                                              long lastStableOffset,
                                                                              long hw,
                                                                              int throttleTime) {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data = new LinkedHashMap<>();
    data.put(tp0, new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, abortedTransactions, records));
    return new FetchResponse<>(Errors.NONE, data, throttleTime, INVALID_SESSION_ID);
}
/**
 * Builds a two-partition sessionless test response on topic "test":
 * partition 0 carries one record ("blah"), partition 1 is empty but reports
 * a single aborted transaction. Throttle time is 25ms.
 */
private FetchResponse<MemoryRecords> createFetchResponse() {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data = new LinkedHashMap<>();

    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    data.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));

    List<FetchResponse.AbortedTransaction> aborted =
            Collections.singletonList(new FetchResponse.AbortedTransaction(234L, 999L));
    data.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, aborted, MemoryRecords.EMPTY));

    return new FetchResponse<>(Errors.NONE, data, 25, INVALID_SESSION_ID);
}
private FetchResponse<MemoryRecords> fetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw, long lastStableOffset, long logStartOffset, int throttleTime) { Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp, new FetchResponse.PartitionData<>(error, hw, lastStableOffset, logStartOffset, null, records)); return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID); }
private FetchResponse<MemoryRecords> fullFetchResponse(TopicPartition tp, MemoryRecords records, Errors error, long hw, long lastStableOffset, int throttleTime) { Map<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> partitions = Collections.singletonMap(tp, new FetchResponse.PartitionData<>(error, hw, lastStableOffset, 0L, null, records)); return new FetchResponse<>(Errors.NONE, new LinkedHashMap<>(partitions), throttleTime, INVALID_SESSION_ID); }
/**
 * Builds a two-partition test response on topic "test" bound to the given
 * fetch session id: partition 0 carries one record ("blah"), partition 1 is
 * empty but reports a single aborted transaction. Throttle time is 25ms.
 */
private FetchResponse<MemoryRecords> createFetchResponse(int sessionId) {
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData<MemoryRecords>> data = new LinkedHashMap<>();

    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    data.put(new TopicPartition("test", 0), new FetchResponse.PartitionData<>(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));

    List<FetchResponse.AbortedTransaction> aborted =
            Collections.singletonList(new FetchResponse.AbortedTransaction(234L, 999L));
    data.put(new TopicPartition("test", 1), new FetchResponse.PartitionData<>(
            Errors.NONE, 1000000, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, aborted, MemoryRecords.EMPTY));

    return new FetchResponse<>(Errors.NONE, data, 25, sessionId);
}
// Convenience constructor: pairs a (topic, partition) with a no-error
// PartitionData that carries only the given watermarks. The record set is
// intentionally null — presumably only offsets are inspected by the tests
// using this entry; confirm against callers. logStartOffset is fixed at 0.
RespEntry(String topic, int partition, long highWatermark, long lastStableOffset) { this.part = new TopicPartition(topic, partition); this.data = new FetchResponse.PartitionData<>( Errors.NONE, highWatermark, lastStableOffset, 0, null, null); } }