// NOTE(review): fragment — the enclosing method (and the branch handling a
// disconnected response, whose log call this continues) starts before this
// excerpt; shown from mid-argument-list.
                requestHeader, response.destination());
        for (ProducerBatch batch : batches.values())
            // Disconnection: fail every batch in the request with NETWORK_EXCEPTION
            // (presumably retriable upstream — confirm in completeBatch).
            completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NETWORK_EXCEPTION), correlationId, now, 0L);
    } else if (response.versionMismatch() != null) {
        // Broker does not support the request version; the mismatch exception is
        // passed as the last arg so the logger records its stack trace.
        log.warn("Cancelled request {} due to a version mismatch with node {}",
                response, response.destination(), response.versionMismatch());
        for (ProducerBatch batch : batches.values())
            completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.UNSUPPORTED_VERSION), correlationId, now, 0L);
    } else {
        log.trace("Received produce response from node {} with correlation id {}", response.destination(), correlationId);
        // NOTE(review): `batch` here comes from scope outside this excerpt
        // (likely a per-partition loop) — confirm against the full method.
        completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.NONE), correlationId, now, 0L);
/**
 * Deserializes a produce response from the given {@link Struct}.
 * Walks the nested topic / partition arrays and records one
 * {@link PartitionResponse} (error code + base offset) per topic-partition.
 */
public ProduceResponse(Struct struct) {
    super(struct);
    responses = new HashMap<>();
    for (Object rawTopic : struct.getArray("responses")) {
        Struct topicData = (Struct) rawTopic;
        String topicName = topicData.getString("topic");
        for (Object rawPartition : topicData.getArray("partition_responses")) {
            Struct partitionData = (Struct) rawPartition;
            int partitionId = partitionData.getInt("partition");
            short errorCode = partitionData.getShort("error_code");
            long baseOffset = partitionData.getLong("base_offset");
            responses.put(new TopicPartition(topicName, partitionId),
                    new PartitionResponse(errorCode, baseOffset));
        }
    }
}
// NOTE(review): fragment — `responseMap`, `client`, `tp`, `node`, `producerIdAndEpoch`
// and `txnManager` are declared before this excerpt.
// First respond with a fatal-for-the-batch MESSAGE_TOO_LARGE error.
responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE));
client.respond(new ProduceResponse(responseMap));
assertTrue("Client ready status should be true", client.isReady(node, 0L));
// Then a success at base offset 0, matched against a produce request carrying sequence 0.
responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap));
assertTrue("Client ready status should be true", client.isReady(node, 0L));
// And a success at base offset 1 for the follow-up request with sequence 1.
responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L));
client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap));
// NOTE(review): fragment — `offset` and `tp0` come from the enclosing (unseen) method.
// Build a successful partition response with no log-append time and log start offset 100.
ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(Errors.NONE, offset, RecordBatch.NO_TIMESTAMP, 100);
Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = new HashMap<>();
partResp.put(tp0, resp);
// NOTE(review): fragment — `responseMap`, `tp0` and `client` are declared before this excerpt.
// Queue a successful produce response (base offset 0) on the mock client.
responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
client.respond(new ProduceResponse(responseMap));
@Test public void testExpiredBatchDoesNotRetry() throws Exception { long deliverTimeoutMs = 1500L; setupWithTransactionState(null, false, null); // Send first ProduceRequest Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future; sender.run(time.milliseconds()); // send request assertEquals(1, client.inFlightRequestCount()); time.sleep(deliverTimeoutMs); Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>(); responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); client.respond(produceResponse(tp0, -1, Errors.NOT_LEADER_FOR_PARTITION, -1)); // return a retriable error sender.run(time.milliseconds()); // expire the batch assertTrue(request1.isDone()); assertEquals(0, client.inFlightRequestCount()); assertEquals(0, sender.inFlightBatches(tp0).size()); sender.run(time.milliseconds()); // receive first response and do not reenqueue. assertEquals(0, client.inFlightRequestCount()); assertEquals(0, sender.inFlightBatches(tp0).size()); sender.run(time.milliseconds()); // run again and must not send anything. assertEquals(0, client.inFlightRequestCount()); assertEquals(0, sender.inFlightBatches(tp0).size()); }
/**
 * Builds the error response returned when this produce request fails, mapping
 * the throwable to a protocol error and reporting it for every partition in
 * the request. Returns {@code null} for acks == 0, where no response is sent.
 */
@Override
public ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e) {
    // With acks == 0 the producer doesn't actually want any response.
    if (acks == 0)
        return null;
    Errors error = Errors.forException(e);
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
    // Every partition carries the same error, so a single instance is shared.
    ProduceResponse.PartitionResponse errorResponse = new ProduceResponse.PartitionResponse(error);
    for (TopicPartition tp : partitions())
        responseMap.put(tp, errorResponse);
    short versionId = version();
    // Versions 0 through 7 all use the same response constructor; anything else is invalid.
    if (versionId < 0 || versionId > 7)
        throw new IllegalArgumentException(String.format("Version %d is not valid. Valid versions for %s are 0 to %d",
                versionId, this.getClass().getSimpleName(), ApiKeys.PRODUCE.latestVersion()));
    return new ProduceResponse(responseMap, throttleTimeMs);
}
/**
 * Verifies that an in-flight batch is expired once the delivery timeout
 * elapses: the tracked in-flight count drops to zero and the caller's future
 * fails with a TimeoutException. Note the success response is queued BEFORE
 * the clock is advanced, so the expiry path is exercised on the next run.
 */
@Test
public void testInflightBatchesExpireOnDeliveryTimeout() throws InterruptedException {
    long deliveryTimeoutMs = 1500L;
    setupWithTransactionState(null, true, null);
    // Send first ProduceRequest
    Future<RecordMetadata> request = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(),
            null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds()); // send request
    assertEquals(1, client.inFlightRequestCount());
    assertEquals("Expect one in-flight batch in accumulator", 1, sender.inFlightBatches(tp0).size());
    // Queue a successful response for the request...
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
    responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
    client.respond(new ProduceResponse(responseMap));
    // ...but advance the mock clock past the delivery timeout before processing it.
    time.sleep(deliveryTimeoutMs);
    sender.run(time.milliseconds()); // receive first response
    assertEquals("Expect zero in-flight batch in accumulator", 0, sender.inFlightBatches(tp0).size());
    try {
        request.get();
        fail("The expired batch should throw a TimeoutException");
    } catch (ExecutionException e) {
        // Expired batches surface as a TimeoutException cause on the future.
        assertTrue(e.getCause() instanceof TimeoutException);
    }
}
/**
 * Checks that produce responses built for protocol versions 0-2 report the
 * expected throttle time, serialize with the matching schema version, and
 * preserve the per-partition response data.
 */
@Test
public void produceResponseVersionTest() {
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>();
    responseData.put(new TopicPartition("test", 0),
            new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100));

    ProduceResponse v0Response = new ProduceResponse(responseData);
    ProduceResponse v1Response = new ProduceResponse(responseData, 10);
    ProduceResponse v2Response = new ProduceResponse(responseData, 10);

    // v0 has no throttle-time field, so the default of zero applies.
    assertEquals("Throttle time must be zero", 0, v0Response.throttleTimeMs());
    assertEquals("Should use schema version 0", ApiKeys.PRODUCE.responseSchema((short) 0), v0Response.toStruct((short) 0).schema());
    assertEquals("Response data does not match", responseData, v0Response.responses());

    assertEquals("Throttle time must be 10", 10, v1Response.throttleTimeMs());
    assertEquals("Should use schema version 1", ApiKeys.PRODUCE.responseSchema((short) 1), v1Response.toStruct((short) 1).schema());
    assertEquals("Response data does not match", responseData, v1Response.responses());

    assertEquals("Throttle time must be 10", 10, v2Response.throttleTimeMs());
    assertEquals("Should use schema version 2", ApiKeys.PRODUCE.responseSchema((short) 2), v2Response.toStruct((short) 2).schema());
    assertEquals("Response data does not match", responseData, v2Response.responses());
}
@Test public void produceResponseV5Test() { Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>(); TopicPartition tp0 = new TopicPartition("test", 0); responseData.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); ProduceResponse v5Response = new ProduceResponse(responseData, 10); short version = 5; ByteBuffer buffer = v5Response.serialize(version, new ResponseHeader(0)); buffer.rewind(); ResponseHeader.parse(buffer); // throw away. Struct deserializedStruct = ApiKeys.PRODUCE.parseResponse(version, buffer); ProduceResponse v5FromBytes = (ProduceResponse) AbstractResponse.parseResponse(ApiKeys.PRODUCE, deserializedStruct, version); assertEquals(1, v5FromBytes.responses().size()); assertTrue(v5FromBytes.responses().containsKey(tp0)); ProduceResponse.PartitionResponse partitionResponse = v5FromBytes.responses().get(tp0); assertEquals(100, partitionResponse.logStartOffset); assertEquals(10000, partitionResponse.baseOffset); assertEquals(10, v5FromBytes.throttleTimeMs()); assertEquals(responseData, v5Response.responses()); }
/**
 * Constructor from a {@link Struct}.
 *
 * Parses the nested topic/partition response arrays into the {@code responses}
 * map, one {@link PartitionResponse} per topic-partition. Fields that are
 * absent in older versions fall back to their defaults via {@code getOrElse}.
 */
public ProduceResponse(Struct struct) {
    responses = new HashMap<>();
    for (Object topicResponse : struct.getArray(RESPONSES_KEY_NAME)) {
        Struct topicRespStruct = (Struct) topicResponse;
        String topic = topicRespStruct.get(TOPIC_NAME);
        for (Object partResponse : topicRespStruct.getArray(PARTITION_RESPONSES_KEY_NAME)) {
            Struct partRespStruct = (Struct) partResponse;
            int partition = partRespStruct.get(PARTITION_ID);
            Errors error = Errors.forCode(partRespStruct.get(ERROR_CODE));
            long offset = partRespStruct.getLong(BASE_OFFSET_KEY_NAME);
            long logAppendTime = partRespStruct.getLong(LOG_APPEND_TIME_KEY_NAME);
            // log_start_offset only exists in newer versions; default to INVALID_OFFSET.
            long logStartOffset = partRespStruct.getOrElse(LOG_START_OFFSET_FIELD, INVALID_OFFSET);
            TopicPartition tp = new TopicPartition(topic, partition);
            responses.put(tp, new PartitionResponse(error, offset, logAppendTime, logStartOffset));
        }
    }
    // throttle_time_ms was added in v1; older structs yield the default (0).
    this.throttleTimeMs = struct.getOrElse(THROTTLE_TIME_MS, DEFAULT_THROTTLE_TIME);
}
// Builds a ProduceResponse from per-partition offset/error pairs; insertion
// order is preserved via LinkedHashMap. logStartOffset is fixed at -1 (unknown).
private ProduceResponse produceResponse(Map<TopicPartition, OffsetAndError> responses) {
    Map<TopicPartition, ProduceResponse.PartitionResponse> partResponses = new LinkedHashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndError> entry : responses.entrySet()) {
        ProduceResponse.PartitionResponse response = new ProduceResponse.PartitionResponse(entry.getValue().error,
                entry.getValue().offset, RecordBatch.NO_TIMESTAMP, -1);
        partResponses.put(entry.getKey(), response);
    }
    return new ProduceResponse(partResponses);
}

// NOTE(review): the body of this overload continues past this excerpt.
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, 10); Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = singletonMap(tp, resp); return new ProduceResponse(partResp, throttleTimeMs); }
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, long logStartOffset) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = Collections.singletonMap(tp, resp); return new ProduceResponse(partResp, throttleTimeMs); }
// Builds a fixture response: partition test-0 succeeded at base offset 10000
// with log start offset 100 and zero throttle time.
private ProduceResponse createProduceResponse() {
    TopicPartition partition = new TopicPartition("test", 0);
    ProduceResponse.PartitionResponse partitionResponse =
            new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100);
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseData = new HashMap<>();
    responseData.put(partition, partitionResponse);
    return new ProduceResponse(responseData, 0);
}