    public void run() {
        for (int i = 0; i < msgs; i++) {
            try {
                accum.append(new TopicPartition(topic, i % numParts), 0L, key, value,
                        Record.EMPTY_HEADERS, null, maxBlockTimeMs);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
});
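// A minimal sketch of the harness a run() method like the one above typically
// sits in: several threads are created and started so they append to the
// accumulator concurrently. The numThreads name and the bookkeeping here are
// illustrative assumptions, not taken from the original test.
List<Thread> threads = new ArrayList<>();
for (int t = 0; t < numThreads; t++) {
    threads.add(new Thread() {
        public void run() {
            // per-thread append loop, as in the fragment above
        }
    });
}
for (Thread thread : threads)
    thread.start();
for (Thread thread : threads)
    thread.join();  // join() throws InterruptedException; declare or handle it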
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception instanceof TimeoutException) {
            expiryCallbackCount.incrementAndGet();
            try {
                accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
            } catch (InterruptedException e) {
                throw new RuntimeException("Unexpected interruption", e);
            }
        } else if (exception != null) {
            unexpectedException.compareAndSet(null, exception);
        }
    }
};
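// Usage sketch: the onCompletion handler above only fires if the enclosing
// Callback instance (assumed here to be bound to a variable named "callback")
// is passed to append() in place of the null callback argument used elsewhere:
accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, callback, maxBlockTimeMs);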
private void assertSendFailure(Class<? extends RuntimeException> expectedError) throws Exception {
    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have raised " + expectedError.getSimpleName());
    } catch (ExecutionException e) {
        assertTrue(expectedError.isAssignableFrom(e.getCause().getClass()));
    }
}
@Test
public void testPartialDrain() throws Exception {
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, 10L);
    int appends = 1024 / msgSize + 1;
    List<TopicPartition> partitions = asList(tp1, tp2);
    for (TopicPartition tp : partitions) {
        for (int i = 0; i < appends; i++)
            accum.append(tp, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    }
    assertEquals("Partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);

    List<ProducerBatch> batches = accum.drain(cluster, Collections.singleton(node1), 1024, 0).get(node1.id());
    assertEquals("Due to the size bound, only one partition should have been drained", 1, batches.size());
}
private int prepareSplitBatches(RecordAccumulator accum, long seed, int recordSize, int numRecords)
        throws InterruptedException {
    Random random = new Random();
    random.setSeed(seed);

    // First set the compression ratio estimation to be good.
    CompressionRatioEstimator.setEstimation(tp1.topic(), CompressionType.GZIP, 0.1f);
    // Appending numRecords records of recordSize bytes with a poor actual compression
    // ratio should make the batch too big once the optimistic estimate is corrected.
    for (int i = 0; i < numRecords; i++) {
        accum.append(tp1, 0L, null, bytesWithPoorCompression(random, recordSize), Record.EMPTY_HEADERS, null, 0);
    }

    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, batches.size());
    assertEquals(1, batches.values().iterator().next().size());
    ProducerBatch batch = batches.values().iterator().next().get(0);

    int numSplitBatches = accum.splitAndReenqueue(batch);
    accum.deallocate(batch);
    return numSplitBatches;
}
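// bytesWithPoorCompression is assumed to produce an incompressible payload so
// that the optimistic GZIP estimate set above turns out badly wrong; a minimal
// sketch (random bytes do not compress):
private byte[] bytesWithPoorCompression(Random random, int size) {
    byte[] value = new byte[size];
    random.nextBytes(value);
    return value;
}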
@Test
public void testResetNextBatchExpiry() throws Exception {
    client = spy(new MockClient(time, metadata));

    setupWithTransactionState(null);

    accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT);

    sender.run(time.milliseconds());
    sender.run(time.milliseconds());
    time.setCurrentTimeMs(time.milliseconds() + accumulator.getDeliveryTimeoutMs() + 1);
    sender.run(time.milliseconds());

    // spy/inOrder/atLeastOnce/any/eq come from Mockito; geq is Mockito's AdditionalMatchers.geq.
    InOrder inOrder = inOrder(client);
    inOrder.verify(client, atLeastOnce()).ready(any(), anyLong());
    inOrder.verify(client, atLeastOnce()).newClientRequest(anyString(), any(), anyLong(), anyBoolean(), anyInt(), any());
    inOrder.verify(client, atLeastOnce()).send(any(), anyLong());
    inOrder.verify(client).poll(eq(0L), anyLong());
    inOrder.verify(client).poll(eq(accumulator.getDeliveryTimeoutMs()), anyLong());
    inOrder.verify(client).poll(geq(1L), anyLong());
}
@Test
public void testLinger() throws Exception {
    long lingerMs = 10L;
    RecordAccumulator accum = createTestRecordAccumulator(
            1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, CompressionType.NONE, lingerMs);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
    assertEquals("No partitions should be ready", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size());
    time.sleep(10);
    assertEquals("Our partition's leader should be ready", Collections.singleton(node1),
            accum.ready(cluster, time.milliseconds()).readyNodes);
    List<ProducerBatch> batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
    assertEquals(1, batches.size());
    ProducerBatch batch = batches.get(0);

    Iterator<Record> iter = batch.records().records().iterator();
    Record record = iter.next();
    assertEquals("Keys should match", ByteBuffer.wrap(key), record.key());
    assertEquals("Values should match", ByteBuffer.wrap(value), record.value());
    assertFalse("No more records", iter.hasNext());
}
private void verifyAddPartitionsFailsWithPartitionLevelError(final Errors error) throws InterruptedException {
    final long pid = 1L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxn(tp0, error);
    sender.run(time.milliseconds());  // attempt to send the AddPartitions request
    assertTrue(transactionManager.hasError());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
@Test(expected = ExecutionException.class)
public void testProducerFencedException() throws InterruptedException, ExecutionException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, pid);
    prepareProduceResponse(Errors.INVALID_PRODUCER_EPOCH, pid, epoch);

    sender.run(time.milliseconds());  // Add partitions.
    sender.run(time.milliseconds());  // Send produce.

    assertTrue(responseFuture.isDone());
    assertTrue(transactionManager.hasError());
    responseFuture.get();  // throws the fenced-producer error wrapped in an ExecutionException
}
@Test
public void testAwaitFlushComplete() throws Exception {
    RecordAccumulator accum = createTestRecordAccumulator(
            4 * 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, Long.MAX_VALUE);
    accum.append(new TopicPartition(topic, 0), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);

    accum.beginFlush();
    assertTrue(accum.flushInProgress());
    delayedInterrupt(Thread.currentThread(), 1000L);
    try {
        accum.awaitFlushCompletion();
        fail("awaitFlushCompletion should throw InterruptedException");
    } catch (InterruptedException e) {
        assertFalse("flushInProgress count should be decremented even if thread is interrupted",
                accum.flushInProgress());
    }
}
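// delayedInterrupt is assumed to interrupt the given thread from a background
// thread after a delay, so that awaitFlushCompletion() is blocked when the
// interrupt arrives; a minimal sketch:
private void delayedInterrupt(final Thread thread, final long delayMs) {
    Thread t = new Thread(() -> {
        try {
            Thread.sleep(delayMs);
        } catch (InterruptedException e) {
            // ignore: the sketch only needs a best-effort delay
        }
        thread.interrupt();
    });
    t.start();
}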
@Test
public void testSenderShutdownWithPendingAddPartitions() throws Exception {
    long pid = 13131L;
    short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();

    transactionManager.maybeAddPartitionToTransaction(tp0);
    FutureRecordMetadata sendFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    prepareAddPartitionsToTxn(tp0, Errors.NONE);
    prepareProduceResponse(Errors.NONE, pid, epoch);

    sender.initiateClose();
    sender.run();

    assertTrue(sendFuture.isDone());
}
@Test
public void resendFailedProduceRequestAfterAbortableError() throws Exception {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, pid);
    prepareProduceResponse(Errors.NOT_LEADER_FOR_PARTITION, pid, epoch);
    sender.run(time.milliseconds());  // Add partitions
    sender.run(time.milliseconds());  // Produce
    assertFalse(responseFuture.isDone());

    transactionManager.transitionToAbortableError(new KafkaException());
    prepareProduceResponse(Errors.NONE, pid, epoch);
    sender.run(time.milliseconds());

    assertTrue(responseFuture.isDone());
    assertNotNull(responseFuture.get());  // the retried produce should still complete successfully despite the abortable error
}
@Test(expected = UnsupportedVersionException.class)
public void testIdempotenceWithOldMagic() throws InterruptedException {
    // Simulate talking to an older broker, i.e. one which supports a lower record magic.
    ApiVersions apiVersions = new ApiVersions();
    int batchSize = 1025;
    int requestTimeoutMs = 1600;
    long deliveryTimeoutMs = 3200L;
    long lingerMs = 10L;
    long retryBackoffMs = 100L;
    long totalSize = 10 * batchSize;
    String metricGrpName = "producer-metrics";

    apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(
            new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2))));
    RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD,
            CompressionType.NONE, lingerMs, retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time,
            apiVersions, new TransactionManager(),
            new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0);
}
@Test
public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Queue the first record; the node is then disconnected and blacked out,
    // so the batch expires before it can ever be sent.
    Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(),
            null, null, MAX_BLOCK_TIMEOUT).future;
    Node node = metadata.fetch().nodes().get(0);
    time.sleep(10000L);
    client.disconnect(node.idString());
    client.blackout(node, 10);

    sender.run(time.milliseconds());

    assertFutureFailure(request1, TimeoutException.class);
    assertFalse(transactionManager.hasUnresolvedSequence(tp0));
}
@Test
public void testSimple() throws Exception {
    long offset = 0;
    Future<RecordMetadata> future = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(),
            null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());  // connect
    sender.run(time.milliseconds());  // send produce request
    assertEquals("We should have a single produce request in flight.", 1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size());
    assertTrue(client.hasInFlightRequests());
    client.respond(produceResponse(tp0, offset, Errors.NONE, 0));
    sender.run(time.milliseconds());
    assertEquals("All requests completed.", 0, client.inFlightRequestCount());
    assertEquals(0, sender.inFlightBatches(tp0).size());
    assertFalse(client.hasInFlightRequests());
    sender.run(time.milliseconds());
    assertTrue("Request should be completed", future.isDone());
    assertEquals(offset, future.get().offset());
}
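// produceResponse is assumed to build a ProduceResponse containing a single
// partition entry; a minimal sketch, assuming a PartitionResponse constructor
// of the form (error, baseOffset, logAppendTime, logStartOffset) is available
// in this codebase:
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    ProduceResponse.PartitionResponse resp =
            new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, -1L);
    return new ProduceResponse(Collections.singletonMap(tp, resp), throttleTimeMs);
}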
@Test
public void testHandlingOfUnknownTopicPartitionErrorOnAddPartitions() throws InterruptedException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION, tp0, epoch, pid);

    sender.run(time.milliseconds());  // Send AddPartitionsRequest
    assertFalse(transactionManager.transactionContainsPartition(tp0));  // The partition should not yet be added.

    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, pid);
    prepareProduceResponse(Errors.NONE, pid, epoch);
    sender.run(time.milliseconds());  // Retry the AddPartitionsRequest; this time it succeeds.
    assertTrue(transactionManager.transactionContainsPartition(tp0));
    sender.run(time.milliseconds());  // Send ProduceRequest.
    assertTrue(responseFuture.isDone());
}
@Test
public void testUnsupportedForMessageFormatInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords();
        }
    }, produceResponse(tp0, -1, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, 0));

    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedForMessageFormatException.class);

    // unsupported for message format is not a fatal error
    assertFalse(transactionManager.hasError());
}
@Test
public void testUnsupportedVersionInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareUnsupportedVersionResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords();
        }
    });

    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedVersionException.class);

    // unsupported version errors are fatal, so we should continue seeing it on future sends
    assertTrue(transactionManager.hasFatalError());
    assertSendFailure(UnsupportedVersionException.class);
}
@Test
public void testClusterAuthorizationExceptionInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    // cluster authorization is a fatal error for the producer
    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).hasIdempotentRecords();
        }
    }, produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0));

    sender.run(time.milliseconds());
    assertFutureFailure(future, ClusterAuthorizationException.class);

    // cluster authorization errors are fatal, so we should continue seeing it on future sends
    assertTrue(transactionManager.hasFatalError());
    assertSendFailure(ClusterAuthorizationException.class);
}
@Test
public void testFindCoordinatorAllowedInAbortableErrorState() throws InterruptedException {
    final long pid = 13131L;
    final short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartitionToTransaction(tp0);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT).future;

    assertFalse(responseFuture.isDone());
    sender.run(time.milliseconds());  // Send AddPartitionsRequest
    transactionManager.transitionToAbortableError(new KafkaException());
    sendAddPartitionsToTxnResponse(Errors.NOT_COORDINATOR, tp0, epoch, pid);
    sender.run(time.milliseconds());  // AddPartitions returns
    assertTrue(transactionManager.hasAbortableError());

    assertNull(transactionManager.coordinator(CoordinatorType.TRANSACTION));
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId);
    sender.run(time.milliseconds());  // FindCoordinator handled
    assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION));
    assertTrue(transactionManager.hasAbortableError());
}