/**
 * Builds an insertion-ordered map of partitions to "topic partition" labels.
 * Ordering matters to the callers, hence the LinkedHashMap: entries are added
 * in exactly the same sequence as before.
 */
private LinkedHashMap<TopicPartition, String> createMap() {
    // topic/partition pairs in the required insertion order
    Object[][] entries = {
        {"foo", 2}, {"blah", 2}, {"blah", 1},
        {"baz", 2}, {"foo", 0}, {"baz", 3}
    };
    LinkedHashMap<TopicPartition, String> result = new LinkedHashMap<>();
    for (Object[] entry : entries) {
        String topic = (String) entry[0];
        int partition = (Integer) entry[1];
        result.put(new TopicPartition(topic, partition), topic + " " + partition);
    }
    return result;
}
// Producer-thread body: appends `msgs` records to the accumulator, cycling
// round-robin over `numParts` partitions (i % numParts). Any append failure is
// printed and swallowed so the remaining appends still run. The trailing
// "} });" closes the enclosing anonymous class and call, which start before
// this line.
public void run() { for (int i = 0; i < msgs; i++) { try { accum.append(new TopicPartition(topic, i % numParts), 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); } catch (Exception e) { e.printStackTrace(); } } } });
/**
 * Builds a sample WriteTxnMarkersRequest carrying one ABORT marker
 * (producerId 21, epoch 42, coordinator epoch 73) for a single partition.
 */
private WriteTxnMarkersRequest createWriteTxnMarkersRequest() {
    WriteTxnMarkersRequest.TxnMarkerEntry abortMarker = new WriteTxnMarkersRequest.TxnMarkerEntry(
        21L, (short) 42, 73, TransactionResult.ABORT,
        Collections.singletonList(new TopicPartition("topic", 73)));
    return new WriteTxnMarkersRequest.Builder(Collections.singletonList(abortMarker)).build();
}
/**
 * Deserializes a LeaderAndIsrResponse from its wire-format {@link Struct}:
 * collects the per-partition error codes, then reads the top-level error.
 */
public LeaderAndIsrResponse(Struct struct) {
    responses = new HashMap<>();
    for (Object partitionDataObj : struct.get(PARTITIONS)) {
        Struct partitionData = (Struct) partitionDataObj;
        String topicName = partitionData.get(TOPIC_NAME);
        int partitionId = partitionData.get(PARTITION_ID);
        responses.put(new TopicPartition(topicName, partitionId),
            Errors.forCode(partitionData.get(ERROR_CODE)));
    }
    error = Errors.forCode(struct.get(ERROR_CODE));
}
/**
 * Deserializes a StopReplicaResponse from its wire-format {@link Struct}:
 * collects the per-partition error codes, then reads the top-level error.
 */
public StopReplicaResponse(Struct struct) {
    responses = new HashMap<>();
    for (Object partitionDataObj : struct.get(PARTITIONS)) {
        Struct partitionData = (Struct) partitionDataObj;
        String topicName = partitionData.get(TOPIC_NAME);
        int partitionId = partitionData.get(PARTITION_ID);
        responses.put(new TopicPartition(topicName, partitionId),
            Errors.forCode(partitionData.get(ERROR_CODE)));
    }
    error = Errors.forCode(struct.get(ERROR_CODE));
}
/**
 * When the response carries a top-level error, every partition is counted
 * under that error regardless of its own per-partition code.
 */
@Test
public void testErrorCountsWithTopLevelError() {
    Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
    partitionErrors.put(new TopicPartition("foo", 0), Errors.NONE);
    partitionErrors.put(new TopicPartition("foo", 1), Errors.NOT_LEADER_FOR_PARTITION);
    LeaderAndIsrResponse response =
        new LeaderAndIsrResponse(Errors.UNKNOWN_SERVER_ERROR, partitionErrors);
    // Both partitions collapse into the single top-level error bucket.
    assertEquals(Collections.singletonMap(Errors.UNKNOWN_SERVER_ERROR, 2), response.errorCounts());
}
/**
 * Without a top-level error, per-partition errors are tallied individually.
 */
@Test
public void testErrorCountsNoTopLevelError() {
    Map<TopicPartition, Errors> partitionErrors = new HashMap<>();
    partitionErrors.put(new TopicPartition("foo", 0), Errors.NONE);
    partitionErrors.put(new TopicPartition("foo", 1), Errors.CLUSTER_AUTHORIZATION_FAILED);
    LeaderAndIsrResponse response = new LeaderAndIsrResponse(Errors.NONE, partitionErrors);
    Map<Errors, Integer> counts = response.errorCounts();
    // One NONE entry and one CLUSTER_AUTHORIZATION_FAILED entry; nothing else.
    assertEquals(2, counts.size());
    assertEquals(1, counts.get(Errors.NONE).intValue());
    assertEquals(1, counts.get(Errors.CLUSTER_AUTHORIZATION_FAILED).intValue());
}
/**
 * Seeking to a negative offset must be rejected with IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testSeekNegative() {
    TopicPartition tp = new TopicPartition("nonExistTopic", 0);
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(tp));
        // -1 is not a valid offset; seek() must throw.
        consumer.seek(tp, -1);
    }
}
/**
 * Builds a two-partition consumer FetchRequest at the given wire version and
 * isolation level (maxWait 100, minBytes 100000, maxBytes 1000).
 */
private FetchRequest createFetchRequest(int version, IsolationLevel isolationLevel) {
    LinkedHashMap<TopicPartition, FetchRequest.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(new TopicPartition("test1", 0),
        new FetchRequest.PartitionData(100, 0L, 1000000, Optional.of(15)));
    partitions.put(new TopicPartition("test2", 0),
        new FetchRequest.PartitionData(200, 0L, 1000000, Optional.of(25)));
    return FetchRequest.Builder
        .forConsumer(100, 100000, partitions)
        .isolationLevel(isolationLevel)
        .setMaxBytes(1000)
        .build((short) version);
}
/**
 * assign() must reject a TopicPartition whose topic is null.
 */
@Test(expected = IllegalArgumentException.class)
public void testAssignOnNullTopicInPartition() {
    TopicPartition nullTopicPartition = new TopicPartition(null, 0);
    try (KafkaConsumer<byte[], byte[]> consumer = newConsumer((String) null)) {
        consumer.assign(singleton(nullTopicPartition));
    }
}
// Builds a ProduceRequest whose record set is neither idempotent nor
// transactional (plain uncompressed records, current magic). The (short) -1
// and 10 arguments match forCurrentMagic's (acks, timeout) usage elsewhere in
// this file — NOTE(review): confirm against the Builder signature. The final
// "}" closes the enclosing class.
private ProduceRequest createNonIdempotentNonTransactionalRecords() { final MemoryRecords memoryRecords = MemoryRecords.withRecords(CompressionType.NONE, simpleRecord); return ProduceRequest.Builder.forCurrentMagic((short) -1, 10, Collections.singletonMap(new TopicPartition("topic", 1), memoryRecords)).build(); } }
/** * A {@link ProducerBatch} configured using a timestamp preceding its create time is interpreted correctly * * as not expired by {@link ProducerBatch#hasReachedDeliveryTimeout(long, long)}. */ @Test public void testBatchExpirationAfterReenqueue() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); // Set batch.retry = true batch.reenqueued(now); // Set `now` to 2ms before the create time. assertFalse(batch.hasReachedDeliveryTimeout(10240, now - 2L)); }
/**
 * ProduceRequest versions >= 3 must reject record batches written with
 * message format (magic) v1.
 */
@Test
public void testV3AndAboveCannotUseMagicV1() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(
        buffer, RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    recordsBuilder.append(10L, null, "a".getBytes());

    Map<TopicPartition, MemoryRecords> recordsByPartition = new HashMap<>();
    recordsByPartition.put(new TopicPartition("test", 0), recordsBuilder.build());

    ProduceRequest.Builder requestBuilder =
        ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, recordsByPartition);
    assertThrowsInvalidRecordExceptionForAllVersions(requestBuilder);
}
/**
 * ProduceRequest versions >= 3 must reject record batches written with
 * message format (magic) v0.
 */
@Test
public void testV3AndAboveCannotUseMagicV0() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder recordsBuilder = MemoryRecords.builder(
        buffer, RecordBatch.MAGIC_VALUE_V0, CompressionType.NONE, TimestampType.NO_TIMESTAMP_TYPE, 0L);
    recordsBuilder.append(10L, null, "a".getBytes());

    Map<TopicPartition, MemoryRecords> recordsByPartition = new HashMap<>();
    recordsByPartition.put(new TopicPartition("test", 0), recordsBuilder.build());

    ProduceRequest.Builder requestBuilder =
        ProduceRequest.Builder.forCurrentMagic((short) 1, 5000, recordsByPartition);
    assertThrowsInvalidRecordExceptionForAllVersions(requestBuilder);
}
@Test public void testGetOffsetsForTimesTimeout() { try { fetcher.offsetsByTimes(Collections.singletonMap(new TopicPartition(topicName, 2), 1000L), time.timer(100L)); fail("Should throw timeout exception."); } catch (TimeoutException e) { // let it go. } }
/**
 * An Assignment survives a serialize/deserialize round trip with its
 * partition set intact (ordering is not part of the contract).
 */
@Test
public void serializeDeserializeAssignment() {
    List<TopicPartition> partitions = Arrays.asList(
        new TopicPartition("foo", 0),
        new TopicPartition("bar", 2));
    ByteBuffer serialized =
        ConsumerProtocol.serializeAssignment(new PartitionAssignor.Assignment(partitions));
    PartitionAssignor.Assignment roundTripped = ConsumerProtocol.deserializeAssignment(serialized);
    assertEquals(toSet(partitions), toSet(roundTripped.partitions()));
}
/**
 * A poll that returns no records yields a ConsumerRecords that reports both
 * count() == 0 and isEmpty() == true.
 */
@Test
public void testConsumerRecordsIsEmptyWhenReturningNoRecords() {
    TopicPartition partition = new TopicPartition("test", 0);
    consumer.assign(Collections.singleton(partition));
    consumer.addRecord(new ConsumerRecord<String, String>("test", 0, 0, null, null));
    consumer.updateEndOffsets(Collections.singletonMap(partition, 1L));
    // Seek past the only record so the poll below returns nothing.
    consumer.seekToEnd(Collections.singleton(partition));

    ConsumerRecords<String, String> polled = consumer.poll(Duration.ofMillis(1));
    assertThat(polled.count(), is(0));
    assertThat(polled.isEmpty(), is(true));
}
/**
 * An Assignment serialized with null user data round-trips with its partition
 * set intact and userData still null.
 */
@Test
public void deserializeNullAssignmentUserData() {
    List<TopicPartition> partitions = Arrays.asList(
        new TopicPartition("foo", 0),
        new TopicPartition("bar", 2));
    ByteBuffer serialized =
        ConsumerProtocol.serializeAssignment(new PartitionAssignor.Assignment(partitions, null));
    PartitionAssignor.Assignment roundTripped = ConsumerProtocol.deserializeAssignment(serialized);
    assertEquals(toSet(partitions), toSet(roundTripped.partitions()));
    assertNull(roundTripped.userData());
}
/**
 * Once the transaction manager has transitioned to a fatal error, adding a
 * partition to the transaction must rethrow KafkaException.
 */
@Test(expected = KafkaException.class)
public void testMaybeAddPartitionToTransactionAfterFatalError() {
    final long producerId = 13131L;
    final short producerEpoch = 1;
    doInitTransactions(producerId, producerEpoch);
    transactionManager.transitionToFatalError(new KafkaException());
    transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0));
}
/**
 * Once the underlying records builder is closed for appends, tryAppend must
 * report no room and return null instead of accepting the record.
 */
@Test
public void testShouldNotAttemptAppendOnceRecordsBuilderIsClosedForAppends() {
    ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now);
    // First append succeeds while the builder is still open.
    FutureRecordMetadata result0 = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now);
    assertNotNull(result0);
    assertTrue(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS));

    memoryRecordsBuilder.closeForRecordAppends();

    // After closing: no room reported, and tryAppend yields null.
    assertFalse(memoryRecordsBuilder.hasRoomFor(now, null, new byte[10], Record.EMPTY_HEADERS));
    // Fixed: assertNull is the idiomatic form of assertEquals(null, ...).
    assertNull(batch.tryAppend(now + 1, null, new byte[10], Record.EMPTY_HEADERS, null, now + 1));
}