/**
 * Calls {@link StreamOperator#setup(StreamTask, StreamConfig, Output)}.
 */
public void setup() {
    setup(null);
}
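// For orientation: a minimal, hedged sketch of the harness lifecycle that this
// no-arg overload participates in. Assumes a no-op AbstractStreamOperator, as in
// the tests below; the setup() -> initializeState() -> open() ordering is exactly
// what those tests enforce. An illustration, not the definitive usage.
AbstractStreamOperatorTestHarness<Integer> harness =
        new AbstractStreamOperatorTestHarness<>(
                new AbstractStreamOperator<Integer>() { }, 1, 1, 0);

harness.setup();                                      // wires the operator to a mock task
harness.initializeState(new OperatorSubtaskState()); // must happen before open()
harness.open();                                      // initializing after this throws IllegalStateException
harness.close();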
@Test
public void testInitializeAfterOpenning() throws Throwable {
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage(containsString("TestHarness has already been initialized."));

    AbstractStreamOperatorTestHarness<Integer> result;
    result = new AbstractStreamOperatorTestHarness<>(
            new AbstractStreamOperator<Integer>() { },
            1,
            1,
            0);

    result.setup();
    result.open();
    result.initializeState(new OperatorSubtaskState());
}
}
AbstractStreamOperatorTestHarness<Integer> testHarnessCopy =
        new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);

testHarnessCopy.setup();
testHarnessCopy.initializeState(handles);
testHarnessCopy.open();
testHarness.setup();
/**
 * Test that restoring from savepoints created before Flink 1.3 fails if partition discovery is enabled.
 */
@Test
public void testRestoreFailsWithNonEmptyPreFlink13StatesIfDiscoveryEnabled() throws Exception {
    assumeTrue(testMigrateVersion == MigrationVersion.v1_3 || testMigrateVersion == MigrationVersion.v1_2);

    final List<KafkaTopicPartition> partitions = new ArrayList<>(PARTITION_STATE.keySet());

    final DummyFlinkKafkaConsumer<String> consumerFunction =
            new DummyFlinkKafkaConsumer<>(partitions, 1000L); // discovery enabled

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
            new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
            new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();

    // restore state from binary snapshot file; should fail since discovery is enabled
    try {
        testHarness.initializeState(
                OperatorSnapshotUtil.getResourceFilename(
                        "kafka-consumer-migration-test-flink" + testMigrateVersion + "-snapshot"));

        fail("Restore from savepoints from version before Flink 1.3.x should have failed if discovery is enabled.");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IllegalArgumentException);
    }
}
testHarness.setup();
testHarness.open();
testHarness3.setup();
testHarness3.initializeState(snapshot);
testHarness3.open();
/**
 * Test restoring from a legacy empty state, when no partitions could be found for the topics.
 */
@Test
public void testRestoreFromEmptyStateNoPartitions() throws Exception {
    final DummyFlinkKafkaConsumer<String> consumerFunction =
            new DummyFlinkKafkaConsumer<>(
                    Collections.<KafkaTopicPartition>emptyList(),
                    FlinkKafkaConsumerBase.PARTITION_DISCOVERY_DISABLED);

    StreamSource<String, DummyFlinkKafkaConsumer<String>> consumerOperator =
            new StreamSource<>(consumerFunction);

    final AbstractStreamOperatorTestHarness<String> testHarness =
            new AbstractStreamOperatorTestHarness<>(consumerOperator, 1, 1, 0);

    testHarness.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    testHarness.setup();

    // restore state from binary snapshot file
    testHarness.initializeState(
            OperatorSnapshotUtil.getResourceFilename(
                    "kafka-consumer-migration-test-flink" + testMigrateVersion + "-empty-state-snapshot"));

    testHarness.open();

    // assert that no partitions were found, i.e. the subscribed partitions map is empty
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets() != null);
    assertTrue(consumerFunction.getSubscribedPartitionsToStartOffsets().isEmpty());

    // assert that no state was restored
    assertTrue(consumerFunction.getRestoredState().isEmpty());

    consumerOperator.close();
    consumerOperator.cancel();
}
testHarness.setup();
"opened this harness before initializing it?"); if (!setupCalled) { setup();