public void setUp() throws Exception { // setup ZK zookeeper = new TestingServer(true); // setup Broker kafkaDir = new TmpPath(Files.createTempDirectory("kafka-").toAbsolutePath().toString()); Properties brokerProps = new Properties(); brokerProps.setProperty("zookeeper.connect", zookeeper.getConnectString()); brokerProps.setProperty("broker.id", "0"); brokerProps.setProperty("log.dirs", kafkaDir.getPath()); brokerProps.setProperty("listeners", String.format("PLAINTEXT://%s:%d", KAFKA_HOST, KAFKA_PORT)); brokerProps.setProperty("offsets.topic.replication.factor", "1"); KafkaConfig config = new KafkaConfig(brokerProps); MockTime mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); // setup default Producer createProducer(); kafkaAdminClient = AdminClient.create(Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_HOST + ":" + KAFKA_PORT)); }
void start() throws RuntimeException {
    // Only the first caller boots the suite; subsequent calls are no-ops.
    if (_numStarted.incrementAndGet() != 1) {
        log.info("Kafka server suite already started... continuing");
        return;
    }
    log.warn("Starting up Kafka server suite. Zk at " + _zkConnectString + "; Kafka server at " + _kafkaServerPort);
    _zkServer = new EmbeddedZookeeper(_zkConnectString);
    _zkClient = new ZkClient(_zkConnectString, 30000, 30000, ZKStringSerializer$.MODULE$);
    // Start from the stock test broker config, then point it at our zk.
    Properties brokerProps = kafka.utils.TestUtils.createBrokerConfig(_brokerId, _kafkaServerPort, true);
    brokerProps.setProperty("zookeeper.connect", _zkConnectString);
    KafkaConfig brokerConfig = new KafkaConfig(brokerProps);
    Time time = new MockTime();
    _kafkaServer = kafka.utils.TestUtils.createServer(brokerConfig, time);
}
@Test
public void testAddStepsFutures() {
    // Fixed UUID so both requests map onto the same user task.
    UUID taskId = UUID.randomUUID();
    UserTaskManager.UUIDGenerator uuidGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
    EasyMock.expect(uuidGenerator.randomUUID()).andReturn(taskId).anyTimes();

    HttpSession session = EasyMock.mock(HttpSession.class);
    EasyMock.expect(session.getLastAccessedTime()).andReturn(100L).anyTimes();
    HttpServletRequest request = prepareRequest(session, null);
    HttpServletResponse response = EasyMock.mock(HttpServletResponse.class);
    response.setHeader(EasyMock.anyString(), EasyMock.anyString());
    EasyMock.replay(uuidGenerator, session, response);

    UserTaskManager userTaskManager =
            new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), uuidGenerator);
    OperationFuture firstFuture = new OperationFuture("testFuture1");
    OperationFuture secondFuture = new OperationFuture("testFuture2");

    // Step 0 creates the task; the returned list contains the first future.
    OperationFuture inserted1 = userTaskManager.getOrCreateUserTask(request, response, uuid -> firstFuture, 0).get(0);
    Assert.assertEquals(firstFuture, inserted1);

    EasyMock.reset(response);
    // Step 1 appends a second future to the same task id.
    OperationFuture inserted2 = userTaskManager.getOrCreateUserTask(request, response, uuid -> secondFuture, 1).get(1);
    Assert.assertEquals(secondFuture, inserted2);

    // Both futures are now tracked under the single task id.
    Assert.assertEquals(userTaskManager.getFuturesByUserTaskId(taskId, request).size(), 2);
    userTaskManager.close();
}
@Test
public void testCompletedTasks() throws Exception {
    // Session mock: fixed last-access time; invalidate() is recorded here
    // (before replay) as an expected call from the manager.
    HttpSession mockHttpSession = EasyMock.mock(HttpSession.class);
    EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(100L).anyTimes();
    mockHttpSession.invalidate();
    HttpServletRequest mockHttpServletRequest = prepareRequest(mockHttpSession, null);
    UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class);
    EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(UUID.randomUUID()).anyTimes();
    OperationFuture future = new OperationFuture("future");
    UserTaskManager userTaskManager = new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime(), mockUUIDGenerator);
    // Capture the user-task header the manager writes onto the response.
    HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class);
    Capture<String> userTaskHeader = Capture.newInstance();
    Capture<String> userTaskHeaderValue = Capture.newInstance();
    mockHttpServletResponse.setHeader(EasyMock.capture(userTaskHeader), EasyMock.capture(userTaskHeaderValue));
    EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse);
    // test-case: verify if the background cleaner task removes tasks that are completed
    OperationFuture future1 = userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> future, 0).get(0);
    Assert.assertEquals(future, future1);
    future1.cancel(true);
    // Give the background scanner several periods to observe the completed task.
    // NOTE(review): the assertions below only check the future's own state, which
    // is already true immediately after cancel(); removal of the task by the
    // cleaner is not actually verified here — confirm whether that was intended.
    Thread.sleep(TimeUnit.SECONDS.toMillis(UserTaskManager.USER_TASK_SCANNER_PERIOD_SECONDS * 4));
    Assert.assertTrue(future.isDone());
    Assert.assertTrue(future.isCancelled());
    userTaskManager.close();
}
@Test public void testMaximumActiveTasks() { HttpSession mockHttpSession1 = EasyMock.mock(HttpSession.class); EasyMock.expect(mockHttpSession1.getLastAccessedTime()).andReturn(100L).anyTimes(); HttpServletRequest mockHttpServletRequest1 = prepareRequest(mockHttpSession1, null); OperationFuture future = new OperationFuture("future"); UserTaskManager userTaskManager = new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, new MockTime()); HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class); mockHttpServletResponse.setHeader(EasyMock.anyString(), EasyMock.anyString()); EasyMock.replay(mockHttpSession1, mockHttpServletResponse); // test-case: test max limitation active tasks OperationFuture future1 = userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse, uuid -> future, 0).get(0); Assert.assertEquals(future, future1); HttpSession mockHttpSession2 = EasyMock.mock(HttpSession.class); EasyMock.expect(mockHttpSession2.getLastAccessedTime()).andReturn(100L).anyTimes(); EasyMock.replay(mockHttpSession2); EasyMock.reset(mockHttpServletResponse); HttpServletRequest mockHttpServletRequest2 = prepareRequest(mockHttpSession2, null, "/test2", Collections.emptyMap()); try { OperationFuture future2 = userTaskManager.getOrCreateUserTask(mockHttpServletRequest2, mockHttpServletResponse, uuid -> future, 0).get(0); Assert.assertEquals(future, future2); } catch (RuntimeException e) { userTaskManager.close(); return; } Assert.fail("Don't expect to be here!"); }
@Test public void testExpireSession() throws Exception { UUID testUserTaskId = UUID.randomUUID(); UserTaskManager.UUIDGenerator mockUUIDGenerator = EasyMock.mock(UserTaskManager.UUIDGenerator.class); EasyMock.expect(mockUUIDGenerator.randomUUID()).andReturn(testUserTaskId).anyTimes(); Time mockTime = new MockTime(); HttpSession mockHttpSession = EasyMock.mock(HttpSession.class); EasyMock.expect(mockHttpSession.getLastAccessedTime()).andReturn(mockTime.milliseconds()).anyTimes(); mockHttpSession.invalidate(); HttpServletRequest mockHttpServletRequest = prepareRequest(mockHttpSession, null); OperationFuture future = new OperationFuture("future"); UserTaskManager userTaskManager = new UserTaskManager(1000, 1, TimeUnit.HOURS.toMillis(6), 100, mockTime, mockUUIDGenerator); HttpServletResponse mockHttpServletResponse = EasyMock.mock(HttpServletResponse.class); mockHttpServletResponse.setHeader(EasyMock.anyString(), EasyMock.anyString()); EasyMock.replay(mockUUIDGenerator, mockHttpSession, mockHttpServletResponse); // test-case: test if the sessions are removed on expiration OperationFuture future1 = userTaskManager.getOrCreateUserTask(mockHttpServletRequest, mockHttpServletResponse, uuid -> future, 0).get(0); Assert.assertEquals(future, future1); mockTime.sleep(1001); Thread.sleep(TimeUnit.SECONDS.toMillis(UserTaskManager.USER_TASK_SCANNER_PERIOD_SECONDS + 1)); OperationFuture future2 = userTaskManager.getFuture(mockHttpServletRequest); Assert.assertNull(future2); userTaskManager.close(); }
100, new MockTime(), mockUUIDGenerator); userTaskManager.getOrCreateUserTask(mockHttpServletRequest1, mockHttpServletResponse1, uuid -> future, 0); userTaskManager.getOrCreateUserTask(mockHttpServletRequest2, mockHttpServletResponse2, uuid -> future, 0);
public static void startServer() throws RuntimeException {
    // A closed server must never be resurrected: fail loudly instead.
    if (serverStarted && serverClosed) {
        throw new RuntimeException("Kafka test server has already been closed. Cannot generate Kafka server twice.");
    }
    // Idempotent: a second call while running does nothing.
    if (serverStarted) {
        return;
    }
    serverStarted = true;
    zkConnect = TestZKUtils.zookeeperConnect();
    zkServer = new EmbeddedZookeeper(zkConnect);
    zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
    kafkaPort = TestUtils.choosePort();
    Properties brokerProps = TestUtils.createBrokerConfig(brokerId, kafkaPort, true);
    KafkaConfig brokerConfig = new KafkaConfig(brokerProps);
    Time time = new MockTime();
    kafkaServer = TestUtils.createServer(brokerConfig, time);
}
100, new MockTime(), mockUUIDGenerator);
Time mock = new MockTime(); _kafkaServer = kafka.utils.TestUtils.createServer(config, mock);
private KafkaServer createKafkaServer(int brokerId, String zkConnectString) {
    // Pick a free port for this broker; remember it for callers.
    int port = TestUtils.findFreePort();
    // Everything except broker id, zk connect string, and port uses the
    // Scala-generated defaults of TestUtils.createBrokerConfig.
    Properties brokerProps = kafka.utils.TestUtils.createBrokerConfig(
            brokerId,
            zkConnectString,
            kafka.utils.TestUtils.createBrokerConfig$default$3(),
            kafka.utils.TestUtils.createBrokerConfig$default$4(),
            port,
            kafka.utils.TestUtils.createBrokerConfig$default$6(),
            kafka.utils.TestUtils.createBrokerConfig$default$7(),
            kafka.utils.TestUtils.createBrokerConfig$default$8(),
            kafka.utils.TestUtils.createBrokerConfig$default$9(),
            kafka.utils.TestUtils.createBrokerConfig$default$10(),
            kafka.utils.TestUtils.createBrokerConfig$default$11(),
            kafka.utils.TestUtils.createBrokerConfig$default$12(),
            kafka.utils.TestUtils.createBrokerConfig$default$13(),
            kafka.utils.TestUtils.createBrokerConfig$default$14());
    KafkaConfig brokerConfig = new KafkaConfig(brokerProps);
    Time time = new MockTime();
    KafkaServer server = kafka.utils.TestUtils.createServer(brokerConfig, time);
    kafkaBrokerPortList.add(port);
    return server;
}
@Before public void setUp() throws IOException, SQLException { // setup Zookeeper zkServer = new EmbeddedZookeeper(); String zkConnect = ZKHOST + ":" + zkServer.port(); zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$); ZkUtils zkUtils = ZkUtils.apply(zkClient, false); // setup Broker Properties brokerProps = new Properties(); brokerProps.setProperty("zookeeper.connect", zkConnect); brokerProps.setProperty("broker.id", "0"); brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString()); brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT); KafkaConfig config = new KafkaConfig(brokerProps); Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); kafkaServer.startup(); // create topic AdminUtils.createTopic(zkUtils, TOPIC, 1, 1, new Properties()); pConsumer = new PhoenixConsumer(); Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES); conn = DriverManager.getConnection(getUrl(), props); }
private KafkaServer startKafkaServer() {
    // Broker logs live in a throwaway temp dir; port 9092 and the remaining
    // args are interpreted by createProperties (defined elsewhere).
    File logDir = Files.createTempDir();
    Properties brokerProps = createProperties(logDir.getAbsolutePath(), 9092, 1);
    kafkaServer = new KafkaServer(new KafkaConfig(brokerProps), new MockTime());
    kafkaServer.startup();
    return kafkaServer;
}
private void createKafkaCluster() throws IOException {
    // Keep Zookeeper's preallocated transaction-log size small for tests.
    System.setProperty("zookeeper.preAllocSize", Integer.toString(128));
    zkServer = new EmbeddedZookeeper();
    String zkConnect = ZK_HOST + ':' + zkServer.port();
    ZkClient client = new ZkClient(zkConnect, SESSION_TIMEOUT, CONNECTION_TIMEOUT, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(client, false);

    // Single broker on a random port with a throwaway log directory.
    brokerPort = getRandomPort();
    Properties brokerProps = new Properties();
    brokerProps.setProperty("zookeeper.connect", zkConnect);
    brokerProps.setProperty("broker.id", "0");
    brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString());
    brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKER_HOST + ':' + brokerPort);
    // One broker: the offsets topic must stay unreplicated and single-partition.
    brokerProps.setProperty("offsets.topic.replication.factor", "1");
    brokerProps.setProperty("offsets.topic.num.partitions", "1");
    kafkaServer = TestUtils.createServer(new KafkaConfig(brokerProps), new MockTime());
}
/** * Starts the Embedded Kafka and Zookeeper Servers. * @throws Exception - If an exeption occurs during startup. */ protected void startup() throws Exception { // Setup the embedded zookeeper logger.info("Starting up Embedded Zookeeper..."); zkServer = new EmbeddedZookeeper(); zookeperConnect = ZKHOST + ":" + zkServer.port(); logger.info("Embedded Zookeeper started at: {}", zookeperConnect); // setup Broker logger.info("Starting up Embedded Kafka..."); brokerPort = Integer.toString(PortUtils.getRandomFreePort()); final Properties brokerProps = new Properties(); brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0"); brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST); brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort); brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect); brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString()); final KafkaConfig config = new KafkaConfig(brokerProps); final Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort); }
/** * Starts the Embedded Kafka and Zookeeper Servers. * @throws Exception - If an exeption occurs during startup. */ protected void startup() throws Exception { // Setup the embedded zookeeper logger.info("Starting up Embedded Zookeeper..."); zkServer = new EmbeddedZookeeper(); zookeperConnect = ZKHOST + ":" + zkServer.port(); logger.info("Embedded Zookeeper started at: {}", zookeperConnect); // setup Broker logger.info("Starting up Embedded Kafka..."); brokerPort = Integer.toString(PortUtils.getRandomFreePort()); final Properties brokerProps = new Properties(); brokerProps.setProperty(KafkaConfig$.MODULE$.BrokerIdProp(), "0"); brokerProps.setProperty(KafkaConfig$.MODULE$.HostNameProp(), BROKERHOST); brokerProps.setProperty(KafkaConfig$.MODULE$.PortProp(), brokerPort); brokerProps.setProperty(KafkaConfig$.MODULE$.ZkConnectProp(), zookeperConnect); brokerProps.setProperty(KafkaConfig$.MODULE$.LogDirsProp(), Files.createTempDirectory(getClass().getSimpleName() + "-").toAbsolutePath().toString()); brokerProps.setProperty(KafkaConfig$.MODULE$.DeleteTopicEnableProp(), "true"); final KafkaConfig config = new KafkaConfig(brokerProps); final Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); logger.info("Embedded Kafka Server started at: {}:{}", BROKERHOST, brokerPort); }
/** * setup mini kafka and call the super to setup mini fluo */ @Before public void setupKafka() throws Exception { // Install an instance of Rya on the Accumulo cluster. installRyaInstance(); // Setup Kafka. zkServer = new EmbeddedZookeeper(); final String zkConnect = ZKHOST + ":" + zkServer.port(); zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$); zkUtils = ZkUtils.apply(zkClient, false); // setup Broker final Properties brokerProps = new Properties(); brokerProps.setProperty("zookeeper.connect", zkConnect); brokerProps.setProperty("broker.id", "0"); brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString()); brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT); final KafkaConfig config = new KafkaConfig(brokerProps); final Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); }
/** * setup mini kafka and call the super to setup mini fluo */ @Before public void setupKafka() throws Exception { // Install an instance of Rya on the Accumulo cluster. installRyaInstance(); // Setup Kafka. zkServer = new EmbeddedZookeeper(); final String zkConnect = ZKHOST + ":" + zkServer.port(); zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$); zkUtils = ZkUtils.apply(zkClient, false); // setup Broker final Properties brokerProps = new Properties(); brokerProps.setProperty("zookeeper.connect", zkConnect); brokerProps.setProperty("broker.id", "0"); brokerProps.setProperty("log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString()); brokerProps.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT); final KafkaConfig config = new KafkaConfig(brokerProps); final Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); }
private void createKafkaCluster() throws IOException {
    // Embedded Zookeeper on a random port.
    zkServer = new EmbeddedZookeeper();
    String zkConnect = "localhost:" + zkServer.port();
    ZkClient client = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$);
    zkUtils = ZkUtils.apply(client, false);

    // Single broker on a fixed port; offsets topic unreplicated.
    KafkaConfig brokerConfig = new KafkaConfig(props(
            "zookeeper.connect", zkConnect,
            "broker.id", "0",
            "log.dirs", Files.createTempDirectory("kafka-").toAbsolutePath().toString(),
            "offsets.topic.replication.factor", "1",
            "listeners", "PLAINTEXT://localhost:9092"));
    kafkaServer = TestUtils.createServer(brokerConfig, new MockTime());
}
public void start() throws IOException { // Find port try { ServerSocket serverSocket = new ServerSocket(0); BROKERPORT = Integer.toString(serverSocket.getLocalPort()); serverSocket.close(); } catch (IOException e) { throw Throwables.propagate(e); } // Setup Zookeeper zkServer = new EmbeddedZookeeper(); String zkConnect = BROKERHOST + ":" + zkServer.port(); zkClient = new ZkClient(zkConnect, 30000, 30000, ZKStringSerializer$.MODULE$); zkUtils = ZkUtils.apply(zkClient, false); // Setup brokers cleanupDir(); Properties props = new Properties(); props.setProperty("zookeeper.connect", zkConnect); props.setProperty("broker.id", "0"); props.setProperty("log.dirs", KAFKA_PATH); props.setProperty("listeners", "PLAINTEXT://" + BROKERHOST + ":" + BROKERPORT); KafkaConfig config = new KafkaConfig(props); Time mock = new MockTime(); kafkaServer = TestUtils.createServer(config, mock); }