/**
 * Deletes {@code path} and all of its children from ZooKeeper.
 * A {@link ZkNoNodeException} is tolerated: the node may already have been
 * removed (e.g. cleaned up during a connection loss), which is fine.
 *
 * @param client connected ZkClient to issue the delete through
 * @param path   absolute ZooKeeper path to remove
 * @throws Exception any failure other than the node already being absent
 */
public static void deletePathRecursive(final ZkClient client, final String path) throws Exception {
    try {
        client.deleteRecursive(path);
    } catch (final ZkNoNodeException e) {
        // Node vanished on its own — not an error for this best-effort cleanup.
        logger.info(path + " deleted during connection loss; this is ok");
    }
    // BUGFIX: the original also had `catch (final Exception e) { throw e; }`,
    // a no-op rethrow; letting other exceptions propagate is equivalent.
}
// Remove any leftover Helix cluster state from a previous run so this
// setup starts against a clean ZooKeeper namespace.
// NOTE(review): fragment of a larger method — the closing braces for the
// `if` and the enclosing scope live outside this chunk.
final String zkPath = "/" + HELIX_CLUSTER_NAME; if (_zkClient.exists(zkPath)) { _zkClient.deleteRecursive(zkPath);
/**
 * TestNG teardown: stops every fake Helix instance, then the controller,
 * the Kafka topic observer and broker, scrubs this test's cluster znodes,
 * closes the ZK client, and finally shuts down the embedded ZK server.
 * Ordering matters: components are stopped before the ZooKeeper they use.
 */
@AfterTest
public void shutdown() {
    LOGGER.info("Trying to shutdown");
    for (FakeInstance fakeInstance : FAKE_INSTANCES) {
        try {
            LOGGER.info("Trying to shutdown: " + fakeInstance);
            fakeInstance.stop();
        } catch (Exception e) {
            // BUGFIX: was an empty catch. Teardown stays best-effort (keep
            // stopping the remaining instances) but the failure is now logged.
            LOGGER.error("Failed to stop fake instance " + fakeInstance, e);
        }
    }
    LOGGER.info("Trying to stop controller");
    CONTROLLER_STARTER.stop();
    LOGGER.info("Trying to stop zk");
    kafkaBrokerTopicObserver.stop();
    KafkaStarterUtils.stopServer(kafkaStarter);
    ZK_CLIENT.deleteRecursive("/" + HELIX_CLUSTER_NAME);
    ZK_CLIENT.close();
    ZkStarter.stopLocalZkServer();
}
/**
 * Recursively deletes {@code path} and everything beneath it by
 * delegating to the wrapped ZkClient.
 *
 * @param path absolute ZooKeeper path to remove
 * @return whatever the underlying client reports
 */
@Override
public boolean deleteRecursive(final String path) {
    final boolean deleted = zkClient.deleteRecursive(path);
    return deleted;
}
}
/**
 * Wipes the Kafka topic registry ("/brokers/topics") from ZooKeeper,
 * removing all topic metadata in one recursive delete.
 */
public void deleteTopics() {
    final String topicsRoot = "/brokers/topics";
    zkClient.deleteRecursive(topicsRoot);
}
/**
 * Empties this store by recursively deleting every znode under its root path.
 */
public void clear() {
    final String root = _rootPath;
    _zkClient.deleteRecursive(root);
}
/**
 * Drops the entire "/brokers/topics" subtree from ZooKeeper, clearing
 * every registered Kafka topic.
 */
public void deleteTopics() {
    final String kafkaTopicsPath = "/brokers/topics";
    zkClient.deleteRecursive(kafkaTopicsPath);
}
// Clear any stale cluster znodes from a previous run, then bring up a
// fresh controller for this test and remember its request URL.
// NOTE(review): fragment of a larger setup method — enclosing braces are
// outside this chunk.
ZK_CLIENT.deleteRecursive("/" + HELIX_CLUSTER_NAME); REQUEST_URL = "http://localhost:" + CONTROLLER_PORT; CONTROLLER_STARTER = startController(HELIX_CLUSTER_NAME, CONTROLLER_PORT);
// Default-namespace hook (anonymous class; its header is outside this
// chunk): rather than creating nodes, it deletes any pre-existing znodes
// for this cluster so each run starts from a clean namespace.
@Override public void createDefaultNameSpace(org.I0Itec.zkclient.ZkClient client) { client.deleteRecursive("/" + clusterName); } };
void deleteOldVersionPath(String path, List<String> zNodeIds, int numVersionsToLeave, Comparator<String> c) { if (StringUtils.isEmpty(path) || zNodeIds == null) { LOG.warn("cannot cleanup empty path or empty list in ZK"); return; } if (zNodeIds.size() > numVersionsToLeave) { Collections.sort(zNodeIds, c); // get the znodes to delete int size = zNodeIds.size(); List<String> zNodesToDelete = zNodeIds.subList(0, zNodeIds.size() - numVersionsToLeave); LOG.info("Starting cleanup of barrier version zkNodes. From size=" + size + " to size " + zNodesToDelete.size() + "; numberToLeave=" + numVersionsToLeave); for (String znodeId : zNodesToDelete) { String pathToDelete = path + "/" + znodeId; try { LOG.info("deleting " + pathToDelete); zkClient.deleteRecursive(pathToDelete); metrics.deletions.inc(); } catch (Exception e) { LOG.warn("delete of node " + pathToDelete + " failed.", e); } } } } /**
// Default-namespace hook (anonymous class; its header is outside this
// chunk): wipes each configured root namespace so the application starts
// from a clean state. A null namespace list means nothing to clear, and a
// failed delete on one root is logged without blocking the others.
@Override public void createDefaultNameSpace(org.I0Itec.zkclient.ZkClient zkClient) { if (rootNamespaces == null) { return; } for (String rootNamespace : rootNamespaces) { try { zkClient.deleteRecursive(rootNamespace); } catch (Exception e) { LOG.error("fail to deleteRecursive path:" + rootNamespace, e); } } } };
/**
 * Depth-first recursive delete: all children of {@code path} are removed
 * before the node itself.
 *
 * @param path absolute ZooKeeper path
 * @return true if the subtree is gone (or never existed), false on the
 *         first delete that fails
 */
public boolean deleteRecursive(String path) {
    List<String> childNodes;
    try {
        childNodes = getChildren(path, false);
    } catch (ZkNoNodeException e) {
        // Absent node counts as already deleted.
        return true;
    }
    boolean allChildrenGone = true;
    for (String child : childNodes) {
        allChildrenGone = deleteRecursive(path + "/" + child);
        if (!allChildrenGone) {
            break;
        }
    }
    return allChildrenGone && delete(path);
}
/**
 * Recursively removes {@code path}: descends into every child first, then
 * deletes the node itself.
 *
 * @param path absolute ZooKeeper path
 * @return true when the whole subtree was removed (or was never there);
 *         false as soon as any delete fails
 */
public boolean deleteRecursive(String path) {
    final List<String> kids;
    try {
        kids = getChildren(path, false);
    } catch (ZkNoNodeException noNode) {
        return true; // already absent — vacuously deleted
    }
    for (final String kid : kids) {
        final String kidPath = path + "/" + kid;
        if (!deleteRecursive(kidPath)) {
            return false;
        }
    }
    return delete(path);
}
/**
 * Resets ZooKeeper state both before and after each test by recursively
 * wiping the shared test path.
 */
@Before
@After
public void cleanup() {
    final String pathToWipe = TEST_PATH;
    zkClient.deleteRecursive(pathToWipe);
}
// TestNG teardown: stop the manager first, then the Kafka broker, scrub
// this test's Helix cluster znodes, close the ZK client, and finally shut
// down the embedded ZooKeeper. Ordering matters — every component is
// stopped before the ZooKeeper it depends on goes away.
@AfterTest public void shutdown() { LOGGER.info("Trying to shutdown"); LOGGER.info("Trying to stop manager"); MANAGER_STARTER.stop(); KafkaStarterUtils.stopServer(kafkaStarter); ZK_CLIENT.deleteRecursive("/" + HELIX_CLUSTER_NAME); ZK_CLIENT.close(); ZkStarter.stopLocalZkServer(); } }
/**
 * TestNG setup: starts an embedded ZooKeeper and a Kafka broker, clears
 * any stale Helix cluster state, then boots the manager under test and
 * waits briefly for it to come up.
 *
 * @throws ParseException propagated from manager startup argument parsing
 */
@BeforeTest
public void setup() throws ParseException {
    LOGGER.info("Trying to setup");
    ZkStarter.startLocalZkServer();
    kafkaStarter = KafkaStarterUtils.startServer(KafkaStarterUtils.DEFAULT_KAFKA_PORT,
        KafkaStarterUtils.DEFAULT_BROKER_ID,
        ZkStarter.DEFAULT_ZK_STR + "/cluster1",
        KafkaStarterUtils.getDefaultKafkaConfiguration());
    // Give the broker a moment to come up before touching ZooKeeper.
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        // BUGFIX: was an empty catch (Exception); restore the interrupt flag
        // instead of swallowing the interruption.
        Thread.currentThread().interrupt();
    }
    ZK_CLIENT = new ZkClient(ZkStarter.DEFAULT_ZK_STR);
    // Wipe leftovers from any previous run so the manager starts clean.
    ZK_CLIENT.deleteRecursive("/" + HELIX_CLUSTER_NAME);
    REQUEST_URL = "http://localhost:" + MANAGER_PORT;
    MANAGER_STARTER = startManager(DEPLOYMENT_NAME, MANAGER_PORT);
    // Let the manager finish initializing before tests issue requests.
    try {
        Thread.sleep(2000);
    } catch (InterruptedException e) {
        // BUGFIX: re-interrupt rather than just printing the stack trace.
        Thread.currentThread().interrupt();
    }
}
/**
 * Per-test teardown: removes the whole ZK root subtree for this test,
 * then releases the ZkUtils resources.
 */
@After
public void testTeardown() {
    final String rootPath = KEY_BUILDER.getRootPath();
    testZkUtils.getZkClient().deleteRecursive(rootPath);
    testZkUtils.close();
}
/**
 * TestNG teardown: stops all fake Helix instances, the controller, the
 * Kafka topic observer and broker, deletes this test's cluster znodes,
 * closes the ZK client, and shuts down the embedded ZooKeeper server.
 * Components are stopped before the ZooKeeper they depend on.
 */
@AfterTest
public void shutdown() {
    LOGGER.info("Trying to shutdown");
    for (FakeInstance fakeInstance : FAKE_INSTANCES) {
        try {
            LOGGER.info("Trying to shutdown: " + fakeInstance);
            fakeInstance.stop();
        } catch (Exception e) {
            // BUGFIX: was an empty catch. Teardown remains best-effort, but
            // the failure is now recorded instead of silently swallowed.
            LOGGER.error("Failed to stop fake instance " + fakeInstance, e);
        }
    }
    LOGGER.info("Trying to stop controller");
    CONTROLLER_STARTER.stop();
    LOGGER.info("Trying to stop zk");
    kafkaBrokerTopicObserver.stop();
    KafkaStarterUtils.stopServer(kafkaStarter);
    ZK_CLIENT.deleteRecursive("/" + HELIX_CLUSTER_NAME);
    ZK_CLIENT.close();
    ZkStarter.stopLocalZkServer();
}
/**
 * Cleans up after each test: wipes everything under the key-builder's
 * root path in ZooKeeper, then closes the test ZkUtils handle.
 */
@After
public void testTeardown() {
    final String zkRoot = KEY_BUILDER.getRootPath();
    testZkUtils.getZkClient().deleteRecursive(zkRoot);
    testZkUtils.close();
}
/**
 * Verifies getNextJobModelVersion accounts for both the version znode and
 * any already-published JobModels: with the version znode at 1 and a
 * JobModel published under version 2, the next version must be 3.
 * NOTE(review): assumes getNextJobModelVersion returns max(znode version,
 * highest published version) + 1 — confirm against ZkUtils.
 */
@Test public void testgetNextJobModelVersion() { // Set up the Zk base paths for testing. ZkKeyBuilder keyBuilder = new ZkKeyBuilder("test"); String root = keyBuilder.getRootPath(); zkClient.deleteRecursive(root); zkUtils.validatePaths(new String[]{root, keyBuilder.getJobModelPathPrefix(), keyBuilder.getJobModelVersionPath()}); String version = "1"; String oldVersion = "0"; // Set zkNode JobModelVersion to 1. zkUtils.publishJobModelVersion(oldVersion, version); Assert.assertEquals(version, zkUtils.getJobModelVersion()); // Publish JobModel with a higher version (2). zkUtils.publishJobModel("2", new JobModel(new MapConfig(), new HashMap<>())); // Get on the JobModel version should return 2, taking into account the published version 2. Assert.assertEquals("3", zkUtils.getNextJobModelVersion(zkUtils.getJobModelVersion())); }