/**
 * Test turning on/off archiving
 */
@Test
public void testArchivingEnableDisable() throws Exception {
  // 1. turn on hfile backups
  LOG.debug("----Starting archiving");
  archivingClient.enableHFileBackupAsync(TABLE_NAME);
  assertTrue("Archiving didn't get turned on",
    archivingClient.getArchivingEnabled(TABLE_NAME));

  // 2. turn off archiving and make sure it's off
  archivingClient.disableHFileBackup();
  assertFalse("Archiving didn't get turned off.",
    archivingClient.getArchivingEnabled(TABLE_NAME));

  // 3. check enable/disable on a single table
  archivingClient.enableHFileBackupAsync(TABLE_NAME);
  assertTrue("Archiving didn't get turned on",
    archivingClient.getArchivingEnabled(TABLE_NAME));

  // 4. turn off archiving for the table and make sure it's off
  archivingClient.disableHFileBackup(TABLE_NAME);
  assertFalse("Archiving didn't get turned off for " + STRING_TABLE_NAME,
    archivingClient.getArchivingEnabled(TABLE_NAME));
}
public HFileArchiveManager(Connection connection, Configuration conf)
    throws ZooKeeperConnectionException, IOException {
  this.zooKeeper = new ZKWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
      connection);
  this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(),
    this.zooKeeper);
}
/**
 * Disable hfile backups for all tables.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the latency of propagating the change via zookeeper to the
 * servers.
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup() throws IOException, KeeperException {
  createHFileArchiveManager().disableHFileBackup().stop();
}
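// Minimal usage sketch (not part of the original sources) showing the enable/disable round
// trip from a client's perspective. It assumes a ZKTableArchiveClient constructed with a live
// Configuration and Connection, as in the test setup below; the helper method name and the
// tableName argument are illustrative only.
private void toggleArchivingExample(ZKTableArchiveClient client, byte[] tableName)
    throws IOException, KeeperException {
  // enabling is asynchronous: the znode is registered here, but region servers may only
  // pick up the change after a zookeeper notification delay
  client.enableHFileBackupAsync(tableName);
  boolean enabled = client.getArchivingEnabled(tableName);

  // ... while enabled, HFiles removed by compactions are moved to the archive directory ...

  // turn archiving off for every table; already-archived files are retained
  client.disableHFileBackup();
}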
/**
 * Disable hfile backups for the given table.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the latency of propagating the change via zookeeper to the
 * servers.
 * @param table name of the table to stop backing up
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup(String table) throws IOException, KeeperException {
  disableHFileBackup(Bytes.toBytes(table));
}
/**
 * Setup the config for the cluster
 */
@BeforeClass
public static void setupCluster() throws Exception {
  setupConf(UTIL.getConfiguration());
  UTIL.startMiniZKCluster();
  CONNECTION = (ClusterConnection) ConnectionFactory.createConnection(UTIL.getConfiguration());
  archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
  // make hfile archiving node so we can archive files
  ZKWatcher watcher = UTIL.getZooKeeperWatcher();
  String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher);
  ZKUtil.createWithParents(watcher, archivingZNode);
  rss = mock(RegionServerServices.class);
}
/**
 * Start archiving the given table and wait for the change to propagate to the given hfile
 * cleaner.
 * @param tableName table to archive
 * @param cleaner cleaner to check to make sure the change propagated
 * @return the cleaner's delegates; the first is the {@link LongTermArchivingHFileCleaner}
 *         managing archiving
 * @throws IOException on failure
 * @throws KeeperException on failure
 */
private List<BaseHFileCleanerDelegate> turnOnArchiving(String tableName, HFileCleaner cleaner)
    throws IOException, KeeperException {
  // turn on hfile retention
  LOG.debug("----Starting archiving for table:" + tableName);
  archivingClient.enableHFileBackupAsync(Bytes.toBytes(tableName));
  assertTrue("Archiving didn't get turned on", archivingClient.getArchivingEnabled(tableName));

  // wait for the archiver to get the notification
  List<BaseHFileCleanerDelegate> cleaners = cleaner.getDelegatesForTesting();
  LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  while (!delegate.archiveTracker.keepHFiles(tableName)) {
    // spin until propagation - should be fast (see the bounded-wait sketch below)
  }
  return cleaners;
}
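// Hypothetical bounded-wait variant (not in the original test) of the propagation spin loop
// in turnOnArchiving above: it performs the same check against the cleaner's archive tracker,
// but fails the test instead of hanging if the change never propagates. The helper name,
// timeout, and sleep interval are illustrative assumptions.
private void waitForArchivingToPropagate(LongTermArchivingHFileCleaner delegate,
    String tableName, long timeoutMs) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMs;
  while (!delegate.archiveTracker.keepHFiles(tableName)) {
    if (System.currentTimeMillis() > deadline) {
      fail("Archiving was not propagated to the cleaner within " + timeoutMs + " ms");
    }
    Thread.sleep(10);
  }
}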
/**
 * Determine if archiving is enabled (but not necessarily fully propagated) for a table
 * @param table name of the table to check
 * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
 * @throws IOException if an unexpected network issue occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public boolean getArchivingEnabled(String table) throws IOException, KeeperException {
  return getArchivingEnabled(Bytes.toBytes(table));
}
/**
 * @return A new {@link HFileArchiveManager} to manage which tables' hfiles should be archived
 *         rather than deleted.
 * @throws KeeperException if we can't reach zookeeper
 * @throws IOException if an unexpected network issue occurs
 */
private synchronized HFileArchiveManager createHFileArchiveManager()
    throws KeeperException, IOException {
  return new HFileArchiveManager(this.connection, this.getConf());
}
@After
public void tearDown() throws Exception {
  try {
    FileSystem fs = UTIL.getTestFileSystem();
    // cleanup each of the files/directories registered
    for (Path file : toCleanup) {
      // remove the table and archive directories
      FSUtils.delete(fs, file, true);
    }
  } catch (IOException e) {
    LOG.warn("Failure to delete archive directory", e);
  } finally {
    toCleanup.clear();
  }
  // make sure that backups are off for all tables
  archivingClient.disableHFileBackup();
}
private TableHFileArchiveTracker(ZKWatcher watcher, HFileArchiveTableMonitor monitor) {
  super(watcher);
  watcher.registerListener(this);
  this.monitor = monitor;
  this.archiveHFileZNode = ZKTableArchiveClient.getArchiveZNode(watcher.getConfiguration(),
    watcher);
}
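// Hedged sketch of how this tracker might be obtained and driven. The private constructor
// above suggests a factory method; create(Configuration), start(), and stop() are assumptions
// here and should be checked against the actual TableHFileArchiveTracker class before use.
private void exampleTrackerUsage(Configuration conf) throws IOException, KeeperException {
  TableHFileArchiveTracker tracker = TableHFileArchiveTracker.create(conf);
  tracker.start(); // sets the zookeeper watches under the archive znode
  try {
    // keepHFiles returns true once archiving has been enabled and propagated for the table
    boolean keep = tracker.keepHFiles("someTable");
  } finally {
    tracker.stop(); // closes the underlying zookeeper watcher
  }
}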
/**
 * Disable hfile backups for the given table.
 * <p>
 * Previously backed up files are still retained (if present).
 * <p>
 * Asynchronous operation - some extra HFiles may be retained in the archive directory after
 * disable is called, depending on the latency of propagating the change via zookeeper to the
 * servers.
 * @param table name of the table to stop backing up
 * @throws IOException if an unexpected exception occurs
 * @throws KeeperException if zookeeper can't be reached
 */
public void disableHFileBackup(final byte[] table) throws IOException, KeeperException {
  createHFileArchiveManager().disableHFileBackup(table).stop();
}
public HFileArchiveManager(HConnection connection, Configuration conf)
    throws ZooKeeperConnectionException, IOException {
  this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
      connection);
  this.archiveZnode = ZKTableArchiveClient.getArchiveZNode(this.zooKeeper.getConfiguration(),
    this.zooKeeper);
}
/**
 * Determine if archiving is enabled (but not necessarily fully propagated) for a table
 * @param table name of the table to check
 * @return <tt>true</tt> if it is, <tt>false</tt> otherwise
 * @throws IOException if a connection to ZooKeeper cannot be established
 * @throws KeeperException if zookeeper can't be reached
 */
public boolean getArchivingEnabled(byte[] table) throws IOException, KeeperException {
  HFileArchiveManager manager = createHFileArchiveManager();
  try {
    return manager.isArchivingEnabled(table);
  } finally {
    manager.stop();
  }
}