srcFs = FileSystem.newInstance(p.toUri(), conf); if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) {
/** * Recover the lease from HDFS, retrying multiple times. */ @Override public void recoverFileLease(FileSystem fs, Path p, Configuration conf, CancelableProgressable reporter) throws IOException { if (fs instanceof FilterFileSystem) { fs = ((FilterFileSystem) fs).getRawFileSystem(); } // lease recovery not needed for local file system case. if (!(fs instanceof DistributedFileSystem)) { return; } recoverDFSFileLease((DistributedFileSystem) fs, p, conf, reporter); }
recovered = recoverLease(dfs, nbAttempt, p, startWaiting); if (recovered) break; checkIfCancelled(reporter); if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) break; try { if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) { recovered = true; break; checkIfCancelled(reporter);
/**
 * Check whether the absolute recovery deadline has passed.
 * @param conf configuration, read only to report the configured timeout in the log
 * @param recoveryTimeout absolute deadline (epoch millis) after which we give up
 * @param nbAttempt attempt number, used in the log message only
 * @param p path under recovery, used in the log message only
 * @param startWaiting timestamp recovery started, used in the log message only
 * @return true if the deadline has passed (a DATALOSS warning is logged), else false
 */
boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt,
    final Path p, final long startWaiting) {
  if (EnvironmentEdgeManager.currentTime() <= recoveryTimeout) {
    return false;
  }
  LOG.warn("Cannot recoverLease after trying for "
      + conf.getInt("hbase.lease.recovery.timeout", 900000)
      + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; "
      + getLogMessageDetail(nbAttempt, p, startWaiting));
  return true;
}
Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf); Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf); if (Sets.intersection(srcAddrs, desAddrs).size() > 0) { return true;
@Override public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf) throws IOException{ if (!isAppendSupported(conf)) { LOG.warn("Running on HDFS without append enabled may result in data loss"); return;
/** * Try to recover the lease. * @param dfs * @param nbAttempt * @param p * @param startWaiting * @return True if dfs#recoverLease came by true. * @throws FileNotFoundException */ boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); LOG.info((recovered? "Recovered lease, ": "Failed to recover lease, ") + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it throw new FileNotFoundException("The given WAL wasn't found at " + p); } else if (e instanceof FileNotFoundException) { throw (FileNotFoundException)e; } LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e); } return recovered; }
Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf); Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf); if (Sets.intersection(srcAddrs, desAddrs).size() > 0) { return true;
recovered = recoverLease(dfs, nbAttempt, p, startWaiting); if (recovered) break; checkIfCancelled(reporter); if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) break; try { if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) { recovered = true; break; checkIfCancelled(reporter);
if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) { LOG.debug("Bulk-load file " + srcPath + " is on different filesystem than " + "the destination filesystem. Copying file over to destination staging dir.");
/** * Test that isFileClosed makes us recover lease faster. */ @Test public void testIsFileClosed() throws IOException { // Make this time long so it is plain we broke out because of the isFileClosed invocation. HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class); // Now make it so we fail the first two times -- the two fast invocations, then we fall into // the long loop during which we will call isFileClosed.... the next invocation should // therefore return true if we are to break the loop. Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(true); Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE); Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE); }
/**
 * Report whether the recovery deadline has been exceeded.
 * @param conf configuration, consulted only to echo the configured timeout in the warning
 * @param recoveryTimeout absolute give-up time in epoch millis
 * @param nbAttempt attempt counter for the log message
 * @param p path being recovered, for the log message
 * @param startWaiting recovery start time, for the log message
 * @return true when past the deadline (after logging a DATALOSS warning), false otherwise
 */
boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, final int nbAttempt,
    final Path p, final long startWaiting) {
  boolean timedOut = recoveryTimeout < EnvironmentEdgeManager.currentTime();
  if (timedOut) {
    String detail = getLogMessageDetail(nbAttempt, p, startWaiting);
    LOG.warn("Cannot recoverLease after trying for "
        + conf.getInt("hbase.lease.recovery.timeout", 900000)
        + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + detail);
  }
  return timedOut;
}
if (!FSHDFSUtils.isSameHdfs(conf, realSrcFs, desFs)) { LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " + "the destination store. Copying file over to destination filesystem.");
/** * Test recover lease eventually succeeding. */ @Test public void testRecoverLease() throws IOException { HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class); // Fail four times and pass on the fifth. Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); }
/** * Try to recover the lease. * @param dfs * @param nbAttempt * @param p * @param startWaiting * @return True if dfs#recoverLease came by true. * @throws FileNotFoundException */ boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); LOG.info((recovered? "Recovered lease, ": "Failed to recover lease, ") + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it throw new FileNotFoundException("The given WAL wasn't found at " + p); } else if (e instanceof FileNotFoundException) { throw (FileNotFoundException)e; } LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e); } return recovered; }
FileSystem desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
/** * Recover the lease from HDFS, retrying multiple times. */ @Override public void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter) throws IOException { // lease recovery not needed for local file system case. if (!(fs instanceof DistributedFileSystem)) return; recoverDFSFileLease((DistributedFileSystem)fs, p, conf, reporter); }
if (FSHDFSUtils.isSameHdfs(getConf(), sourceFs, targetFs)) { for (Pair<byte[], String> el : famPaths) { Path hfileStagingPath = null;
/** * Test that isFileClosed makes us recover lease faster. */ @Test public void testIsFileClosed() throws IOException { // Make this time long so it is plain we broke out because of the isFileClosed invocation. HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 100000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); IsFileClosedDistributedFileSystem dfs = Mockito.mock(IsFileClosedDistributedFileSystem.class); // Now make it so we fail the first two times -- the two fast invocations, then we fall into // the long loop during which we will call isFileClosed.... the next invocation should // therefore return true if we are to break the loop. Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(true); Mockito.when(dfs.isFileClosed(FILE)).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(2)).recoverLease(FILE); Mockito.verify(dfs, Mockito.times(1)).isFileClosed(FILE); }
@Override public void failedBulkLoad(final byte[] family, final String srcPath) throws IOException { if (!FSHDFSUtils.isSameHdfs(conf, srcFs, fs)) { // files are copied so no need to move them back return; } Path p = new Path(srcPath); Path stageP = new Path(stagingDir, new Path(Bytes.toString(family), p.getName())); LOG.debug("Moving " + stageP + " back to " + p); if(!fs.rename(stageP, p)) throw new IOException("Failed to move HFile: " + stageP + " to " + p); // restore original permission if (origPermissions.containsKey(srcPath)) { fs.setPermission(p, origPermissions.get(srcPath)); } else { LOG.warn("Can't find previous permission for path=" + srcPath); } }