/**
 * Reassign the lease on file {@code src} to {@code newHolder}, removing the
 * previous lease (if any) first.
 *
 * @param lease     the existing lease on the file, or null if none
 * @param src       the file whose lease is being reassigned
 * @param newHolder the client taking over the lease; must not be null
 * @return the newly added lease for {@code newHolder}
 */
synchronized Lease reassignLease(Lease lease, INodeFile src, String newHolder) {
  assert newHolder != null : "new lease holder is null";
  final long inodeId = src.getId();
  if (lease != null) {
    removeLease(lease, inodeId);
  }
  return addLease(newHolder, inodeId);
}
@Override // NameNode protected void initialize(Configuration conf) throws IOException { // async edit logs are incompatible with backup node due to race // conditions resulting from laxer synchronization conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_ASYNC_LOGGING, false); // Trash is disabled in BackupNameNode, // but should be turned back on if it ever becomes active. conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); NamespaceInfo nsInfo = handshake(conf); super.initialize(conf); namesystem.setBlockPoolId(nsInfo.getBlockPoolID()); if (false == namesystem.isInSafeMode()) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); } // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); // register with the active name-node registerWith(nsInfo); // Checkpoint daemon should start after the rpc server started runCheckpointDaemon(conf); InetSocketAddress addr = getHttpAddress(); if (addr != null) { conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress())); } }
/**
 * Trigger an immediate lease-expiry scan by delegating to
 * {@link #checkLeases()}. Exposed only so unit tests can force a check
 * without waiting for the monitor thread.
 */
@VisibleForTesting
public void runLeaseChecks() {
  checkLeases();
}
/**
 * Renew the lease(s) held by the given client, looked up by holder name.
 *
 * @param holder the client name whose lease should be renewed
 */
synchronized void renewLease(String holder) {
  renewLease(getLease(holder));
}

// NOTE(review): overload body continues beyond this chunk; only the
// signature is visible here.
synchronized void renewLease(Lease lease) {
/**
 * Remove the lease held by {@code holder} on the file {@code src}.
 * A warning is logged when the holder has no lease to remove.
 *
 * @param holder the client name whose lease should be removed
 * @param src    the file the lease covers
 */
synchronized void removeLease(String holder, INodeFile src) {
  final Lease lease = getLease(holder);
  if (lease == null) {
    LOG.warn("Removing non-existent lease! holder={} src={}", holder,
        src.getFullPathName());
    return;
  }
  removeLease(lease, src.getId());
}
assert newFile != null; iip = INodesInPath.replace(iip, iip.length() - 1, newFile); fsNamesys.leaseManager.addLease(addCloseOp.clientName, path); fsNamesys.leaseManager.removeLeaseWithPrefixPath(path); file.toCompleteFile(file.getModificationTime()); ReassignLeaseOp reassignLeaseOp = (ReassignLeaseOp)op; Lease lease = fsNamesys.leaseManager.getLease( reassignLeaseOp.leaseHolder); final String path =
throw new IOException("Unable to add " + src + " to namespace"); fsn.leaseManager.addLease( newNode.getFileUnderConstructionFeature().getClientName(), newNode.getId());
/** Check that even if LeaseManager.checkLease is not able to relinquish * leases, the Namenode does't enter an infinite loop while holding the FSN * write lock and thus become unresponsive */ @Test (timeout=1000) public void testCheckLeaseNotInfiniteLoop() { FSDirectory dir = Mockito.mock(FSDirectory.class); FSNamesystem fsn = Mockito.mock(FSNamesystem.class); Mockito.when(fsn.isRunning()).thenReturn(true); Mockito.when(fsn.hasWriteLock()).thenReturn(true); Mockito.when(fsn.getFSDirectory()).thenReturn(dir); LeaseManager lm = new LeaseManager(fsn); //Make sure the leases we are going to add exceed the hard limit lm.setLeasePeriod(0,0); //Add some leases to the LeaseManager lm.addLease("holder1", "src1"); lm.addLease("holder2", "src2"); lm.addLease("holder3", "src3"); assertEquals(lm.getNumSortedLeases(), 3); //Initiate a call to checkLease. This should exit within the test timeout lm.checkLeases(); }
assert newFile != null; iip = INodesInPath.replace(iip, iip.length() - 1, newFile); fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId()); fsNamesys.getLeaseManager().removeLease(file.getId()); file.toCompleteFile(file.getModificationTime(), 0, fsNamesys.getBlockManager().getMinReplication()); ReassignLeaseOp reassignLeaseOp = (ReassignLeaseOp)op; Lease lease = fsNamesys.leaseManager.getLease( reassignLeaseOp.leaseHolder); final String path =
/**
 * removeLeaseWithPrefixPath must remove every lease whose path falls under
 * the given prefix, both with and without a trailing slash on the prefix.
 */
@Test
public void testRemoveLeaseWithPrefixPath() throws Exception {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  // Shut the cluster down even when an assertion fails; the original test
  // leaked the MiniDFSCluster.
  try {
    cluster.waitActive();
    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
    lm.addLease("holder1", "/a/b");
    lm.addLease("holder2", "/a/c");
    assertNotNull(lm.getLeaseByPath("/a/b"));
    assertNotNull(lm.getLeaseByPath("/a/c"));

    // Prefix without a trailing slash.
    lm.removeLeaseWithPrefixPath("/a");
    assertNull(lm.getLeaseByPath("/a/b"));
    assertNull(lm.getLeaseByPath("/a/c"));

    lm.addLease("holder1", "/a/b");
    lm.addLease("holder2", "/a/c");

    // Prefix with a trailing slash must behave the same way.
    lm.removeLeaseWithPrefixPath("/a/");
    assertNull(lm.getLeaseByPath("/a/b"));
    assertNull(lm.getLeaseByPath("/a/c"));
  } finally {
    cluster.shutdown();
  }
}
/**
 * Remove every lease whose path starts with the given prefix.
 *
 * @param prefix path prefix; each lease located by
 *               {@link #findLeaseWithPrefixPath} for this prefix is removed
 */
synchronized void removeLeaseWithPrefixPath(String prefix) {
  for (Map.Entry<String, LeaseOpenTime> entry
      : findLeaseWithPrefixPath(prefix, sortedLeasesByPath)) {
    // Parameterized logging replaces the isDebugEnabled() guard plus string
    // concatenation, matching the {} logging style used elsewhere in this
    // class; the message is only formatted when debug is enabled.
    LOG.debug("{}.removeLeaseWithPrefixPath: entry={}",
        LeaseManager.class.getSimpleName(), entry);
    removeLease(entry.getValue().lease, entry.getKey());
  }
}
/** Returns true iff some client currently holds a lease on {@code src}. */
static boolean hasLease(MiniDFSCluster cluster, Path src) {
  Lease lease =
      cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString());
  return lease != null;
}
Lease lease = leaseManager.getLease(holder); Lease leaseFile = leaseManager.getLeaseByPath(src); if (leaseFile != null && leaseFile.equals(lease)) { throw new AlreadyBeingCreatedException( lease = leaseManager.getLease(pendingFile.clientName); if (lease == null) { throw new AlreadyBeingCreatedException(
/**
 * Renew every lease currently tracked by this manager.
 */
synchronized void renewAllLeases() {
  leases.values().forEach(this::renewLease);
}
/**
 * Get the total number of active clients holding lease in the system.
 *
 * @return the number of distinct lease holders, as reported by the lease
 *         manager's countLease()
 */
@Metric({ "NumActiveClients", "Number of active clients holding lease" })
public long getNumActiveClients() {
  return leaseManager.countLease();
}
/**
 * Drop the lease record associated with the given inode, if one exists;
 * otherwise this is a no-op.
 *
 * @param inodeId id of the inode whose lease should be removed
 */
synchronized void removeLease(long inodeId) {
  final Lease lease = leasesById.get(inodeId);
  if (lease == null) {
    return; // no lease registered for this inode
  }
  removeLease(lease, inodeId);
}
&& !isMaxLockHoldToReleaseLease(start)) { Lease leaseToCheck = sortedLeases.first(); LOG.info("{} has expired hard limit", leaseToCheck); FSDirectory fsd = fsnamesystem.getFSDirectory(); String p = null; String newHolder = getInternalLeaseHolder(); for(Long id : leaseINodeIds) { try { if (fsnamesystem.isFileDeleted(lastINode)) { removeLease(lastINode.getId()); continue; removing.add(id); if (isMaxLockHoldToReleaseLease(start)) { LOG.debug("Breaking out of checkLeases after {} ms.", fsnamesystem.getMaxLockHoldToReleaseLeaseMs()); removeLease(leaseToCheck, id);
/**
 * FSNamesystem#clear must drop every lease held by the LeaseManager.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  // One outstanding lease before the clear...
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());

  // ...and none afterwards.
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
cluster.getNamesystem().leaseManager.removeLease( file.getFileUnderConstructionFeature().getClientName(), path); cluster.getNamesystem().leaseManager.getLeaseByPath(path)); } finally { if (cluster != null) {
/** * Remove the blocks from the given list. Also, remove the path. Add the * blocks to invalidates, and set a flag that explicit ACK from DataNode is * not required. This function should be used only for deleting entire files. */ void removePathAndBlocks(String src, List<Block> blocks) throws IOException { // No need for lock until we start accepting requests from clients. assert (!nameNode.isRpcServerRunning() || hasWriteLock()); leaseManager.removeLeaseWithPrefixPath(src); removeBlocks(blocks); }