public BlockManager(final Namesystem namesystem, boolean haEnabled, final Configuration conf) throws IOException { this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); this.blockIdManager = new BlockIdManager(this); blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE, datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan()); rescannedMisreplicatedBlocks = new ArrayList<Block>(blocksPerPostpondedRescan); DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L; invalidateBlocks = new InvalidateBlocks( datanodeManager.getBlockInvalidateLimit(), startupDelayBlockDeletionInMs, blockIdManager); LightWeightGSet.computeCapacity(2.0, "BlocksMap")); placementPolicies = new BlockPlacementPolicies( conf, datanodeManager.getFSClusterStats(), datanodeManager.getNetworkTopology(), datanodeManager.getHost2DatanodeMap()); storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite(); pendingReconstruction = new PendingReconstructionBlocks(conf.getInt(
if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) { DatanodeDescriptor nodeS = getDatanode(nodeReg.getDatanodeUuid()); DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr( nodeReg.getIpAddr(), nodeReg.getXferPort()); removeDatanode(nodeN); wipeDatanode(nodeN); nodeN = null; getNetworkTopology().remove(nodeS); if(shouldCountVersion(nodeS)) { decrementVersionCount(nodeS.getSoftwareVersion()); nodeS.setNetworkLocation(resolveNetworkLocation(nodeS)); nodeS.setDependentHostNames(getNetworkDependencies(nodeS)); } else { nodeS.setNetworkLocation( resolveNetworkLocationWithFallBackToDefaultLocation(nodeS)); nodeS.setDependentHostNames( getNetworkDependenciesWithDefault(nodeS)); getNetworkTopology().add(nodeS); resolveUpgradeDomain(nodeS); incrementVersionCount(nodeS.getSoftwareVersion()); startAdminOperationIfNecessary(nodeS); success = true;
/**
 * Add a datanode to the cluster-wide tracking maps and the network topology.
 * Must be invoked with the appropriate namesystem lock held by the caller
 * (this method only synchronizes the map swap itself).
 *
 * @param node the descriptor of the datanode to add; also evicts any stale
 *             descriptor previously registered under the same UUID
 */
void addDatanode(final DatanodeDescriptor node) {
  // To keep host2DatanodeMap consistent with datanodeMap,
  // remove from host2DatanodeMap the datanodeDescriptor removed
  // from datanodeMap before adding node to host2DatanodeMap.
  // (datanodeMap.put returns the previous mapping for this UUID, or null.)
  synchronized(this) {
    host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node));
  }

  networktopology.add(node); // may throw InvalidTopologyException
  // Only publish the node in host2DatanodeMap after topology insertion
  // succeeded, so a topology failure does not leave a half-registered node.
  host2DatanodeMap.add(node);
  checkIfClusterIsNowMultiRack(node);
  resolveUpgradeDomain(node);

  if (LOG.isDebugEnabled()) {
    LOG.debug(getClass().getSimpleName() + ".addDatanode: "
        + "node " + node + " is added to datanodeMap.");
  }
}
/**
 * Remove a datanode that has stopped heart-beating.
 * Does nothing if the node is unknown or is still considered alive.
 *
 * @param nodeID identity of the datanode to check and remove
 * @param removeBlocksFromBlockMap whether the node's block replicas should
 *        also be dropped from the blocks map
 */
void removeDeadDatanode(final DatanodeID nodeID,
    boolean removeBlocksFromBlockMap) {
  DatanodeDescriptor dead = null;
  try {
    dead = getDatanode(nodeID);
  } catch (IOException ignored) {
    // An unresolvable node is handled exactly like "not found".
  }
  if (dead == null || !isDatanodeDead(dead)) {
    return;
  }
  NameNode.stateChangeLog.info(
      "BLOCK* removeDeadDatanode: lost heartbeat from " + dead
          + ", removeBlocksFromBlockMap " + removeBlocksFromBlockMap);
  removeDatanode(dead, removeBlocksFromBlockMap);
}
getDatanodeListForReport(DatanodeReportType.ALL); for(DatanodeDescriptor node : results) { if (isDatanodeDead(node)) { if (dead != null) { dead.add(node); removeDecomNodeFromList(live); removeDecomNodeFromList(dead);
/**
 * Remove a datanode from the cluster under the namesystem write lock.
 * Logs a warning instead of failing when the node is not registered.
 *
 * @param node identity of the datanode to remove
 * @throws UnregisteredNodeException if the lookup rejects the registration
 */
public void removeDatanode(final DatanodeID node)
    throws UnregisteredNodeException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor dn = getDatanode(node);
    if (dn == null) {
      // Nothing to remove; record the attempt for operators.
      NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
          + node + " does not exist");
    } else {
      removeDatanode(dn, true);
    }
  } finally {
    // Always release the write lock, even if removal throws.
    namesystem.writeUnlock();
  }
}
DatanodeID dnId = parseDNFromHostsEntry(address); String host = dnId.getIpAddr(); int xferPort = dnId.getXferPort(); DatanodeDescriptor node = getDatanodeByXferAddr(host, xferPort); if (node == null) { node = getDatanodeByHost(host); resolveNetworkLocationWithFallBackToDefaultLocation(dnId); List<Node> rackNodes = getNetworkTopology() .getDatanodesInRack(networkLocation); if (rackNodes != null) { node = (DatanodeDescriptor)getNetworkTopology() .chooseRandom(NodeBase.ROOT);
int idx = host.indexOf(":"); if (idx != -1) { excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr( host.substring(0, idx), Integer.parseInt(host.substring(idx + 1)))); } else { excludes.add(bm.getDatanodeManager().getDatanodeByHost(host)); ).getDatanodeByHost(remoteAddr); if (clientNode != null) { final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS( return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology( ).chooseRandom(NodeBase.ROOT, excludes);
@Override // FSNamesystemMBean @Metric({"VolumeFailuresTotal", "Total number of volume failures across all Datanodes"}) public int getVolumeFailuresTotal() { List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false); int volumeFailuresTotal = 0; for (DatanodeDescriptor node: live) { volumeFailuresTotal += node.getVolumeFailures(); } return volumeFailuresTotal; }
try { String blockPoolId = namenode.getNamesystem().getBlockPoolId(); dnManager.handleHeartbeat(dnrList.get(3), BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]), blockPoolId, dataNodes[3].getCacheCapacity(), dataNodes[3].getCacheRemaining(), 2, 0, 0, null); dnManager.handleHeartbeat(dnrList.get(4), BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]), blockPoolId, dataNodes[4].getCacheCapacity(), dataNodes[4].getCacheRemaining(), 4, 0, 0, null); dnManager.handleHeartbeat(dnrList.get(5), BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]), blockPoolId, dataNodes[5].getCacheCapacity(), assertEquals((double)load/6, dnManager.getFSClusterStats() .getInServiceXceiverAverage(), EPSILON); DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i)); dnManager.getDecomManager().startDecommission(d); d.setDecommissioned(); assertEquals((double)load/3, dnManager.getFSClusterStats() .getInServiceXceiverAverage(), EPSILON); dnManager.fetchDatanodes(liveNodes, null, false); DatanodeDescriptor writerDn = null; if (liveNodes.contains(dataNodes[0])) {
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  // Spy on a real descriptor so numBlocks() can be stubbed below while the
  // rest of the node behaves normally.
  DatanodeDescriptor node = spy(nodes.get(0));
  DatanodeStorageInfo ds = node.getStorageInfos()[0];
  node.isAlive = true;
  DatanodeRegistration nodeReg =
      new DatanodeRegistration(node, null, null, "");

  // pretend to be in safemode
  doReturn(true).when(fsn).isInStartupSafeMode();

  // register new node
  bm.getDatanodeManager().registerDatanode(nodeReg);
  bm.getDatanodeManager().addDatanode(node); // swap in spy
  // Sanity: the manager must now resolve to the spy, with no reports yet.
  assertEquals(node, bm.getDatanodeManager().getDatanode(node));
  assertEquals(0, ds.getBlockReportCount());

  // send block report while pretending to already have blocks
  // (reset clears the registration-time interactions so the stub below is
  // the only behavior override on the spy)
  reset(node);
  doReturn(1).when(node).numBlocks();
  bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
      BlockListAsLongs.EMPTY, null);
  // The (empty) report must still be counted even though the node claims
  // to already hold blocks while in startup safemode.
  assertEquals(1, ds.getBlockReportCount());
}
/** @return list of datanodes where decommissioning is in progress. */ public List<DatanodeDescriptor> getDecommissioningNodes() { // There is no need to take namesystem reader lock as // getDatanodeListForReport will synchronize on datanodeMap // A decommissioning DN may be "alive" or "dead". return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING); }
DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class), fsn, new Configuration()); dm.removeDatanode(toRemove); it.remove(); dm.registerDatanode(dr); sIdToDnReg.put(storageID, dr); Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class), fsn, new Configuration()); Mockito.when(dr.getXferPort()).thenReturn(9000); Mockito.when(dr.getSoftwareVersion()).thenReturn("version1"); dm.registerDatanode(dr); locs[i] = dm.getDatanode(uuid); storageIDs[i] = "storageID-"+i; dm.sortLocatedBlocks(targetIp, blocks);
bm.getDatanodeManager().registerDatanode(nodeReg); bm.getDatanodeManager().addDatanode(node); // swap in spy assertEquals(node, bm.getDatanodeManager().getDatanode(node)); assertEquals(0, ds.getBlockReportCount()); bm.getDatanodeManager().removeDatanode(node); reset(node); bm.getDatanodeManager().registerDatanode(nodeReg); verify(node).updateRegInfo(nodeReg);
public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) { assert namesystem.hasReadLock(); DatanodeDescriptor node = null; try { node = datanodeManager.getDatanode(nodeReg); } catch (UnregisteredNodeException e) { LOG.warn("Unregistered datanode {}", nodeReg); return 0; } if (node == null) { LOG.warn("Failed to find datanode {}", nodeReg); return 0; } // Request a new block report lease. The BlockReportLeaseManager has // its own internal locking. long leaseId = blockReportLeaseManager.requestLease(node); BlockManagerFaultInjector.getInstance(). requestBlockReportLease(node, leaseId); return leaseId; }
final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager(); decommissionNode(fsn, localFileSys, dnName); dm.refreshNodes(conf); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); while (true) { dm.fetchDatanodes(null, dead, false); if (dead.size() == 1) { break; List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes(); assertTrue("The node should be be decommissioning", decomlist.size() == 1); dm.refreshNodes(conf);
@Override // FSNamesystemMBean @Metric({"NumDecommissioningDataNodes", "Number of datanodes in decommissioning state"}) public int getNumDecommissioningDataNodes() { return getBlockManager().getDatanodeManager().getDecommissioningNodes() .size(); }
@Override //NameNodeMXBean public int getDistinctVersionCount() { return blockManager.getDatanodeManager().getDatanodesSoftwareVersions() .size(); }
final DatanodeDescriptor nodeinfo; try { nodeinfo = getDatanode(nodeReg); } catch (UnregisteredNodeException e) { return new DatanodeCommand[]{RegisterCommand.REGISTER}; setDatanodeDead(nodeinfo); throw new DisallowedDatanodeException(nodeinfo); final BlockRecoveryCommand brCommand = getBlockRecoveryCommand(blockPoolId, nodeinfo); if (brCommand != null) { addCacheCommands(blockPoolId, nodeinfo, cmds);