List<Node> rackNodes = getNetworkTopology() .getDatanodesInRack(networkLocation); if (rackNodes != null) { node = (DatanodeDescriptor)getNetworkTopology() .chooseRandom(NodeBase.ROOT);
throw new IOException(String.format(errorMessage, src, targets.length, minReplication, "minReplication nodes", getDatanodeManager().getNetworkTopology().getNumOfLeaves(), (excludedNodes == null? "no": excludedNodes.size()))); } else if (blockType == BlockType.STRIPED && ecPolicy.getNumDataUnits(), String.format("required nodes for %s", ecPolicy.getName()), getDatanodeManager().getNetworkTopology().getNumOfLeaves(), (excludedNodes == null ? "no" : excludedNodes.size())));
placementPolicies = new BlockPlacementPolicies( conf, datanodeManager.getFSClusterStats(), datanodeManager.getNetworkTopology(), datanodeManager.getHost2DatanodeMap()); storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
// Runs a filesystem check (fsck) as the privileged action's body.
// Results are streamed to 'out'; the return value is unused (null).
@Override
public Object run() throws Exception {
  // Resolve the NameNode bound to this servlet context.
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  final FSNamesystem namesystem = nn.getNamesystem();
  final BlockManager bm = namesystem.getBlockManager();
  // Live-datanode count is passed through for fsck's summary reporting.
  final int totalDatanodes =
      namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
  // fsck checks block placement against the current network topology.
  new NamenodeFsck(conf, nn,
      bm.getDatanodeManager().getNetworkTopology(), pmap, out,
      totalDatanodes, remoteAddress).fsck();
  return null;
} });
return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology( ).chooseRandom(NodeBase.ROOT, excludes);
.getNetworkTopology(); NameNode.stateChangeLog.info("STATE* Network topology has {} racks and {}" + " datanodes", nt.getNumOfRacks(), nt.getNumOfLeaves());
try { getNetworkTopology().remove(nodeS); if(shouldCountVersion(nodeS)) { decrementVersionCount(nodeS.getSoftwareVersion()); getNetworkDependenciesWithDefault(nodeS)); getNetworkTopology().add(nodeS); resolveUpgradeDomain(nodeS);
/**
 * @return a randomly chosen datanode.
 * Picks uniformly from the namenode's network topology; NodeBase.ROOT
 * scopes the random choice to the entire cluster tree.
 */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
  return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
      ).getDatanodeManager().getNetworkTopology().chooseRandom(
          NodeBase.ROOT);
} }
/**
 * @return a randomly chosen datanode.
 * Delegates to the cluster's NetworkTopology; passing NodeBase.ROOT as
 * the scope means any leaf (datanode) in the tree may be selected.
 */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
  return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
      ).getDatanodeManager().getNetworkTopology().chooseRandom(
          NodeBase.ROOT);
} }
+ targets.length + " nodes instead of minReplication (=" + minReplication + "). There are " + getDatanodeManager().getNetworkTopology().getNumOfLeaves() + " datanode(s) running and " + (excludedNodes == null? "no": excludedNodes.size())
+ targets.length + " nodes instead of minReplication (=" + minReplication + "). There are " + getDatanodeManager().getNetworkTopology().getNumOfLeaves() + " datanode(s) running and " + (excludedNodes == null? "no": excludedNodes.size())
// Privileged action body: runs fsck against this NameNode's block
// manager state. Output goes to 'out'; null is returned by convention.
@Override
public Object run() throws Exception {
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  final FSNamesystem namesystem = nn.getNamesystem();
  final BlockManager bm = namesystem.getBlockManager();
  // Number of live datanodes, reported in fsck output.
  final int totalDatanodes =
      namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
  // The network topology lets fsck evaluate rack-aware placement.
  new NamenodeFsck(conf, nn,
      bm.getDatanodeManager().getNetworkTopology(), pmap, out,
      totalDatanodes, remoteAddress).fsck();
  return null;
} });
/**
 * Drops a dead datanode from the cluster: detaches it from the network
 * topology, then purges every block replica recorded against it so the
 * blocks can be re-replicated elsewhere.
 */
private void removeNode(DatanodeDescriptor deadNode) {
  final NetworkTopology topology =
      bm.getDatanodeManager().getNetworkTopology();
  // Remove from the topology first so placement decisions no longer
  // consider this node, then clear its replica bookkeeping.
  topology.remove(deadNode);
  bm.removeBlocksAssociatedTo(deadNode);
}
reachedTimestamp = -1; safeMode = null; final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology(); NameNode.stateChangeLog.info("STATE* Network topology has " + nt.getNumOfRacks() + " racks and "
// Executes fsck inside the privileged action. The check writes its
// report to 'out' directly, so there is no meaningful return value.
@Override
public Object run() throws Exception {
  // NameNode instance is stashed in the servlet context by the HTTP server.
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  final FSNamesystem namesystem = nn.getNamesystem();
  final BlockManager bm = namesystem.getBlockManager();
  final int totalDatanodes =
      namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
  new NamenodeFsck(conf, nn,
      bm.getDatanodeManager().getNetworkTopology(), pmap, out,
      totalDatanodes, remoteAddress).fsck();
  return null;
} });
// Builds a mini NameNode configured for NodeGroup-aware placement, then
// populates its network topology with the test datanodes.
@Before
public void setUp() throws Exception {
  // Bind to ephemeral ports so parallel test runs don't collide.
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
      NetworkTopologyWithNodeGroup.class.getName());
  // Stale datanodes must be avoided for writes in this test's scenarios.
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  // Keep the name directory under the per-test scratch dir.
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology(); for (DatanodeDescriptor datanode : dataNodes) { cluster.add(datanode);
cluster = bm.getDatanodeManager().getNetworkTopology(); for (int i = 0; i < nodesPerRack * numRacks; i++) { cluster.add(dataNodes[i]);
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) { NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology(); // construct network topology for (DatanodeDescriptor dn : nodesToAdd) { cluster.add(dn); dn.getStorageInfos()[0].setUtilizationForTesting( 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L); dn.updateHeartbeat( BlockManagerTestUtil.getStorageReportsForDatanode(dn), 0L, 0L, 0, 0, null); bm.getDatanodeManager().checkIfClusterIsNowMultiRack(dn); } }
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology(); for (DatanodeDescriptor datanode : dataNodes) { cluster.add(datanode);