/**
 * Decides whether a block already has enough effective redundancy.
 * Effective redundancy counts both live replicas and replicas that are
 * pending (in flight). When nothing is pending, the placement policy must
 * also be satisfied for the block to be considered fully redundant.
 *
 * @param block             the block being checked
 * @param numReplicas       current replica counts for the block
 * @param pendingReplicaNum number of replicas currently being transferred
 * @return true iff live + pending replicas meet the expected count and,
 *         when no transfers are pending, the placement policy holds
 */
boolean hasEnoughEffectiveReplicas(BlockInfo block, NumberReplicas numReplicas, int pendingReplicaNum) {
  final int required = getExpectedLiveRedundancyNum(block, numReplicas);
  final int effective = numReplicas.liveReplicas() + pendingReplicaNum;
  if (effective < required) {
    return false;
  }
  // With transfers still pending we don't re-check placement yet; the
  // policy is only enforced once nothing more is in flight.
  return pendingReplicaNum > 0 || isPlacementPolicySatisfied(block);
}
/**
 * Reports whether a block needs reconstruction for maintenance purposes.
 * Only complete blocks qualify; a complete block needs work when its live
 * replica count is below the maintenance minimum, or when its replicas
 * violate the placement policy.
 *
 * @param storedBlock    the block under consideration
 * @param numberReplicas current replica counts for the block
 * @return true iff the block is complete and under-protected
 */
boolean isNeededReconstructionForMaintenance(BlockInfo storedBlock, NumberReplicas numberReplicas) {
  if (!storedBlock.isComplete()) {
    return false;
  }
  final boolean belowMaintenanceMinimum =
      numberReplicas.liveReplicas() < getMinMaintenanceStorageNum(storedBlock);
  return belowMaintenanceMinimum || !isPlacementPolicySatisfied(storedBlock);
}
(!isPlacementPolicySatisfied(block)) ) { if (!isInNewRack(rw.getSrcNodes(), targets[0].getDatanodeDescriptor())) {
/**
 * A block needs replication if the number of replicas is less than expected
 * or if it does not have enough racks.
 *
 * @param b        the block to examine
 * @param expected the expected replica count
 * @param current  the current replica count
 * @return true iff the stored block is complete and under-replicated or
 *         badly placed
 */
boolean isNeededReplication(Block b, int expected, int current) {
  // Reuse the argument when it is already the stored form; otherwise
  // look the stored block up in the blocks map.
  final BlockInfoContiguous blockInfo = (b instanceof BlockInfoContiguous)
      ? (BlockInfoContiguous) b
      : getStoredBlock(b);
  if (!blockInfo.isComplete()) {
    return false;
  }
  return current < expected || !isPlacementPolicySatisfied(b);
}
/**
 * A block needs replication if the number of replicas is less than expected
 * or if it does not have enough racks.
 *
 * @param b        the block to examine
 * @param expected the expected replica count
 * @param current  the current replica count
 * @return true iff the stored block is complete and under-replicated or
 *         badly placed
 */
boolean isNeededReplication(Block b, int expected, int current) {
  BlockInfoContiguous stored;
  if (b instanceof BlockInfoContiguous) {
    // Already the stored representation; no lookup required.
    stored = (BlockInfoContiguous) b;
  } else {
    stored = getStoredBlock(b);
  }
  if (!stored.isComplete()) {
    return false;
  }
  final boolean underReplicated = current < expected;
  return underReplicated || !isPlacementPolicySatisfied(b);
}
final int numLive = numberReplicas.liveReplicas(); if (numLive >= numExpected && blockManager.isPlacementPolicySatisfied(block)) {
(isPlacementPolicySatisfied(block)) ) { (isPlacementPolicySatisfied(block)) ) { (!isPlacementPolicySatisfied(block)) ) { if (rw.srcNode.getNetworkLocation().equals( targets[0].getDatanodeDescriptor().getNetworkLocation())) {
/** * {@link BlockManager#isPlacementPolicySatisfied(Block)} should return false * if all the replicas are on the same rack and shouldn't be dependent on * CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY * @throws Exception */ @Test public void testAllReplicasOnSameRack() throws Exception { Configuration conf = new HdfsConfiguration(); conf.unset(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY); fsn = Mockito.mock(FSNamesystem.class); Mockito.doReturn(true).when(fsn).hasWriteLock(); Mockito.doReturn(true).when(fsn).hasReadLock(); bm = new BlockManager(fsn, conf); // Add nodes on two racks addNodes(nodes); // Added a new block in blocksMap and all the replicas are on the same rack BlockInfoContiguous blockInfo = addBlockOnNodes(1, rackA); // Since the network toppolgy is multi-rack, the blockHasEnoughRacks // should return false. assertFalse("Replicas for block is not stored on enough racks", bm.isPlacementPolicySatisfied(blockInfo)); }
(isPlacementPolicySatisfied(block)) ) { (isPlacementPolicySatisfied(block)) ) { (!isPlacementPolicySatisfied(block)) ) { if (rw.srcNode.getNetworkLocation().equals( targets[0].getDatanodeDescriptor().getNetworkLocation())) {
final int numLive = numberReplicas.liveReplicas(); if (numLive >= numExpected && blockManager.isPlacementPolicySatisfied(block)) {