"Block: " + block + ", Expected Replicas: " + curExpectedRedundancy + ", live replicas: " + curReplicas + ", corrupt replicas: " + num.corruptReplicas() + ", decommissioned replicas: " + num.decommissioned() + ", decommissioning replicas: " + num.decommissioning()
numberReplicas.corruptReplicas());
" decommissioning and decommissioned: " + numReplicas.decommissionedAndDecommissioning() + " corrupt: " + numReplicas.corruptReplicas() + " in excess: " + numReplicas.excessReplicas() + " maintenance mode: " + numReplicas.maintenanceReplicas() + ") ");
int numCorruptNodes = num.corruptReplicas(); if (numCorruptNodes != corruptReplicasCount) { LOG.warn("Inconsistent number of corrupt replicas for {}" +
final int numCorruptNodes = numReplicas.corruptReplicas(); final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk); if (numCorruptNodes != numCorruptReplicas) {
(numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > expectedRedundancies; boolean corruptedDuringWrite = minReplicationSatisfied &&
/**
 * Logs a one-line INFO summary of the replication state of {@code block}:
 * expected vs. live replica counts, corrupt / decommissioned /
 * decommissioning / excess totals, whether the file is open, the datanodes
 * currently holding the block, and whether {@code srcNode} is
 * decommissioning.
 */
private static void logBlockReplicationInfo(Block block, BlockCollection bc,
    DatanodeDescriptor srcNode, NumberReplicas num,
    Iterable<DatanodeStorageInfo> storages) {
  final int liveReplicas = num.liveReplicas();
  final int expectedReplicas = bc.getBlockReplication();
  // Build a space-separated list of every datanode that has this block.
  final StringBuilder nodes = new StringBuilder();
  for (DatanodeStorageInfo storageInfo : storages) {
    nodes.append(storageInfo.getDatanodeDescriptor());
    nodes.append(" ");
  }
  LOG.info("Block: " + block + ", Expected Replicas: " + expectedReplicas
      + ", live replicas: " + liveReplicas
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissioned()
      + ", decommissioning replicas: " + num.decommissioning()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + bc.isUnderConstruction()
      + ", Datanodes having this block: " + nodes
      + ", Current Datanode: " + srcNode
      + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
out.println("No. of decommission Replica: " + numberReplicas.decommissioned()); out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
/**
 * Logs a one-line INFO summary of the replication state of {@code block}:
 * expected vs. live replica counts, corrupt / decommissioned /
 * decommissioning / excess totals, whether the file is open, the datanodes
 * currently holding the block, and whether {@code srcNode} is
 * decommissioning.
 */
private static void logBlockReplicationInfo(Block block, BlockCollection bc,
    DatanodeDescriptor srcNode, NumberReplicas num,
    Iterable<DatanodeStorageInfo> storages) {
  final int liveReplicas = num.liveReplicas();
  final int expectedReplicas = bc.getBlockReplication();
  // Build a space-separated list of every datanode that has this block.
  final StringBuilder nodes = new StringBuilder();
  for (DatanodeStorageInfo storageInfo : storages) {
    nodes.append(storageInfo.getDatanodeDescriptor());
    nodes.append(" ");
  }
  LOG.info("Block: " + block + ", Expected Replicas: " + expectedReplicas
      + ", live replicas: " + liveReplicas
      + ", corrupt replicas: " + num.corruptReplicas()
      + ", decommissioned replicas: " + num.decommissioned()
      + ", decommissioning replicas: " + num.decommissioning()
      + ", excess replicas: " + num.excessReplicas()
      + ", Is Open File: " + bc.isUnderConstruction()
      + ", Datanodes having this block: " + nodes
      + ", Current Datanode: " + srcNode
      + ", Is current datanode decommissioning: "
      + srcNode.isDecommissionInProgress());
}
void checkTimeout(String testLabel, long cycleTime) throws TimeoutException { if (Time.monotonicNow() > failtime) { throw new TimeoutException("Timeout: " + testLabel + " for block " + lastBlock + " after " + timeout + " msec. Last counts: live = " + lastNum.liveReplicas() + ", excess = " + lastNum.excessReplicas() + ", corrupt = " + lastNum.corruptReplicas()); } if (cycleTime > 0) { try { Thread.sleep(cycleTime); } catch (InterruptedException ie) { //ignore } } }
" l: " + numReplicas.liveReplicas() + " d: " + numReplicas.decommissionedAndDecommissioning() + " c: " + numReplicas.corruptReplicas() + " e: " + numReplicas.excessReplicas() + ") ");
/**
 * Asserts that the tracked block has exactly {@code expectedReplicas} live
 * replicas and no excess, corrupt, decommissioned/decommissioning, or
 * stale-node replicas, and that the block manager reports zero
 * under-replicated and zero excess blocks after a state refresh.
 *
 * @param expectedReplicas the expected live replica count
 * @throws IOException if counting the block's replicas fails
 */
private void validateNumberReplicas(int expectedReplicas) throws IOException {
  final NumberReplicas counts = blockManager.countNodes(block);
  assertThat(counts.liveReplicas(), is(expectedReplicas));
  assertThat(counts.excessReplicas(), is(0));
  assertThat(counts.corruptReplicas(), is(0));
  assertThat(counts.decommissionedAndDecommissioning(), is(0));
  assertThat(counts.replicasOnStaleNodes(), is(0));
  // Refresh the block manager's aggregate metrics before checking them.
  BlockManagerTestUtil.updateState(blockManager);
  assertThat(blockManager.getUnderReplicatedBlocksCount(), is(0L));
  assertThat(blockManager.getExcessBlocksCount(), is(0L));
}
/** * Verify that corrupt <tt>READ_ONLY_SHARED</tt> replicas aren't counted * towards the corrupt replicas total. */ @Test public void testReadOnlyReplicaCorrupt() throws Exception { // "Corrupt" a READ_ONLY_SHARED replica by reporting it as a bad replica client.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(extendedBlock, new DatanodeInfo[] { readOnlyDataNode }) }); // There should now be only 1 *location* for the block as the READ_ONLY_SHARED is corrupt waitForLocations(1); // However, the corrupt READ_ONLY_SHARED replica should *not* affect the overall corrupt replicas count NumberReplicas numberReplicas = blockManager.countNodes(block); assertThat(numberReplicas.corruptReplicas(), is(0)); }
final int numCorruptNodes = countNodes(blk).corruptReplicas(); final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk); if (numCorruptNodes != numCorruptReplicas) {
final int numCorruptNodes = countNodes(blk).corruptReplicas(); final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk); if (numCorruptNodes != numCorruptReplicas) {
numberOfReplicas.liveReplicas() >= minReplication; boolean hasMoreCorruptReplicas = minReplicationSatisfied && (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > bc.getBlockReplication(); boolean corruptedDuringWrite = minReplicationSatisfied &&
assertEquals(3, countReplicas(namesystem, block).corruptReplicas()); assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas()); assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas()); assertEquals(0, countReplicas(namesystem, block).corruptReplicas()); } finally { cluster.shutdown();
numberOfReplicas.liveReplicas() >= minReplication; boolean hasMoreCorruptReplicas = minReplicationSatisfied && (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > bc.getBlockReplication(); boolean corruptedDuringWrite = minReplicationSatisfied &&