    namesystem.writeUnlock();
    workFound += this.computeInvalidateWork(nodesToProcess);
    return workFound;
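    // Context sketch (an assumption, not shown in this excerpt): inside
    // BlockManager#computeDatanodeWork the nodesToProcess value used above is
    // typically derived from the live DataNode count scaled by the configured
    // invalidation-work percentage, roughly:
    //
    //   final int numlive = heartbeatManager.getLiveDatanodeCount();
    //   final int nodesToProcess =
    //       (int) Math.ceil(numlive * this.blocksInvalidateWorkPct);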
  public static int computeInvalidationWork(BlockManager bm) {
    return bm.computeInvalidateWork(Integer.MAX_VALUE);
  }
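  // Usage sketch (a minimal example, assuming a running MiniDFSCluster named
  // "cluster" with blocks already queued for deletion): drain every pending
  // invalidation in one pass, then let heartbeats deliver the work to DataNodes.
  //
  //   BlockManager bm = cluster.getNamesystem().getBlockManager();
  //   BlockManagerTestUtil.computeInvalidationWork(bm);
  //   cluster.triggerHeartbeats();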
      assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
          bm.computeInvalidateWork(NUM_OF_DATANODES+1));
      assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
          bm.computeInvalidateWork(NUM_OF_DATANODES));
      assertEquals(blockInvalidateLimit*(NUM_OF_DATANODES-1),
          bm.computeInvalidateWork(NUM_OF_DATANODES-1));
      int workCount = bm.computeInvalidateWork(1);
      if (workCount == 1) {
        assertEquals(blockInvalidateLimit+1, bm.computeInvalidateWork(2));
      } else {
        assertEquals(workCount, blockInvalidateLimit);
        assertEquals(2, bm.computeInvalidateWork(2));
      }
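      // Worked example (illustrative numbers only, assuming blockInvalidateLimit
      // = 100, NUM_OF_DATANODES = 3, and a setup, not shown here, that queues
      // well over blockInvalidateLimit blocks per node): asking for more nodes
      // than exist is clamped, so the first call schedules 3 * 100 = 300 blocks.
      // Each call also drains what it schedules from the pending queue, which is
      // why the later single-node calls only see the small remainders checked in
      // the if/else above.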
        .computeInvalidateWork(2);
    cluster.triggerHeartbeats();
    HATestUtil.waitForDNDeletions(cluster);
  /**
   * Reformatted DataNodes will replace the original UUID in the
   * {@link DatanodeManager#datanodeMap}. This tests if block
   * invalidation work on the original DataNode can be skipped.
   */
  @Test(timeout=120000)
  public void testDatanodeReformat() throws Exception {
    namesystem.writeLock();
    try {
      // Change the datanode UUID to emulate a reformat
      String poolId = cluster.getNamesystem().getBlockPoolId();
      DatanodeRegistration dnr = cluster.getDataNode(nodes[0].getIpcPort())
          .getDNRegistrationForBP(poolId);
      dnr = new DatanodeRegistration(UUID.randomUUID().toString(), dnr);
      cluster.stopDataNode(nodes[0].getXferAddr());

      Block block = new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP);
      bm.addToInvalidates(block, nodes[0]);
      bm.getDatanodeManager().registerDatanode(dnr);

      // Since UUID has changed, the invalidation work should be skipped
      assertEquals(0, bm.computeInvalidateWork(1));
      assertEquals(0, bm.getPendingDeletionBlocksCount());
    } finally {
      namesystem.writeUnlock();
    }
  }
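  // By contrast (a sketch, not part of the original test): had the DataNode
  // re-registered with its original UUID, the block queued above would still be
  // attributed to it, and one round of invalidation work would be expected:
  //
  //   bm.getDatanodeManager().registerDatanode(originalDnr);  // hypothetical original registration
  //   assertEquals(1, bm.computeInvalidateWork(1));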