/**
 * Blocks until the mini DFS cluster has at least {@code numReplicas} DataNodes
 * running and every DataNode in the cluster reports itself as fully started.
 *
 * @param numReplicas minimum number of DataNodes to wait for
 * @throws Exception if interrupted while sleeping between polls
 */
public void waitForNumReplicas(int numReplicas) throws Exception {
  // Poll until enough DataNodes have registered with the cluster.
  while (UTIL.getDFSCluster().getDataNodes().size() < numReplicas) {
    Thread.sleep(100);
  }
  // One pass over the DataNodes is sufficient: each node is polled until it is
  // fully started, and a started node stays started. The previous version
  // wrapped this scan in a redundant "for i < numReplicas" loop that repeated
  // the identical check numReplicas times.
  for (DataNode dn : UTIL.getDFSCluster().getDataNodes()) {
    while (!dn.isDatanodeFullyStarted()) {
      Thread.sleep(100);
    }
  }
}
}
/**
 * Verifies that the WAL-backed procedure store aborts when replication drops
 * below the required level: with one of three DataNodes stopped, inserts are
 * issued until the store shuts itself down, which must eventually happen.
 */
@Test(expected=RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
  setupDFS();
  assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());

  LOG.info("Stop DataNode");
  UTIL.getDFSCluster().stopDataNode(0);
  assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());

  store.insert(new TestProcedure(1, -1), null);
  // Keep inserting fresh procedures until the store notices the degraded
  // replication and stops running.
  long procId = 2;
  while (store.isRunning()) {
    assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
    store.insert(new TestProcedure(procId, -1), null);
    procId++;
    Thread.sleep(100);
  }
  assertFalse(store.isRunning());
}
setupDFS(); assertEquals(3, UTIL.getDFSCluster().getDataNodes().size()); store.registerListener(new ProcedureStore.ProcedureStoreListener() { @Override LOG.info("Stop DataNode"); UTIL.getDFSCluster().stopDataNode(0); assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
Path p = new Path("hello"); Assert.assertTrue((short) cluster.getDataNodes().size() > 1); final int repCount = 2; final String lookup = lbs[0].getHosts()[0]; StringBuilder sb = new StringBuilder(); for (DataNode dn : cluster.getDataNodes()) { final String dnName = getHostName(dn); sb.append(dnName).append(' ');
Path currentFile = wal.getCurrentFileName(); for (int i = 0, n = TEST_UTIL.getDFSCluster().getDataNodes().size(); i < n; i++) {
List<DataNode> datanodes = TEST_UTIL.getDFSCluster().getDataNodes(); Method selfAddress; try {
Assert.assertTrue((short) cluster.getDataNodes().size() >= repCount);
List<DataNode> existingNodes = dfsCluster.getDataNodes(); int numDataNodes = 3; dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, null, null); List<DataNode> allNodes = dfsCluster.getDataNodes(); for (int i = allNodes.size() - 1; i >= 0; i--) { if (existingNodes.contains(allNodes.get(i))) { "DataNodes " + dfsCluster.getDataNodes().size() + " default replication " + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()), dfsCluster.getDataNodes() .size() >= fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) + 1);
/**
 * Collects the on-disk block files for every block currently reported by the
 * DataNodes of the given mini cluster.
 *
 * NOTE(review): assumes the i-th entry of getAllBlockReports() corresponds to
 * the i-th DataNode in getDataNodes() — confirm against MiniDFSCluster.
 *
 * @param cluster the running mini DFS cluster to inspect
 * @return one File per reported block, in report order
 * @throws IOException if a block file cannot be resolved
 */
static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
  List<File> files = new ArrayList<>();  // diamond: element type is inferred
  List<DataNode> datanodes = cluster.getDataNodes();
  Iterable<Block>[] blocks = cluster.getAllBlockReports();
  for (int i = 0; i < blocks.length; i++) {
    FSDataset ds = (FSDataset) datanodes.get(i).getFSDataset();
    for (Block b : blocks[i]) {
      files.add(ds.getBlockFile(b));
    }
  }
  return files;
}
/** Sums the DFS space used across all DataNodes of the given mini cluster. */
static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException {
  long used = 0L;
  for (DataNode datanode : cluster.getDataNodes()) {
    used += datanode.getFSDataset().getDfsUsed();
  }
  return used;
}
private Block findBlock(Path path, long size) throws IOException { Block ret; List<LocatedBlock> lbs = cluster.getNameNode().getBlockLocations(path.toString(), FILE_START, size).getLocatedBlocks(); LocatedBlock lb = lbs.get(lbs.size() - 1); // Get block from the first DN ret = cluster.getDataNodes().get(DN_N0). data.getStoredBlock(lb.getBlock().getBlockId()); return ret; }
protected final void triggerBlockReport() throws IOException, InterruptedException { // Trigger block report to NN DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(0)); Thread.sleep(10 * 1000); }
/** Forces every DataNode in this cluster to heartbeat the NameNode immediately. */
public void triggerHeartbeats() throws IOException {
  for (DataNode datanode : getDataNodes()) {
    DataNodeTestUtils.triggerHeartbeat(datanode);
  }
}
@Override public Boolean get() { for (DataNode dn : cluster.getDataNodes()) { if (DataNodeTestUtils.getPendingAsyncDeletions(dn) > 0) { return false; } } return true; } }, 1000, 10000);
/** Sums the DFS space used across all DataNodes of the given mini cluster. */
static long getTotalDfsUsed(MiniDFSCluster cluster) throws IOException {
  long used = 0L;
  for (DataNode datanode : cluster.getDataNodes()) {
    used += DataNodeTestUtils.getFSDataset(datanode).getDfsUsed();
  }
  return used;
}
/**
 * Spins up a single-DataNode mini cluster configured with a short socket
 * keepalive and zero block-acquire retries, and caches the lone DataNode.
 */
@Before
public void setup() throws Exception {
  conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, KEEPALIVE_TIMEOUT);
  conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 0);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf).numDataNodes(1);
  cluster = builder.build();
  dn = cluster.getDataNodes().get(0);
}
/**
 * Resolves the DataNode instance in the cluster whose id matches the given
 * DatanodeInfo; fails the test if no such DataNode exists.
 */
private DataNode getDataNode(DatanodeInfo decomInfo) {
  DataNode match = null;
  for (DataNode candidate : cluster.getDataNodes()) {
    if (decomInfo.equals(candidate.getDatanodeId())) {
      match = candidate;
      break;
    }
  }
  assertNotNull("Could not find decomNode in cluster!", match);
  return match;
}
/**
 * Boots a NUM_DATANODES-node mini cluster and caches the file system, a DFS
 * client bound to the NameNode, and the first DataNode plus its block-pool
 * registration.
 */
@Before
public void startUpCluster() throws IOException {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  fs = cluster.getFileSystem();
  InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  client = new DFSClient(nnAddr, cluster.getConfiguration(0));
  dn0 = cluster.getDataNodes().get(0);
  poolId = cluster.getNamesystem().getBlockPoolId();
  dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
/**
 * Starts a DN_COUNT-node mini cluster and caches the NameNode, the first
 * DataNode, that DataNode's first block-pool service (and its first actor),
 * and the storage id of its first volume.
 *
 * NOTE: statement order matters — the cluster must be built before any of the
 * node/actor lookups below.
 */
@Before public void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_COUNT).build();
  singletonNn = cluster.getNameNode();
  singletonDn = cluster.getDataNodes().get(0);
  // First (and, with a default config, only) block-pool service on the DN.
  bpos = singletonDn.getAllBpOs()[0];
  actor = bpos.getBPServiceActors().get(0);
  // Storage id of the DataNode's first volume.
  storageUuid = singletonDn.getFSDataset().getVolumes().get(0).getStorageID();
}
/**
 * Brings up a REPL_FACTOR-node mini cluster with a 1MB block size, waits for
 * it to become active, and caches the file system, NameNode, first DataNode,
 * and block pool id.
 */
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);  // 1MB blocks
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  // Same NameNode instance as nn above, so reuse the cached reference.
  blockPoolId = nn.getNamesystem().getBlockPoolId();
}