/**
 * Builds a delegation token for the given owner/renewer pair, signed by the
 * cluster namenode's delegation-token secret manager.
 *
 * @param owner   token owner principal
 * @param renewer principal permitted to renew the token
 * @return a freshly generated delegation token
 */
private Token<DelegationTokenIdentifier> generateDelegationToken(
    String owner, String renewer) {
  final DelegationTokenSecretManager secretManager =
      cluster.getNamesystem().getDelegationTokenSecretManager();
  final DelegationTokenIdentifier tokenId =
      new DelegationTokenIdentifier(new Text(owner), new Text(renewer), null);
  return new Token<DelegationTokenIdentifier>(tokenId, secretManager);
}
/**
 * Sets the soft and hard limits of client lease periods on the namesystem's
 * lease manager, then interrupts the lease monitor thread so the new limits
 * take effect right away rather than after its next scheduled wake-up.
 *
 * @param soft soft lease-period limit
 * @param hard hard lease-period limit
 */
void setLeasePeriod(long soft, long hard) {
  final FSNamesystem ns = getNamesystem();
  ns.leaseManager.setLeasePeriod(soft, hard);
  // Wake the lease monitor so it re-evaluates leases under the new periods.
  ns.lmthread.interrupt();
}
/**
 * Re-acquires the namesystem, FSDirectory, and filesystem handles from the
 * running cluster (presumably after a restart invalidates the old
 * references — confirm with callers).
 *
 * @throws IOException if the filesystem handle cannot be obtained
 */
private void refreshClusterState() throws IOException {
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  dfs = cluster.getFileSystem();
}
/**
 * Sets the softLimit and hardLimit of client lease periods.
 * Delegates to {@link NameNodeAdapter#setLeasePeriod} against this
 * cluster's namesystem.
 *
 * @param soft soft lease-period limit
 * @param hard hard lease-period limit
 */
public void setLeasePeriod(long soft, long hard) {
  NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
}
/**
 * Spins up a single-node mini-cluster configured with short delegation-token
 * lifetimes (10s max lifetime, 5s renew interval), an auth_to_local rule for
 * the JobTracker principal, and starts the secret manager's threads.
 */
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  // Strip the realm from JobTracker principals in any *FOO.COM realm.
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
}
private LocatedBlocks createFileGetBlocks(String filenamePrefix) throws IOException { Path filePath = new Path("/" + filenamePrefix + ".dat"); // Write out a file with a few blocks, get block locations. DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS, BLOCK_SIZE, NUM_DATANODES, seed); // Get the block list for the file with the block locations. LocatedBlocks blocks = client.getLocatedBlocks( filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS); assertThat(cluster.getNamesystem().getUnderReplicatedBlocks(), is(0L)); return blocks; }
/**
 * Looks up the AclFeature attached to the inode at the given path,
 * asserting that the inode itself exists.
 *
 * @param pathToCheck path whose inode's ACL feature is wanted
 * @param cluster running mini-cluster to query
 * @return the inode's AclFeature (may be null if the inode has none)
 * @throws IOException if the inode lookup fails
 */
public static AclFeature getAclFeature(Path pathToCheck,
    MiniDFSCluster cluster) throws IOException {
  final INode inode = cluster.getNamesystem().getFSDirectory()
      .getINode(pathToCheck.toUri().getPath(), false);
  assertNotNull(inode);
  return inode.getAclFeature();
}
/**
 * Returns the UUID of the single datanode registered with the given
 * namenode, asserting that exactly one datanode is registered.
 *
 * @param cluster mini-cluster to inspect
 * @param nnIndex index of the namenode whose registration list is read
 * @return the registered datanode's UUID
 */
private static String getRegisteredDatanodeUid(
    MiniDFSCluster cluster, int nnIndex) {
  final List<DatanodeDescriptor> registered =
      cluster.getNamesystem(nnIndex)
          .getBlockManager()
          .getDatanodeManager()
          .getDatanodeListForReport(DatanodeReportType.ALL);
  assertEquals(1, registered.size());
  return registered.get(0).getDatanodeUuid();
}
/**
 * Asserts that the two namenodes (indices 0 and 1) report identical
 * storage cTime values.
 *
 * @param cluster two-namenode mini-cluster to check
 */
private static void assertCTimesEqual(MiniDFSCluster cluster) {
  final long cTimeNn1 =
      cluster.getNamesystem(0).getFSImage().getStorage().getCTime();
  final long cTimeNn2 =
      cluster.getNamesystem(1).getFSImage().getStorage().getCTime();
  assertEquals(cTimeNn1, cTimeNn2);
}
/**
 * Starts a default mini-cluster with a custom block size and caches the
 * FSDirectory and filesystem handles used by the tests.
 */
@Before
public void setUp() throws IOException {
  final Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  dfs = cluster.getFileSystem();
  dir = cluster.getNamesystem().getFSDirectory();
}
/**
 * Starts a cluster whose block reports are split once they exceed the
 * given threshold.
 *
 * @param splitThreshold value for DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY
 * @throws IOException if cluster startup fails
 */
public void startUpCluster(long splitThreshold) throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY, splitThreshold);
  // NOTE(review): unlike sibling fixtures, there is no waitActive() here —
  // confirm that callers tolerate a not-yet-active cluster.
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
  fs = cluster.getFileSystem();
  bpid = cluster.getNamesystem().getBlockPoolId();
}
/**
 * Brings up a mini-cluster with REPLICATION datanodes and caches the
 * filesystem and namesystem handles.
 */
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
}
/**
 * Starts a cluster, opens a DFSClient against the namenode, and caches the
 * first datanode together with its block-pool registration.
 *
 * @throws IOException if cluster or client startup fails
 */
@Before
public void startUpCluster() throws IOException {
  conf = new Configuration();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
  fs = cluster.getFileSystem();
  client = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0));
  dn0 = cluster.getDataNodes().get(0);
  poolId = cluster.getNamesystem().getBlockPoolId();
  dn0Reg = dn0.getDNRegistrationForBP(poolId);
}
/**
 * Freshly formats and starts a REPLICATION-node cluster with a custom
 * block size, caching the namesystem, FSDirectory, and filesystem handles.
 */
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .format(true)
      .build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
}
/**
 * Starts a DATANODE_COUNT-node cluster, caches the namesystem, block
 * manager, and filesystem handles, and resets the per-test temp-file list.
 */
@Before
public void setUp() throws Exception {
  cluster = new MiniDFSCluster.Builder(CONF)
      .numDataNodes(DATANODE_COUNT)
      .build();
  cluster.waitActive();
  namesystem = cluster.getNamesystem();
  bm = namesystem.getBlockManager();
  fs = cluster.getFileSystem();
  tempFiles = new ArrayList<>();
}
/**
 * Brings up a REPLICATION-node cluster, caches the namesystem and
 * filesystem handles, and pre-creates the shared test directory.
 */
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
  hdfs.mkdirs(dir);
}
/**
 * Brings up a REPLICATION-node cluster, caches the namesystem and
 * filesystem handles, and pre-creates the two shared test directories.
 */
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  fsn = cluster.getNamesystem();
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
}
/**
 * Freshly formats and starts a REPLICATION-node cluster, caching the
 * namesystem, FSDirectory, block manager, and filesystem handles.
 */
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .format(true)
      .build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  blockmanager = fsn.getBlockManager();
  hdfs = cluster.getFileSystem();
}
/**
 * Returns the total number of blocks tracked by the namenode, asserting
 * along the way that the cluster, namenode, namesystem, and block manager
 * are all present.
 *
 * @return total block count, narrowed to int
 */
private int getBlockCount() {
  Assert.assertNotNull("Null cluster", mc);
  Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
  final FSNamesystem ns = mc.getNamesystem();
  Assert.assertNotNull("Null Namesystem in cluster", ns);
  Assert.assertNotNull("Null Namesystem.blockmanager", ns.getBlockManager());
  return (int) ns.getBlocksTotal();
}
/**
 * Starts a cluster whose datanode storages both use {@code storageType},
 * then caches the filesystem handle and block-pool id.
 *
 * @throws IOException if cluster startup fails
 */
@Before
public void startUpCluster() throws IOException {
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPL_FACTOR)
      .storageTypes(new StorageType[] {storageType, storageType})
      .build();
  fs = cluster.getFileSystem();
  bpid = cluster.getNamesystem().getBlockPoolId();
}