Code search results: usages of Hadoop TokenCache across projects (one snippet per line; several snippets are truncated).
// NOTE(review): truncated snippet — the method signature head and tail are cut off (looks like
// HBase ExportSnapshot job setup; `job`, `srcConf`, `filesGroup`, etc. are declared outside this view).
// Visible behavior: copies export settings (group/user/mode, checksum verify, input/output roots,
// bandwidth, snapshot name/dir) into the job Configuration, then obtains HDFS delegation tokens
// for both the source and the destination namenodes via TokenCache.
throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup); if (filesUser != null) conf.set(CONF_FILES_USER, filesUser); if (mappers > 0) { conf.setInt(CONF_NUM_SPLITS, mappers); conf.setInt(MR_NUM_MAPS, mappers); conf.setInt(CONF_FILES_MODE, filesMode); conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum); conf.set(CONF_OUTPUT_ROOT, outputRoot.toString()); conf.set(CONF_INPUT_ROOT, inputRoot.toString()); conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB); conf.set(CONF_SNAPSHOT_NAME, snapshotName); conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString()); String jobname = conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName); TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { inputRoot }, srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outputRoot }, destConf);
/**
 * Verifies that requesting tokens for the same path twice results in exactly
 * one delegation-token fetch, and that the renewer passed to the file system
 * is the one derived from the RM principal.
 */
@Test
public void testSingleTokenFetch() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
  String renewer = Master.getMasterPrincipal(conf);
  Credentials creds = new Credentials();

  // Stub a mock file system behind the view so the service name and URI resolve.
  final MockFileSystem viewFs = new MockFileSystem();
  final MockFileSystem rawFs = (MockFileSystem) viewFs.getRawFileSystem();
  when(rawFs.getCanonicalServiceName()).thenReturn("host:0");
  when(rawFs.getUri()).thenReturn(new URI("mockfs://host:0"));

  Path path = mock(Path.class);
  when(path.getFileSystem(conf)).thenReturn(rawFs);

  // The same path appears twice; the fetch must still happen only once.
  Path[] paths = new Path[]{ path, path };
  when(rawFs.addDelegationTokens("me", creds)).thenReturn(null);
  TokenCache.obtainTokensForNamenodesInternal(creds, paths, conf);
  verify(rawFs, times(1)).addDelegationTokens(renewer, creds);
}
// NOTE(review): truncated snippet with several disjoint statements fused together (looks like
// Hadoop JobSubmitter: submit-dir setup, shuffle-secret generation, split creation, token-referral
// cleanup, tracking-id collection, and staging-area cleanup). Surrounding control flow is cut off.
// Visible behavior: records the submit dir, obtains namenode tokens for it, and sets a shuffle
// secret key on the job credentials when one is absent.
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString()); LOG.debug("Configuring job " + jobId + " with " + submitJobDir + " as the submit dir"); TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { submitJobDir }, conf); if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) { KeyGenerator keyGen; try { TokenCache.setShuffleSecretKey(shuffleKey.getEncoded(), job.getCredentials()); LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir)); TokenCache.cleanUpTokenReferral(conf); job.getCredentials().getAllTokens()) { trackingIds.add(t.decodeIdentifier().getTrackingId()); jobId, submitJobDir.toString(), job.getCredentials()); if (status != null) { return status; LOG.info("Cleaning up the staging area " + submitJobDir); if (jtFs != null && submitJobDir != null) jtFs.delete(submitJobDir, true);
// NOTE(review): truncated snippet — the method head (before `final Arguments args`) and tail are
// cut off (looks like legacy DistCp job setup). Visible behavior: copies option flags into the
// JobConf, creates the job staging directory with JOB_DIR_PERMISSION, obtains namenode delegation
// tokens for the destination path, then inspects whether the destination exists and is a directory.
final Arguments args) throws IOException { jobConf.set(DST_DIR_LABEL, args.dst.toUri().toString()); final boolean overwrite = !update && args.flags.contains(Options.OVERWRITE) && !args.dryrun; jobConf.setBoolean(Options.UPDATE.propertyname, update); jobConf.setBoolean(Options.SKIPCRC.propertyname, skipCRCCheck); jobConf.setBoolean(Options.OVERWRITE.propertyname, overwrite); jobConf.setBoolean(Options.IGNORE_READ_FAILURES.propertyname, Path jobDirectory = new Path(stagingArea + NAME + "_" + randomId); FsPermission mapredSysPerms = new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION); FileSystem.mkdirs(jClient.getFs(), jobDirectory, mapredSysPerms); jobConf.set(JOB_DIR_LABEL, jobDirectory.toString()); long maxBytesPerMap = conf.getLong(BYTES_PER_MAP_LABEL, BYTES_PER_MAP); FileSystem dstfs = args.dst.getFileSystem(conf); TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), new Path[] {args.dst}, conf); boolean dstExists = dstfs.exists(args.dst); boolean dstIsDir = false; if (dstExists) { dstIsDir = dstfs.getFileStatus(args.dst).isDirectory();
// NOTE(review): truncated snippet — head and tail cut off (looks like a Tez example driver's run
// method). Visible behavior: parses args, rejects a pre-existing output dir, prepares a random
// staging dir, uploads the job jar, obtains namenode tokens for the uploaded jar, and builds
// per-stage JobConfs (stage1 filter word, stage2 output dir with old-API mapper disabled).
Configuration conf = getConf(); String [] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); Credentials credentials = new Credentials(); String filterWord = otherArgs[2]; FileSystem fs = FileSystem.get(conf); if (fs.exists(new Path(outputPath))) { System.err.println("Output directory : " + outputPath + " already exists"); return 2; fs.getWorkingDirectory(); Path stagingDir = new Path(fs.getWorkingDirectory(), UUID.randomUUID().toString()); tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDir.toString()); TezClientUtils.ensureStagingDirExists(tezConf, stagingDir); fs.copyFromLocalFile(new Path(jarPath), remoteJarPath); FileStatus remoteJarStatus = fs.getFileStatus(remoteJarPath); TokenCache.obtainTokensForNamenodes(credentials, new Path[]{remoteJarPath}, conf); Configuration stage1Conf = new JobConf(conf); stage1Conf.set(FILTER_PARAM_NAME, filterWord); Configuration stage2Conf = new JobConf(conf); stage2Conf.set(FileOutputFormat.OUTDIR, outputPath); stage2Conf.setBoolean("mapred.mapper.new-api", false);
// NOTE(review): truncated snippet — one LOG.warn call is cut mid string concatenation and the
// enclosing method head/tail are missing (looks like the MR ApplicationMaster building container
// launch context). Visible behavior: registers the job jar as a PATTERN LocalResource with an
// unpack pattern, copies credentials for the task, sets the job token, and checks for a shuffle
// secret and auxiliary shuffle provider services.
FileSystem remoteFS = FileSystem.get(conf); String jobJar = conf.get(MRJobConfig.JAR); if (jobJar != null) { final Path jobJarPath = new Path(jobJar); final FileSystem jobJarFs = FileSystem.get(jobJarPath.toUri(), conf); Path remoteJobJar = jobJarPath.makeQualified(jobJarFs.getUri(), jobJarFs.getWorkingDirectory()); LocalResource rc = createLocalResource(jobJarFs, remoteJobJar, LocalResourceType.PATTERN, LocalResourceVisibility.APPLICATION); String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern(); rc.setPattern(pattern); LOG.info("Adding #" + credentials.numberOfTokens() + " tokens and #" + credentials.numberOfSecretKeys() + " secret keys for NM use for launching container"); Credentials taskCredentials = new Credentials(credentials); TokenCache.setJobToken(jobToken, taskCredentials); byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials); if (shuffleSecret == null) { LOG.warn("Cannot locate shuffle secret in credentials." Collection<String> shuffleProviders = conf.getStringCollection( MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES); if (! shuffleProviders.isEmpty()) {
// NOTE(review): truncated snippet — the method body is cut (no closing brace; `conf`, `creds`,
// `fs1`–`fs3`, `renewer`, `newerToken1`, `token2` come from the missing context). Visible behavior:
// writes credentials to a binary token file (path with or without scheme, per `hasScheme`),
// points MAPREDUCE_JOB_CREDENTIALS_BINARY at it, and checks that repeated obtainTokensForNamenodesInternal
// calls reuse file-sourced tokens rather than re-fetching them.
private void testBinaryCredentials(boolean hasScheme) throws Exception { Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","test/build/data")); ? FileSystem.getLocal(conf).makeQualified( new Path(TEST_ROOT_DIR, "tokenFile")).toString() : FileSystem.getLocal(conf).makeQualified( new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath(); conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile); creds.writeTokenStorageFile(new Path(binaryTokenFile), conf); TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer); checkToken(creds, newerToken1, token2); TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer); checkToken(creds, newerToken1, token2); TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer); Token<?> token3 = creds.getToken(new Text(fs3.getCanonicalServiceName())); assertTrue(token3 != null); TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer); TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf, renewer); TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf, renewer); checkToken(creds, newerToken1, token2, token3);
// NOTE(review): truncated snippet — head and tail cut off (looks like Tez MRRSleepJob DAG setup).
// Visible behavior: builds the map-stage and intermediate-reduce-stage JobConfs with sleep
// parameters, uploads the job jar to the staging dir, and obtains namenode delegation tokens
// for the uploaded jar using the map-stage configuration.
Configuration mapStageConf = new JobConf(conf); mapStageConf.setInt(MRJobConfig.NUM_MAPS, numMapper); mapStageConf.setLong(MAP_SLEEP_TIME, mapSleepTime); mapStageConf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime); mapStageConf.setLong(IREDUCE_SLEEP_TIME, iReduceSleepTime); mapStageConf.setInt(MAP_SLEEP_COUNT, mapSleepCount); intermediateReduceStageConfs = new JobConf[iReduceStagesCount]; for (int i = 1; i <= iReduceStagesCount; ++i) { JobConf iReduceStageConf = new JobConf(conf); iReduceStageConf.setLong(MRRSleepJob.REDUCE_SLEEP_TIME, iReduceSleepTime); iReduceStageConf.setInt(MRRSleepJob.REDUCE_SLEEP_COUNT, iReduceSleepCount); iReduceStageConf.setInt(MRJobConfig.NUM_REDUCES, numIReducer); FileSystem stagingFs = stagingDir.getFileSystem(conf); Path remoteJarPath = new Path(stagingDir, "dag_job.jar"); stagingFs.copyFromLocalFile(new Path(jarPath), remoteJarPath); FileStatus jarFileStatus = stagingFs.getFileStatus(remoteJarPath); TokenCache.obtainTokensForNamenodes(this.credentials, new Path[] { remoteJarPath }, mapStageConf);
/**
 * Creates the per-step staging directory, records it in the given Tez
 * configuration, obtains delegation tokens for it, and ensures it exists.
 *
 * @param workingConf Tez configuration updated with {@code TEZ_AM_STAGING_DIR}
 * @return the fully qualified staging directory path
 * @throws IOException if the file system cannot be contacted or the directory created
 */
private Path prepareEnsureStagingDir( TezConfiguration workingConf ) throws IOException
  {
  String stepStagingPath = createStepStagingPath();

  workingConf.set( TezConfiguration.TEZ_AM_STAGING_DIR, stepStagingPath );

  Path stagingDir = new Path( stepStagingPath );

  FileSystem fileSystem = FileSystem.get( workingConf );

  stagingDir = fileSystem.makeQualified( stagingDir );

  // no-op on insecure clusters; on secure clusters fetches HDFS delegation tokens
  TokenCache.obtainTokensForNamenodes( new Credentials(), new Path[]{stagingDir}, workingConf );

  TezClientUtils.ensureStagingDirExists( workingConf, stagingDir );

  // BUGFIX: FileSystem.getScheme() returns a bare scheme such as "file" (never
  // "file:/"), so the previous startsWith( "file:/" ) test could never match and
  // the local directory was never pre-created. Match on the bare scheme instead.
  if( fileSystem.getScheme().startsWith( "file" ) )
    new File( stagingDir.toUri() ).mkdirs();

  return stagingDir;
  }
@SuppressWarnings("deprecation") @Test public void testGetTokensForNamenodes() throws IOException, URISyntaxException { Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "test/build/data")); // ick, but need fq path minus file:/ String binaryTokenFile = FileSystem.getLocal(conf) .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri() .getPath(); MockFileSystem fs1 = createFileSystemForServiceName("service1"); Credentials creds = new Credentials(); Token<?> token1 = fs1.getDelegationToken(renewer); creds.addToken(token1.getService(), token1); // wait to set, else the obtain tokens call above will fail with FNF conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile); creds.writeTokenStorageFile(new Path(binaryTokenFile), conf); TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf, renewer); String fs_addr = fs1.getCanonicalServiceName(); Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr); assertNotNull("Token for nn is null", nnt); } }
// NOTE(review): truncated snippet — head and tail cut off (looks like classic TaskTracker job
// localization). Visible behavior: loads the localized job token file, registers the job token
// with the JobTokenSecretManager, adds all tokens to the job's remote-user UGI, localizes the
// job conf, and records the token-file location and job-local dir in the localized JobConf.
FileSystem localFs = FileSystem.getLocal(fConf); getLocalizer().initializeJobDirs(userName, jobId); rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser()); Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf); Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts); if (jt != null) { //could be null in the case of some unit tests getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt); for (Token<? extends TokenIdentifier> token : ts.getAllTokens()) { rjob.ugi.addToken(token); localizeJobConfFile(new Path(t.getJobFile()), userName, jobId); JobConf localJobConf = new JobConf(localJobFile); localJobConf.setUser(userName); localJobConf.set(TokenCache.JOB_TOKENS_FILENAME, localJobTokenFile); System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath()); localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
// NOTE(review): truncated snippet — head and tail cut off. Visible behavior: qualifies the
// remote job-submit path, derives the remote job conf file location, and — when no shuffle
// secret is present in the job credentials — falls back to using the job token's password as
// the shuffle secret key.
FileSystem.get(job.conf).makeQualified( new Path(path, oldJobIDString)); job.remoteJobConfFile = new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE); if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) { LOG.warn("Shuffle secret key missing from job credentials." + " Using job token secret as shuffle secret."); TokenCache.setShuffleSecretKey(job.jobToken.getPassword(), job.jobCredentials);
// NOTE(review): truncated snippet — one LOG.warn call is cut mid string concatenation and the
// method is missing its closing brace (looks like LocalContainerLauncher/uber-task setup).
// Visible behavior: installs credentials on the JobConf, records the app-attempt id, enables
// TCP_NODELAY for IPC, selects YarnOutputFiles for local output, checks for a shuffle secret,
// and writes the localized job conf file used by the task.
private static void configureTask(JobConf job, Task task, Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException { job.setCredentials(credentials); LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId); job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptId.getAttemptId()); job.setBoolean("ipc.client.tcpnodelay", true); job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS, YarnOutputFiles.class, MapOutputFile.class); byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials); if (shuffleSecret == null) { LOG.warn("Shuffle secret missing from task credentials." Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE); writeLocalJobFile(localTaskFile, job); task.setJobFile(localTaskFile.toString()); task.setConf(job);
private void populateTokenCache(Configuration conf, Credentials credentials) throws IOException{ readTokensFromFiles(conf, credentials); // add the delegation tokens from configuration String [] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES); LOG.debug("adding the following namenodes' delegation tokens:" + Arrays.toString(nameNodes)); if(nameNodes != null) { Path [] ps = new Path[nameNodes.length]; for(int i=0; i< nameNodes.length; i++) { ps[i] = new Path(nameNodes[i]); } TokenCache.obtainTokensForNamenodes(credentials, ps, conf); } }
private void populateTokens(JobConf job) { // Credentials in the job will not have delegation tokens // because security is disabled. Fetch delegation tokens // and populate the credential in the job. try { Credentials ts = job.getCredentials(); Path p1 = new Path("file1"); p1 = p1.getFileSystem(job).makeQualified(p1); Credentials cred = new Credentials(); TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 }, job); for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) { ts.addToken(new Text("Hdfs"), t); } } catch (IOException e) { Assert.fail("Exception " + e); } }
public void checkOutputSpecs(FileSystem ignored, JobConf job) throws FileAlreadyExistsException, InvalidJobConfException, IOException { // Ensure that the output directory is set and not already there Path outDir = getOutputPath(job); if (outDir == null && job.getNumReduceTasks() != 0) { throw new InvalidJobConfException("Output directory not set in JobConf."); } if (outDir != null) { FileSystem fs = outDir.getFileSystem(job); // normalize the output directory outDir = fs.makeQualified(outDir); setOutputPath(job, outDir); // get delegation token for the outDir's file system TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] {outDir}, job); // check its existence if (fs.exists(outDir)) { throw new FileAlreadyExistsException("Output directory " + outDir + " already exists"); } } }
// NOTE(review): truncated snippet — head and tail cut off (looks like storm-on-yarn app
// submission; `paths` and `confDst` are declared outside this view). Visible behavior: uploads
// AppMaster.jar to the app home dir, resolves the storm.zip location (explicit setting or a
// versioned default path), obtains namenode delegation tokens for the collected paths, and
// serializes the credentials into a ByteBuffer for the container launch context.
FileSystem fs = FileSystem.get(_hadoopConf); Path src = new Path(appMasterJar); String appHome = Util.getApplicationHomeForId(_appId.toString()); Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar"); fs.copyFromLocalFile(false, true, src, dst); localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst)); Path zip; if (storm_zip_location != null) { zip = new Path(storm_zip_location); } else { zip = new Path("/lib/storm/" + stormVersion + "/storm.zip"); paths[1] = zip; paths[2] = confDst; Credentials credentials = new Credentials(); TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf); DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
private void setupBinaryTokenFile(Job job) { // Credentials in the job will not have delegation tokens // because security is disabled. Fetch delegation tokens // and store in binary token file. try { Credentials cred1 = new Credentials(); Credentials cred2 = new Credentials(); TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 }, job.getConfiguration()); for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) { cred2.addToken(new Text("Hdfs"), t); } DataOutputStream os = new DataOutputStream(new FileOutputStream( binaryTokenFileName.toString())); cred2.writeTokenStorageToStream(os); os.close(); job.getConfiguration().set("mapreduce.job.credentials.binary", binaryTokenFileName.toString()); } catch (IOException e) { Assert.fail("Exception " + e); } } }
@Override public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { if (context == null) { throw new IllegalArgumentException("context must not be null"); //$NON-NLS-1$ } Path path = getOutputPath(context); if (TemporaryOutputFormat.getOutputPath(context) == null) { throw new IOException("Temporary output path is not set"); } TokenCache.obtainTokensForNamenodes( context.getCredentials(), new Path[] { path }, context.getConfiguration()); if (path.getFileSystem(context.getConfiguration()).exists(path)) { throw new IOException(MessageFormat.format( "Output directory {0} already exists", path)); } }
/**
 * Builds input splits from an export manifest found in the single configured
 * input directory, obtaining namenode tokens for it first.
 *
 * @return splits parsed from the manifest, or {@code null} when the directory
 *     contains no manifest file
 * @throws IOException if zero or multiple input paths are configured, or the
 *     input path is not a directory
 */
private List<InputSplit> getSplitsFromManifest(JobConf job) throws IOException {
  Path[] inputDirs = getInputPaths(job);
  if (inputDirs.length == 0) {
    throw new IOException("No input path specified in job");
  }
  if (inputDirs.length > 1) {
    throw new IOException("Will only look for manifests in a single input directory (" + inputDirs
        .length + " directories provided).");
  }
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), inputDirs, job);

  Path inputDir = inputDirs[0];
  FileSystem fileSystem = inputDir.getFileSystem(job);
  if (!fileSystem.getFileStatus(inputDir).isDirectory()) {
    throw new IOException("Input path not a directory: " + inputDir);
  }
  Path manifestPath = new Path(inputDir, ExportManifestOutputFormat.MANIFEST_FILENAME);
  if (!fileSystem.isFile(manifestPath)) {
    return null; // no manifest present; caller falls back to other split logic
  }
  return parseManifest(fileSystem, manifestPath, job);
}