/**
 * Does actual test TeraSort job Through Ignite API.
 *
 * @param gzip Whether to use GZIP: if {@code true}, enables
 *      {@code SHUFFLE_MSG_GZIP} on the submitted job.
 * @throws Exception If the job submission or execution fails.
 */
protected final void teraSort(boolean gzip) throws Exception {
    System.out.println("TeraSort ===============================================================");

    // Clear output of any previous run.
    getFileSystem().delete(new Path(sortOutDir), true);

    final JobConf jobConf = new JobConf();

    jobConf.setUser(getUser());

    jobConf.set("fs.defaultFS", getFsBase());

    log().info("Desired number of reduces: " + numReduces());

    jobConf.set("mapreduce.job.reduces", String.valueOf(numReduces()));

    log().info("Desired number of maps: " + numMaps());

    // Split size is derived so that the input is divided into exactly numMaps() splits.
    final long splitSize = dataSizeBytes() / numMaps();

    log().info("Desired split size: " + splitSize);

    // Force the split to be of the desired size:
    jobConf.set("mapred.min.split.size", String.valueOf(splitSize));
    jobConf.set("mapred.max.split.size", String.valueOf(splitSize));

    // Ignite-specific shuffle tuning.
    jobConf.setBoolean(HadoopJobProperty.SHUFFLE_MAPPER_STRIPED_OUTPUT.propertyName(), true);
    jobConf.setInt(HadoopJobProperty.SHUFFLE_MSG_SIZE.propertyName(), 4096);

    if (gzip)
        jobConf.setBoolean(HadoopJobProperty.SHUFFLE_MSG_GZIP.propertyName(), true);

    jobConf.set(HadoopJobProperty.JOB_PARTIALLY_RAW_COMPARATOR.propertyName(),
        TextPartiallyRawComparator.class.getName());

    Job job = setupConfig(jobConf);

    HadoopJobId jobId = new HadoopJobId(UUID.randomUUID(), 1);

    // Submit through the Ignite Hadoop accelerator and block until the job completes.
    IgniteInternalFuture<?> fut = grid(0).hadoop().submit(jobId, createJobInfo(job.getConfiguration(), null));

    fut.get();
}
// Run the job as the test user and direct Ignite counter output to a path
// containing the ${USER} placeholder — presumably expanded by the counter
// writer; verify against IgniteHadoopFileSystemCounterWriter.
jobConf.setUser(USER);
jobConf.set(IgniteHadoopFileSystemCounterWriter.COUNTER_WRITER_DIR_PROPERTY, "/xxx/${USER}/zzz");
/**
 * Sets the user name reported for this job.
 *
 * <p>Callable only while the job is still being defined; the state check
 * rejects modification after submission.
 *
 * @param user the username for this job.
 */
public void setUser(String user) {
    // Guard: configuration may only change in the DEFINE state.
    ensureState(JobState.DEFINE);

    conf.setUser(user);
}
/**
 * Sets the user name reported for this job.
 *
 * <p>Callable only while the job is still being defined; the state check
 * rejects modification after submission.
 *
 * @param user the username for this job.
 */
public void setUser(String user) {
    // Guard: configuration may only change in the DEFINE state.
    ensureState(JobState.DEFINE);

    conf.setUser(user);
}
/**
 * Sets the user name reported for this job.
 *
 * <p>Callable only while the job is still being defined; the state check
 * rejects modification after submission.
 *
 * @param user the username for this job.
 */
public void setUser(String user) {
    // Guard: configuration may only change in the DEFINE state.
    ensureState(JobState.DEFINE);

    conf.setUser(user);
}
/**
 * Sets the user name reported for this job.
 *
 * <p>Callable only while the job is still being defined; the state check
 * rejects modification after submission.
 *
 * @param user the username for this job.
 */
public void setUser(String user) {
    // Guard: configuration may only change in the DEFINE state.
    ensureState(JobState.DEFINE);

    conf.setUser(user);
}
/**
 * Sets the user name reported for this job.
 *
 * <p>Callable only while the job is still being defined; the state check
 * rejects modification after submission.
 *
 * @param user the username for this job.
 */
public void setUser(String user) {
    // Guard: configuration may only change in the DEFINE state.
    ensureState(JobState.DEFINE);

    conf.setUser(user);
}
/**
 * Lazily builds and caches the {@code JobConf} for this job: first copies
 * every property recorded in the job trace, then overrides name, user, task
 * counts and queue with the specialized values obtained from Rumen.
 *
 * @return The cached job configuration (created on first call).
 */
@Override public synchronized JobConf getJobConf() {
    if (jobConf == null) {
        jobConf = new JobConf();

        // Add parameters from the configuration in the job trace
        //
        // The reason why the job configuration parameters, as seen in the jobconf
        // file, are added first because the specialized values obtained from
        // Rumen should override the job conf values.
        //
        for (Map.Entry<Object, Object> entry : job.getJobProperties().getValue().entrySet()) {
            jobConf.set(entry.getKey().toString(), entry.getValue().toString());
        }

        //TODO Eliminate parameters that are already copied from the job's
        // configuration file.

        jobConf.setJobName(getName());
        jobConf.setUser(getUser());
        jobConf.setNumMapTasks(getNumberMaps());
        jobConf.setNumReduceTasks(getNumberReduces());
        jobConf.setQueueName(getQueueName());
    }

    return jobConf;
}
/**
 * Lazily builds and caches the {@code JobConf} for this job: first copies
 * every property recorded in the job trace, then overrides name, user, task
 * counts and queue with the specialized values obtained from Rumen.
 *
 * @return The cached job configuration (created on first call).
 */
@Override public synchronized JobConf getJobConf() {
    if (jobConf == null) {
        jobConf = new JobConf();

        // Add parameters from the configuration in the job trace
        //
        // The reason why the job configuration parameters, as seen in the jobconf
        // file, are added first because the specialized values obtained from
        // Rumen should override the job conf values.
        //
        for (Map.Entry<Object, Object> entry : job.getJobProperties().getValue().entrySet()) {
            jobConf.set(entry.getKey().toString(), entry.getValue().toString());
        }

        //TODO Eliminate parameters that are already copied from the job's
        // configuration file.

        jobConf.setJobName(getName());
        jobConf.setUser(getUser());
        jobConf.setNumMapTasks(getNumberMaps());
        jobConf.setNumReduceTasks(getNumberReduces());
        jobConf.setQueueName(getQueueName());
    }

    return jobConf;
}
// Copy the map task's owner into the task-local configuration, let the task
// localize that configuration, then install it on the task.
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
// Copy the map task's owner into the task-local configuration, let the task
// localize that configuration, then install it on the task.
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
// Copy the map task's owner into the task-local configuration, let the task
// localize that configuration, then install it on the task.
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
// Copy the map task's owner into the task-local configuration, let the task
// localize that configuration, then install it on the task.
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
// Copy the map task's owner into the task-local configuration, let the task
// localize that configuration, then install it on the task.
localConf.setUser(map.getUser());
map.localizeConfiguration(localConf);
map.setConf(localConf);
// Point the JobTracker's job-history location at the test directory and run
// under a fixed user name.
conf.set(JTConfig.JT_JOBHISTORY_LOCATION, historyDir.toString());
conf.setUser("user");
// Copy the reduce task's owner into the task-local configuration, let the
// task localize that configuration, then install it on the task.
localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
// Copy the reduce task's owner into the task-local configuration, let the
// task localize that configuration, then install it on the task.
localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
// Copy the reduce task's owner into the task-local configuration, let the
// task localize that configuration, then install it on the task.
localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
// Copy the reduce task's owner into the task-local configuration, let the
// task localize that configuration, then install it on the task.
localConf.setUser(reduce.getUser());
reduce.localizeConfiguration(localConf);
reduce.setConf(localConf);
/**
 * Sets this user's id in the job configuration, so later job files can be
 * accessed using this user's id.
 *
 * @param job Job configuration to update with user name, primary group and
 *      working directory.
 * @throws IOException If resolving the user/group information fails.
 */
private void configureUserName(JobConf job) throws IOException {
    UnixUserGroupInformation ugi = getUGI(job);

    // Set the user's name, group and working directory
    job.setUser(ugi.getUserName());

    // Record the primary (first) group only when the UGI reports any groups.
    if (ugi.getGroupNames() != null && ugi.getGroupNames().length > 0) {
        job.set("group.name", ugi.getGroupNames()[0]);
    }

    // Default the working directory to the file system's current one if unset.
    if (job.getWorkingDirectory() == null) {
        job.setWorkingDirectory(fs.getWorkingDirectory());
    }
}