/**
 * Construct a Configured wrapping the given Configuration.
 *
 * NOTE(review): this invokes the overridable {@code setConf} from the
 * constructor, so subclass overrides run before the subclass is fully
 * initialized — existing subclasses appear to rely on this, so it is
 * kept as-is.
 *
 * @param conf the configuration to store; may be null (subclass overrides
 *             visibly guard against null)
 */
public Configured(Configuration conf) { setConf(conf); }
public static String[] configure(String[] args, Configured configured) throws Exception { Configuration conf = configured.getConf(); if (conf == null) { throw new NullPointerException("Provided Configuration is null!!!"); conf.set(toolArgs[i], toolArgs[i + 1]); configured.setConf(conf); return toolArgs;
/**
 * Returns the stored Configuration, or a freshly constructed one when the
 * parent holds none — callers never observe null.
 */
@Override
public Configuration getConf() {
  final Configuration stored = super.getConf();
  return (stored != null) ? stored : new Configuration();
}
/**
 * Null-safe accessor: hands back the parent's Configuration when present,
 * otherwise a default-constructed Configuration.
 */
@Override
public Configuration getConf() {
  final Configuration fromParent = super.getConf();
  if (fromParent != null) {
    return fromParent;
  }
  return new Configuration();
}
/**
 * Stores the Configuration and refreshes the cached RPC timeout used for
 * health checks. A null conf leaves the previous timeout untouched.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  rpcTimeoutForChecks = conf.getInt(
      CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
      CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
}
/**
 * Never returns null: falls back to a new Configuration when the parent
 * has not been given one.
 */
@Override
public Configuration getConf() {
  Configuration current = super.getConf();
  if (current == null) {
    current = new Configuration();
  }
  return current;
}
/**
 * Stores the Configuration and refreshes the cached shell-command timeout
 * (milliseconds). A null conf keeps the previously cached timeout.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  timeout = conf.getTimeDuration(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT,
      TimeUnit.MILLISECONDS);
}
/** Delegates storage of the Configuration to the parent class; no additional state is kept here. */
@Override public void setConf(Configuration conf) { super.setConf(conf); }
/**
 * Runs the incoming Configuration through {@code HadoopUtil.healSickConfig}
 * before storing it, so downstream readers always see the repaired config.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(HadoopUtil.healSickConfig(conf));
}
}
/**
 * Builds a Job from the stored Configuration, pins this class's jar, and
 * blocks until the job completes.
 *
 * @return 0 when the job succeeds, 1 otherwise
 */
@SuppressWarnings("deprecation")
public int runJob() throws IOException, InterruptedException, ClassNotFoundException {
  final Job job = Job.getInstance(super.getConf());
  job.setJarByClass(this.getClass());
  if (job.waitForCompletion(true)) {
    return 0;
  }
  return 1;
}
/**
 * Stores the Configuration and caches the map-output schema from it.
 * A null conf leaves the cached schema unchanged.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  schema = AvroJob.getMapOutputSchema(conf);
}
/**
 * Writes {@code numRecords} synthetic Text pairs ("k&lt;i&gt;" / "v&lt;i&gt;")
 * to a block-compressed SequenceFile at the given path.
 *
 * @param args {@code args[0]} = output file path, {@code args[1]} = record count
 * @return 0 on success, 1 on usage error
 * @throws Exception on filesystem or write failure
 */
public int run(final String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println(String.format("Usage: %s: <file path> <number of records>",
        SequenceFileGenerator.class.getName()));
    return 1;
  }
  Path file = new Path(args[0]);
  // parseInt: avoids the needless Integer boxing of Integer.valueOf.
  int numRecords = Integer.parseInt(args[1]);
  FileSystem fs = FileSystem.get(super.getConf());
  // try-with-resources guarantees the writer is closed even if append throws.
  try (SequenceFile.Writer writer = SequenceFile.createWriter(fs, super.getConf(), file,
      Text.class, Text.class, SequenceFile.CompressionType.BLOCK, new DefaultCodec())) {
    for (int i = 0; i < numRecords; i++) {
      writer.append(new Text("k" + i), new Text("v" + i));
    }
  }
  return 0;
}
/** {@inheritDoc} */ @Override public void setConf(Configuration conf) { super.setConf(conf); if (null != conf) { // The MapReduce framework will be using this comparator to sort AvroKey objects // output from the map phase, so use the schema defined for the map output key // and the data model non-raw compare() implementation. mSchema = AvroJob.getMapOutputKeySchema(conf); mDataModel = AvroSerialization.createDataModel(conf); } }
/**
 * Writes {@code numRecords} synthetic Text pairs ("k&lt;i&gt;" / "v&lt;i&gt;")
 * to a block-compressed SequenceFile at the given path.
 *
 * NOTE(review): despite the class name, this writes a Text/Text
 * SequenceFile, not an Avro container file — appears copy-pasted from
 * SequenceFileGenerator; confirm whether Avro output was intended.
 *
 * @param args {@code args[0]} = output file path, {@code args[1]} = record count
 * @return 0 on success, 1 on usage error
 * @throws Exception on filesystem or write failure
 */
public int run(final String[] args) throws Exception {
  if (args.length != 2) {
    System.err.println(String.format("Usage: %s: <file path> <number of records>",
        AvroFileGenerator.class.getName()));
    return 1;
  }
  Path file = new Path(args[0]);
  // parseInt: avoids the needless Integer boxing of Integer.valueOf.
  int numRecords = Integer.parseInt(args[1]);
  FileSystem fs = FileSystem.get(super.getConf());
  // try-with-resources guarantees the writer is closed even if append throws.
  try (SequenceFile.Writer writer = SequenceFile.createWriter(fs, super.getConf(), file,
      Text.class, Text.class, SequenceFile.CompressionType.BLOCK, new DefaultCodec())) {
    for (int i = 0; i < numRecords; i++) {
      writer.append(new Text("k" + i), new Text("v" + i));
    }
  }
  return 0;
}
/**
 * Stores the Configuration and caches the key half of the map-output
 * schema. A null conf leaves the cached schema unchanged.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf == null) {
    return;
  }
  schema = Pair.getKeySchema(AvroJob.getMapOutputSchema(conf));
}
/**
 * Parses the command line into an SCXML engine, points {@code hdfsDist}
 * at the stored Configuration and the "dg-result" output directory, then
 * drives the generation run.
 *
 * @return 0 always (failures surface as thrown exceptions)
 */
@Override
public int run(final String[] args) throws ModelException, SCXMLExpressionException,
    SAXException, IOException, ParseException {
  // Quiet org.apache logging so generator output stays readable.
  Logger.getLogger("org.apache").setLevel(Level.WARN);
  final Engine engine = parseCommandLine(args);
  hdfsDist.setConfiguration(super.getConf());
  hdfsDist.setOutputFileDir("dg-result");
  engine.process(hdfsDist);
  return 0;
}
/*
/**
 * Stores the Configuration and, when non-null, reads the REST cluster
 * manager connection settings from it and registers basic-auth on the
 * shared client.
 *
 * NOTE(review): the null guard exists because the Configured superclass
 * passes null before the real conf arrives — confirm against the
 * framework's lifecycle; registration must stay AFTER the
 * username/password reads, since it captures those values.
 */
@Override public void setConf(Configuration conf) { super.setConf(conf); if (conf == null) { // Configured passes null before the real conf arrives; defer setup until then.
return; } serverHostname = conf.get(REST_API_CLUSTER_MANAGER_HOSTNAME, DEFAULT_SERVER_HOSTNAME); serverUsername = conf.get(REST_API_CLUSTER_MANAGER_USERNAME, DEFAULT_SERVER_USERNAME); serverPassword = conf.get(REST_API_CLUSTER_MANAGER_PASSWORD, DEFAULT_SERVER_PASSWORD); clusterName = conf.get(REST_API_CLUSTER_MANAGER_CLUSTER_NAME, DEFAULT_CLUSTER_NAME); // Add filter to Client instance to enable server authentication.
client.register(HttpAuthenticationFeature.basic(serverUsername, serverPassword)); }
/**
 * Validates the two required arguments and delegates to
 * {@code run(Configuration, String, String)}.
 *
 * @param args {@code args[0]} = input path, {@code args[1]} = output path
 * @return 0 on success, 1 on usage error or job failure
 */
@Override
public int run(String[] args) throws Exception {
  if (args.length != 2) {
    System.err.printf("%s %s\n", getName(), getDescription());
    System.err.println("Usage: <input> <output>");
    return 1;
  }
  try {
    run(super.getConf(), args[0], args[1]);
    return 0;
  } catch (InterruptedException e) {
    // Restore the interrupt flag so callers up the stack can observe it.
    Thread.currentThread().interrupt();
    e.printStackTrace();
  } catch (IOException | ClassNotFoundException e) {
    // Multi-catch collapses the previously triplicated identical handlers.
    e.printStackTrace();
  }
  return 1;
}