/**
 * Returns whether schema errors encountered while decoding should be ignored.
 * Reads {@code ETL_IGNORE_SCHEMA_ERRORS}; defaults to {@code false} when unset.
 */
public static boolean getKafkaIgnoreSchemaErrors(JobContext job) {
  return job.getConfiguration().getBoolean(ETL_IGNORE_SCHEMA_ERRORS, false);
}
/**
 * Returns the per-task pull time budget in minutes.
 * Reads {@code KAFKA_MAX_PULL_MINUTES_PER_TASK}; -1 (unset) means no limit is configured.
 */
public static int getKafkaMaxPullMinutesPerTask(JobContext job) {
  return job.getConfiguration().getInt(KAFKA_MAX_PULL_MINUTES_PER_TASK, -1);
}
/**
 * Stores the list of topics whose consumption should restart from the latest offset.
 * Writes {@code KAFKA_MOVE_TO_LAST_OFFSET_LIST} into the job configuration.
 */
public static void setMoveToLatestTopics(JobContext job, String val) {
  job.getConfiguration().set(KAFKA_MOVE_TO_LAST_OFFSET_LIST, val);
}
/**
 * Sets the per-task pull time budget in minutes.
 * Writes {@code KAFKA_MAX_PULL_MINUTES_PER_TASK} into the job configuration.
 */
public static void setKafkaMaxPullMinutesPerTask(JobContext job, int val) {
  job.getConfiguration().setInt(KAFKA_MAX_PULL_MINUTES_PER_TASK, val);
}
/**
 * Returns how many days of history may be pulled.
 * Reads {@code KAFKA_MAX_HISTORICAL_DAYS}; -1 (unset) means no limit is configured.
 */
public static int getKafkaMaxHistoricalDays(JobContext job) {
  return job.getConfiguration().getInt(KAFKA_MAX_HISTORICAL_DAYS, -1);
}
/**
 * Returns the cap on decoder exceptions that will be printed per task.
 * Reads {@code PRINT_MAX_DECODER_EXCEPTIONS}; defaults to 10 when unset.
 */
public static int getMaximumDecoderExceptionsToPrint(JobContext job) {
  return job.getConfiguration().getInt(PRINT_MAX_DECODER_EXCEPTIONS, 10);
}
}
/**
 * Returns the Kafka client fetch buffer size in bytes.
 * Reads {@code KAFKA_CLIENT_BUFFER_SIZE}; defaults to 2 MiB (2 * 1024 * 1024) when unset.
 */
public static int getKafkaClientBufferSize(JobContext job) {
  return job.getConfiguration().getInt(KAFKA_CLIENT_BUFFER_SIZE, 2 * 1024 * 1024);
}
/**
 * Stores the list of service topics the audit step should ignore.
 * Writes {@code ETL_AUDIT_IGNORE_SERVICE_TOPIC_LIST} into the job configuration.
 */
public static void setKafkaAuditIgnoreServiceTopicList(JobContext job, String topics) {
  job.getConfiguration().set(ETL_AUDIT_IGNORE_SERVICE_TOPIC_LIST, topics);
}
/**
 * Returns the message decoder class configured under {@code CAMUS_MESSAGE_DECODER_CLASS},
 * falling back to {@link KafkaAvroMessageDecoder} when the property is unset.
 */
// The cast is unavoidable: Configuration.getClass returns Class<? extends ...> without the
// MessageDecoder type argument; the configured class is required to extend MessageDecoder.
@SuppressWarnings("unchecked")
public static Class<MessageDecoder> getMessageDecoderClass(JobContext job) {
  return (Class<MessageDecoder>) job.getConfiguration().getClass(
      CAMUS_MESSAGE_DECODER_CLASS, KafkaAvroMessageDecoder.class);
}
/**
 * Sets the Kafka client socket timeout.
 * Writes {@code KAFKA_CLIENT_SO_TIMEOUT} into the job configuration.
 */
public static void setKafkaClientTimeout(JobContext job, int val) {
  job.getConfiguration().setInt(KAFKA_CLIENT_SO_TIMEOUT, val);
}
/**
 * Returns the maximum pull duration in hours.
 * Reads {@code KAFKA_MAX_PULL_HRS}; -1 (unset) means no limit is configured.
 */
public static int getKafkaMaxPullHrs(JobContext job) {
  return job.getConfiguration().getInt(KAFKA_MAX_PULL_HRS, -1);
}
/**
 * Sets whether schema errors encountered while decoding should be ignored.
 * Writes {@code ETL_IGNORE_SCHEMA_ERRORS} into the job configuration.
 */
public static void setKafkaIgnoreSchemaErrors(JobContext job, boolean val) {
  job.getConfiguration().setBoolean(ETL_IGNORE_SCHEMA_ERRORS, val);
}
/**
 * Sets how many days of history may be pulled.
 * Writes {@code KAFKA_MAX_HISTORICAL_DAYS} into the job configuration.
 */
public static void setKafkaMaxHistoricalDays(JobContext job, int val) {
  job.getConfiguration().setInt(KAFKA_MAX_HISTORICAL_DAYS, val);
}
/**
 * Sets the message decoder class under {@code CAMUS_MESSAGE_DECODER_CLASS}.
 * The class is recorded with {@code MessageDecoder} as its required interface.
 */
public static void setMessageDecoderClass(JobContext job, Class<MessageDecoder> cls) {
  job.getConfiguration().setClass(CAMUS_MESSAGE_DECODER_CLASS, cls, MessageDecoder.class);
}
/**
 * Reports whether ApplicationMaster recovery is enabled for this job.
 * Reads the {@code want.am.recovery} boolean property (default {@code false});
 * returns {@code false} when the context or its configuration is absent.
 */
@Override
public boolean isRecoverySupported(org.apache.hadoop.mapred.JobContext jobContext) {
  // Guard against a missing context/configuration before consulting the property.
  if (jobContext == null || jobContext.getConfiguration() == null) {
    return false;
  }
  return jobContext.getConfiguration().getBoolean("want.am.recovery", false);
}
/**
 * Recursively deletes the job's configured output path, removing intermediate results.
 *
 * @throws IOException if the filesystem cannot be reached or the delete fails
 */
private void cleanIntermediate(JobContext jobContext) throws IOException {
  FileSystem fileSystem = FileSystem.get(jobContext.getConfiguration());
  // true -> recursive delete of the whole output directory tree.
  fileSystem.delete(FileOutputFormat.getOutputPath(jobContext.getJobConf()), true);
}
}
/**
 * Commits the job, but first blocks until the path named by the {@code share}
 * configuration property exists, polling every 100 ms.
 *
 * @throws IOException if the filesystem cannot be reached or the commit fails
 */
@Override
public void commitJob(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  Path share = new Path(conf.get("share"));
  FileSystem fs = FileSystem.get(conf);
  // Spin-wait (100 ms polls) until the shared sentinel path appears.
  while (!fs.exists(share)) {
    UtilsForTests.waitFor(100);
  }
  super.commitJob(context);
}
}
/**
 * Commits the HBase write transaction recorded in the job configuration.
 * The revision manager is closed whether or not the commit succeeds.
 *
 * @throws IOException if opening the revision manager or committing fails
 */
@Override
public void commitJob(JobContext jobContext) throws IOException {
  RevisionManager revisionManager = null;
  try {
    revisionManager =
        HBaseRevisionManagerUtil.getOpenedRevisionManager(jobContext.getConfiguration());
    revisionManager.commitWriteTransaction(
        HBaseRevisionManagerUtil.getWriteTransaction(jobContext.getConfiguration()));
  } finally {
    // Always release the revision manager, even when the commit throws.
    if (revisionManager != null)
      revisionManager.close();
  }
}
}
@Override public void commitJob(org.apache.hadoop.mapred.JobContext context) throws IOException { super.commitJob(context); Path outPath = CanvasOutputFormat.getOutputPath(context); try { mergeImages(context.getConfiguration(), outPath); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } }
/**
 * Aborts the job via the superclass, then rolls back the HBase write
 * transaction recorded in the job configuration. The revision manager is
 * closed whether or not the abort succeeds.
 *
 * @throws IOException if the superclass abort or the transaction rollback fails
 */
@Override
public void abortJob(JobContext jobContext, int status) throws IOException {
  super.abortJob(jobContext, status);
  RevisionManager revisionManager = null;
  try {
    revisionManager =
        HBaseRevisionManagerUtil.getOpenedRevisionManager(jobContext.getConfiguration());
    Transaction writeTransaction =
        HBaseRevisionManagerUtil.getWriteTransaction(jobContext.getConfiguration());
    revisionManager.abortWriteTransaction(writeTransaction);
  } finally {
    // Always release the revision manager, even when the abort throws.
    if (revisionManager != null)
      revisionManager.close();
  }
}