result .setOldStyle(true) .minimumTransactionId(0) .maximumTransactionId(0) .bucket(bucket) result .setOldStyle(false) .minimumTransactionId(0) .maximumTransactionId(parseBase(bucketFile.getParent())) .bucket(bucket) result .setOldStyle(false) .minimumTransactionId(parsedDelta.minTransaction) .maximumTransactionId(parsedDelta.maxTransaction) .bucket(bucket); result .setOldStyle(false) .minimumTransactionId(parsedDelta.minTransaction) .maximumTransactionId(parsedDelta.maxTransaction) .bucket(bucket); result.setOldStyle(true).bucket(-1).minimumTransactionId(0) .maximumTransactionId(0);
private void getWriter(Reporter reporter, ObjectInspector inspector, int bucket) throws IOException { if (writer == null) { AcidOutputFormat.Options options = new AcidOutputFormat.Options(jobConf); options.inspector(inspector) .writingBase(jobConf.getBoolean(IS_MAJOR, false)) .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format // Instantiate the underlying output format @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidOutputFormat<WritableComparable, V> aof = instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME)); writer = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options); } }
private void getDeleteEventWriter(Reporter reporter, ObjectInspector inspector, int bucket) throws IOException { if (deleteEventWriter == null) { AcidOutputFormat.Options options = new AcidOutputFormat.Options(jobConf); options.inspector(inspector) .writingBase(false) .writingDeleteDelta(true) // this is the option which will make it a delete writer .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket) .statementId(-1);//setting statementId == -1 makes compacted delta files use //delta_xxxx_yyyy format // Instantiate the underlying output format @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidOutputFormat<WritableComparable, V> aof = instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME)); deleteEventWriter = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options); } } }
result .setOldStyle(true) .minimumTransactionId(0) .maximumTransactionId(0) .bucket(bucket); result .setOldStyle(false) .minimumTransactionId(0) .maximumTransactionId(parseBase(bucketFile.getParent())) .bucket(bucket); } else { result.setOldStyle(true).bucket(-1).minimumTransactionId(0) .maximumTransactionId(0);
private void getWriter(Reporter reporter, ObjectInspector inspector, int bucket) throws IOException { if (writer == null) { AcidOutputFormat.Options options = new AcidOutputFormat.Options(jobConf); options.inspector(inspector) .writingBase(jobConf.getBoolean(IS_MAJOR, false)) .isCompressed(jobConf.getBoolean(IS_COMPRESSED, false)) .tableProperties(new StringableMap(jobConf.get(TABLE_PROPS)).toProperties()) .reporter(reporter) .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE)) .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE)) .bucket(bucket); // Instantiate the underlying output format @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class AcidOutputFormat<WritableComparable, V> aof = instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME)); writer = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options); } }
/**
 * Builds a {@link RecordUpdater} for a non-base (delta) file where both the
 * minimum and maximum transaction id are the single transaction {@code txnId}.
 *
 * @param jc               job configuration backing the writer options
 * @param acidOutputFormat output format that creates the updater
 * @param isCompressed     whether the output should be compressed
 * @param txnId            transaction id used as both min and max
 * @param bucket           bucket number of the output file
 * @param inspector        object inspector for the rows being written
 * @param tableProp        table properties forwarded to the writer
 * @param outPath          destination path for the updater
 * @param reporter         MR progress reporter
 * @param rowIdColNum      index of the record-id (ROW__ID) column
 * @throws IOException if the output format fails to create the updater
 */
private static RecordUpdater getRecordUpdater(JobConf jc,
    AcidOutputFormat<?, ?> acidOutputFormat, boolean isCompressed, long txnId, int bucket,
    ObjectInspector inspector, Properties tableProp, Path outPath, Reporter reporter,
    int rowIdColNum) throws IOException {
  AcidOutputFormat.Options updaterOptions = new AcidOutputFormat.Options(jc);
  updaterOptions.isCompressed(isCompressed);
  updaterOptions.tableProperties(tableProp);
  updaterOptions.reporter(reporter);
  updaterOptions.writingBase(false); // always a delta, never a base file
  updaterOptions.minimumTransactionId(txnId);
  updaterOptions.maximumTransactionId(txnId);
  updaterOptions.bucket(bucket);
  updaterOptions.inspector(inspector);
  updaterOptions.recordIdColumn(rowIdColNum);
  return acidOutputFormat.getRecordUpdater(outPath, updaterOptions);
}
/**
 * Creates a {@link RecordUpdater} for the given bucket and transaction range,
 * using the SerDe's object inspector to describe the rows.
 *
 * @param bucketId bucket number of the output file
 * @param minTxnId minimum transaction id for the delta
 * @param maxTxnID maximum transaction id for the delta
 * @throws IOException         if the output format fails to create the updater
 * @throws SerializationError  if the SerDe cannot supply an object inspector
 */
private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxTxnID)
    throws IOException, SerializationError {
  try {
    ObjectInspector rowInspector = getSerde().getObjectInspector();
    AcidOutputFormat.Options updaterOptions = new AcidOutputFormat.Options(conf);
    updaterOptions.inspector(rowInspector);
    updaterOptions.bucket(bucketId);
    updaterOptions.minimumTransactionId(minTxnId);
    updaterOptions.maximumTransactionId(maxTxnID);
    return outf.getRecordUpdater(partitionPath, updaterOptions);
  } catch (SerDeException e) {
    // Wrap the SerDe failure, preserving the cause for the caller.
    throw new SerializationError("Failed to get object inspector from Serde "
        + getSerde().getClass().getName(), e);
  }
}
/**
 * Builds a {@link RecordUpdater} for a delta file, sourcing compression,
 * transaction id, statement id, and final destination from the
 * {@link FileSinkDesc}.
 *
 * @param jc               job configuration backing the writer options
 * @param acidOutputFormat output format that creates the updater
 * @param bucket           bucket number of the output file
 * @param inspector        object inspector for the rows being written
 * @param tableProp        table properties forwarded to the writer
 * @param outPath          destination path for the updater
 * @param reporter         MR progress reporter
 * @param rowIdColNum      index of the record-id (ROW__ID) column
 * @param conf             file sink descriptor supplying txn/statement ids
 * @throws IOException if the output format fails to create the updater
 */
private static RecordUpdater getRecordUpdater(JobConf jc,
    AcidOutputFormat<?, ?> acidOutputFormat, int bucket, ObjectInspector inspector,
    Properties tableProp, Path outPath, Reporter reporter, int rowIdColNum, FileSinkDesc conf)
    throws IOException {
  AcidOutputFormat.Options updaterOptions = new AcidOutputFormat.Options(jc);
  updaterOptions.isCompressed(conf.getCompressed());
  updaterOptions.tableProperties(tableProp);
  updaterOptions.reporter(reporter);
  updaterOptions.writingBase(false); // always a delta, never a base file
  updaterOptions.minimumTransactionId(conf.getTransactionId());
  updaterOptions.maximumTransactionId(conf.getTransactionId());
  updaterOptions.bucket(bucket);
  updaterOptions.inspector(inspector);
  updaterOptions.recordIdColumn(rowIdColNum);
  updaterOptions.statementId(conf.getStatementId());
  updaterOptions.finalDestination(conf.getDestPath());
  return acidOutputFormat.getRecordUpdater(outPath, updaterOptions);
}