/**
 * Flushes a block to the output stream, compressing only the key section;
 * the value buffer is written out unmodified.
 */
public void flushBlock(KeyBuffer keyBuffer, ValueBuffer valueBuffer,
    int recordLen, int keyLength,
    @SuppressWarnings("unused") int compressedKeyLen) throws IOException {
  // Serialize (and compress) the key portion of the block.
  writeKey(keyBuffer, recordLen, keyLength);
  // The value portion is written straight through to the output.
  valueBuffer.write(out);
}
/**
 * Returns {@code true} if {@code parsedBase} was created by compaction.
 * As of Hive 4.0 we can tell if a directory is a result of compaction based on the
 * presence of {@link AcidUtils#VISIBILITY_PATTERN} suffix. Base directories written prior to
 * that have to rely on the {@link MetaDataFile} in the directory, so the filename is
 * consulted first since that is the cheaper test.
 */
private static boolean isCompactedBase(ParsedBase parsedBase, FileSystem fs)
    throws IOException {
  // Cheap check first: a positive visibility txn id marks a compacted base.
  if (parsedBase.getVisibilityTxnId() > 0) {
    return true;
  }
  // Fall back to reading the metadata file inside the base directory.
  return MetaDataFile.isCompacted(parsedBase.getBaseDirPath(), fs);
}
private static void getChildState(FileStatus child, HdfsFileStatusWithId childWithId,
/**
 * Builds a {@link RecordUpdater} for this writer's partition, targeting a
 * single write id and the bucket decoded from {@code bucketProperty}.
 */
protected RecordUpdater createRecordUpdater(AcidOutputFormat<?, ?> outputFormat)
    throws IOException {
  // Recover the raw writer/bucket id from the encoded bucket property.
  int bucketId =
      BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty);
  AcidOutputFormat.Options options = new AcidOutputFormat.Options(configuration)
      .inspector(objectInspector)
      .bucket(bucketId)
      .minimumWriteId(writeId)
      .maximumWriteId(writeId)  // single-write-id updater: min == max
      .recordIdColumn(recordIdColumn)
      .finalDestination(partitionPath)
      .statementId(-1);         // -1: no statement id tracked here
  return outputFormat.getRecordUpdater(partitionPath, options);
}
BigRow(byte[] val, long rowId, long origTxn, int bucket) { field = new BytesWritable(val); bucket = BucketCodec.V1.encode(new AcidOutputFormat.Options(null).bucket(bucket)); this.rowId = new RecordIdentifier(origTxn, bucket, rowId); }
/**
 * Encodes a bucket id together with a statement id using the V1 bucket codec.
 */
static int encodeBucketId(Configuration conf, int bucketId, int statementId) {
  AcidOutputFormat.Options options =
      new AcidOutputFormat.Options(conf).bucket(bucketId).statementId(statementId);
  return BucketCodec.V1.encode(options);
}
/**
/**
 * Advances to the next record; when one exists, materializes the current
 * row into {@code value}.
 */
@Override
public boolean next(LongWritable key, BytesRefArrayWritable value) throws IOException {
  more = next(key);
  if (!more) {
    return false;
  }
  // Only materialize the row when another record is actually available.
  in.getCurrentRow(value);
  return true;
}
/** Encodes {@code bucketId} into its V1 bucket-codec representation. */
private static int encodeBucket(int bucketId) {
  AcidOutputFormat.Options options =
      new AcidOutputFormat.Options(null).bucket(bucketId);
  return BucketCodec.V1.encode(options);
}
/**
 * Obtains a Hive record writer for the given output format, wrapping
 * non-Hive formats in a pass-through adapter first.
 */
public static RecordWriter getRecordWriter(JobConf jc, OutputFormat<?, ?> outputFormat,
    Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProp,
    Path outPath, Reporter reporter) throws IOException, HiveException {
  // Non-Hive output formats are adapted so they expose the HiveOutputFormat contract.
  if (!(outputFormat instanceof HiveOutputFormat)) {
    outputFormat = new HivePassThroughOutputFormat(outputFormat);
  }
  return ((HiveOutputFormat) outputFormat)
      .getHiveRecordWriter(jc, outPath, valueClass, isCompressed, tableProp, reporter);
}
/**
 * Ensures the key buffer is loaded, then reads the current value buffer
 * from the input stream and marks it initialized.
 */
protected void currentValueBuffer() throws IOException {
  // Lazily pull in the key buffer if it has not been read yet.
  if (!keyInit) {
    nextKeyBuffer();
  }
  currentValue.keyBuffer = currentKey;
  // Drop any stale column data before deserializing the fresh value.
  currentValue.clearColumnBuffer();
  currentValue.readFields(in);
  currentValue.inited = true;
}
/** Closes the wrapped writer; the abort flag is intentionally ignored. */
@Override
public void close(boolean abort) throws IOException {
  outWriter.close();
}
};
/**
 * Transitions from binary search to a linear scan: rewinds the reader to
 * the start of the search range and clears the search-mode flags.
 * This should be called after the binary search is finished and before the
 * linear scan begins.
 * @throws IOException if the underlying sync/seek fails
 */
private void beginLinearSearch() throws IOException {
  // Rewind to the beginning of the range before scanning forward.
  sync(rangeStart);
  this.getIOContext().setBinarySearching(false);
  this.wasUsingSortedSearch = false;
}
/**
 * Returns an acidOperationalProperties object that represents default ACID behavior
 * for tables that do not explicitly specify/override the default behavior.
 * @return the acidOperationalProperties object.
 */
public static AcidOperationalProperties getDefault() {
  AcidOperationalProperties defaults = new AcidOperationalProperties();
  defaults.setSplitUpdate(true);      // default: updates become delete + insert
  defaults.setHashBasedMerge(false);  // default: no hash-based merge
  defaults.setInsertOnly(false);      // default: full CRUD, not insert-only
  return defaults;
}
/**
 * Opens the file via the superclass and wraps the stream so the test can
 * observe the opened handle through {@code openedFile[0]}.
 */
@Override
protected FSDataInputStream openFile(FileSystem fs, Path file, int bufferSize, long length)
    throws IOException {
  final InputStream delegate = super.openFile(fs, file, bufferSize, length);
  // Record the wrapped stream so the test can inspect it later.
  openedFile[0] = new TestFSDataInputStream(delegate);
  return openedFile[0];
}
};
/** Resets every column buffer in preparation for the next row group. */
private void clearColumnBuffers() throws IOException {
  for (int col = 0; col < columnNumber; col++) {
    columnBuffers[col].clear();
  }
}
/**
 * Builds the delta directory name produced by compaction for the given
 * transaction id range.
 */
String makeDeltaDirNameCompacted(long minTxnId, long maxTxnId) {
  return AcidUtils.deltaSubdir(minTxnId, maxTxnId);
}
String makeDeleteDeltaDirNameCompacted(long minTxnId, long maxTxnId) {
/** Delegates progress reporting to the wrapped reader. */
@Override
public float getProgress() throws IOException {
  return inner.getProgress();
}
}
/**
 * Compares the two buffers in serialized form, skipping the leading
 * length prefix of each before comparing the payload bytes.
 */
@Override
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  // Advance past the LENGTH_BYTES prefix on both sides.
  int payloadStart1 = s1 + LENGTH_BYTES;
  int payloadStart2 = s2 + LENGTH_BYTES;
  return compareBytes(b1, payloadStart1, l1 - LENGTH_BYTES,
      b2, payloadStart2, l2 - LENGTH_BYTES);
}
}
/**
 * Checks if a table is a transactional table that only supports INSERT, but not
 * UPDATE/DELETE.
 * @param params table properties
 * @return true if table is an INSERT_ONLY table, false otherwise
 */
public static boolean isInsertOnlyTable(Map<String, String> params) {
  // Delegate with the default "transactional not required" flag.
  return isInsertOnlyTable(params, false);
}
public static boolean isInsertOnlyTable(Table table) {
DummyRow(long val, long rowId, long origTxn, int bucket) { field = new LongWritable(val); bucket = BucketCodec.V1.encode(new AcidOutputFormat.Options(null).bucket(bucket)); ROW__ID = new RecordIdentifier(origTxn, bucket, rowId); }