private List<OrcSplit> generateSplitsFromPpd(SplitInfos ppdResult) throws IOException { OffsetAndLength current = new OffsetAndLength(); List<OrcSplit> splits = new ArrayList<>(ppdResult.getInfosCount()); int lastIdx = -1; for (Metastore.SplitInfo si : ppdResult.getInfosList()) { int index = si.getIndex(); if (lastIdx >= 0 && lastIdx + 1 != index && current.offset != -1) { // Create split for the previous unfinished stripe. splits.add(createSplit(current.offset, current.length, orcTail)); current.offset = -1; } lastIdx = index; String debugStr = null; if (LOG.isDebugEnabled()) { debugStr = current.toString(); } current = generateOrUpdateSplit(splits, current, si.getOffset(), si.getLength(), null); if (LOG.isDebugEnabled()) { LOG.debug("Updated split from {" + index + ": " + si.getOffset() + ", " + si.getLength() + "} and "+ debugStr + " to " + current); } } generateLastSplit(splits, current, null); return splits; }
/**
 * Builds the {@code SplitInfos} message from this builder's current state
 * without checking that required fields are set.
 *
 * Fix: removed the unused local {@code from_bitField0_} — it was assigned and
 * never read.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfos buildPartial() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfos result =
      new org.apache.hadoop.hive.metastore.Metastore.SplitInfos(this);
  if (infosBuilder_ == null) {
    // Freeze the locally accumulated list exactly once; clearing the bit
    // records that this builder no longer owns a mutable copy.
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      infos_ = java.util.Collections.unmodifiableList(infos_);
      bitField0_ = (bitField0_ & ~0x00000001);
    }
    result.infos_ = infos_;
  } else {
    result.infos_ = infosBuilder_.build();
  }
  onBuilt();
  return result;
}
// NOTE(review): this snippet is truncated mid-method — the body shown here is a
// garbled splice (the unknown-fields merge and "return this" appear inside the
// "!other.infos_.isEmpty()" branch, and the closing braces are missing), so the
// list-merge behavior cannot be verified from this view. Do not edit without
// the full generated method.
public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos other) { if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance()) return this; if (infosBuilder_ == null) { if (!other.infos_.isEmpty()) { this.mergeUnknownFields(other.getUnknownFields()); return this;
// NOTE(review): truncated fragment of a generated parsing constructor — the
// CodedInputStream read loop, switch statement, and closing braces are missing,
// leaving a dangling "break; default:" block. Syntactically incomplete; skip.
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; makeExtensionsImmutable();
// NOTE(review): duplicate of the truncated mergeFrom snippet above this file's
// other copy — the body is cut off mid-branch with missing closing braces, so
// behavior cannot be verified from this view. Do not edit without the full
// generated method.
public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfos other) { if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance()) return this; if (infosBuilder_ == null) { if (!other.infos_.isEmpty()) { this.mergeUnknownFields(other.getUnknownFields()); return this;
// Returns a new Builder pre-populated with this message's current field values.
public Builder toBuilder() { return newBuilder(this); }
/**
 * Builds the {@code SplitInfos} message from this builder's current state
 * without checking that required fields are set.
 *
 * Fix: removed the unused local {@code from_bitField0_} — it was assigned and
 * never read.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfos buildPartial() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfos result =
      new org.apache.hadoop.hive.metastore.Metastore.SplitInfos(this);
  if (infosBuilder_ == null) {
    // Freeze the locally accumulated list exactly once; clearing the bit
    // records that this builder no longer owns a mutable copy.
    if (((bitField0_ & 0x00000001) == 0x00000001)) {
      infos_ = java.util.Collections.unmodifiableList(infos_);
      bitField0_ = (bitField0_ & ~0x00000001);
    }
    result.infos_ = infos_;
  } else {
    result.infos_ = infosBuilder_.build();
  }
  onBuilt();
  return result;
}
// Returns a fresh, empty Builder for this message type.
// NOTE(review): the static newBuilder(prototype) declaration that follows is a
// dangling method header — its body is cut off in this view; skip editing it.
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {
// Returns the singleton default (all-fields-unset) instance of SplitInfos.
public org.apache.hadoop.hive.metastore.Metastore.SplitInfos getDefaultInstanceForType() { return org.apache.hadoop.hive.metastore.Metastore.SplitInfos.getDefaultInstance(); }
/** Returns a new Builder seeded with {@code prototype}'s field values. */
public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {
  Builder fresh = newBuilder();
  return fresh.mergeFrom(prototype);
}
/** Returns a new Builder pre-populated with this message's current values. */
public Builder toBuilder() {
  return newBuilder(this);
}
/**
 * Serializes every SplitInfo element as field number 1, followed by any
 * unknown fields carried by this message.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  // Populates the memoized size cache that nested message writes rely on.
  getSerializedSize();
  int count = infos_.size();
  for (int idx = 0; idx < count; idx++) {
    output.writeMessage(1, infos_.get(idx));
  }
  getUnknownFields().writeTo(output);
}
/**
 * Builds the message and verifies all required fields are set, throwing an
 * UninitializedMessageException otherwise.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfos build() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfos built = buildPartial();
  if (built.isInitialized()) {
    return built;
  }
  throw newUninitializedMessageException(built);
}
/**
 * Computes (and memoizes) the serialized byte size of this message:
 * each SplitInfo as field 1 plus any unknown fields.
 */
public int getSerializedSize() {
  int cached = memoizedSerializedSize;
  if (cached != -1) {
    return cached; // already computed
  }
  int total = 0;
  int count = infos_.size();
  for (int idx = 0; idx < count; idx++) {
    total += com.google.protobuf.CodedOutputStream
        .computeMessageSize(1, infos_.get(idx));
  }
  total += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = total;
  return total;
}
/**
 * Reports whether every nested SplitInfo is fully initialized; the answer is
 * memoized (-1 = unknown, 0 = no, 1 = yes).
 */
public final boolean isInitialized() {
  byte cached = memoizedIsInitialized;
  if (cached != -1) {
    return cached == 1;
  }
  int count = getInfosCount();
  for (int idx = 0; idx < count; idx++) {
    if (getInfos(idx).isInitialized()) {
      continue;
    }
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
// NOTE(review): duplicate of the truncated parsing-constructor fragment — the
// read loop, switch statement, and closing braces are missing, leaving a
// dangling "break; default:" block. Syntactically incomplete; skip.
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; makeExtensionsImmutable();
@Override public SplitInfos applySargToMetadata( SearchArgument sarg, ByteBuffer fileMetadata) throws IOException { // TODO: ideally we should store shortened representation of only the necessary fields // in HBase; it will probably require custom SARG application code. OrcTail orcTail = ReaderImpl.extractFileTail(fileMetadata); OrcProto.Footer footer = orcTail.getFooter(); int stripeCount = footer.getStripesCount(); boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg( sarg, orcTail.getWriterVersion(), footer.getTypesList(), orcTail.getStripeStatistics(), stripeCount); // For ORC case, send the boundaries of the stripes so we don't have to send the footer. SplitInfos.Builder sb = SplitInfos.newBuilder(); List<StripeInformation> stripes = orcTail.getStripes(); boolean isEliminated = true; for (int i = 0; i < result.length; ++i) { if (result != null && !result[i]) continue; isEliminated = false; StripeInformation si = stripes.get(i); if (LOG.isDebugEnabled()) { LOG.debug("PPD is adding a split " + i + ": " + si.getOffset() + ", " + si.getLength()); } sb.addInfos(SplitInfo.newBuilder().setIndex(i) .setOffset(si.getOffset()).setLength(si.getLength())); } return isEliminated ? null : sb.build(); }
@Override public SplitInfos applySargToMetadata( SearchArgument sarg, ByteBuffer fileMetadata) throws IOException { // TODO: ideally we should store shortened representation of only the necessary fields // in HBase; it will probably require custom SARG application code. OrcTail orcTail = ReaderImpl.extractFileTail(fileMetadata); OrcProto.Footer footer = orcTail.getFooter(); int stripeCount = footer.getStripesCount(); boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg( sarg, orcTail.getWriterVersion(), footer.getTypesList(), orcTail.getStripeStatistics(), stripeCount); // For ORC case, send the boundaries of the stripes so we don't have to send the footer. SplitInfos.Builder sb = SplitInfos.newBuilder(); List<StripeInformation> stripes = orcTail.getStripes(); boolean isEliminated = true; for (int i = 0; i < result.length; ++i) { if (result != null && !result[i]) continue; isEliminated = false; StripeInformation si = stripes.get(i); if (LOG.isDebugEnabled()) { LOG.debug("PPD is adding a split " + i + ": " + si.getOffset() + ", " + si.getLength()); } sb.addInfos(SplitInfo.newBuilder().setIndex(i) .setOffset(si.getOffset()).setLength(si.getLength())); } return isEliminated ? null : sb.build(); }
// Converts PPD stripe selections into ORC splits. Consecutive stripe indexes
// are coalesced into one split via generateOrUpdateSplit; a gap in the index
// sequence flushes the pending split (current.offset == -1 marks "no pending
// split"). The pre-update state is stringified only when debug logging is on.
// NOTE(review): relies on the enclosing instance's orcTail field and the
// createSplit/generateOrUpdateSplit/generateLastSplit helpers — their exact
// contracts are not visible from here.
private List<OrcSplit> generateSplitsFromPpd(SplitInfos ppdResult) throws IOException { OffsetAndLength current = new OffsetAndLength(); List<OrcSplit> splits = new ArrayList<>(ppdResult.getInfosCount()); int lastIdx = -1; for (Metastore.SplitInfo si : ppdResult.getInfosList()) { int index = si.getIndex(); if (lastIdx >= 0 && lastIdx + 1 != index && current.offset != -1) { // Create split for the previous unfinished stripe. splits.add(createSplit(current.offset, current.length, orcTail)); current.offset = -1; } lastIdx = index; String debugStr = null; if (LOG.isDebugEnabled()) { debugStr = current.toString(); } current = generateOrUpdateSplit(splits, current, si.getOffset(), si.getLength(), null); if (LOG.isDebugEnabled()) { LOG.debug("Updated split from {" + index + ": " + si.getOffset() + ", " + si.getLength() + "} and "+ debugStr + " to " + current); } } generateLastSplit(splits, current, null); return splits; }
public Builder toBuilder() { return newBuilder(this); }
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfos prototype) {