public Builder mergeFrom(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef other) {
  if (other == org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance()) return this;
  if (other.hasRecordCount()) {
    setRecordCount(other.getRecordCount());
  }
  // (merge of the repeated 'field' list is elided in this excerpt)
  if (other.hasCarriesTwoByteSelectionVector()) {
    setCarriesTwoByteSelectionVector(other.getCarriesTwoByteSelectionVector());
  }
  if (other.hasAffectedRowsCount()) {
    setAffectedRowsCount(other.getAffectedRowsCount());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
  getSerializedSize();
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeInt32(1, recordCount_);
  }
  for (int i = 0; i < field_.size(); i++) {
    output.writeMessage(2, field_.get(i));
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeBool(3, carriesTwoByteSelectionVector_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeInt32(4, affectedRowsCount_);
  }
  getUnknownFields().writeTo(output);
}
/**
 * <code>optional .exec.shared.RecordBatchDef def = 3;</code>
 */
public Builder mergeDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef value) {
  if (defBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        def_ != org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance()) {
      def_ = org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.newBuilder(def_).mergeFrom(value).buildPartial();
    } else {
      def_ = value;
    }
    onChanged();
  } else {
    defBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
if (fieldList == null && rawBatch.getHeader().getDef().getFieldCount() != 0) {
  fieldList = rawBatch.getHeader().getDef().getFieldList();
}
if (rawBatch.getHeader().getDef().getRecordCount() != 0) {
  rawBatches.add(rawBatch);
} else {
  // Skip over empty batches until one arrives that actually carries records.
  while ((rawBatch = getNext(p)) != null && rawBatch.getHeader().getDef().getRecordCount() == 0) {
    // keep reading
  }
  // ...
  return IterOutcome.STOP;
}
// ...
if (rawBatch == null || rawBatch.getHeader().getDef().getFieldCount() == 0) {
  createDummyBatch = true;
}
// ...
UserBitShared.RecordBatchDef dummyDef = UserBitShared.RecordBatchDef.newBuilder()
    // ...
// ...
if (rawBatch == null || rawBatch.getHeader().getDef().getFieldCount() == 0) {
  rawBatch = new RawFragmentBatch(dummyHeader, null, null);
  rawBatches.set(i, rawBatch);
}
// ...
nextBatch = getNext(node.batchId);
while (nextBatch != null && nextBatch.getHeader().getDef().getRecordCount() == 0) {
  nextBatch = getNext(node.batchId);
}
valueCount = def.getRecordCount();
boolean schemaChanged = schema == null;
final List<SerializedField> fields = def.getFieldList();
int bufOffset = 0;
for (final SerializedField field : fields) {
  // ...
while (batch != null && batch.getHeader().getDef().getRecordCount() == 0
    && (!first || batch.getHeader().getDef().getFieldCount() == 0)) {
  batch = getNextBatch();
}
// ...
if (schemaChanged) {
  this.schema = batchLoader.getSchema();
  stats.batchReceived(0, rbd.getRecordCount(), true);
  lastOutcome = IterOutcome.OK_NEW_SCHEMA;
} else {
  stats.batchReceived(0, rbd.getRecordCount(), false);
  lastOutcome = IterOutcome.OK;
}
List<SerializedField> fields = def.getFieldList();
if (def.hasCarriesTwoByteSelectionVector() && def.getCarriesTwoByteSelectionVector()) {
  svMode = SelectionVectorMode.TWO_BYTE;
} else {
  // ...
}
// ...
m.setValueCount(def.getRecordCount());
    .setQueryId(context.getHandle().getQueryId())
    .setRowCount(0)
    .setDef(RecordBatchDef.getDefaultInstance())
    .build();
batch = new QueryWritableBatch(header);
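The builder chain above is truncated before its opening call. As a hedged sketch (not copied from the Drill sources), and assuming the chain starts with the standard QueryData.newBuilder() whose setters appear in the protostuff schema code later in this listing, the complete statement would look roughly like this:

// Sketch only: assumes the truncated excerpt begins with QueryData.newBuilder().
QueryData header = QueryData.newBuilder()
    .setQueryId(context.getHandle().getQueryId())   // id of the running query
    .setRowCount(0)                                  // no rows in this terminal batch
    .setDef(RecordBatchDef.getDefaultInstance())     // empty batch definition
    .build();
QueryWritableBatch batch = new QueryWritableBatch(header);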
if (batch.getHeader().getDef().getFieldCount() == 0) {
  i++;
  continue;
}
// ...
for (final SerializedField field : batch.getHeader().getDef().getFieldList()) {
  @SuppressWarnings("resource")
  final ValueVector v = outgoingContainer.addOrGet(MaterializedField.create(field));
@SuppressWarnings("resource") public void readFromStreamWithContainer(VectorContainer myContainer, InputStream input) throws IOException { final VectorContainer container = new VectorContainer(); final UserBitShared.RecordBatchDef batchDef = UserBitShared.RecordBatchDef.parseDelimitedFrom(input); recordCount = batchDef.getRecordCount(); if (batchDef.hasCarriesTwoByteSelectionVector() && batchDef.getCarriesTwoByteSelectionVector()) { final List<SerializedField> fieldList = batchDef.getFieldList(); for (SerializedField metaData : fieldList) { final int dataLength = metaData.getBufferLength();
      break;
    case 6:
      builder.setDef(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.newBuilder(),
          org.apache.drill.exec.proto.SchemaUserBitShared.RecordBatchDef.MERGE));
public org.apache.drill.exec.proto.UserBitShared.RecordBatchDef buildPartial() {
  org.apache.drill.exec.proto.UserBitShared.RecordBatchDef result =
      new org.apache.drill.exec.proto.UserBitShared.RecordBatchDef(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  result.recordCount_ = recordCount_;
  if (fieldBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002)) {
      field_ = java.util.Collections.unmodifiableList(field_);
      bitField0_ = (bitField0_ & ~0x00000002);
    }
    result.field_ = field_;
  } else {
    result.field_ = fieldBuilder_.build();
  }
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000002;
  }
  result.carriesTwoByteSelectionVector_ = carriesTwoByteSelectionVector_;
  if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
    to_bitField0_ |= 0x00000004;
  }
  result.affectedRowsCount_ = affectedRowsCount_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
public int write(WritableBatch batch, SelectionVector2 sv2) throws IOException {
  checkNotNull(batch);
  checkNotNull(channel);
  final Timer.Context timerContext = metrics.timer(WRITER_TIMER).time();

  final DrillBuf[] incomingBuffers = batch.getBuffers();
  final UserBitShared.RecordBatchDef batchDef = batch.getDef();
  int bytesWritten = batchDef.getSerializedSize();

  /* Write the metadata to the file */
  batchDef.writeDelimitedTo(output);

  /* If we have a selection vector, dump it to file first */
  if (sv2 != null) {
    final int dataLength = sv2.getCount() * SelectionVector2.RECORD_SIZE;
    ByteBuffer buffer = sv2.getBuffer(false).nioBuffer(0, dataLength);
    while (buffer.remaining() > 0) {
      bytesWritten += channel.write(buffer);
    }
  }

  /* Dump the array of ByteBuf's associated with the value vectors */
  for (DrillBuf buf : incomingBuffers) {
    /* dump the buffer into the OutputStream */
    ByteBuffer buffer = buf.nioBuffer();
    while (buffer.remaining() > 0) {
      bytesWritten += channel.write(buffer);
    }
  }

  timeNs += timerContext.stop();
  this.bytesWritten += bytesWritten;
  return bytesWritten;
}
public void mergeFrom(com.dyuproject.protostuff.Input input,
    org.apache.drill.exec.proto.UserBitShared.QueryData.Builder builder) throws java.io.IOException {
  for (int number = input.readFieldNumber(this);; number = input.readFieldNumber(this)) {
    switch (number) {
      case 0:
        return;
      case 1:
        builder.setQueryId(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.QueryId.newBuilder(),
            org.apache.drill.exec.proto.SchemaUserBitShared.QueryId.MERGE));
        break;
      case 2:
        builder.setRowCount(input.readInt32());
        break;
      case 3:
        builder.setDef(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.newBuilder(),
            org.apache.drill.exec.proto.SchemaUserBitShared.RecordBatchDef.MERGE));
        break;
      case 4:
        builder.setAffectedRowsCount(input.readInt32());
        break;
      default:
        input.handleUnknownField(number, this);
    }
  }
}
@SuppressWarnings("resource") private RawFragmentBatch getNext(final int providerIndex) throws IOException { stats.startWait(); final RawFragmentBatchProvider provider = fragProviders[providerIndex]; try { injector.injectInterruptiblePause(context.getExecutionControls(), "waiting-for-data", logger); final RawFragmentBatch b = provider.getNext(); if (b != null) { stats.addLongStat(Metric.BYTES_RECEIVED, b.getByteCount()); stats.batchReceived(0, b.getHeader().getDef().getRecordCount(), false); inputCounts[providerIndex] += b.getHeader().getDef().getRecordCount(); } return b; } catch(final InterruptedException e) { // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the // interruption and respond to it if it wants to. Thread.currentThread().interrupt(); return null; } finally { stats.stopWait(); } }
public Builder clear() {
  super.clear();
  if (queryIdBuilder_ == null) {
    queryId_ = org.apache.drill.exec.proto.UserBitShared.QueryId.getDefaultInstance();
  } else {
    queryIdBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000001);
  receivingMajorFragmentId_ = 0;
  bitField0_ = (bitField0_ & ~0x00000002);
  receivingMinorFragmentId_ = java.util.Collections.emptyList();
  bitField0_ = (bitField0_ & ~0x00000004);
  sendingMajorFragmentId_ = 0;
  bitField0_ = (bitField0_ & ~0x00000008);
  sendingMinorFragmentId_ = 0;
  bitField0_ = (bitField0_ & ~0x00000010);
  if (defBuilder_ == null) {
    def_ = org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance();
  } else {
    defBuilder_.clear();
  }
  bitField0_ = (bitField0_ & ~0x00000020);
  isLastBatch_ = false;
  bitField0_ = (bitField0_ & ~0x00000040);
  return this;
}
/**
 * <code>optional .exec.shared.RecordBatchDef def = 6;</code>
 */
public Builder mergeDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef value) {
  if (defBuilder_ == null) {
    if (((bitField0_ & 0x00000020) == 0x00000020) &&
        def_ != org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance()) {
      def_ = org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.newBuilder(def_).mergeFrom(value).buildPartial();
    } else {
      def_ = value;
    }
    onChanged();
  } else {
    defBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000020;
  return this;
}
@SuppressWarnings("resource") private void readVectors(InputStream input, RecordBatchDef batchDef) throws IOException { final VectorContainer container = new VectorContainer(); final List<ValueVector> vectorList = Lists.newArrayList(); final List<SerializedField> fieldList = batchDef.getFieldList(); for (SerializedField metaData : fieldList) { final int dataLength = metaData.getBufferLength(); final MaterializedField field = MaterializedField.create(metaData); final DrillBuf buf = allocator.read(dataLength, input); final ValueVector vector = TypeHelper.getNewVector(field, allocator); vector.load(metaData, buf); buf.release(); // Vector now owns the buffer vectorList.add(vector); } container.addCollection(vectorList); container.buildSchema(svMode); container.setRecordCount(recordCount); va = container; }