public Builder mergeFrom(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef other) { if (other == org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance()) return this; if (other.hasRecordCount()) { setRecordCount(other.getRecordCount());
public Builder mergeFrom(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef other) { if (other == org.apache.drill.exec.proto.UserBitShared.RecordBatchDef.getDefaultInstance()) return this; if (other.hasRecordCount()) { setRecordCount(other.getRecordCount());
@SuppressWarnings("resource") private RawFragmentBatch getNext(final int providerIndex) throws IOException { stats.startWait(); final RawFragmentBatchProvider provider = fragProviders[providerIndex]; try { injector.injectInterruptiblePause(context.getExecutionControls(), "waiting-for-data", logger); final RawFragmentBatch b = provider.getNext(); if (b != null) { stats.addLongStat(Metric.BYTES_RECEIVED, b.getByteCount()); stats.batchReceived(0, b.getHeader().getDef().getRecordCount(), false); inputCounts[providerIndex] += b.getHeader().getDef().getRecordCount(); } return b; } catch(final InterruptedException e) { // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the // interruption and respond to it if it wants to. Thread.currentThread().interrupt(); return null; } finally { stats.stopWait(); } }
if (rawBatch.getHeader().getDef().getRecordCount() != 0) { rawBatches.add(rawBatch); } else { while ((rawBatch = getNext(p)) != null && rawBatch.getHeader().getDef().getRecordCount() == 0) { nextBatch = getNext(node.batchId); while (nextBatch != null && nextBatch.getHeader().getDef().getRecordCount() == 0) { nextBatch = getNext(node.batchId);
valueCount = def.getRecordCount(); boolean schemaChanged = schema == null;
m.setValueCount(def.getRecordCount());
while (batch != null && batch.getHeader().getDef().getRecordCount() == 0 && (!first || batch.getHeader().getDef().getFieldCount() == 0)) { batch = getNextBatch(); if(schemaChanged) { this.schema = batchLoader.getSchema(); stats.batchReceived(0, rbd.getRecordCount(), true); lastOutcome = IterOutcome.OK_NEW_SCHEMA; } else { stats.batchReceived(0, rbd.getRecordCount(), false); lastOutcome = IterOutcome.OK;
final VectorContainer container = new VectorContainer(); final UserBitShared.RecordBatchDef batchDef = UserBitShared.RecordBatchDef.parseDelimitedFrom(input); recordCount = batchDef.getRecordCount(); if (batchDef.hasCarriesTwoByteSelectionVector() && batchDef.getCarriesTwoByteSelectionVector()) {
/**
 * Deserializes a batch from the given stream: reads the delimited
 * {@code RecordBatchDef}, restores the two-byte selection vector when the
 * definition indicates one was written, and then reconstructs the value
 * vectors described by the definition, adding them to the vector container.
 *
 * @param input
 *          the InputStream to read from
 * @throws IOException if reading from the stream fails
 */
@Override
public void readFromStream(InputStream input) throws IOException {
  final UserBitShared.RecordBatchDef def = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
  recordCount = def.getRecordCount();
  final boolean hasSv2 =
      def.hasCarriesTwoByteSelectionVector() && def.getCarriesTwoByteSelectionVector();
  if (hasSv2) {
    readSv2(input);
  }
  readVectors(input, def);
}
// Protostuff serializer for RecordBatchDef. Writes each *set* field using its
// proto field number: 1 = recordCount (int32), 2 = repeated SerializedField,
// 3 = carriesTwoByteSelectionVector (bool), 4 = affectedRowsCount (int32).
// Optional fields that are unset are skipped entirely.
// NOTE(review): this looks like generated protostuff glue — presumably it must
// stay in sync with the .proto definition; confirm before hand-editing.
public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserBitShared.RecordBatchDef message) throws java.io.IOException {
    if(message.hasRecordCount())
        output.writeInt32(1, message.getRecordCount(), false);
    // Repeated field: each SerializedField is delegated to its own schema writer.
    for(org.apache.drill.exec.proto.UserBitShared.SerializedField field : message.getFieldList())
        output.writeObject(2, field, org.apache.drill.exec.proto.SchemaUserBitShared.SerializedField.WRITE, true);
    if(message.hasCarriesTwoByteSelectionVector())
        output.writeBool(3, message.getCarriesTwoByteSelectionVector(), false);
    if(message.hasAffectedRowsCount())
        output.writeInt32(4, message.getAffectedRowsCount(), false);
}
// (signature continues beyond this excerpt)
public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef message)
/**
 * Records sender-side metrics for one outgoing batch: bytes sent, batch
 * count (one per call), and the record count taken from the batch header.
 */
public void updateStats(FragmentWritableBatch writableBatch) {
  final long bytesSent = writableBatch.getByteCount();
  final long recordsSent = writableBatch.getHeader().getDef().getRecordCount();
  stats.addLongStat(Metric.BYTES_SENT, bytesSent);
  stats.addLongStat(Metric.BATCHES_SENT, 1);
  stats.addLongStat(Metric.RECORDS_SENT, recordsSent);
}
// Protostuff serializer for RecordBatchDef. Writes each *set* field using its
// proto field number: 1 = recordCount (int32), 2 = repeated SerializedField,
// 3 = carriesTwoByteSelectionVector (bool), 4 = affectedRowsCount (int32).
// Optional fields that are unset are skipped entirely.
// NOTE(review): this looks like generated protostuff glue — presumably it must
// stay in sync with the .proto definition; confirm before hand-editing.
public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.UserBitShared.RecordBatchDef message) throws java.io.IOException {
    if(message.hasRecordCount())
        output.writeInt32(1, message.getRecordCount(), false);
    // Repeated field: each SerializedField is delegated to its own schema writer.
    for(org.apache.drill.exec.proto.UserBitShared.SerializedField field : message.getFieldList())
        output.writeObject(2, field, org.apache.drill.exec.proto.SchemaUserBitShared.SerializedField.WRITE, true);
    if(message.hasCarriesTwoByteSelectionVector())
        output.writeBool(3, message.getCarriesTwoByteSelectionVector(), false);
    if(message.hasAffectedRowsCount())
        output.writeInt32(4, message.getAffectedRowsCount(), false);
}
// (signature continues beyond this excerpt)
public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef message)