public Builder mergeFrom(org.apache.drill.exec.proto.BitData.FragmentRecordBatch other) { if (other == org.apache.drill.exec.proto.BitData.FragmentRecordBatch.getDefaultInstance()) return this; if (other.hasQueryId()) { mergeQueryId(other.getQueryId()); if (other.hasReceivingMajorFragmentId()) { setReceivingMajorFragmentId(other.getReceivingMajorFragmentId()); if (other.hasSendingMajorFragmentId()) { setSendingMajorFragmentId(other.getSendingMajorFragmentId()); if (other.hasSendingMinorFragmentId()) { setSendingMinorFragmentId(other.getSendingMinorFragmentId()); if (other.hasDef()) { mergeDef(other.getDef()); if (other.hasIsLastBatch()) { setIsLastBatch(other.getIsLastBatch()); this.mergeUnknownFields(other.getUnknownFields()); return this;
size += 1 * getReceivingMinorFragmentIdList().size(); .computeBoolSize(7, isLastBatch_); size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size;
public org.apache.drill.exec.proto.BitData.FragmentRecordBatch buildPartial() { org.apache.drill.exec.proto.BitData.FragmentRecordBatch result = new org.apache.drill.exec.proto.BitData.FragmentRecordBatch(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
if (fieldList == null && rawBatch.getHeader().getDef().getFieldCount() != 0) { fieldList = rawBatch.getHeader().getDef().getFieldList(); if (rawBatch.getHeader().getDef().getRecordCount() != 0) { rawBatches.add(rawBatch); } else { while ((rawBatch = getNext(p)) != null && rawBatch.getHeader().getDef().getRecordCount() == 0) { if (rawBatch == null || rawBatch.getHeader().getDef().getFieldCount() == 0) { createDummyBatch = true; BitData.FragmentRecordBatch dummyHeader = BitData.FragmentRecordBatch.newBuilder() .setIsLastBatch(true) .setDef(dummyDef) if (rawBatch == null || rawBatch.getHeader().getDef().getFieldCount() == 0) { rawBatch = new RawFragmentBatch(dummyHeader, null, null); rawBatches.set(i, rawBatch); final UserBitShared.RecordBatchDef rbd = batch.getHeader().getDef(); try { batchLoaders[i].load(rbd, batch.getBody()); incomingBatches[b] = batch; if (batch != null) { batchLoaders[b].load(batch.getHeader().getDef(), batch.getBody()); } else {
if (b.getHeader().getIsLastBatch()) { logger.debug("Got last batch from {}:{}", b.getHeader().getSendingMajorFragmentId(), b.getHeader() .getSendingMinorFragmentId()); final int remainingStreams = decrementStreamCounter(); if (remainingStreams == 0) {
public org.apache.drill.exec.proto.BitData.FragmentRecordBatch buildPartial() { org.apache.drill.exec.proto.BitData.FragmentRecordBatch result = new org.apache.drill.exec.proto.BitData.FragmentRecordBatch(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0;
size += 1 * getReceivingMinorFragmentIdList().size(); .computeBoolSize(7, isLastBatch_); size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size;
/**
 * Handles one incoming record-batch RPC: decodes the {@link FragmentRecordBatch} header from
 * the protobuf body, wraps the data buffer into an {@link IncomingDataBatch}, and submits it
 * to every receiving minor fragment. The ACK is reference-counted so it is only sent once all
 * receivers (plus this method's own guard reference) have released it; on failure a FAIL ack
 * is sent instead.
 *
 * @param pBody  protobuf-encoded FragmentRecordBatch header
 * @param dBody  the batch's data buffer (may carry the record payload)
 * @param sender channel used to respond with ACK success/failure
 * @throws RpcException if the protobuf body cannot be decoded
 */
private void handleRecordBatchRequest(ByteBuf pBody, ByteBuf dBody, ResponseSender sender) throws RpcException {
  final FragmentRecordBatch fragmentBatch = RpcBus.get(pBody, FragmentRecordBatch.PARSER);
  final AckSender ack = new AckSender(sender);

  // increment so we don't get false returns.
  ack.increment();
  try {
    final IncomingDataBatch batch = new IncomingDataBatch(fragmentBatch, (DrillBuf) dBody, ack);
    final int targetCount = fragmentBatch.getReceivingMinorFragmentIdCount();

    // randomize who gets first transfer (and thus ownership) so memory usage is balanced when we're sharing amongst
    // multiple fragments.
    // NOTE(review): assumes targetCount >= 1 — nextInt(0) would throw IllegalArgumentException;
    // confirm senders always address at least one receiving minor fragment.
    final int firstOwner = ThreadLocalRandom.current().nextInt(targetCount);
    // Deliver to [firstOwner, targetCount) first, then wrap around to [0, firstOwner).
    submit(batch, firstOwner, targetCount);
    submit(batch, 0, firstOwner);
  } catch (IOException | FragmentSetupException e) {
    logger.error("Failure while getting fragment manager. {}",
        QueryIdHelper.getQueryIdentifiers(fragmentBatch.getQueryId(),
            fragmentBatch.getReceivingMajorFragmentId(),
            fragmentBatch.getReceivingMinorFragmentIdList()), e);
    // Drop pending ack credits and report failure to the sender instead.
    ack.clear();
    sender.send(new Response(BitData.RpcType.ACK, Acks.FAIL));
  } finally {
    // decrement the extra reference we grabbed at the top.
    ack.sendOk();
  }
}
/**
 * Routes one incoming data batch to the {@link DataCollector} registered for its sending
 * major fragment, under the shared incoming-batch lock so no batch is materialized after
 * close.
 *
 * @param incomingBatch the arriving batch (header identifies sender and last-batch status)
 * @return true only when this batch decremented the collector's remaining-required count to
 *         exactly zero; false otherwise, including when this buffer is already closed
 * @throws FragmentSetupException if no collector is registered for the sending major fragment
 * @throws IOException propagated from batch materialization/collection
 */
public boolean batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {

  // we want to make sure that we only generate local record batch reference in the case that we're not closed.
  // Otherwise we would leak memory.
  try (@SuppressWarnings("unused") AutoCloseables.Closeable lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return false;
    }

    // A last-batch marker means one upstream stream has finished.
    if (incomingBatch.getHeader().getIsLastBatch()) {
      streamsRemaining.decrementAndGet();
    }

    final int sendMajorFragmentId = incomingBatch.getHeader().getSendingMajorFragmentId();
    DataCollector collector = collectorMap.get(sendMajorFragmentId);
    if (collector == null) {
      throw new FragmentSetupException(String.format(
          "We received a major fragment id that we were not expecting. The id was %d. %s",
          sendMajorFragmentId, Arrays.toString(collectorMap.values().toArray())));
    }

    // Use the Data Collector's buffer allocator if set, otherwise the fragment's one
    BufferAllocator ownerAllocator = collector.getAllocator();

    synchronized (collector) {
      // Materialize against the owning allocator while holding the collector's monitor.
      final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(ownerAllocator);
      boolean decrementedToZero = collector
          .batchArrived(incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      // Drop this method's reference — NOTE(review): presumably the collector retains its own
      // reference if it kept the batch; confirm the ref-counting contract.
      newRawFragmentBatch.release();

      // we should only return true if remaining required has been decremented and is currently equal to zero.
      return decrementedToZero;
    }
  }
}
public Builder mergeFrom(org.apache.drill.exec.proto.BitData.FragmentRecordBatch other) { if (other == org.apache.drill.exec.proto.BitData.FragmentRecordBatch.getDefaultInstance()) return this; if (other.hasQueryId()) { mergeQueryId(other.getQueryId()); if (other.hasReceivingMajorFragmentId()) { setReceivingMajorFragmentId(other.getReceivingMajorFragmentId()); if (other.hasSendingMajorFragmentId()) { setSendingMajorFragmentId(other.getSendingMajorFragmentId()); if (other.hasSendingMinorFragmentId()) { setSendingMinorFragmentId(other.getSendingMinorFragmentId()); if (other.hasDef()) { mergeDef(other.getDef()); if (other.hasIsLastBatch()) { setIsLastBatch(other.getIsLastBatch()); this.mergeUnknownFields(other.getUnknownFields()); return this;
@SuppressWarnings("resource") private RawFragmentBatch getNext(final int providerIndex) throws IOException { stats.startWait(); final RawFragmentBatchProvider provider = fragProviders[providerIndex]; try { injector.injectInterruptiblePause(context.getExecutionControls(), "waiting-for-data", logger); final RawFragmentBatch b = provider.getNext(); if (b != null) { stats.addLongStat(Metric.BYTES_RECEIVED, b.getByteCount()); stats.batchReceived(0, b.getHeader().getDef().getRecordCount(), false); inputCounts[providerIndex] += b.getHeader().getDef().getRecordCount(); } return b; } catch(final InterruptedException e) { // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the // interruption and respond to it if it wants to. Thread.currentThread().interrupt(); return null; } finally { stats.stopWait(); } }
/**
 * Adds one batch to the buffer, spooling it to disk when the buffer is already in spooling
 * mode, and switching future batches to spooling once in-memory size crosses the threshold.
 *
 * @param batch incoming batch; must originate from the expected opposite major fragment
 * @throws IOException propagated from spooler initialization/handoff
 */
@Override
protected void enqueueInner(RawFragmentBatch batch) throws IOException {
  assert batch.getHeader().getSendingMajorFragmentId() == oppositeId;

  logger.debug("Enqueue batch. Current buffer size: {}. Last batch: {}. Sending fragment: {}",
      bufferQueue.size(), batch.getHeader().getIsLastBatch(), batch.getHeader().getSendingMajorFragmentId());
  RawFragmentBatchWrapper wrapper;
  boolean spoolCurrentBatch = isCurrentlySpooling();
  // NOTE(review): second ctor arg appears to flag whether the wrapper keeps the batch
  // available in memory (false when this batch will be spooled) — confirm against
  // RawFragmentBatchWrapper.
  wrapper = new RawFragmentBatchWrapper(batch, !spoolCurrentBatch);
  currentSizeInMemory += wrapper.getBodySize();
  if (spoolCurrentBatch) {
    // Lazily create the spooler on first use, then hand this batch off for disk spooling.
    if (spooler == null) {
      initSpooler();
    }
    spooler.addBatchForSpooling(wrapper);
  }
  bufferQueue.add(wrapper);
  // Crossing the threshold only flips subsequent batches to disk; this batch stays in memory.
  if (!spoolCurrentBatch && currentSizeInMemory > threshold) {
    logger.debug("Buffer size {} greater than threshold {}. Start spooling to disk",
        currentSizeInMemory, threshold);
    startSpooling();
  }
}
/**
 * Returns a new builder initialized with a copy of this message's fields.
 */
public Builder toBuilder() {
  Builder populated = newBuilder(this);
  return populated;
}
/**
 * Maps a server-side RPC type to the default protobuf instance used to parse its response.
 *
 * @param rpcType one of the {@code RpcType} value constants
 * @return the default instance for the corresponding response message
 * @throws UnsupportedOperationException if the rpcType is not a known response type
 */
public static MessageLite getResponseDefaultInstanceServer(int rpcType) throws RpcException {
  switch (rpcType) {
  case RpcType.ACK_VALUE:
    return Ack.getDefaultInstance();
  case RpcType.HANDSHAKE_VALUE:
    return BitClientHandshake.getDefaultInstance();
  case RpcType.REQ_RECORD_BATCH_VALUE:
    return FragmentRecordBatch.getDefaultInstance();
  case RpcType.SASL_MESSAGE_VALUE:
    return SaslMessage.getDefaultInstance();
  default:
    // Include the offending value so protocol mismatches are diagnosable from the stack trace.
    throw new UnsupportedOperationException("Unknown response rpcType: " + rpcType);
  }
}
}
// Returns a fresh, empty builder for this message type.
public Builder newBuilderForType() { return newBuilder(); }
// Creates a builder pre-populated from an existing message.
// NOTE(review): the body of this method is cut off in this chunk; only its opening is visible.
public static Builder newBuilder(org.apache.drill.exec.proto.BitData.FragmentRecordBatch prototype) {
/**
 * Serializes a FragmentRecordBatch to a protostuff Output, writing only fields that are set.
 * Field numbers (1=queryId ... 7=isLastBatch) mirror the .proto definition.
 */
public void writeTo(com.dyuproject.protostuff.Output output, org.apache.drill.exec.proto.BitData.FragmentRecordBatch message) throws java.io.IOException
{
    if(message.hasQueryId())
        output.writeObject(1, message.getQueryId(), org.apache.drill.exec.proto.SchemaUserBitShared.QueryId.WRITE, false);

    if(message.hasReceivingMajorFragmentId())
        output.writeInt32(2, message.getReceivingMajorFragmentId(), false);
    // Repeated field — NOTE(review): the trailing true presumably flags a repeated write; confirm
    // against the protostuff Output API.
    for(int receivingMinorFragmentId : message.getReceivingMinorFragmentIdList())
        output.writeInt32(3, receivingMinorFragmentId, true);
    if(message.hasSendingMajorFragmentId())
        output.writeInt32(4, message.getSendingMajorFragmentId(), false);
    if(message.hasSendingMinorFragmentId())
        output.writeInt32(5, message.getSendingMinorFragmentId(), false);
    if(message.hasDef())
        output.writeObject(6, message.getDef(), org.apache.drill.exec.proto.SchemaUserBitShared.RecordBatchDef.WRITE, false);

    if(message.hasIsLastBatch())
        output.writeBool(7, message.getIsLastBatch(), false);
}
// NOTE(review): the isInitialized body is cut off in this chunk; only the signature is visible.
public boolean isInitialized(org.apache.drill.exec.proto.BitData.FragmentRecordBatch message)
/**
 * Records sender-side metrics for one outgoing batch: bytes sent, batch count, and the
 * number of records declared in the batch header's definition.
 *
 * @param writableBatch the batch that was just sent
 */
public void updateStats(FragmentWritableBatch writableBatch) {
  final long recordCount = writableBatch.getHeader().getDef().getRecordCount();
  stats.addLongStat(Metric.BYTES_SENT, writableBatch.getByteCount());
  stats.addLongStat(Metric.BATCHES_SENT, 1);
  stats.addLongStat(Metric.RECORDS_SENT, recordCount);
}
/**
 * Creates a builder whose fields are initialized from {@code prototype}.
 */
public static Builder newBuilder(org.apache.drill.exec.proto.BitData.FragmentRecordBatch prototype) {
  Builder builder = newBuilder();
  builder.mergeFrom(prototype);
  return builder;
}
/**
 * Returns a new builder initialized with a copy of this message's fields.
 */
public Builder toBuilder() {
  Builder populated = newBuilder(this);
  return populated;
}