// NOTE(review): fragment — opens mid-method and ends with a dangling `} else {`;
// the enclosing method is outside this view.
// Creates the row-key output vector: a VarBinary field named after `column`.
MaterializedField field = MaterializedField.create(column.getAsNamePart().getName(), ROW_KEY_TYPE);
rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
} else {
/**
 * <code>optional .exec.shared.NamePart child = 3;</code>
 */
// Generated protobuf builder code (do not hand-edit; regenerate from the .proto).
// Merges `value` into the current child NamePart: when a child is already set
// (bit 0x00000004) and is not the default instance, the two messages are
// field-merged; otherwise `value` replaces the child outright.
public Builder mergeChild(org.apache.drill.exec.proto.UserBitShared.NamePart value) {
  if (childBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        child_ != org.apache.drill.exec.proto.UserBitShared.NamePart.getDefaultInstance()) {
      // Both old and new child present: perform a protobuf field-level merge.
      child_ = org.apache.drill.exec.proto.UserBitShared.NamePart.newBuilder(child_).mergeFrom(value).buildPartial();
    } else {
      child_ = value;
    }
    onChanged();
  } else {
    // A nested builder is active; delegate the merge to it.
    childBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
// Generated protobuf code (do not hand-edit; regenerate from the .proto).
// Builds a NamePart from this builder without enforcing required-field checks.
// Copies each field and translates the builder's has-bits into the message's
// bitField0_ so hasType()/hasName()/hasChild() reflect what was set.
public org.apache.drill.exec.proto.UserBitShared.NamePart buildPartial() {
  org.apache.drill.exec.proto.UserBitShared.NamePart result = new org.apache.drill.exec.proto.UserBitShared.NamePart(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  result.type_ = type_;
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002;
  }
  result.name_ = name_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000004;
  }
  if (childBuilder_ == null) {
    // No nested builder in use; take the message instance directly.
    result.child_ = child_;
  } else {
    result.child_ = childBuilder_.build();
  }
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
// Looks up the previously materialized child field by the simple (leaf) name
// of the incoming child's NamePart.
MaterializedField currentChild = childMap.get(newChild.getNamePart().getName());
// NOTE(review): fragment with lines missing — the non-empty-batch branch below
// contains a bare `.getTransferPair(...)` with no receiver or assignment, which
// cannot compile. Judging by the parallel flatten code elsewhere in this file,
// the dropped lines presumably either raise an unsupported-type error or build
// a fresh repeated vector to transfer from — TODO restore from original source.
if (unnestField instanceof RepeatedMapVector) {
  // Repeated map: use the dedicated map-flattening transfer pair.
  tp = ((RepeatedMapVector) unnestField)
      .getTransferPairToSingleMap(reference.getAsNamePart().getName(), oContext.getAllocator());
} else if (!(unnestField instanceof RepeatedValueVector)) {
  if (incoming.getRecordCount() != 0) {
    .getTransferPair(reference.getAsNamePart().getName(), oContext.getAllocator());
  } else {
    // Ordinary repeated vector: transfer from its inner data vector.
    final ValueVector vvIn = RepeatedValueVector.class.cast(unnestField).getDataVector();
    tp = vvIn.getTransferPair(reference.getAsNamePart().getName(), oContext.getAllocator());
// Generated protostuff schema code: reads a nested NamePart message from the
// input stream and merges it into the builder via the NamePart MERGE schema.
builder.setNamePart(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.NamePart.newBuilder(), org.apache.drill.exec.proto.SchemaUserBitShared.NamePart.MERGE));
// NOTE(review): fragment with intervening lines missing — `vv` below is used
// but its declaration is not in view (presumably a freshly created repeated
// vector for the empty-batch case; confirm against the full source file).
tp = ((RepeatedMapVector)flattenField).getTransferPairToSingleMap(reference.getAsNamePart().getName(), oContext.getAllocator());
} else if ( !(flattenField instanceof RepeatedValueVector) ) {
  if(incoming.getRecordCount() != 0) {
    tp = RepeatedValueVector.class.cast(vv).getTransferPair(reference.getAsNamePart().getName(), oContext.getAllocator());
  } else {
    // Ordinary repeated vector: transfer from its inner data vector.
    final ValueVector vvIn = RepeatedValueVector.class.cast(flattenField).getDataVector();
    tp = vvIn.getTransferPair(reference.getAsNamePart().getName(), oContext.getAllocator());
// Generated protostuff deserialization code (do not hand-edit).
// Reads NamePart fields from `input` until field number 0 (end of message),
// dispatching on the proto field number: 1 = type enum, 2 = name string,
// 3 = nested child NamePart (merged via the NamePart MERGE schema).
public void mergeFrom(com.dyuproject.protostuff.Input input, org.apache.drill.exec.proto.UserBitShared.NamePart.Builder builder) throws java.io.IOException {
  for(int number = input.readFieldNumber(this);; number = input.readFieldNumber(this)) {
    switch(number) {
      case 0:
        // End of message.
        return;
      case 1:
        builder.setType(org.apache.drill.exec.proto.UserBitShared.NamePart.Type.valueOf(input.readEnum()));
        break;
      case 2:
        builder.setName(input.readString());
        break;
      case 3:
        builder.setChild(input.mergeObject(org.apache.drill.exec.proto.UserBitShared.NamePart.newBuilder(), org.apache.drill.exec.proto.SchemaUserBitShared.NamePart.MERGE));
        break;
      default:
        // Unknown field number: let protostuff skip or preserve it.
        input.handleUnknownField(number, this);
    }
  }
}
// NOTE(review): the following declaration is cut off at the fragment boundary.
public boolean isInitialized(org.apache.drill.exec.proto.UserBitShared.NamePart.Builder builder)
/**
 * Recursively converts a {@link PathSegment} chain into a protobuf
 * {@link NamePart} chain. Named segments become {@code Type.NAME} parts
 * carrying the segment's path; array segments become {@code Type.ARRAY}
 * parts. The tail of the path is converted first and attached as the child.
 *
 * @param s the path segment to convert; may be {@code null}
 * @return the equivalent NamePart, or {@code null} when {@code s} is null
 * @throws IllegalStateException if an array segment carries a concrete index —
 *         NameParts can only reference whole Vectors, not individual records
 *         or values
 */
private static NamePart getNamePart(PathSegment s) {
  if (s == null) {
    return null;
  }
  NamePart.Builder b = NamePart.newBuilder();
  // Convert the rest of the path first so it can be attached as the child.
  if (s.getChild() != null) {
    NamePart namePart = getNamePart(s.getChild());
    if (namePart != null) {
      b.setChild(namePart);
    }
  }
  if (s.isArray()) {
    if (s.getArraySegment().hasIndex()) {
      // Message grammar fixed: "an indexed" (was "a indexed").
      throw new IllegalStateException("You cannot convert an indexed schema path to a NamePart. NameParts can only reference Vectors, not individual records or values.");
    }
    b.setType(Type.ARRAY);
  } else {
    b.setType(Type.NAME);
    b.setName(s.getNameSegment().getPath());
  }
  return b.build();
}
// Generated protobuf code (do not hand-edit; regenerate from the .proto).
// Builds a NamePart from this builder without enforcing required-field checks.
// Copies each field and translates the builder's has-bits into the message's
// bitField0_ so hasType()/hasName()/hasChild() reflect what was set.
public org.apache.drill.exec.proto.UserBitShared.NamePart buildPartial() {
  org.apache.drill.exec.proto.UserBitShared.NamePart result = new org.apache.drill.exec.proto.UserBitShared.NamePart(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001;
  }
  result.type_ = type_;
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002;
  }
  result.name_ = name_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000004;
  }
  if (childBuilder_ == null) {
    // No nested builder in use; take the message instance directly.
    result.child_ = child_;
  } else {
    result.child_ = childBuilder_.build();
  }
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
// NOTE(review): `cache` is assigned null and immediately dereferenced below, so
// this constructor throws NullPointerException unconditionally. The in-line
// comment suggests the code path is dead; either wire in a real
// DistributedCache or remove the cache-dependent initialization — TODO confirm
// with the owners before shipping.
public OrderedPartitionRecordBatch(OrderedPartitionSender pop, RecordBatch incoming, FragmentContext context) throws OutOfMemoryException {
  super(pop, context);
  this.incoming = incoming;
  this.partitions = pop.getDestinations().size();
  this.sendingMajorFragmentWidth = pop.getSendingWidth();
  this.recordsToSample = pop.getRecordsToSample();
  this.samplingFactor = pop.getSamplingFactor();
  this.completionFactor = pop.getCompletionFactor();
  DistributedCache cache = null;
  // Clearly, this code is not used!
  this.mmap = cache.getMultiMap(MULTI_CACHE_CONFIG); // NPE: cache is null here
  this.tableMap = cache.getMap(SINGLE_CACHE_CONFIG);
  Preconditions.checkNotNull(tableMap);
  // Key is unique per (query, major fragment) so samples from parallel minor
  // fragments land in the same cache entries.
  this.mapKey = String.format("%s_%d", context.getHandle().getQueryId(), context.getHandle().getMajorFragmentId());
  this.minorFragmentSampleCount = cache.getCounter(mapKey);
  SchemaPath outputPath = popConfig.getRef();
  // Partition-key output column: a required INT named after the configured ref.
  MaterializedField outputField = MaterializedField.create(outputPath.getAsNamePart().getName(), Types.required(TypeProtos.MinorType.INT));
  this.partitionKeyVector = (IntVector) TypeHelper.getNewVector(outputField, oContext.getAllocator());
}
@Override boolean materialize(final NamedExpression ne, final VectorContainer batch, final FunctionLookupContext registry) throws SchemaChangeException { final FunctionCall call = (FunctionCall) ne.getExpr(); final LogicalExpression input = ExpressionTreeMaterializer.materializeAndCheckErrors(call.args.get(0), batch, registry); if (input == null) { return false; } // make sure output vector type is Nullable, because we will write a null value in the first row of each partition TypeProtos.MajorType majorType = input.getMajorType(); if (majorType.getMode() == TypeProtos.DataMode.REQUIRED) { majorType = Types.optional(majorType.getMinorType()); } // add corresponding ValueVector to container final MaterializedField output = MaterializedField.create(ne.getRef().getAsNamePart().getName(), majorType); batch.addOrGet(output).allocateNew(); final TypedFieldId outputId = batch.getValueVectorId(ne.getRef()); writeInputToLead = new ValueVectorWriteExpression(outputId, input, true); return true; }
@Override boolean materialize(final NamedExpression ne, final VectorContainer batch, final FunctionLookupContext registry) throws SchemaChangeException { final FunctionCall call = (FunctionCall) ne.getExpr(); final LogicalExpression input = ExpressionTreeMaterializer.materializeAndCheckErrors(call.args.get(0), batch, registry); if (input == null) { return false; } // make sure output vector type is Nullable, because we will write a null value in the first row of each partition TypeProtos.MajorType majorType = input.getMajorType(); if (majorType.getMode() == TypeProtos.DataMode.REQUIRED) { majorType = Types.optional(majorType.getMinorType()); } // add lag output ValueVector to container final MaterializedField output = MaterializedField.create(ne.getRef().getAsNamePart().getName(), majorType); batch.addOrGet(output).allocateNew(); final TypedFieldId outputId = batch.getValueVectorId(ne.getRef()); writeInputToLag = new ValueVectorWriteExpression(outputId, input, true); writeLagToLag = new ValueVectorWriteExpression(outputId, new ValueVectorReadExpression(outputId), true); return true; }
/**
 * Returns the MapVector backing the given HBase column family, creating and
 * registering it on first use.
 *
 * @param familyName       the column-family name (also used as the field name)
 * @param allocateOnCreate whether to allocate the vector's buffers when it is
 *                         freshly created
 * @throws DrillRuntimeException wrapping any SchemaChangeException raised while
 *         adding the new field to the output mutator
 */
private MapVector getOrCreateFamilyVector(String familyName, boolean allocateOnCreate) {
  try {
    MapVector familyVector = familyVectorMap.get(familyName);
    if (familyVector != null) {
      // Fast path: vector already registered for this family.
      return familyVector;
    }
    SchemaPath column = SchemaPath.getSimplePath(familyName);
    MaterializedField field = MaterializedField.create(column.getAsNamePart().getName(), COLUMN_FAMILY_TYPE);
    familyVector = outputMutator.addField(field, MapVector.class);
    if (allocateOnCreate) {
      familyVector.allocateNew();
    }
    // Track the new column and cache the vector for subsequent lookups.
    getColumns().add(column);
    familyVectorMap.put(familyName, familyVector);
    return familyVector;
  } catch (SchemaChangeException e) {
    throw new DrillRuntimeException(e);
  }
}
/**
 * <code>optional .exec.shared.NamePart name_part = 2;</code>
 */
// Generated protobuf builder code (do not hand-edit; regenerate from the .proto).
// Merges `value` into the current name_part: when one is already set (bit
// 0x00000002) and is not the default instance, the two messages are
// field-merged; otherwise `value` replaces it outright.
public Builder mergeNamePart(org.apache.drill.exec.proto.UserBitShared.NamePart value) {
  if (namePartBuilder_ == null) {
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        namePart_ != org.apache.drill.exec.proto.UserBitShared.NamePart.getDefaultInstance()) {
      // Both old and new value present: perform a protobuf field-level merge.
      namePart_ = org.apache.drill.exec.proto.UserBitShared.NamePart.newBuilder(namePart_).mergeFrom(value).buildPartial();
    } else {
      namePart_ = value;
    }
    onChanged();
  } else {
    // A nested builder is active; delegate the merge to it.
    namePartBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
/**
 * <code>optional .exec.shared.NamePart child = 3;</code>
 */
// Generated protobuf builder code (do not hand-edit; regenerate from the .proto).
// Merges `value` into the current child NamePart: when a child is already set
// (bit 0x00000004) and is not the default instance, the two messages are
// field-merged; otherwise `value` replaces the child outright.
public Builder mergeChild(org.apache.drill.exec.proto.UserBitShared.NamePart value) {
  if (childBuilder_ == null) {
    if (((bitField0_ & 0x00000004) == 0x00000004) &&
        child_ != org.apache.drill.exec.proto.UserBitShared.NamePart.getDefaultInstance()) {
      // Both old and new child present: perform a protobuf field-level merge.
      child_ = org.apache.drill.exec.proto.UserBitShared.NamePart.newBuilder(child_).mergeFrom(value).buildPartial();
    } else {
      child_ = value;
    }
    onChanged();
  } else {
    // A nested builder is active; delegate the merge to it.
    childBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
// Loads this vector's data from a serialized batch: verifies the metadata's
// field name matches this vector's field, then adopts a slice of `buffer` as
// the backing data buffer.
@Override
public void load(SerializedField metadata, DrillBuf buffer) {
  Preconditions.checkArgument(this.field.getName().equals(metadata.getNamePart().getName()),
      "The field %s doesn't match the provided metadata %s.", this.field, metadata);
  final int actualLength = metadata.getBufferLength();
  final int valueCount = metadata.getValueCount();
  // Fixed-width vector: expected byte length is count * per-value width.
  final int expectedLength = valueCount * VALUE_WIDTH;
  assert actualLength == expectedLength :
      String.format("Expected to load %d bytes but actually loaded %d bytes", expectedLength, actualLength);
  clear();
  // Release the previously held buffer (if any) before adopting the new slice.
  if (data != null) {
    data.release(1);
  }
  data = buffer.slice(0, actualLength);
  data.retain(1); // take a reference on the slice so this vector owns it
  data.writerIndex(actualLength);
}
// Loads this vector's data from a serialized batch: verifies the metadata's
// field name matches this vector's field, then adopts a slice of `buffer` as
// the backing data buffer.
@Override
public void load(SerializedField metadata, DrillBuf buffer) {
  Preconditions.checkArgument(this.field.getName().equals(metadata.getNamePart().getName()),
      "The field %s doesn't match the provided metadata %s.", this.field, metadata);
  final int actualLength = metadata.getBufferLength();
  final int valueCount = metadata.getValueCount();
  // Fixed-width vector: expected byte length is count * per-value width.
  final int expectedLength = valueCount * VALUE_WIDTH;
  assert actualLength == expectedLength :
      String.format("Expected to load %d bytes but actually loaded %d bytes", expectedLength, actualLength);
  clear();
  // Release the previously held buffer (if any) before adopting the new slice.
  if (data != null) {
    data.release(1);
  }
  data = buffer.slice(0, actualLength);
  data.retain(1); // take a reference on the slice so this vector owns it
  data.writerIndex(actualLength);
}
// Loads this vector's data from a serialized batch: verifies the metadata's
// field name matches this vector's field, then adopts a slice of `buffer` as
// the backing data buffer.
@Override
public void load(SerializedField metadata, DrillBuf buffer) {
  Preconditions.checkArgument(this.field.getName().equals(metadata.getNamePart().getName()),
      "The field %s doesn't match the provided metadata %s.", this.field, metadata);
  final int actualLength = metadata.getBufferLength();
  final int valueCount = metadata.getValueCount();
  // Fixed-width vector: expected byte length is count * per-value width.
  final int expectedLength = valueCount * VALUE_WIDTH;
  assert actualLength == expectedLength :
      String.format("Expected to load %d bytes but actually loaded %d bytes", expectedLength, actualLength);
  clear();
  // Release the previously held buffer (if any) before adopting the new slice.
  if (data != null) {
    data.release(1);
  }
  data = buffer.slice(0, actualLength);
  data.retain(1); // take a reference on the slice so this vector owns it
  data.writerIndex(actualLength);
}