private <I> void register(Class<? extends I> clazz, ArrayListMultimap<String, Class<? extends I>> methods) { Description desc = clazz.getAnnotation(Description.class); Stream<String> namesStream; if (desc != null) { namesStream = Stream.of(desc.name().split(",")) .map(String::trim); } else { namesStream = Stream.of(clazz) .map(Class::getName) .map(name -> name.replace('.', '_')); } // Checks specified array of function names whether they should be replaced // using FUNCTION_REPLACE_MAP map. namesStream.map(String::toLowerCase) .map(functionName -> FUNCTION_REPLACE_MAP.getOrDefault(functionName, functionName)) .forEach(name -> methods.put(name, clazz)); UDFType type = clazz.getAnnotation(UDFType.class); if (type != null && !type.deterministic()) { nonDeterministicUDFs.add(clazz); } }
/**
 * Registers an operator to be used when the option
 * {@code planner.type_inference.enable} is turned on.
 *
 * @param name operator name; stored lower-cased as the lookup key
 * @param op   operator to register
 */
public void addOperatorWithInference(String name, SqlOperator op) {
  final String key = name.toLowerCase();
  drillOperatorsWithInference.add(op);
  drillOperatorsWithInferenceMap.put(key, op);
}
/**
 * Registers an operator to be used when the option
 * {@code planner.type_inference.enable} is turned off.
 *
 * @param name operator name; stored lower-cased as the lookup key
 * @param op   operator to register
 */
public void addOperatorWithoutInference(String name, SqlOperator op) {
  final String key = name.toLowerCase();
  drillOperatorsWithoutInference.add(op);
  drillOperatorsWithoutInferenceMap.put(key, op);
}
/**
 * Hands out the remaining unassigned work units to the endpoint iterators.
 * Each iterator accepts work until it reaches its minimum (when
 * {@code assignMinimum} is true) or its maximum plus leftover allowance
 * (when false), or until the work list is exhausted.
 *
 * @param unassignedWorkList the work units to assign
 * @param endpointIterators  the endpoint iterators to assign to
 * @param assignMinimum      whether to assign only up to the minimum amount
 */
private void assignLeftovers(LinkedList<WorkEndpointListPair<T>> unassignedWorkList,
    Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators,
    boolean assignMinimum) {
  for (FragIteratorWrapper wrapper : endpointIterators.values()) {
    // Capacity for this endpoint under the requested assignment mode.
    final int limit = assignMinimum
        ? wrapper.minCount
        : wrapper.maxCount + wrapper.maxCountLeftOver;
    while (wrapper.count < limit) {
      WorkEndpointListPair<T> workPair = unassignedWorkList.poll();
      if (workPair == null) {
        // No work left anywhere; nothing more to distribute.
        return;
      }
      Integer assignment = wrapper.iter.next();
      wrapper.count++;
      mappings.put(assignment, workPair.work);
    }
  }
}
/**
 * Attempts to place each work unit on one of its preferred endpoints, in the
 * unit's own endpoint-preference order. An endpoint accepts work while its
 * iterator is below its maximum (optionally including the leftover allowance).
 * Work units that no preferred endpoint can take are returned for a later pass.
 *
 * @param workList          the list of work units to assign
 * @param endpointIterators the endpoint iterators to assign to
 * @param assignMaxLeftOvers whether to assign up to the maximum including leftovers
 * @return a list of work units that could not be assigned in this pass
 */
private LinkedList<WorkEndpointListPair<T>> assign(List<WorkEndpointListPair<T>> workList,
    Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators,
    boolean assignMaxLeftOvers) {
  LinkedList<WorkEndpointListPair<T>> unassigned = Lists.newLinkedList();
  for (WorkEndpointListPair<T> workPair : workList) {
    boolean placed = false;
    for (DrillbitEndpoint endpoint : workPair.sortedEndpoints) {
      FragIteratorWrapper wrapper = endpointIterators.get(endpoint);
      if (wrapper == null) {
        // This preferred endpoint is not participating; try the next one.
        continue;
      }
      final int limit = assignMaxLeftOvers
          ? wrapper.maxCount + wrapper.maxCountLeftOver
          : wrapper.maxCount;
      if (wrapper.count < limit) {
        Integer assignment = wrapper.iter.next();
        wrapper.count++;
        mappings.put(assignment, workPair.work);
        placed = true;
        break;
      }
    }
    if (!placed) {
      unassigned.add(workPair);
    }
  }
  return unassigned;
}
// Record which receiver fragment id was assigned to this endpoint, then advance
// the counter for the next receiver.
endpointReceiverList.put(receiverLocation, receiverFragmentId);
receiverFragmentId++;
// Wire the sender and receiver to each other in both direction maps.
// NOTE(review): the sender-side entry resolves the endpoint via
// receiverLocations.get(receiverId) rather than the local receiverLocation used
// above — confirm both refer to the same endpoint for this receiverId.
receiverToSenderMapping.put(receiverId, new MinorFragmentEndpoint(senderFragmentId, senderLocation));
senderToReceiversMapping.put(senderFragmentId,
    new MinorFragmentEndpoint(receiverId, receiverLocations.get(receiverId)));
/**
 * Creates the broadcast sender, opening one data tunnel per distinct destination
 * drillbit. Minor fragment ids that live on the same endpoint are grouped so each
 * endpoint receives a single copy of the data for all of its fragments.
 *
 * @param context  root fragment context supplying operator context and data tunnels
 * @param incoming record batch stream to be broadcast
 * @param config   broadcast sender configuration holding the destination list
 * @throws OutOfMemoryException if the operator context cannot be allocated
 */
public BroadcastSenderRootExec(RootFragmentContext context,
    RecordBatch incoming,
    BroadcastSender config) throws OutOfMemoryException {
  super(context, context.newOperatorContext(config, null), config);
  this.ok = true;
  this.incoming = incoming;
  this.config = config;
  this.handle = context.getHandle();

  // Group destination minor fragment ids by the endpoint that hosts them.
  ArrayListMultimap<DrillbitEndpoint, Integer> fragmentsByEndpoint = ArrayListMultimap.create();
  for (MinorFragmentEndpoint destination : config.getDestinations()) {
    fragmentsByEndpoint.put(destination.getEndpoint(), destination.getId());
  }

  final int endpointCount = fragmentsByEndpoint.keySet().size();
  this.tunnels = new AccountingDataTunnel[endpointCount];
  this.receivingMinorFragments = new int[endpointCount][];

  int endpointIndex = 0;
  for (final DrillbitEndpoint endpoint : fragmentsByEndpoint.keySet()) {
    // Unbox the per-endpoint fragment ids into a primitive array.
    List<Integer> fragmentIds = fragmentsByEndpoint.get(endpoint);
    int[] ids = new int[fragmentIds.size()];
    for (int j = 0; j < ids.length; j++) {
      ids[j] = fragmentIds.get(j);
    }
    receivingMinorFragments[endpointIndex] = ids;
    tunnels[endpointIndex] = context.getDataTunnel(endpoint);
    endpointIndex++;
  }
}
@SuppressWarnings("resource") public void add(RecordBatchData rbd) { long batchBytes = getSize(rbd.getContainer()); if (batchBytes == 0 && batches.size() > 0) { return; } if (runningBatches >= Character.MAX_VALUE) { final String errMsg = String.format("Tried to add more than %d number of batches.", (int) Character.MAX_VALUE); logger.error(errMsg); throw new DrillRuntimeException(errMsg); } if (!reservation.add(rbd.getRecordCount() * 4)) { final String errMsg = String.format("Failed to pre-allocate memory for SV. " + "Existing recordCount*4 = %d, " + "incoming batch recordCount*4 = %d", recordCount * 4, rbd.getRecordCount() * 4); logger.error(errMsg); throw new DrillRuntimeException(errMsg); } if (rbd.getRecordCount() == 0 && batches.size() > 0) { rbd.getContainer().zeroVectors(); SelectionVector2 sv2 = rbd.getSv2(); if (sv2 != null) { sv2.clear(); } return; } runningBatches++; batches.put(rbd.getContainer().getSchema(), rbd); recordCount += rbd.getRecordCount(); }
/**
 * Add another record batch to the set of record batches. TODO: Refactor this and other {@link #add
 * (RecordBatchData)} method into one method.
 * @param batch batch to add; must not use a 4-byte selection vector
 * @return True if the requested add completed successfully. Returns false in the case that this
 *         builder is full and cannot receive additional packages.
 * @throws SchemaChangeException
 */
public boolean add(VectorAccessible batch) {
  if (batch.getSchema().getSelectionVectorMode() == SelectionVectorMode.FOUR_BYTE) {
    throw new UnsupportedOperationException("A sort cannot currently work against a sv4 batch.");
  }
  if (batch.getRecordCount() == 0 && batches.size() > 0) {
    return true; // skip over empty record batches.
  }
  long batchBytes = getSize(batch);
  // Zero-byte batch while batches already exist: treat as successfully skipped.
  if (batchBytes == 0 && batches.size() > 0) {
    return true;
  }
  if (runningBatches >= Character.MAX_VALUE) {
    return false; // maximum number of batches already reached.
  }
  // Reserve 4 bytes per record for the selection vector covering this batch.
  if (!reservation.add(batch.getRecordCount() * 4)) {
    return false; // no memory available for the SV allocation.
  }
  // Copy the incoming batch's data into vectors owned by this builder.
  RecordBatchData bd = new RecordBatchData(batch, allocator);
  runningBatches++;
  // Batches are grouped by schema so same-schema batches can be merged later.
  batches.put(batch.getSchema(), bd);
  recordCount += bd.getRecordCount();
  return true;
}
// Index every vector of every buffered batch by its field, so vectors for the
// same column can be regrouped across batches.
// NOTE(review): this loop continues beyond the visible chunk.
for (RecordBatchData rbd : batches.values()) {
  for (ValueVector v : rbd.getVectors()) {
    vectors.put(v.getField(), v);