/**
 * Builds a new partition backed by a deep Kryo clone of the prototype Python operator.
 *
 * @return a fresh {@link DefaultPartition} wrapping the cloned operator instance
 */
public Partitioner.Partition<BasePythonExecutionOperator> clonePartition()
{
  // Return directly; no need for an intermediate local. (The previous local was
  // misleadingly named "clonedKuduInputOperator" — copy/paste residue.)
  return new DefaultPartition<>(KryoCloneUtils.cloneObject(prototypePythonOperator));
}
}
// NOTE(review): this fragment appears truncated/garbled — the method body is missing its
// opening brace and references names (newPartitions, partition, inputPortList) that are
// declared outside this view. Visible flow: compute the required partition count from the
// context, then either replicate the existing instance and assign partition keys, or —
// for a partition with empty keys — delegate to repartitionInputOperator. Recover the
// full method from the original source before editing.
@Override public Collection<Partition<T>> definePartitions(Collection<Partition<T>> partitions, PartitioningContext context) final int newPartitionCount = DefaultPartition.getRequiredPartitionCount(context, this.partitionCount); logger.debug("define partitions, partitionCount current {} requested {}", partitions.size(), newPartitionCount); newPartitions.add(new DefaultPartition<>(partition.getPartitionedInstance())); DefaultPartition.assignPartitionKeys(newPartitions, inputPortList.iterator().next()); } else if (partition.getPartitionKeys().isEmpty()) { newPartitions = repartitionInputOperator(partitions); } else {
// Snapshot of the current physical partitions, built for the partitioner's split/merge
// decision: for each PTOperator we load its committed operator state, wrap it (with its
// partition keys, load indicator and stats) in a DefaultPartition, and index it by both
// the DefaultPartition and the operator id. Also tracks the earliest recovery checkpoint
// across all partitions so new/modified partitions restart from it (at-least-once).
// NOTE(review): minCheckpoint and newPartitions are fields declared outside this view;
// the final statement invokes the user partitioner with a defensive copy of the list.
RepartitionContext(Partitioner<Operator> partitioner, PMapping currentMapping, int partitionCount) { super(currentMapping, partitionCount); this.operators = currentMapping.partitions; this.currentPartitions = new ArrayList<>(operators.size()); this.currentPartitionMap = Maps.newHashMapWithExpectedSize(operators.size()); this.operatorIdToPartition = Maps.newHashMapWithExpectedSize(operators.size()); // collect current partitions with committed operator state // those will be needed by the partitioner for split/merge for (PTOperator pOperator : operators) { Map<InputPort<?>, PartitionKeys> pks = pOperator.getPartitionKeys(); if (pks == null) { throw new AssertionError("Null partition: " + pOperator); } // if partitions checkpoint at different windows, processing for new or modified // partitions will start from earliest checkpoint found (at least once semantics) if (minCheckpoint == null) { minCheckpoint = pOperator.recoveryCheckpoint; } else if (minCheckpoint.windowId > pOperator.recoveryCheckpoint.windowId) { minCheckpoint = pOperator.recoveryCheckpoint; } Operator partitionedOperator = loadOperator(pOperator); DefaultPartition<Operator> partition = new DefaultPartition<>(partitionedOperator, pks, pOperator.loadIndicator, pOperator.stats); currentPartitions.add(partition); currentPartitionMap.put(partition, pOperator); LOG.debug("partition load: {} {} {}", pOperator, partition.getPartitionKeys(), partition.getLoad()); operatorIdToPartition.put(pOperator.getId(), partition); } newPartitions = partitioner.definePartitions(new ArrayList<Partition<Operator>>(currentPartitions), this); }
// NOTE(review): mid-method fragment — newPartitions, morePartitionsToCreate and
// blocksMetadataInput are declared outside this view. Visible flow: keep the existing
// instance, then Kryo-clone the prototype reader once per additionally required
// partition, assign partition keys over the blocksMetadataInput port, and read back the
// resulting partition mask from the first partition's keys.
newPartitions.add(new DefaultPartition<>(partition.getPartitionedInstance())); KryoCloneUtils<AbstractBlockReader<R, B, STREAM>> cloneUtils = KryoCloneUtils.createCloneUtils(this); while (morePartitionsToCreate-- > 0) { DefaultPartition<AbstractBlockReader<R, B, STREAM>> partition = new DefaultPartition<>(cloneUtils.getClone()); newPartitions.add(partition); DefaultPartition.assignPartitionKeys(Collections.unmodifiableCollection(newPartitions), blocksMetadataInput); int lPartitionMask = newPartitions.iterator().next().getPartitionKeys().get(blocksMetadataInput).mask;
/**
 * Resolves the number of partitions to create, honoring any parallel-partitioning
 * hint carried by the context and falling back to the configured partition count.
 *
 * @param partitions the current partitions (not consulted here)
 * @param context    the partitioning context supplying platform hints
 * @return the required partition count
 */
protected int getNewPartitionCount(Collection<Partition<AbstractFileInputOperator<T>>> partitions, PartitioningContext context)
{
  final int configuredCount = this.partitionCount;
  return DefaultPartition.getRequiredPartitionCount(context, configuredCount);
}
/**
 * Adjust the partitions of an input operator (operator with no connected input stream).
 *
 * <p>Overloaded partitions (positive load) are split into two fresh partitions of the
 * same instance; underloaded partitions (negative load) are merged pairwise — the first
 * of each pair is kept and the second dropped; neutral partitions pass through. Any
 * unpaired underloaded partition is appended unchanged at the end.
 *
 * @param <T> The operator type
 * @param partitions the current partitions
 * @return The new operators.
 */
public static <T extends Operator> Collection<Partition<T>> repartitionInputOperator(Collection<Partition<T>> partitions)
{
  List<Partition<T>> result = new ArrayList<>();
  List<Partition<T>> pendingUnderloaded = new ArrayList<>();
  for (Partition<T> current : partitions) {
    final int load = current.getLoad();
    if (load > 0) {
      // Split: two new partitions wrapping the same operator instance.
      result.add(new DefaultPartition<>(current.getPartitionedInstance()));
      result.add(new DefaultPartition<>(current.getPartitionedInstance()));
    } else if (load < 0) {
      // Merge: every second underloaded partition is discarded; the buffered
      // one from the pair survives.
      if (pendingUnderloaded.isEmpty()) {
        pendingUnderloaded.add(current);
      } else {
        result.add(pendingUnderloaded.remove(0));
      }
    } else {
      // Balanced: keep as-is.
      result.add(current);
    }
  }
  // An odd underloaded partition has no merge partner — keep it.
  result.addAll(pendingUnderloaded);
  return result;
}
@Override public Collection<Partition<Gen>> definePartitions( Collection<Partition<Gen>> list, PartitioningContext context) { if (partitions < 0) { // error String msg = String.format("Error: Bad value: partitions = %d%n", partitions); LOG.error(msg); throw new RuntimeException(msg); } final int prevCount = list.size(); if (1 == prevCount) { // initial call LOG.info("definePartitions: First call, prevCount = {}, partitions = {}", prevCount, partitions); } if (prevCount == partitions) { LOG.info("definePartitions: Nothing to do in definePartitions"); return list; // nothing to do } LOG.debug("definePartitions: Repartitioning from {} to {}", prevCount, partitions); Kryo kryo = new Kryo(); // return value: new list of partitions (includes old list) List<Partition<Gen>> newPartitions = Lists.newArrayListWithExpectedSize(partitions); for (int i = 0; i < partitions; i++) { Gen oper = cloneObject(kryo, this); newPartitions.add(new DefaultPartition<>(oper)); } LOG.info("definePartition: returning {} partitions", newPartitions.size()); return newPartitions; }
// Wrap the operator instance in a fresh partition and queue it for the result set.
// NOTE(review): anOperator and newPartitions are declared outside this fragment.
DefaultPartition<T> partition = new DefaultPartition<>(anOperator); newPartitions.add(partition);
private Collection<Partition<TestPartition>> getPartitions(Collection<Partition<TestPartition>> partitions, PartitioningContext context) { // create array of partitions to return Collection<Partition<TestPartition>> result = new ArrayList<Partition<TestPartition>>(nPartitions); int mask = getMask(nPartitions); for (int i = 0; i < nPartitions; ++i) { HashSet<Integer> set = new HashSet<>(); set.add(i); PartitionKeys keys = new PartitionKeys(mask, set); Partition partition = new DefaultPartition<TestPartition>(new TestPartition()); partition.getPartitionKeys().put(in, keys); } return result; } // getPartitions
/**
 * Clones the prototype Kafka input operator and binds the given partition
 * metadata to the clone before wrapping it in a partition.
 *
 * @param partitionAssignment the Kafka partition metadata assigned to the new operator
 * @return the partition wrapping the configured clone
 */
protected Partitioner.Partition<AbstractKafkaInputOperator> createPartition(
    Set<AbstractKafkaPartitioner.PartitionMeta> partitionAssignment)
{
  AbstractKafkaInputOperator clone = KryoCloneUtils.cloneObject(prototypeOperator);
  clone.assign(partitionAssignment);
  return new DefaultPartition<>(clone);
}
/**
 * Produces a new partition backed by a deep clone of the prototype operator,
 * pre-assigned with the supplied set of Kafka partition metadata.
 *
 * @param partitionAssignment the partition metadata to hand to the cloned operator
 * @return the new partition
 */
protected Partitioner.Partition<AbstractKafkaInputOperator> createPartition(
    Set<AbstractKafkaPartitioner.PartitionMeta> partitionAssignment)
{
  Partitioner.Partition<AbstractKafkaInputOperator> partition =
      new DefaultPartition<>(KryoCloneUtils.cloneObject(prototypeOperator));
  partition.getPartitionedInstance().assign(partitionAssignment);
  return partition;
}
// Re-wrap the operator instance and pin it to a single partition key (newMask/key)
// on input port e. NOTE(review): e, newMask, key and newPartitions are declared
// outside this fragment.
Partition<T> newPartition = new DefaultPartition<>(p.getPartitionedInstance()); newPartition.getPartitionKeys().put(e.getKey(), new PartitionKeys(newMask, Sets.newHashSet(key))); newPartitions.add(newPartition);
// Wrap the BlockWriter in a new partition and register it.
// NOTE(review): anOperator and partitions are declared outside this fragment.
DefaultPartition<BlockWriter> partition = new DefaultPartition<BlockWriter>(anOperator); partitions.add(partition);
// Tail of an array initializer: three fresh partitions, each wrapping a new
// TestPartition instance. The array declaration is outside this fragment.
new DefaultPartition<TestPartition>(new TestPartition()), new DefaultPartition<TestPartition>(new TestPartition()), new DefaultPartition<TestPartition>(new TestPartition()) };
/**
 * Clones the original operator and sets the partition pie assignments for this operator.
 * Kryo is used for cloning.
 *
 * @param scanAssignmentsForThisPartition The partition pie that is assigned to the operator according to the
 *     configured partitioner
 * @return The partition that is used by the runtime to launch the new physical operator instance
 */
public Partitioner.Partition<AbstractKuduInputOperator> clonePartitionAndAssignScanMeta(
    List<KuduPartitionScanAssignmentMeta> scanAssignmentsForThisPartition)
{
  // Configure the clone first, then wrap it — equivalent to configuring through
  // the partition wrapper, just easier to read.
  AbstractKuduInputOperator clonedOperator = KryoCloneUtils.cloneObject(prototypeKuduInputOperator);
  clonedOperator.setPartitionPieAssignment(scanAssignmentsForThisPartition);
  return new DefaultPartition<>(clonedOperator);
}
// Re-wrap the operator instance and pin it to a single partition key (newMask/key)
// on input port e; explicit type argument variant of the same pattern.
// NOTE(review): e, newMask, key and newPartitions are declared outside this fragment.
Partition<T> newPartition = new DefaultPartition<T>(p.getPartitionedInstance()); newPartition.getPartitionKeys().put(e.getKey(), new PartitionKeys(newMask, Sets.newHashSet(key))); newPartitions.add(newPartition);
/**
 * Creates one partition per configured CouchBase server: each Kryo clone of this
 * operator is bound to a distinct server index and URI. The incoming partitions
 * are discarded and fully replaced.
 *
 * @param partitions          the current partitions (replaced)
 * @param incrementalCapacity the partitioning context (unused)
 * @return one partition per configured server
 */
@Override
public Collection<Partition<AbstractCouchBaseInputOperator<T>>> definePartitions(Collection<Partition<AbstractCouchBaseInputOperator<T>>> partitions, PartitioningContext incrementalCapacity)
{
  conf = store.getConf();
  List<String> serverUris = conf.getServers();
  int serverCount = serverUris.size();
  Collection<Partition<AbstractCouchBaseInputOperator<T>>> result = Lists.newArrayListWithExpectedSize(serverCount);
  KryoCloneUtils<AbstractCouchBaseInputOperator<T>> cloneUtils = KryoCloneUtils.createCloneUtils(this);
  for (int idx = 0; idx < serverCount; idx++) {
    AbstractCouchBaseInputOperator<T> clone = cloneUtils.getClone();
    clone.setServerIndex(idx);
    clone.setServerURIString(serverUris.get(idx));
    logger.debug("oper {} urlstring is {}", idx, clone.getServerURIString());
    result.add(new DefaultPartition<>(clone));
  }
  return result;
}
/**
 * Replaces the incoming partitions with one clone of this operator per configured
 * CouchBase server; every clone is pinned to its server's index and URI string.
 *
 * @param partitions          the existing partitions (ignored and replaced)
 * @param incrementalCapacity the partitioning context (unused)
 * @return the repartitioned collection, sized to the server list
 */
@Override
public Collection<Partition<AbstractCouchBaseInputOperator<T>>> definePartitions(Collection<Partition<AbstractCouchBaseInputOperator<T>>> partitions, PartitioningContext incrementalCapacity)
{
  conf = store.getConf();
  final List<String> servers = conf.getServers();
  final int partitionTotal = servers.size();
  KryoCloneUtils<AbstractCouchBaseInputOperator<T>> cloner = KryoCloneUtils.createCloneUtils(this);
  Collection<Partition<AbstractCouchBaseInputOperator<T>>> repartitioned = Lists.newArrayListWithExpectedSize(partitionTotal);
  for (int serverIndex = 0; serverIndex < partitionTotal; serverIndex++) {
    AbstractCouchBaseInputOperator<T> instance = cloner.getClone();
    instance.setServerIndex(serverIndex);
    instance.setServerURIString(servers.get(serverIndex));
    logger.debug("oper {} urlstring is {}", serverIndex, instance.getServerURIString());
    repartitioned.add(new DefaultPartition<>(instance));
  }
  return repartitioned;
}
// Register the cloned file-input operator as a new partition.
// NOTE(review): oper and newPartitions are declared outside this fragment.
newPartitions.add(new DefaultPartition<AbstractFileInputOperator<T>>(oper));
// Seed the collection with an initial partition wrapping the given operator.
// NOTE(review): operator and collection are declared outside this fragment.
DefaultPartition<Operator> firstPartition = new DefaultPartition<>(operator); collection.add(firstPartition);