public static Pair<Integer, Integer> getDefaultBatchSizeAndLimit(long maxMemory) { long memoryLimit = maxMemory / 10; long batchSize = 5 * 1024 * 1024; long queueLimit = 50; if (batchSize * queueLimit > memoryLimit) { queueLimit = memoryLimit / batchSize; } // make room for at least two queue items if (queueLimit < 2) { queueLimit = 2; batchSize = memoryLimit / queueLimit; } return new Pair<>((int) batchSize, (int) queueLimit); }
public void put(K key, V value, int size) { final int totalSize = size + 48; // add approximate object overhead numBytes += totalSize; super.put(key, new Pair<>(totalSize, value)); }
// Enqueues a named update payload onto updateQueue for later consumption.
// NOTE(review): the byte[] is not copied — callers must not mutate it after handing it off.
public void addUpdate(String updatedItemName, byte[] updatedItemData) { updateQueue.add( new Pair<>(updatedItemName, updatedItemData) ); }
public static <T> Pair<Queue, Accumulator<Queue, T>> createBySegmentAccumulatorPair() { // In parallel query runner multiple threads add to this queue concurrently Queue init = new ConcurrentLinkedQueue<>(); Accumulator<Queue, T> accumulator = new Accumulator<Queue, T>() { @Override public Queue accumulate(Queue accumulated, T in) { if (in == null) { throw new ISE("Cannot have null result"); } accumulated.offer(in); return accumulated; } }; return new Pair<>(init, accumulator); }
/**
 * Static factory for a {@code Pair}; either side may be null.
 *
 * @param lhs left-hand value, may be null
 * @param rhs right-hand value, may be null
 * @return a new Pair holding the two values
 */
public static <T1, T2> Pair<T1, T2> of(@Nullable T1 lhs, @Nullable T2 rhs) { return new Pair<>(lhs, rhs); }
/**
 * Records the task's checkpoints when available; an empty map means the task has
 * likely not started running yet, so it is skipped with a warning.
 */
@Override
public void onSuccess(TreeMap<Integer, Map<Integer, Long>> checkpoints)
{
  if (checkpoints.isEmpty()) {
    log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
    return;
  }
  taskSequences.add(new Pair<>(taskId, checkpoints));
}
/**
 * Registers a handoff callback for the given segment descriptor.
 * Only the first registration per descriptor is kept.
 *
 * @return true if this call installed the callback, false if one was already present
 */
@Override
public boolean registerSegmentHandoffCallback(
    SegmentDescriptor descriptor,
    Executor exec,
    Runnable handOffRunnable
)
{
  log.info("Adding SegmentHandoffCallback for dataSource[%s] Segment[%s]", dataSource, descriptor);
  final Pair<Executor, Runnable> existing =
      handOffCallbacks.putIfAbsent(descriptor, new Pair<>(exec, handOffRunnable));
  return existing == null;
}
/**
 * Resolves the configured default broker service and picks one of its servers.
 */
public Pair<String, Server> getDefaultLookup()
{
  final String defaultServiceName = tierConfig.getDefaultBrokerServiceName();
  final Server picked = servers.get(defaultServiceName).pick();
  return new Pair<>(defaultServiceName, picked);
}
/**
 * Converts the configured intervals into (startMillis, endMillis) pairs.
 */
private List<Pair<Long, Long>> makeIntervalLongs()
{
  final List<Pair<Long, Long>> result = new ArrayList<>();
  for (Interval interval : intervals) {
    result.add(new Pair<>(interval.getStartMillis(), interval.getEndMillis()));
  }
  return result;
}
/**
 * Returns the segment with its reference count incremented, paired with a Closeable
 * the caller must close exactly once to release that reference.
 */
public Pair<Segment, Closeable> getAndIncrementSegment()
{
  final ReferenceCountingSegment referenceCountingSegment = getIncrementedSegment();
  return new Pair<>(referenceCountingSegment, referenceCountingSegment.decrementOnceCloseable());
}
/**
 * Looks the segment up across the server inventory, collecting every host that serves it.
 *
 * @return the segment with its hosting servers, or null if no server has it
 */
private Pair<DataSegment, Set<String>> getSegment(String segmentId)
{
  final Set<String> hostingServers = Sets.newHashSet();
  DataSegment found = null;
  for (DruidServer server : serverInventoryView.getInventory()) {
    final DataSegment candidate = server.getSegments().get(segmentId);
    if (candidate != null) {
      found = candidate;
      hostingServers.add(server.getHost());
    }
  }
  return found == null ? null : new Pair<>(found, hostingServers);
}
public static Pair<List<AggregatorFactory>, List<PostAggregator>> condensedAggregators( List<AggregatorFactory> aggList, List<PostAggregator> postAggList, String metric ) { List<PostAggregator> condensedPostAggs = AggregatorUtil.pruneDependentPostAgg( postAggList, metric ); // calculate dependent aggregators for these postAgg Set<String> dependencySet = new HashSet<>(); dependencySet.add(metric); for (PostAggregator postAggregator : condensedPostAggs) { dependencySet.addAll(postAggregator.getDependentFields()); } List<AggregatorFactory> condensedAggs = Lists.newArrayList(); for (AggregatorFactory aggregatorSpec : aggList) { if (dependencySet.contains(aggregatorSpec.getName())) { condensedAggs.add(aggregatorSpec); } } return new Pair(condensedAggs, condensedPostAggs); }
/**
 * Finds and reserves two consecutive bindable ports, returning them as (first, first + 1).
 */
public synchronized Pair<Integer, Integer> findTwoConsecutiveUnusedPorts()
{
  int candidate = chooseNext(startPort);
  // Advance until both the candidate port and its successor can be bound.
  while (!(canBind(candidate) && canBind(candidate + 1))) {
    candidate = chooseNext(candidate + 1);
  }
  usedPorts.add(candidate);
  usedPorts.add(candidate + 1);
  return new Pair<>(candidate, candidate + 1);
}
/**
 * Splits the searched dimensions into those backed by bitmap indexes and those that are not.
 * Dimensions with no column capabilities are dropped entirely.
 */
private static Pair<List<DimensionSpec>, List<DimensionSpec>> partitionDimensionList(
    StorageAdapter adapter,
    List<DimensionSpec> dimensions
)
{
  final List<DimensionSpec> withBitmaps = Lists.newArrayList();
  final List<DimensionSpec> withoutBitmaps = Lists.newArrayList();

  for (DimensionSpec spec : getDimsToSearch(adapter.getAvailableDimensions(), dimensions)) {
    final ColumnCapabilities capabilities = adapter.getColumnCapabilities(spec.getDimension());
    if (capabilities != null) {
      if (capabilities.hasBitmapIndexes()) {
        withBitmaps.add(spec);
      } else {
        withoutBitmaps.add(spec);
      }
    }
  }

  return new Pair<>(ImmutableList.copyOf(withBitmaps), ImmutableList.copyOf(withoutBitmaps));
}
final ByteBuffer conversionDirectBuffer = ByteBuffer.allocateDirect(allocationSize); conversions[i] = conversionDirectBuffer.asIntBuffer(); directBufferAllocations.add(new Pair<>(conversionDirectBuffer, allocationSize)); } else { conversions[i] = IntBuffer.allocate(indexed.size());
return new Pair<>(grouper.iterator(), future);
return new Pair<>(brokerServiceName, nodesHolder.pick());
private static Pair<Integer, Integer> getStartEndIndexes( final BoundDimFilter boundDimFilter, final BitmapIndex bitmapIndex ) { final int startIndex; // inclusive int endIndex; // exclusive if (!boundDimFilter.hasLowerBound()) { startIndex = 0; } else { final int found = bitmapIndex.getIndex(boundDimFilter.getLower()); if (found >= 0) { startIndex = boundDimFilter.isLowerStrict() ? found + 1 : found; } else { startIndex = -(found + 1); } } if (!boundDimFilter.hasUpperBound()) { endIndex = bitmapIndex.getCardinality(); } else { final int found = bitmapIndex.getIndex(boundDimFilter.getUpper()); if (found >= 0) { endIndex = boundDimFilter.isUpperStrict() ? found : found + 1; } else { endIndex = -(found + 1); } } endIndex = startIndex > endIndex ? startIndex : endIndex; return new Pair<>(startIndex, endIndex); }
return new Pair<>(index, accumulator);
return new Pair<>(grouper, accumulator);