public static Pair<Integer, Integer> getDefaultBatchSizeAndLimit(long maxMemory) { long memoryLimit = maxMemory / 10; long batchSize = 5 * 1024 * 1024; long queueLimit = 50; if (batchSize * queueLimit > memoryLimit) { queueLimit = memoryLimit / batchSize; } // make room for at least two queue items if (queueLimit < 2) { queueLimit = 2; batchSize = memoryLimit / queueLimit; } return new Pair<>((int) batchSize, (int) queueLimit); }
return Pair.of(dimensions, aggregatorFactories.toArray(new AggregatorFactory[0]));
final ByteBuffer conversionDirectBuffer = ByteBuffer.allocateDirect(allocationSize); conversions[i] = conversionDirectBuffer.asIntBuffer(); directBufferAllocations.add(new Pair<>(conversionDirectBuffer, allocationSize)); } else { conversions[i] = IntBuffer.allocate(indexed.size()); pQueue.add(Pair.of(i, iter));
/**
 * Notifies all registered listeners that a task's location has changed. Each listener is
 * invoked on its own associated executor; failures to submit are reported as alerts and do
 * not prevent the remaining listeners from being notified.
 *
 * @param listeners pairs of (listener, executor to notify it on)
 * @param taskId    id of the task whose location changed
 * @param location  the new task location
 */
public static void notifyLocationChanged(
    final Iterable<Pair<TaskRunnerListener, Executor>> listeners,
    final String taskId,
    final TaskLocation location
)
{
  log.info("Task [%s] location changed to [%s].", taskId, location);
  for (final Pair<TaskRunnerListener, Executor> listener : listeners) {
    try {
      // Run the callback on the executor the listener registered with.
      listener.rhs.execute(() -> listener.lhs.locationChanged(taskId, location));
    }
    catch (Exception e) {
      // Best effort: alert and keep notifying the remaining listeners.
      log.makeAlert(e, "Unable to notify task listener")
         .addData("taskId", taskId)
         .addData("taskLocation", location)
         .addData("listener", listener.toString())
         .emit();
    }
  }
}
/**
 * Notifies all registered listeners that a task's status has changed. Each listener is
 * invoked on its own associated executor; failures to submit are reported as alerts and do
 * not prevent the remaining listeners from being notified.
 *
 * @param listeners pairs of (listener, executor to notify it on)
 * @param taskId    id of the task whose status changed
 * @param status    the new task status
 */
public static void notifyStatusChanged(
    final Iterable<Pair<TaskRunnerListener, Executor>> listeners,
    final String taskId,
    final TaskStatus status
)
{
  log.info("Task [%s] status changed to [%s].", taskId, status.getStatusCode());
  for (final Pair<TaskRunnerListener, Executor> listener : listeners) {
    try {
      // Run the callback on the executor the listener registered with.
      listener.rhs.execute(() -> listener.lhs.statusChanged(taskId, status));
    }
    catch (Exception e) {
      // Best effort: alert and keep notifying the remaining listeners.
      log.makeAlert(e, "Unable to notify task listener")
         .addData("taskId", taskId)
         .addData("taskStatus", status.getStatusCode())
         .addData("listener", listener.toString())
         .emit();
    }
  }
}
}
/**
 * Split a dot-style columnName into the "main" columnName and the subColumn name after the dot.
 * Useful for columns that support dot notation.
 *
 * @param columnName columnName like "foo" or "foo.bar"
 *
 * @return pair of main column name (will not be null) and subColumn name (may be null)
 */
public static Pair<String, String> splitColumnName(String columnName)
{
  final int dotIndex = columnName.indexOf('.');
  // No dot: the whole name is the main column and there is no subColumn.
  return dotIndex < 0
         ? Pair.of(columnName, null)
         : Pair.of(columnName.substring(0, dotIndex), columnName.substring(dotIndex + 1));
}
public void put(K key, V value, int size) { final int totalSize = size + 48; // add approximate object overhead numBytes += totalSize; super.put(key, new Pair<>(totalSize, value)); }
/**
 * Queues an incoming channel message, timestamped with the receive time.
 *
 * @param aMsg the received message
 *
 * @throws RuntimeException if interrupted while waiting for queue capacity; the thread's
 *                          interrupt flag is restored before throwing
 */
@Override
public void onChannelMessage(ChannelPrivMsg aMsg)
{
  try {
    // Pair the message with the receive time so consumers can see when it arrived.
    queue.put(Pair.of(DateTimes.nowUtc(), aMsg));
  }
  catch (InterruptedException e) {
    // Restore the interrupt flag so callers further up the stack can observe it.
    Thread.currentThread().interrupt();
    throw new RuntimeException("interrupted adding message to queue", e);
  }
}
}
/**
 * Enqueues an item update to be processed later.
 *
 * @param updatedItemName name of the updated item
 * @param updatedItemData serialized item data
 */
public void addUpdate(String updatedItemName, byte[] updatedItemData)
{
  updateQueue.add(Pair.of(updatedItemName, updatedItemData));
}
/**
 * Forwards a segment-removed event to the wrapped callback when the (server, segment) pair
 * passes the configured filter; otherwise the event is dropped and iteration continues.
 */
@Override
public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
{
  if (filter.apply(Pair.of(server, segment))) {
    return callback.segmentRemoved(server, segment);
  }
  // Filtered out: no-op, keep receiving callbacks.
  return CallbackAction.CONTINUE;
}
public static <T> Pair<Queue, Accumulator<Queue, T>> createBySegmentAccumulatorPair() { // In parallel query runner multiple threads add to this queue concurrently Queue init = new ConcurrentLinkedQueue<>(); Accumulator<Queue, T> accumulator = new Accumulator<Queue, T>() { @Override public Queue accumulate(Queue accumulated, T in) { if (in == null) { throw new ISE("Cannot have null result"); } accumulated.offer(in); return accumulated; } }; return new Pair<>(init, accumulator); }
/**
 * Forwards a segment-added event to the wrapped callback when the (server, segment) pair
 * passes the configured filter; otherwise the event is dropped and iteration continues.
 */
@Override
public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
{
  if (filter.apply(Pair.of(server, segment))) {
    return callback.segmentAdded(server, segment);
  }
  // Filtered out: no-op, keep receiving callbacks.
  return CallbackAction.CONTINUE;
}
/**
 * Static factory equivalent to {@code new Pair<>(lhs, rhs)}.
 *
 * @param lhs left-hand value (may be null)
 * @param rhs right-hand value (may be null)
 *
 * @return a new Pair holding the two values
 */
public static <T1, T2> Pair<T1, T2> of(@Nullable T1 lhs, @Nullable T2 rhs) { return new Pair<>(lhs, rhs); }
/**
 * Decodes a {@link Bucket} plus the remaining payload bytes from a serialized group key.
 * The header layout read here is: int, long (interpreted as UTC millis), int — presumably
 * matching the serializer that produced {@code keyBytes}; confirm against the writer.
 *
 * @param keyBytes serialized group key
 *
 * @return pair of the decoded bucket and the bytes remaining after the header
 */
public static Pair<Bucket, byte[]> fromGroupKey(byte[] keyBytes)
{
  // NOTE: dropped redundant 'final' modifier — static methods cannot be overridden.
  final ByteBuffer buf = ByteBuffer.wrap(keyBytes);
  final Bucket bucket = new Bucket(buf.getInt(), DateTimes.utc(buf.getLong()), buf.getInt());
  // Everything after the fixed-size header belongs to the caller.
  final byte[] bytesLeft = new byte[buf.remaining()];
  buf.get(bytesLeft);
  return Pair.of(bucket, bytesLeft);
}
/**
 * Registers a handoff callback for the given segment descriptor.
 *
 * @param descriptor      segment to watch for handoff
 * @param exec            executor the callback should run on
 * @param handOffRunnable callback to run on handoff
 *
 * @return true if the callback was registered; false if one already existed for the descriptor
 */
@Override
public boolean registerSegmentHandoffCallback(
    SegmentDescriptor descriptor,
    Executor exec,
    Runnable handOffRunnable
)
{
  log.info("Adding SegmentHandoffCallback for dataSource[%s] Segment[%s]", dataSource, descriptor);
  final Pair<Executor, Runnable> existing =
      handOffCallbacks.putIfAbsent(descriptor, new Pair<>(exec, handOffRunnable));
  // putIfAbsent returns null only when no callback was previously registered.
  return existing == null;
}
/**
 * Looks up the creation time and data source for a task.
 *
 * @param taskId id of the task to look up
 *
 * @return pair of (created date, data source), or null if the task is unknown
 */
@Nullable
@Override
public Pair<DateTime, String> getCreatedDateTimeAndDataSource(String taskId)
{
  giant.lock();
  try {
    final TaskStuff stuff = tasks.get(taskId);
    if (stuff == null) {
      return null;
    }
    return Pair.of(stuff.getCreatedDate(), stuff.getDataSource());
  }
  finally {
    giant.unlock();
  }
}
/**
 * Records the task's checkpoints if any exist; tasks with no checkpoints yet are skipped
 * with a warning (presumably they have not started running).
 */
@Override
public void onSuccess(TreeMap<Integer, Map<Integer, Long>> checkpoints)
{
  if (checkpoints.isEmpty()) {
    log.warn("Ignoring task [%s], as probably it is not started running yet", taskId);
    return;
  }
  taskSequences.add(new Pair<>(taskId, checkpoints));
}
/**
 * Records a segment in {@code intervalToSegments}, keyed by its identifier. Each map value is a
 * pair holding at most one APPENDING segment in {@code lhs} and a list of the other
 * (append-finished) segments in {@code rhs}.
 * NOTE(review): the map is named intervalToSegments but is keyed by SegmentIdentifier here —
 * confirm the intended key.
 *
 * @param segmentWithState segment (plus its state) to record
 *
 * @throws ISE if an APPENDING segment is added while another APPENDING segment already exists
 *             for the same identifier
 */
void add(SegmentWithState segmentWithState)
{
  final SegmentIdentifier identifier = segmentWithState.getSegmentIdentifier();
  final Pair<SegmentWithState, List<SegmentWithState>> pair = intervalToSegments.get(identifier);
  // Reuse the existing finished-segment list when present; otherwise start a fresh one.
  // Note the list is aliased, not copied, so mutations below update the stored pair's list too.
  final List<SegmentWithState> appendFinishedSegments = pair == null || pair.rhs == null ?
                                                        new ArrayList<>() : pair.rhs;
  // always keep APPENDING segments for an interval start millis in the front
  if (segmentWithState.getState() == SegmentState.APPENDING) {
    // Only one APPENDING segment may exist per identifier at a time.
    if (pair != null && pair.lhs != null) {
      throw new ISE(
          "WTF?! there was already an appendingSegment[%s] before adding an appendingSegment[%s]",
          pair.lhs,
          segmentWithState
      );
    }
    intervalToSegments.put(identifier, Pair.of(segmentWithState, appendFinishedSegments));
  } else {
    // Non-APPENDING segments join the finished list; preserve the current appending segment (if any).
    final SegmentWithState appendingSegment = pair == null ? null : pair.lhs;
    appendFinishedSegments.add(segmentWithState);
    intervalToSegments.put(identifier, Pair.of(appendingSegment, appendFinishedSegments));
  }
}
/**
 * Converts the configured intervals into (startMillis, endMillis) pairs.
 *
 * @return one pair per interval, in iteration order
 */
private List<Pair<Long, Long>> makeIntervalLongs()
{
  final List<Pair<Long, Long>> result = new ArrayList<>();
  for (Interval interval : intervals) {
    result.add(new Pair<>(interval.getStartMillis(), interval.getEndMillis()));
  }
  return result;
}
@Override public Iterator<ServerHolder> pickServersToDrop(DataSegment toDrop, NavigableSet<ServerHolder> serverHolders) { List<ListenableFuture<Pair<Double, ServerHolder>>> futures = Lists.newArrayList(); for (final ServerHolder server : serverHolders) { futures.add( exec.submit( () -> Pair.of(computeCost(toDrop, server, true), server) ) ); } final ListenableFuture<List<Pair<Double, ServerHolder>>> resultsFuture = Futures.allAsList(futures); try { // results is an un-ordered list of a pair consisting of the 'cost' of a segment being on a server and the server List<Pair<Double, ServerHolder>> results = resultsFuture.get(); return results.stream() // Comparator.comapringDouble will order by lowest cost... // reverse it because we want to drop from the highest cost servers first .sorted(Comparator.comparingDouble((Pair<Double, ServerHolder> o) -> o.lhs).reversed()) .map(x -> x.rhs).collect(Collectors.toList()) .iterator(); } catch (Exception e) { log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit(); } return Collections.emptyIterator(); }