return Pair.of(dimensions, aggregatorFactories.toArray(new AggregatorFactory[0]));
/**
 * Split a dot-style columnName into the "main" columnName and the subColumn name after the dot. Useful for
 * columns that support dot notation.
 *
 * @param columnName columnName like "foo" or "foo.bar"
 *
 * @return pair of main column name (will not be null) and subColumn name (may be null)
 */
public static Pair<String, String> splitColumnName(String columnName)
{
  final int i = columnName.indexOf('.');
  if (i < 0) {
    return Pair.of(columnName, null);
  } else {
    return Pair.of(columnName.substring(0, i), columnName.substring(i + 1));
  }
}
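A minimal usage sketch of the two cases described in the Javadoc (the driver method and sample inputs are illustrative, not part of the original source):

// Hypothetical driver exercising both documented cases.
public static void main(String[] args)
{
  Pair<String, String> plain = splitColumnName("foo");      // lhs = "foo", rhs = null
  Pair<String, String> dotted = splitColumnName("foo.bar"); // lhs = "foo", rhs = "bar"
  System.out.println(plain.lhs + " / " + plain.rhs);   // foo / null
  System.out.println(dotted.lhs + " / " + dotted.rhs); // foo / bar
}

Note that only the first dot splits: splitColumnName("a.b.c") yields ("a", "b.c"), because indexOf('.') finds the first occurrence.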
@Override
public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
{
  final CallbackAction action;
  if (filter.apply(Pair.of(server, segment))) {
    action = callback.segmentAdded(server, segment);
  } else {
    action = CallbackAction.CONTINUE;
  }
  return action;
}
@Override
public void onChannelMessage(ChannelPrivMsg aMsg)
{
  try {
    queue.put(Pair.of(DateTimes.nowUtc(), aMsg));
  }
  catch (InterruptedException e) {
    throw new RuntimeException("interrupted adding message to queue", e);
  }
}
@Override
public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
{
  final CallbackAction action;
  if (filter.apply(Pair.of(server, segment))) {
    action = callback.segmentRemoved(server, segment);
  } else {
    action = CallbackAction.CONTINUE;
  }
  return action;
}
public static final Pair<Bucket, byte[]> fromGroupKey(byte[] keyBytes)
{
  ByteBuffer buf = ByteBuffer.wrap(keyBytes);
  Bucket bucket = new Bucket(buf.getInt(), DateTimes.utc(buf.getLong()), buf.getInt());
  byte[] bytesLeft = new byte[buf.remaining()];
  buf.get(bytesLeft);
  return Pair.of(bucket, bytesLeft);
}
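The reads above imply a key layout of int, long (timestamp millis), int, followed by the remaining group key bytes. A minimal sketch of a matching encoder, assuming that layout and that the Bucket constructor arguments are (shardNum, time, partitionNum); the method name toGroupKey and the parameter names here are illustrative:

// Illustrative inverse of fromGroupKey: packs the three Bucket fields ahead of the raw key bytes.
public static byte[] toGroupKey(int shardNum, long timestampMillis, int partitionNum, byte[] keyBytes)
{
  return ByteBuffer.allocate(Integer.BYTES + Long.BYTES + Integer.BYTES + keyBytes.length)
                   .putInt(shardNum)
                   .putLong(timestampMillis)
                   .putInt(partitionNum)
                   .put(keyBytes)
                   .array();
}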
@Nullable
@Override
public Pair<DateTime, String> getCreatedDateTimeAndDataSource(String taskId)
{
  giant.lock();

  try {
    final TaskStuff taskStuff = tasks.get(taskId);
    return taskStuff == null ? null : Pair.of(taskStuff.getCreatedDate(), taskStuff.getDataSource());
  }
  finally {
    giant.unlock();
  }
}
void add(SegmentWithState segmentWithState)
{
  final SegmentIdentifier identifier = segmentWithState.getSegmentIdentifier();
  final Pair<SegmentWithState, List<SegmentWithState>> pair = intervalToSegments.get(identifier);
  final List<SegmentWithState> appendFinishedSegments = pair == null || pair.rhs == null
                                                        ? new ArrayList<>()
                                                        : pair.rhs;

  // Always keep the single APPENDING segment for an interval in the lhs slot;
  // append-finished segments accumulate in the rhs list.
  if (segmentWithState.getState() == SegmentState.APPENDING) {
    if (pair != null && pair.lhs != null) {
      throw new ISE(
          "WTF?! there was already an appendingSegment[%s] before adding an appendingSegment[%s]",
          pair.lhs,
          segmentWithState
      );
    }
    intervalToSegments.put(identifier, Pair.of(segmentWithState, appendFinishedSegments));
  } else {
    final SegmentWithState appendingSegment = pair == null ? null : pair.lhs;
    appendFinishedSegments.add(segmentWithState);
    intervalToSegments.put(identifier, Pair.of(appendingSegment, appendFinishedSegments));
  }
}
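Here the pair acts as (current appending segment, history of append-finished segments). A simplified model of that invariant using Druid's Pair with plain strings; the class and method names are illustrative, not Druid APIs:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model: at most one "appending" value per key; finished values accumulate on the rhs.
class AppendingStateModel
{
  private final Map<String, Pair<String, List<String>>> state = new HashMap<>();

  void addAppending(String key, String segment)
  {
    final Pair<String, List<String>> pair = state.get(key);
    if (pair != null && pair.lhs != null) {
      throw new IllegalStateException("already appending: " + pair.lhs);
    }
    final List<String> finished = pair == null ? new ArrayList<>() : pair.rhs;
    state.put(key, Pair.of(segment, finished));
  }

  void addFinished(String key, String segment)
  {
    final Pair<String, List<String>> pair = state.get(key);
    final String appending = pair == null ? null : pair.lhs;
    final List<String> finished = pair == null ? new ArrayList<>() : pair.rhs;
    finished.add(segment);
    // The appending slot is preserved; only the history grows.
    state.put(key, Pair.of(appending, finished));
  }
}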
@Override
public Iterator<ServerHolder> pickServersToDrop(DataSegment toDrop, NavigableSet<ServerHolder> serverHolders)
{
  List<ListenableFuture<Pair<Double, ServerHolder>>> futures = Lists.newArrayList();

  for (final ServerHolder server : serverHolders) {
    futures.add(
        exec.submit(
            () -> Pair.of(computeCost(toDrop, server, true), server)
        )
    );
  }

  final ListenableFuture<List<Pair<Double, ServerHolder>>> resultsFuture = Futures.allAsList(futures);

  try {
    // results is an unordered list of pairs of (the cost of the segment being on a server, that server)
    List<Pair<Double, ServerHolder>> results = resultsFuture.get();
    return results.stream()
                  // Comparator.comparingDouble orders by lowest cost first;
                  // reverse it because we want to drop from the highest-cost servers first.
                  .sorted(Comparator.comparingDouble((Pair<Double, ServerHolder> o) -> o.lhs).reversed())
                  .map(x -> x.rhs)
                  .collect(Collectors.toList())
                  .iterator();
  }
  catch (Exception e) {
    log.makeAlert(e, "Cost Balancer Multithread strategy wasn't able to complete cost computation.").emit();
  }
  return Collections.emptyIterator();
}
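A self-contained sketch of the sort-then-reverse step, using Druid's Pair with plain strings in place of ServerHolder; the sample costs and server names are made up:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class ReversedCostDemo
{
  public static void main(String[] args)
  {
    List<Pair<Double, String>> costs = Arrays.asList(
        Pair.of(0.3, "server-a"),
        Pair.of(1.7, "server-b"),
        Pair.of(0.9, "server-c")
    );
    // Highest cost first, mirroring the drop ordering above.
    List<String> dropOrder = costs.stream()
                                  .sorted(Comparator.comparingDouble((Pair<Double, String> p) -> p.lhs).reversed())
                                  .map(p -> p.rhs)
                                  .collect(Collectors.toList());
    System.out.println(dropOrder); // [server-b, server-c, server-a]
  }
}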
private static List<Pair<QueryableIndex, DataSegment>> loadSegments(
    List<TimelineObjectHolder<String, DataSegment>> timelineSegments,
    Map<DataSegment, File> segmentFileMap,
    IndexIO indexIO
) throws IOException
{
  final List<Pair<QueryableIndex, DataSegment>> segments = new ArrayList<>();

  for (TimelineObjectHolder<String, DataSegment> timelineSegment : timelineSegments) {
    final PartitionHolder<DataSegment> partitionHolder = timelineSegment.getObject();

    for (PartitionChunk<DataSegment> chunk : partitionHolder) {
      final DataSegment segment = chunk.getObject();
      final QueryableIndex queryableIndex = indexIO.loadIndex(
          Preconditions.checkNotNull(segmentFileMap.get(segment), "File for segment %s", segment.getIdentifier())
      );
      segments.add(Pair.of(queryableIndex, segment));
    }
  }

  return segments;
}
@Override
protected DruidServer addInnerInventory(
    DruidServer container,
    String inventoryKey,
    DataSegment inventory
)
{
  Predicate<Pair<DruidServerMetadata, DataSegment>> predicate = Predicates.or(
      defaultFilter,
      Predicates.or(segmentPredicates.values())
  );
  if (predicate.apply(Pair.of(container.getMetadata(), inventory))) {
    addSingleInventory(container, inventory);
  }
  return container;
}
@VisibleForTesting
public Pair<Integer, Integer> computeStartEnd(int cardinality)
{
  int startIndex = ignoreFirstN;

  if (previousStop != null) {
    if (idLookup == null) {
      throw new UnsupportedOperationException("Only DimensionSelectors which support idLookup() are currently supported");
    }
    int lookupId = idLookup.lookupId(previousStop) + 1;
    if (lookupId < 0) {
      lookupId *= -1;
    }
    if (lookupId > ignoreFirstN + keepOnlyN) {
      startIndex = ignoreFirstN + keepOnlyN;
    } else {
      startIndex = Math.max(lookupId, startIndex);
    }
  }

  int endIndex = Math.min(ignoreFirstN + keepOnlyN, cardinality);

  if (ignoreAfterThreshold &&
      query.getDimensionsFilter() == null &&
      query.getIntervals().stream().anyMatch(interval -> interval.contains(storageAdapter.getInterval()))) {
    endIndex = Math.min(endIndex, startIndex + query.getThreshold());
  }

  return Pair.of(startIndex, endIndex);
}
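A quick worked example of the index arithmetic, with made-up values and ignoring the previousStop and threshold branches:

// Hypothetical inputs: skip the first 2 dimension ids, keep at most 5, with a cardinality of 6.
int ignoreFirstN = 2;
int keepOnlyN = 5;
int cardinality = 6;
int startIndex = ignoreFirstN;                                  // 2
int endIndex = Math.min(ignoreFirstN + keepOnlyN, cardinality); // min(7, 6) = 6
// => Pair.of(2, 6): the end of the range is clamped to the actual cardinality.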
private static Pair<Map<DataSegment, File>, List<TimelineObjectHolder<String, DataSegment>>> prepareSegments(
    TaskToolbox toolbox,
    SegmentProvider segmentProvider
) throws IOException, SegmentLoadingException
{
  final List<DataSegment> usedSegments = segmentProvider.checkAndGetSegments(toolbox);
  final Map<DataSegment, File> segmentFileMap = toolbox.fetchSegments(usedSegments);
  final List<TimelineObjectHolder<String, DataSegment>> timelineSegments = VersionedIntervalTimeline
      .forSegments(usedSegments)
      .lookup(segmentProvider.interval);
  return Pair.of(segmentFileMap, timelineSegments);
}
@Override
@Nullable
public Pair<DateTime, String> getCreatedDateAndDataSource(String entryId)
{
  return connector.retryWithHandle(
      handle -> handle
          .createQuery(
              StringUtils.format(
                  "SELECT created_date, datasource FROM %s WHERE id = :entryId",
                  entryTable
              )
          )
          .bind("entryId", entryId)
          .map(
              (index, resultSet, ctx) -> Pair.of(
                  DateTimes.of(resultSet.getString("created_date")),
                  resultSet.getString("datasource")
              )
          )
          .first()
  );
}
private List<Pair<Interval, byte[]>> pruneSegmentsWithCachedResults(
    final byte[] queryCacheKey,
    final Set<ServerToSegment> segments
)
{
  if (queryCacheKey == null) {
    return Collections.emptyList();
  }
  final List<Pair<Interval, byte[]>> alreadyCachedResults = Lists.newArrayList();
  Map<ServerToSegment, Cache.NamedKey> perSegmentCacheKeys = computePerSegmentCacheKeys(segments, queryCacheKey);
  // Pull cached segments from cache and remove them from the set of segments to query.
  final Map<Cache.NamedKey, byte[]> cachedValues = computeCachedValues(perSegmentCacheKeys);
  perSegmentCacheKeys.forEach((segment, segmentCacheKey) -> {
    final Interval segmentQueryInterval = segment.getSegmentDescriptor().getInterval();

    final byte[] cachedValue = cachedValues.get(segmentCacheKey);
    if (cachedValue != null) {
      // Remove this cached segment from the set of segments to query.
      segments.remove(segment);
      alreadyCachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
    } else if (populateCache) {
      // Otherwise, if populating the cache, add this segment to the list of segments to cache.
      final String segmentIdentifier = segment.getServer().getSegment().getIdentifier();
      addCachePopulator(segmentCacheKey, segmentIdentifier, segmentQueryInterval);
    }
  });
  return alreadyCachedResults;
}
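The pruning pattern above — look up each pending segment's cache key, collect hits as pairs, and drop them from the set still to be queried — can be sketched with plain collections and Druid's Pair; all names here are illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class CachePruneDemo
{
  public static void main(String[] args)
  {
    Set<String> pendingSegments = new HashSet<>(Arrays.asList("seg1", "seg2", "seg3"));
    Map<String, byte[]> cache = new HashMap<>();
    cache.put("seg2", new byte[]{42});

    List<Pair<String, byte[]>> cachedResults = new ArrayList<>();
    for (String segment : new HashSet<>(pendingSegments)) {
      final byte[] cachedValue = cache.get(segment);
      if (cachedValue != null) {
        pendingSegments.remove(segment);                  // serve from cache, don't query
        cachedResults.add(Pair.of(segment, cachedValue));
      }
    }
    // pendingSegments now holds seg1 and seg3; seg2 is answered from the cache.
  }
}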
@Override
public void registerListener(TaskRunnerListener listener, Executor executor)
{
  for (Pair<TaskRunnerListener, Executor> pair : listeners) {
    if (pair.lhs.getListenerId().equals(listener.getListenerId())) {
      throw new ISE("Listener [%s] already registered", listener.getListenerId());
    }
  }

  final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor);

  synchronized (statusLock) {
    for (Map.Entry<String, RemoteTaskRunnerWorkItem> entry : runningTasks.entrySet()) {
      TaskRunnerUtils.notifyLocationChanged(
          ImmutableList.of(listenerPair),
          entry.getKey(),
          entry.getValue().getLocation()
      );
    }

    log.info("Registered listener [%s]", listener.getListenerId());
    listeners.add(listenerPair);
  }
}
@Override
public void registerListener(TaskRunnerListener listener, Executor executor)
{
  for (Pair<TaskRunnerListener, Executor> pair : listeners) {
    if (pair.lhs.getListenerId().equals(listener.getListenerId())) {
      throw new ISE("Listener [%s] already registered", listener.getListenerId());
    }
  }

  final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor);

  synchronized (tasks) {
    for (ForkingTaskRunnerWorkItem item : tasks.values()) {
      TaskRunnerUtils.notifyLocationChanged(ImmutableList.of(listenerPair), item.getTaskId(), item.getLocation());
    }

    listeners.add(listenerPair);
    log.info("Registered listener [%s]", listener.getListenerId());
  }
}
@Override
public void registerListener(TaskRunnerListener listener, Executor executor)
{
  for (Pair<TaskRunnerListener, Executor> pair : listeners) {
    if (pair.lhs.getListenerId().equals(listener.getListenerId())) {
      throw new ISE("Listener [%s] already registered", listener.getListenerId());
    }
  }

  final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor);

  // Location never changes for an existing task, so it's ok to add the listener first and then issue bootstrap
  // callbacks without any special synchronization.
  listeners.add(listenerPair);
  log.info("Registered listener [%s]", listener.getListenerId());

  for (ThreadPoolTaskRunnerWorkItem item : runningItems) {
    TaskRunnerUtils.notifyLocationChanged(ImmutableList.of(listenerPair), item.getTaskId(), item.getLocation());
  }
}
supervisors.put(id, Pair.of(supervisor, spec));
return true;
private void addSegment(final DataSegment segment)
{
  if (finalPredicate.apply(Pair.of(druidServer.getMetadata(), segment))) {
    if (druidServer.getSegment(segment.getIdentifier()) == null) {
      druidServer.addDataSegment(segment);
      runSegmentCallbacks(
          new Function<SegmentCallback, CallbackAction>()
          {
            @Override
            public CallbackAction apply(SegmentCallback input)
            {
              return input.segmentAdded(druidServer.getMetadata(), segment);
            }
          }
      );
    } else {
      log.warn(
          "Not adding or running callbacks for existing segment[%s] on server[%s]",
          segment.getIdentifier(),
          druidServer.getName()
      );
    }
  }
}