/**
 * Adds a (columnName, columnType) entry to this builder's column list.
 *
 * @param columnName column name, must not be null
 * @param columnType column type, must not be null
 * @return this builder, for chaining
 */
public Builder add(String columnName, ValueType columnType)
{
  Preconditions.checkNotNull(columnName, "columnName");
  Preconditions.checkNotNull(columnType, "columnType");
  final Pair<String, ValueType> entry = Pair.of(columnName, columnType);
  columnTypeList.add(entry);
  return this;
}
/**
 * Split a dot-style columnName into the "main" columnName and the subColumn name after the dot. Useful for
 * columns that support dot notation.
 *
 * @param columnName columnName like "foo" or "foo.bar"
 *
 * @return pair of main column name (will not be null) and subColumn name (may be null)
 */
public static Pair<String, String> splitColumnName(String columnName)
{
  final int dotIndex = columnName.indexOf('.');
  if (dotIndex >= 0) {
    // Everything before the first dot is the main name; everything after it is the subColumn.
    return Pair.of(columnName.substring(0, dotIndex), columnName.substring(dotIndex + 1));
  }
  // No dot present: the whole string is the main column name.
  return Pair.of(columnName, null);
}
@Override
public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
{
  // Forward to the wrapped callback only when the (server, segment) pair passes the filter;
  // otherwise just keep listening.
  return filter.apply(Pair.of(server, segment))
         ? callback.segmentAdded(server, segment)
         : CallbackAction.CONTINUE;
}
@Override
public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
{
  // Mirror of segmentAdded: delegate only for pairs accepted by the filter.
  return filter.apply(Pair.of(server, segment))
         ? callback.segmentRemoved(server, segment)
         : CallbackAction.CONTINUE;
}
@Override
public void onChannelMessage(ChannelPrivMsg aMsg)
{
  // Timestamp the message on arrival and hand it to the blocking queue for the consumer.
  try {
    queue.put(Pair.of(DateTimes.nowUtc(), aMsg));
  }
  catch (InterruptedException e) {
    // Restore the interrupt flag before rethrowing so callers up the stack can still
    // observe the interruption (Java concurrency convention).
    Thread.currentThread().interrupt();
    throw new RuntimeException("interrupted adding message to queue", e);
  }
}
}
/**
 * Decodes a serialized group key into its Bucket header and the remaining key bytes.
 * The header layout must match the writer: int, long (millis, interpreted as UTC), int.
 *
 * @param keyBytes serialized group key
 * @return pair of the decoded Bucket (never null) and the bytes that follow the header
 */
public static Pair<Bucket, byte[]> fromGroupKey(byte[] keyBytes)
{
  // NOTE: dropped the redundant "final" modifier — static methods cannot be overridden anyway.
  final ByteBuffer buf = ByteBuffer.wrap(keyBytes);
  final Bucket bucket = new Bucket(buf.getInt(), DateTimes.utc(buf.getLong()), buf.getInt());
  final byte[] bytesLeft = new byte[buf.remaining()];
  buf.get(bytesLeft);
  return Pair.of(bucket, bytesLeft);
}
@Override
protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, DataSegment inventory)
{
  // A segment is accepted when the default filter OR any of the registered
  // per-segment predicates matches the (server metadata, segment) pair.
  final Predicate<Pair<DruidServerMetadata, DataSegment>> predicate =
      Predicates.or(defaultFilter, Predicates.or(segmentPredicates.values()));
  final Pair<DruidServerMetadata, DataSegment> candidate = Pair.of(container.getMetadata(), inventory);
  if (predicate.apply(candidate)) {
    addSingleInventory(container, inventory);
  }
  return container;
}
/**
 * Builds a list of (Boolean, Map) pairs from flat varargs: arguments alternate between
 * a Boolean and a String that is expanded via makeMap.
 */
private List<Pair<Boolean, Map<String, String>>> makeListOfPairs(Object... arguments)
{
  // Arguments come in (Boolean, String) pairs, so the count must be even.
  Preconditions.checkState(arguments.length % 2 == 0);
  final ArrayList<Pair<Boolean, Map<String, String>>> pairs = new ArrayList<>(arguments.length / 2);
  for (int idx = 0; idx < arguments.length; idx += 2) {
    pairs.add(Pair.of((Boolean) arguments[idx], makeMap((String) arguments[idx + 1])));
  }
  return pairs;
}
private <T> Pair<QueryInterruptedException, T> doPost( final SqlQuery query, final TypeReference<T> typeReference ) throws Exception { final Pair<QueryInterruptedException, String> pair = doPostRaw(query); if (pair.rhs == null) { //noinspection unchecked return (Pair<QueryInterruptedException, T>) pair; } else { return Pair.of(pair.lhs, JSON_MAPPER.readValue(pair.rhs, typeReference)); } }
/**
 * Resolves the segments covered by the provider, fetches them to local files, and builds
 * the versioned timeline over the provider's interval.
 *
 * @return pair of (segment -> local file) map and the timeline holders for the interval
 */
private static Pair<Map<DataSegment, File>, List<TimelineObjectHolder<String, DataSegment>>> prepareSegments(
    TaskToolbox toolbox,
    SegmentProvider segmentProvider
) throws IOException, SegmentLoadingException
{
  // Validate and resolve the used segments first; both the fetch and the timeline use them.
  final List<DataSegment> usedSegments = segmentProvider.checkAndGetSegments(toolbox);
  final Map<DataSegment, File> segmentFileMap = toolbox.fetchSegments(usedSegments);
  final List<TimelineObjectHolder<String, DataSegment>> timelineSegments =
      VersionedIntervalTimeline.forSegments(usedSegments).lookup(segmentProvider.interval);
  return Pair.of(segmentFileMap, timelineSegments);
}
/**
 * Registers a listener, rejecting duplicates by listener id. Existing work items are
 * replayed to the new listener as location-changed notifications before it is added,
 * all under the tasks lock, so the listener sees a consistent bootstrap snapshot.
 */
@Override
public void registerListener(TaskRunnerListener listener, Executor executor)
{
  // Duplicate check by listener id; NOTE(review): this scan happens outside the tasks
  // lock, so it races with concurrent registration — presumably acceptable here; verify.
  for (Pair<TaskRunnerListener, Executor> pair : listeners) {
    if (pair.lhs.getListenerId().equals(listener.getListenerId())) {
      throw new ISE("Listener [%s] already registered", listener.getListenerId());
    }
  }
  final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor);
  synchronized (tasks) {
    // Bootstrap: replay current task locations to the new listener first, then add it,
    // so it cannot receive a live notification before the replay completes.
    for (ForkingTaskRunnerWorkItem item : tasks.values()) {
      TaskRunnerUtils.notifyLocationChanged(ImmutableList.of(listenerPair), item.getTaskId(), item.getLocation());
    }
    listeners.add(listenerPair);
    log.info("Registered listener [%s]", listener.getListenerId());
  }
}
@Test
public void testDatabaseMetaDataCatalogs() throws Exception
{
  final DatabaseMetaData metaData = client.getMetaData();
  // getCatalogs() is expected to return exactly one row whose TABLE_CAT column is "druid".
  Assert.assertEquals(
      ImmutableList.of(
          ROW(Pair.of("TABLE_CAT", "druid"))
      ),
      getRows(metaData.getCatalogs())
  );
}
@Override public void registerListener(TaskRunnerListener listener, Executor executor) { for (Pair<TaskRunnerListener, Executor> pair : listeners) { if (pair.lhs.getListenerId().equals(listener.getListenerId())) { throw new ISE("Listener [%s] already registered", listener.getListenerId()); } } final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor); // Location never changes for an existing task, so it's ok to add the listener first and then issue bootstrap // callbacks without any special synchronization. listeners.add(listenerPair); log.info("Registered listener [%s]", listener.getListenerId()); for (TestTaskRunnerWorkItem item : runningItems) { TaskRunnerUtils.notifyLocationChanged(ImmutableList.of(listenerPair), item.getTaskId(), item.getLocation()); } }