/**
 * Converts each row of the given {@link ResultSet} into a single
 * "label=value; label=value; ..." string and collects the distinct strings
 * into an immutable set.
 *
 * @param resultSet the result set to consume; it is advanced to the end
 * @return an immutable set containing one string per distinct row
 * @throws SQLException if reading from the result set fails
 */
static Set<String> toStringSet(ResultSet resultSet) throws SQLException {
  ImmutableSet.Builder<String> builder = ImmutableSet.builder();
  final List<Ord<String>> columns = columnLabels(resultSet);
  while (resultSet.next()) {
    // A fresh StringBuilder per row; the original's trailing buf.setLength(0)
    // was dead code and has been removed.
    StringBuilder buf = new StringBuilder();
    for (Ord<String> column : columns) {
      buf.append(column.i == 1 ? "" : "; ")
          .append(column.e)
          .append("=")
          .append(resultSet.getObject(column.i));
    }
    builder.add(buf.toString());
  }
  return builder.build();
}
/**
 * Returns {@code true} if the given function name is one of the comparison
 * functions registered in {@code COMPARE_FUNCTIONS_TRANSPOSE_MAP}.
 *
 * @param functionName name of the function to test
 * @return whether the function is a transposable comparison function
 */
public static boolean isCompareFunction(String functionName) {
  // containsKey is the direct membership test; keySet().contains() builds a
  // key-set view just to do the same lookup.
  return COMPARE_FUNCTIONS_TRANSPOSE_MAP.containsKey(functionName);
}
/**
 * Returns the physical-planning optimizer rules for this plugin: the rules
 * that push filters into an HBase scan or project.
 */
@Override
public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) {
  final Set<StoragePluginOptimizerRule> rules = ImmutableSet.of(
      HBasePushFilterIntoScan.FILTER_ON_SCAN,
      HBasePushFilterIntoScan.FILTER_ON_PROJECT);
  return rules;
}
// Builds the cartesian product of the given sets as a Set of Lists.
// Each input set is snapshotted; if any axis is empty the whole product is empty.
static <E> Set<List<E>> create(List<? extends Set<? extends E>> sets) {
  ImmutableList.Builder<ImmutableSet<E>> axesBuilder =
      new ImmutableList.Builder<ImmutableSet<E>>(sets.size());
  for (Set<? extends E> set : sets) {
    ImmutableSet<E> copy = ImmutableSet.copyOf(set);
    if (copy.isEmpty()) {
      // An empty axis makes the entire cartesian product empty.
      return ImmutableSet.of();
    }
    axesBuilder.add(copy);
  }
  final ImmutableList<ImmutableSet<E>> axes = axesBuilder.build();
  // Lazy list-of-lists view over the axes; each element is the axis viewed as a list.
  ImmutableList<List<E>> listAxes =
      new ImmutableList<List<E>>() {
        @Override
        public int size() {
          return axes.size();
        }

        @Override
        public List<E> get(int index) {
          return axes.get(index).asList();
        }

        @Override
        boolean isPartialView() {
          // This view holds a reference to the backing 'axes' list, so
          // serialization must not serialize the view directly.
          return true;
        }
      };
  return new CartesianSet<E>(axes, new CartesianList<E>(listAxes));
}
/**
 * Creates a descriptor for a MapR-DB index.
 *
 * @param indexCols             columns covered by the index key
 * @param indexCollationContext collation (sort) information for the index columns
 * @param nonIndexCols          included (non-key) columns
 * @param rowKeyColumns         columns making up the row key
 * @param indexName             name of the index
 * @param tableName             name of the indexed table
 * @param type                  kind of index
 * @param desc                  plugin-specific index description object
 * @param scan                  group scan used to obtain the plugin cost model
 * @param nullsDirection        null ordering of the index
 */
public MapRDBIndexDescriptor(List<LogicalExpression> indexCols,
    CollationContext indexCollationContext,
    List<LogicalExpression> nonIndexCols,
    List<LogicalExpression> rowKeyColumns,
    String indexName,
    String tableName,
    IndexType type,
    Object desc,
    DbGroupScan scan,
    NullDirection nullsDirection) {
  super(indexCols, indexCollationContext, nonIndexCols, rowKeyColumns, indexName, tableName, type, nullsDirection);
  this.desc = desc;
  // indexedFields: just the key columns; allFields additionally includes the
  // document path pseudo-column and the included columns.
  this.indexedFields = ImmutableSet.copyOf(indexColumns);
  this.allFields = new ImmutableSet.Builder<LogicalExpression>()
      .add(PluginConstants.DOCUMENT_SCHEMA_PATH)
      .addAll(indexColumns)
      .addAll(nonIndexColumns)
      .build();
  this.pluginCost = scan.getPluginCostModel();
}
/**
 * Returns an unmodifiable iterator over this collection's values, implemented
 * by walking the backing map's entries and projecting out each value.
 */
@Override
public UnmodifiableIterator<V> iterator() {
  final UnmodifiableIterator<Entry<K, V>> entries = map.entrySet().iterator();
  return new UnmodifiableIterator<V>() {
    @Override
    public boolean hasNext() {
      return entries.hasNext();
    }

    @Override
    public V next() {
      return entries.next().getValue();
    }
  };
}
@Override public ParallelizationInfo getReceiverParallelizationInfo(List<DrillbitEndpoint> senderFragmentEndpoints) { Preconditions.checkArgument(senderFragmentEndpoints != null && senderFragmentEndpoints.size() > 0, "Sender fragment endpoint list should not be empty"); // We want to run one mux receiver per Drillbit endpoint. // Identify the number of unique Drillbit endpoints in sender fragment endpoints. List<DrillbitEndpoint> drillbitEndpoints = ImmutableSet.copyOf(senderFragmentEndpoints).asList(); List<EndpointAffinity> affinities = Lists.newArrayList(); for(DrillbitEndpoint ep : drillbitEndpoints) { affinities.add(new EndpointAffinity(ep, Double.POSITIVE_INFINITY)); } return ParallelizationInfo.create(affinities.size(), affinities.size(), affinities); }
// Returns the index-th axis viewed as an immutable list (no copying).
@Override
public List<E> get(int index) {
  return axes.get(index).asList();
}
/**
 * Returns the names of all mechanisms backed by a registered server factory.
 * The {@code props} argument is not consulted.
 */
@Override
public String[] getMechanismNames(final Map<String, ?> props) {
  return serverFactories.keySet().toArray(new String[0]);
}
}
/**
 * Returns the physical-planning optimizer rules for this plugin: the single
 * rule that pushes filters down into a Kafka scan.
 */
@Override
public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) {
  final Set<StoragePluginOptimizerRule> rules =
      ImmutableSet.of(KafkaPushDownFilterIntoScan.INSTANCE);
  return rules;
}
/**
 * Returns an immutable copy of the current contents of this set view.
 * Does not support null elements.
 *
 * <p><b>Warning:</b> this may have unexpected results if a backing set of
 * this view uses a nonstandard notion of equivalence, for example if it is
 * a {@link TreeSet} using a comparator that is inconsistent with {@link
 * Object#equals(Object)}.
 *
 * @return an {@link ImmutableSet} snapshot of this view's elements
 */
public ImmutableSet<E> immutableCopy() {
  return ImmutableSet.copyOf(this);
}
/**
 * Returns an unmodifiable iterator over this map's keys, implemented by
 * walking the entry set and extracting each entry's key.
 */
UnmodifiableIterator<K> keyIterator() {
  return new UnmodifiableIterator<K>() {
    final UnmodifiableIterator<Entry<K, V>> entries = entrySet().iterator();

    @Override
    public boolean hasNext() {
      return entries.hasNext();
    }

    @Override
    public K next() {
      return entries.next().getKey();
    }
  };
}
@Override public ParallelizationInfo getSenderParallelizationInfo(List<DrillbitEndpoint> receiverFragmentEndpoints) { Preconditions.checkArgument(receiverFragmentEndpoints != null && receiverFragmentEndpoints.size() > 0, "Receiver fragment endpoint list should not be empty"); // We want to run one demux sender per Drillbit endpoint. // Identify the number of unique Drillbit endpoints in receiver fragment endpoints. List<DrillbitEndpoint> drillbitEndpoints = ImmutableSet.copyOf(receiverFragmentEndpoints).asList(); List<EndpointAffinity> affinities = Lists.newArrayList(); for(DrillbitEndpoint ep : drillbitEndpoints) { affinities.add(new EndpointAffinity(ep, Double.POSITIVE_INFINITY)); } return ParallelizationInfo.create(affinities.size(), affinities.size(), affinities); }
// Returns this values collection as an immutable list view, backed by the
// map's entry list; each access projects out the entry's value (no copying).
@Override
public ImmutableList<V> asList() {
  final ImmutableList<Entry<K, V>> entryList = map.entrySet().asList();
  return new ImmutableAsList<V>() {
    @Override
    public V get(int index) {
      return entryList.get(index).getValue();
    }

    @Override
    ImmutableCollection<V> delegateCollection() {
      // Serialization and size checks delegate back to the values collection.
      return ImmutableMapValues.this;
    }
  };
}
// Serialization hook: replaces this collection with a SerializedForm holding a
// snapshot of its elements, so deserialization rebuilds a canonical instance.
@Override
Object writeReplace() {
  return new SerializedForm(toArray());
}
/** Returns the logical-planning optimizer rules for this plugin; none are provided. */
@Override
public Set<StoragePluginOptimizerRule> getOptimizerRules() {
  return ImmutableSet.of();
}
@Override public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) { ImmutableSet.Builder<StoragePluginOptimizerRule> ruleBuilder = ImmutableSet.builder(); OptionManager options = optimizerRulesContext.getPlannerSettings().getOptions(); // TODO: Remove implicit using of convert_fromTIMESTAMP_IMPALA function // once "store.parquet.reader.int96_as_timestamp" will be true by default if (options.getBoolean(ExecConstants.HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS) || options.getBoolean(ExecConstants.HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER)) { ruleBuilder.add(ConvertHiveParquetScanToDrillParquetScan.INSTANCE); } if (options.getBoolean(ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER)) { try { Class<?> hiveToDrillMapRDBJsonRuleClass = Class.forName("org.apache.drill.exec.planner.sql.logical.ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan"); ruleBuilder.add((StoragePluginOptimizerRule) hiveToDrillMapRDBJsonRuleClass.getField("INSTANCE").get(null)); } catch (ReflectiveOperationException e) { logger.warn("Current Drill build is not designed for working with Hive MapR-DB tables. " + "Please disable {} option", ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER); } } return ruleBuilder.build(); }
/**
 * Returns {@code true} if the given function name is one of the comparison
 * functions registered in {@code COMPARE_FUNCTIONS_TRANSPOSE_MAP}.
 *
 * @param functionName name of the function to test
 * @return whether the function is a transposable comparison function
 */
public static boolean isCompareFunction(String functionName) {
  // containsKey is the direct membership test; keySet().contains() builds a
  // key-set view just to do the same lookup.
  return COMPARE_FUNCTIONS_TRANSPOSE_MAP.containsKey(functionName);
}