/**
 * Renders each row of the given {@link ResultSet} as a single string of the form
 * "label1=value1; label2=value2; ..." and returns the distinct rows as a set.
 *
 * <p>Note: rows that render to identical strings are collapsed by set semantics.
 *
 * @param resultSet result set to consume; advanced to the end but not closed here
 * @return immutable set containing one formatted string per distinct row
 * @throws SQLException if reading from the result set fails
 */
static Set<String> toStringSet(ResultSet resultSet) throws SQLException {
  ImmutableSet.Builder<String> builder = ImmutableSet.builder();
  final List<Ord<String>> columns = columnLabels(resultSet);
  while (resultSet.next()) {
    // A fresh buffer is created per row, so no reset between iterations is needed
    // (the former buf.setLength(0) after builder.add was dead code and is removed).
    StringBuilder buf = new StringBuilder();
    for (Ord<String> column : columns) {
      buf.append(column.i == 1 ? "" : "; ")
          .append(column.e)
          .append("=")
          .append(resultSet.getObject(column.i));
    }
    builder.add(buf.toString());
  }
  return builder.build();
}
@Override public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) { ImmutableSet.Builder<StoragePluginOptimizerRule> ruleBuilder = ImmutableSet.builder(); OptionManager options = optimizerRulesContext.getPlannerSettings().getOptions(); // TODO: Remove implicit using of convert_fromTIMESTAMP_IMPALA function // once "store.parquet.reader.int96_as_timestamp" will be true by default if (options.getBoolean(ExecConstants.HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS) || options.getBoolean(ExecConstants.HIVE_OPTIMIZE_PARQUET_SCAN_WITH_NATIVE_READER)) { ruleBuilder.add(ConvertHiveParquetScanToDrillParquetScan.INSTANCE); } if (options.getBoolean(ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER)) { try { Class<?> hiveToDrillMapRDBJsonRuleClass = Class.forName("org.apache.drill.exec.planner.sql.logical.ConvertHiveMapRDBJsonScanToDrillMapRDBJsonScan"); ruleBuilder.add((StoragePluginOptimizerRule) hiveToDrillMapRDBJsonRuleClass.getField("INSTANCE").get(null)); } catch (ReflectiveOperationException e) { logger.warn("Current Drill build is not designed for working with Hive MapR-DB tables. " + "Please disable {} option", ExecConstants.HIVE_OPTIMIZE_MAPRDB_JSON_SCAN_WITH_NATIVE_READER); } } return ruleBuilder.build(); }
.addAll(scannedFieldsSet).build();
/**
 * Get an immutable set of partition pruning rules that will be used in logical planning:
 * the item-star rewrite rules, directory and Parquet filter pruning, and the
 * limit push-down rules.
 *
 * @param optimizerRulesContext rules context supplying planner state to the rules
 * @return partition pruning rule set
 */
static RuleSet getPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
  final ImmutableSet<RelOptRule> pruneRules = ImmutableSet.<RelOptRule>builder()
      .addAll(getItemStarRules())
      .add(
          PruneScanRule.getDirFilterOnProject(optimizerRulesContext),
          PruneScanRule.getDirFilterOnScan(optimizerRulesContext),
          ParquetPruneScanRule.getFilterOnProjectParquet(optimizerRulesContext),
          ParquetPruneScanRule.getFilterOnScanParquet(optimizerRulesContext),
          // Include LIMIT_ON_PROJECT since LIMIT_ON_SCAN may not work without it
          DrillPushLimitToScanRule.LIMIT_ON_PROJECT,
          DrillPushLimitToScanRule.LIMIT_ON_SCAN
      )
      .build();
  return RuleSets.ofList(pruneRules);
}
/**
/**
 * Get an immutable list of directory-based partition pruning rules that will be used in
 * Calcite logical planning.
 *
 * @param optimizerRulesContext rules context
 * @return directory-based partition pruning rules
 */
static RuleSet getDirPruneScanRules(OptimizerRulesContext optimizerRulesContext) {
  final ImmutableSet.Builder<RelOptRule> dirPruneRules = ImmutableSet.builder();
  dirPruneRules.addAll(getItemStarRules());
  dirPruneRules.add(PruneScanRule.getDirFilterOnProject(optimizerRulesContext));
  dirPruneRules.add(PruneScanRule.getDirFilterOnScan(optimizerRulesContext));
  return RuleSets.ofList(dirPruneRules.build());
}
/**
 * Returns all top level classes whose package name is {@code packageName} or starts with
 * {@code packageName} followed by a '.'.
 */
public ImmutableSet<ClassInfo> getTopLevelClassesRecursive(String packageName) {
  checkNotNull(packageName);
  // A class in packageName itself, or in any subpackage, has a fully-qualified
  // name that begins with "packageName.".
  final String packagePrefix = packageName + '.';
  ImmutableSet.Builder<ClassInfo> matches = ImmutableSet.builder();
  for (ClassInfo candidate : getTopLevelClasses()) {
    if (candidate.getName().startsWith(packagePrefix)) {
      matches.add(candidate);
    }
  }
  return matches.build();
}
public static final Set<SQLConvertSupport> toSQLConvertSupport(Iterable<ConvertSupport> convertSupportIterable) { ImmutableSet.Builder<SQLConvertSupport> sqlConvertSupportSet = ImmutableSet.builder(); for(ConvertSupport convertSupport: convertSupportIterable) { try { sqlConvertSupportSet.add(new SQLConvertSupport( toSQLType(convertSupport.getFrom()), toSQLType(convertSupport.getTo()))); } catch(IllegalArgumentException e) { // Ignore unknown types... } } return sqlConvertSupportSet.build(); }
/** Merges the given rule sets into a single de-duplicated {@link RuleSet}. */
static RuleSet mergedRuleSets(RuleSet... ruleSets) {
  final Builder<RelOptRule> merged = ImmutableSet.builder();
  for (final RuleSet ruleSet : ruleSets) {
    // RuleSet is iterable over its rules, so each set can be added wholesale;
    // the immutable-set builder drops duplicates across sets.
    merged.addAll(ruleSet);
  }
  return RuleSets.ofList(merged.build());
}
/**
 * RuleSet for join permutation, used only in VolcanoPlanner.
 *
 * @param optimizerRulesContext shared state used during planning
 * @return set of planning rules
 */
static RuleSet getJoinPermRules(OptimizerRulesContext optimizerRulesContext) {
  final ImmutableSet<RelOptRule> joinPermRules = ImmutableSet.<RelOptRule>builder()
      .add(RuleInstance.JOIN_PUSH_THROUGH_JOIN_RULE_RIGHT)
      .add(RuleInstance.JOIN_PUSH_THROUGH_JOIN_RULE_LEFT)
      .build();
  return RuleSets.ofList(joinPermRules);
}
/**
 * Get an immutable list of rules to transpose SetOp(Union) operator with other operators.<p>
 * Note: Used by Hep planner only (failed for Volcano planner - CALCITE-1271)
 *
 * @return SetOp(Union) transpose rules
 */
private static RuleSet getSetOpTransposeRules() {
  final ImmutableSet.Builder<RelOptRule> transposeRules = ImmutableSet.builder();
  transposeRules.add(RuleInstance.FILTER_SET_OP_TRANSPOSE_RULE);
  transposeRules.add(RuleInstance.PROJECT_SET_OP_TRANSPOSE_RULE);
  return RuleSets.ofList(transposeRules.build());
}
/** Returns one {@code ResourceInfo} per entry recorded in {@code resources}. */
ImmutableSet<ResourceInfo> getResources() {
  final ImmutableSet.Builder<ResourceInfo> result = ImmutableSet.builder();
  for (final Map.Entry<ClassLoader, String> entry : resources.entries()) {
    // Entries are keyed by loader with the resource name as value;
    // ResourceInfo.of takes (name, loader) in the opposite order.
    result.add(ResourceInfo.of(entry.getValue(), entry.getKey()));
  }
  return result.build();
}
/**
 * Collects the physical optimizer rules contributed by every configured format plugin.
 *
 * @param optimizerRulesContext planner context (required by the interface; not used here)
 * @return union of all format plugins' optimizer rules, possibly empty
 */
@Override
public Set<StoragePluginOptimizerRule> getPhysicalOptimizerRules(OptimizerRulesContext optimizerRulesContext) {
  Builder<StoragePluginOptimizerRule> setBuilder = ImmutableSet.builder();
  for (FormatPlugin plugin : formatCreator.getConfiguredFormatPlugins()) {
    Set<StoragePluginOptimizerRule> rules = plugin.getOptimizerRules();
    // addAll on an empty set is a no-op, so only the null guard is needed
    // (the former rules.size() > 0 check was redundant).
    if (rules != null) {
      setBuilder.addAll(rules);
    }
  }
  return setBuilder.build();
}
// Materializes this lazy two-set view into a single ImmutableSet; elements present in
// both set1 and set2 are collapsed by the builder's set semantics.
// NOTE(review): set1/set2 are fields of the enclosing anonymous class (not visible in
// this chunk) — confirm their ordering contract against the enclosing definition.
@Override public ImmutableSet<E> immutableCopy() { return new ImmutableSet.Builder<E>().addAll(set1).addAll(set2).build(); } };
/** Returns all top level classes whose package name is {@code packageName}. */
public ImmutableSet<ClassInfo> getTopLevelClasses(String packageName) {
  checkNotNull(packageName);
  ImmutableSet.Builder<ClassInfo> matches = ImmutableSet.builder();
  for (ClassInfo candidate : getTopLevelClasses()) {
    // Exact package match only — subpackages are intentionally excluded here.
    if (candidate.getPackageName().equals(packageName)) {
      matches.add(candidate);
    }
  }
  return matches.build();
}
/**
 * Gathers the optimizer rules for the given planning phase from every storage plugin.
 * AbstractStoragePlugin subclasses get the phase-aware callback; any other plugin falls
 * back to the single-argument callback.
 *
 * @param context rules context passed through to the plugins
 * @param plugins storage plugins to query
 * @param phase current planner phase
 * @return combined, de-duplicated rule set
 */
private static RuleSet getStorageRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins,
    PlannerPhase phase) {
  final Builder<RelOptRule> storageRules = ImmutableSet.builder();
  for (final StoragePlugin plugin : plugins) {
    if (plugin instanceof AbstractStoragePlugin) {
      storageRules.addAll(((AbstractStoragePlugin) plugin).getOptimizerRules(context, phase));
    } else {
      storageRules.addAll(plugin.getOptimizerRules(context));
    }
  }
  return RuleSets.ofList(storageRules.build());
}
/**
 * @return collection of rules to re-write item star operator for filter push down and partition pruning
 */
private static ImmutableSet<RelOptRule> getItemStarRules() {
  final ImmutableSet.Builder<RelOptRule> itemStarRules = ImmutableSet.builder();
  itemStarRules.add(DrillFilterItemStarReWriterRule.PROJECT_ON_SCAN);
  itemStarRules.add(DrillFilterItemStarReWriterRule.FILTER_ON_SCAN);
  itemStarRules.add(DrillFilterItemStarReWriterRule.FILTER_PROJECT_SCAN);
  return itemStarRules.build();
}
public DrillJdbcConvention(SqlDialect dialect, String name) { super(dialect, ConstantUntypedNull.INSTANCE, name); // build rules for this convention. ImmutableSet.Builder<RelOptRule> builder = ImmutableSet.builder(); builder.add(JDBC_PRULE_INSTANCE); builder.add(new JdbcDrelConverterRule(this)); builder.add(new DrillJdbcProjectRule(this)); builder.add(new DrillJdbcFilterRule(this)); outside: for (RelOptRule rule : JdbcRules.rules(this)) { final String description = rule.toString(); // we want to black list some rules but the parent Calcite package is all or none. // Therefore, we remove rules with names we don't like. for(String black : RULES_TO_AVOID){ if(description.equals(black)){ continue outside; } } builder.add(rule); } builder.add(RuleInstance.FILTER_SET_OP_TRANSPOSE_RULE); builder.add(RuleInstance.PROJECT_REMOVE_RULE); rules = builder.build(); }
/**
 * Builds a descriptor for a MapR-DB index, capturing the indexed fields, the full field
 * set (document schema path + indexed + covered columns), and the plugin cost model.
 */
public MapRDBIndexDescriptor(List<LogicalExpression> indexCols,
    CollationContext indexCollationContext,
    List<LogicalExpression> nonIndexCols,
    List<LogicalExpression> rowKeyColumns,
    String indexName,
    String tableName,
    IndexType type,
    Object desc,
    DbGroupScan scan,
    NullDirection nullsDirection) {
  super(indexCols, indexCollationContext, nonIndexCols, rowKeyColumns, indexName, tableName, type, nullsDirection);
  this.desc = desc;
  // NOTE(review): indexColumns / nonIndexColumns appear to be fields populated by the
  // super constructor from indexCols / nonIndexCols — confirm in the base class.
  this.indexedFields = ImmutableSet.copyOf(indexColumns);
  this.allFields = ImmutableSet.<LogicalExpression>builder()
      .add(PluginConstants.DOCUMENT_SCHEMA_PATH)
      .addAll(indexColumns)
      .addAll(nonIndexColumns)
      .build();
  this.pluginCost = scan.getPluginCostModel();
}
public static final Set<SQLConvertSupport> toSQLConvertSupport(Iterable<ConvertSupport> convertSupportIterable) { ImmutableSet.Builder<SQLConvertSupport> sqlConvertSupportSet = ImmutableSet.builder(); for(ConvertSupport convertSupport: convertSupportIterable) { try { sqlConvertSupportSet.add(new SQLConvertSupport( toSQLType(convertSupport.getFrom()), toSQLType(convertSupport.getTo()))); } catch(IllegalArgumentException e) { // Ignore unknown types... } } return sqlConvertSupportSet.build(); }
/**
 * Returns the logical optimizer rules for the Hive plugin: the two partition-filter
 * push-down rules, each parameterized with Hive's configured default partition name.
 */
@Override
public Set<StoragePluginOptimizerRule> getLogicalOptimizerRules(OptimizerRulesContext optimizerContext) {
  // Both rules need the value Hive uses for the default (unnamed) partition.
  final String defaultPartitionValue = hiveConf.get(ConfVars.DEFAULTPARTITIONNAME.varname);
  return ImmutableSet.<StoragePluginOptimizerRule>of(
      HivePushPartitionFilterIntoScan.getFilterOnProject(optimizerContext, defaultPartitionValue),
      HivePushPartitionFilterIntoScan.getFilterOnScan(optimizerContext, defaultPartitionValue));
}