private AccumuloConfiguration createSampleConfig(AccumuloConfiguration siteConf) {
  ConfigurationCopy confCopy = new ConfigurationCopy(Iterables.filter(siteConf,
      input -> !input.getKey().startsWith(Property.TABLE_SAMPLER.getKey())));

  for (Entry<String,String> entry : samplerRef.get().getFirst().toTablePropertiesMap()
      .entrySet()) {
    confCopy.set(entry.getKey(), entry.getValue());
  }

  siteConf = confCopy;
  return siteConf;
}
public static void checkLocalityGroups(Iterable<Entry<String,String>> config)
    throws LocalityGroupConfigurationError {
  ConfigurationCopy cc = new ConfigurationCopy(config);
  if (cc.get(Property.TABLE_LOCALITY_GROUPS) != null) {
    getLocalityGroups(cc);
  }
}
@Override
public boolean shouldCompact(Entry<FileRef,DataFileValue> file, MajorCompactionRequest request) {
  if (!gatherCalled) {
    SamplerConfigurationImpl sc = SamplerConfigurationImpl
        .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));
    return sc != null;
  }

  if (!samplingConfigured) {
    return false;
  }

  return !filesWithSample.contains(file.getKey());
}
Configuration hadoopConf = config.getHadoopConfiguration();
ConfigurationCopy cc = new ConfigurationCopy(acuConf);
VolumeManager fs;
try {
  cc.get(Property.INSTANCE_ZK_HOST),
      (int) cc.getTimeInMillis(Property.INSTANCE_ZK_TIMEOUT),
      cc.get(Property.INSTANCE_SECRET));
private ConfigurationCopy updateConfigurationForLocalityGroups(ConfigurationCopy configuration) {
  Map<String,Set<ByteSequence>> locGroups = getLocalityGroups();
  StringBuilder enabledLGs = new StringBuilder();

  for (Entry<String,Set<ByteSequence>> entry : locGroups.entrySet()) {
    if (enabledLGs.length() > 0) {
      enabledLGs.append(",");
    }
    StringBuilder value = new StringBuilder();
    for (ByteSequence bytes : entry.getValue()) {
      if (value.length() > 0) {
        value.append(",");
      }
      value.append(new String(bytes.toArray()));
    }
    configuration.set("table.group." + entry.getKey(), value.toString());
    enabledLGs.append(entry.getKey());
  }
  configuration.set(Property.TABLE_LOCALITY_GROUPS, enabledLGs.toString());
  return configuration;
}
protected int getSeqFileBlockSize() {
  if (!tableConfigs.isEmpty()) {
    return (int) tableConfigs.values().iterator().next()
        .getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE);
  } else {
    return 0;
  }
}
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName)
    throws AccumuloException, TableNotFoundException {
  AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
  Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(conf);

  Map<String,Set<Text>> groups2 = new HashMap<>();
  for (Entry<String,Set<ByteSequence>> entry : groups.entrySet()) {
    HashSet<Text> colFams = new HashSet<>();
    for (ByteSequence bs : entry.getValue()) {
      colFams.add(new Text(bs.toArray()));
    }
    groups2.put(entry.getKey(), colFams);
  }
  return groups2;
}
/**
 * This helper method provides an AccumuloConfiguration object constructed from the Accumulo
 * defaults, and overridden with Accumulo properties that have been stored in the Job's
 * configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @since 1.6.0
 */
public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass,
    Configuration conf) {
  String prefix = enumToConfKey(implementingClass, Opts.ACCUMULO_PROPERTIES) + ".";
  ConfigurationCopy acuConf = new ConfigurationCopy(DefaultConfiguration.getInstance());
  for (Entry<String,String> entry : conf) {
    if (entry.getKey().startsWith(prefix)) {
      String propString = entry.getKey().substring(prefix.length());
      Property prop = Property.getPropertyByKey(propString);
      if (prop != null) {
        acuConf.set(prop, entry.getValue());
      } else if (Property.isValidTablePropertyKey(propString)) {
        acuConf.set(propString, entry.getValue());
      } else {
        throw new IllegalArgumentException("Unknown accumulo file property " + propString);
      }
    }
  }
  return acuConf;
}
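// Usage sketch (not taken from the snippets above, just an illustration of the
// defaults-plus-overrides pattern they all share): build a ConfigurationCopy on top of
// DefaultConfiguration and layer overrides, either by typed Property or by raw key string.
// The property values chosen here are hypothetical examples.
ConfigurationCopy example = new ConfigurationCopy(DefaultConfiguration.getInstance());
example.set(Property.TABLE_FILE_COMPRESSION_TYPE, "gz");   // override a typed Property
example.set("table.group.lg1", "fam1,fam2");               // override by raw key string
String compression = example.get(Property.TABLE_FILE_COMPRESSION_TYPE); // reads back "gz"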
@Override
public SamplerConfiguration getSamplerConfiguration(String tableName)
    throws TableNotFoundException, AccumuloException {
  AccumuloConfiguration conf = new ConfigurationCopy(this.getProperties(tableName));
  SamplerConfigurationImpl sci = SamplerConfigurationImpl.newSamplerConfig(conf);
  if (sci == null) {
    return null;
  }
  return sci.toSamplerConfiguration();
}
protected AccumuloConfiguration createTableConfiguration(TableConfiguration base,
    CompactionPlan plan) {
  if (plan == null || plan.writeParameters == null)
    return base;
  WriteParameters p = plan.writeParameters;

  ConfigurationCopy result = new ConfigurationCopy(base);
  if (p.getHdfsBlockSize() > 0)
    result.set(Property.TABLE_FILE_BLOCK_SIZE, "" + p.getHdfsBlockSize());
  if (p.getBlockSize() > 0)
    result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE, "" + p.getBlockSize());
  if (p.getIndexBlockSize() > 0)
    result.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX, "" + p.getIndexBlockSize());
  if (p.getCompressType() != null)
    result.set(Property.TABLE_FILE_COMPRESSION_TYPE, p.getCompressType());
  if (p.getReplication() != 0)
    result.set(Property.TABLE_FILE_REPLICATION, "" + p.getReplication());
  return result;
}
@Override
void gatherInformation(MajorCompactionRequest request) {
  gatherCalled = true;

  SamplerConfigurationImpl sc = SamplerConfigurationImpl
      .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));

  if (sc == null) {
    samplingConfigured = false;
  } else {
    filesWithSample = new HashSet<>();
    for (FileRef fref : request.getFiles().keySet()) {
      try (FileSKVIterator reader = request.openReader(fref)) {
        if (reader.getSample(sc) != null) {
          filesWithSample.add(fref);
        }
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }
  }
}
Collections.sort(vals);

ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance());
acuconf.set(Property.TABLE_BLOOM_ENABLED, "true");
acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR,
    "org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor");
acuconf.set(Property.TABLE_FILE_TYPE, RFile.EXTENSION);
acuconf.set(Property.TABLE_BLOOM_LOAD_THRESHOLD, "1");
acuconf.set(Property.TSERV_BLOOM_LOAD_MAXCONCURRENT, "1");
private Path checkPath(String dir, String kind, String type)
    throws IOException, AccumuloException, AccumuloSecurityException {
  Path ret;
  Map<String,String> props = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration conf = new ConfigurationCopy(props);

  FileSystem fs = VolumeConfiguration.getVolume(dir, context.getHadoopConf(), conf)
      .getFileSystem();

  if (dir.contains(":")) {
    ret = new Path(dir);
  } else {
    ret = fs.makeQualified(new Path(dir));
  }

  try {
    if (!fs.getFileStatus(ret).isDirectory()) {
      throw new AccumuloException(
          kind + " import " + type + " directory " + dir + " is not a directory!");
    }
  } catch (FileNotFoundException fnf) {
    throw new AccumuloException(
        kind + " import " + type + " directory " + dir + " does not exist!");
  }

  if (type.equals("failure")) {
    FileStatus[] listStatus = fs.listStatus(ret);
    if (listStatus != null && listStatus.length != 0) {
      throw new AccumuloException("Bulk import failure directory " + ret + " is not empty");
    }
  }
  return ret;
}
ConfigurationCopy tableCC = new ConfigurationCopy(DefaultConfiguration.getInstance());
opts.tableConfig.forEach(tableCC::set);
this.tableConf = tableCC;

    : new ConfigurationCopy(tableConf);
try {
  blockCacheManager = BlockCacheManagerFactory.getClientInstance(cc);
  if (opts.indexCacheSize > 0) {
    cc.set(Property.TSERV_INDEXCACHE_SIZE, Long.toString(opts.indexCacheSize));
  cc.set(Property.TSERV_DATACACHE_SIZE, Long.toString(opts.dataCacheSize));
public OfflineIterator(ScannerOptions options, ClientContext context,
    Authorizations authorizations, Text table, Range range) {
  this.options = new ScannerOptions(options);
  this.context = context;
  this.range = range;

  if (this.options.fetchedColumns.size() > 0) {
    this.range = range.bound(this.options.fetchedColumns.first(),
        this.options.fetchedColumns.last());
  }

  this.tableId = Table.ID.of(table.toString());
  this.authorizations = authorizations;
  this.readers = new ArrayList<>();

  try {
    config = new ConfigurationCopy(context.instanceOperations().getSiteConfiguration());
    nextTablet();

    while (iter != null && !iter.hasTop())
      nextTablet();
  } catch (Exception e) {
    if (e instanceof RuntimeException)
      throw (RuntimeException) e;
    throw new RuntimeException(e);
  }
}
@Override
public Collection<Summary> read() throws IOException {
  SummarizerFactory factory = new SummarizerFactory();
  ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance());
  config.forEach((k, v) -> acuconf.set(k, v));

  RFileSource[] sources = in.getSources();
  try {
    SummaryCollection all = new SummaryCollection();
    CryptoService cservice = CryptoServiceFactory.newInstance(acuconf, ClassloaderType.JAVA);
    for (RFileSource source : sources) {
      SummaryReader fileSummary = SummaryReader.load(in.getFileSystem().getConf(),
          source.getInputStream(), source.getLength(), summarySelector, factory, cservice);
      SummaryCollection sc = fileSummary
          .getSummaries(Collections.singletonList(new Gatherer.RowRange(startRow, endRow)));
      all.merge(sc, factory);
    }
    return all.getSummaries();
  } finally {
    for (RFileSource source : sources) {
      source.getInputStream().close();
    }
  }
}
@Override
public void load()
    throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
  Table.ID tableId = Tables.getTableId(context, tableName);
  Map<String,String> props = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration conf = new ConfigurationCopy(props);

  FileSystem fs = VolumeConfiguration.getVolume(dir, context.getHadoopConf(), conf)
      .getFileSystem();
  Path srcPath = checkPath(fs, dir);

  SortedMap<KeyExtent,Bulk.Files> mappings;
  if (plan == null) {
    mappings = computeMappingFromFiles(fs, tableId, srcPath);
  } else {
    mappings = computeMappingFromPlan(fs, tableId, srcPath);
  }

  BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);

  List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.getUtf8()),
      ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
      ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
  new TableOperationsImpl(context).doBulkFateOperation(args, tableName);
}
private AccumuloConfiguration createSampleConfig(AccumuloConfiguration siteConf) {
  ConfigurationCopy confCopy = new ConfigurationCopy(
      Iterables.filter(siteConf, new Predicate<Entry<String,String>>() {
        @Override
        public boolean apply(Entry<String,String> input) {
          return !input.getKey().startsWith(Property.TABLE_SAMPLER.getKey());
        }
      }));

  for (Entry<String,String> entry : samplerRef.get().getFirst().toTablePropertiesMap()
      .entrySet()) {
    confCopy.set(entry.getKey(), entry.getValue());
  }

  siteConf = confCopy;
  return siteConf;
}
@Override
public RFileWriter build() throws IOException {
  FileOperations fileops = FileOperations.getInstance();
  AccumuloConfiguration acuconf = DefaultConfiguration.getInstance();
  HashMap<String,String> userProps = new HashMap<>();

  userProps.putAll(tableConfig);
  userProps.putAll(summarizerProps);
  userProps.putAll(samplerProps);

  if (userProps.size() > 0) {
    acuconf = new ConfigurationCopy(Iterables.concat(acuconf, userProps.entrySet()));
  }

  CryptoService cs = CryptoServiceFactory.newInstance(acuconf, ClassloaderType.JAVA);

  if (out.getOutputStream() != null) {
    FSDataOutputStream fsdo;
    if (out.getOutputStream() instanceof FSDataOutputStream) {
      fsdo = (FSDataOutputStream) out.getOutputStream();
    } else {
      fsdo = new FSDataOutputStream(out.getOutputStream(), new FileSystem.Statistics("foo"));
    }
    return new RFileWriter(
        fileops.newWriterBuilder().forOutputStream(".rf", fsdo, out.getConf(), cs)
            .withTableConfiguration(acuconf).withStartDisabled().build(),
        visCacheSize);
  } else {
    return new RFileWriter(fileops.newWriterBuilder()
        .forFile(out.path.toString(), out.getFileSystem(), out.getConf(), cs)
        .withTableConfiguration(acuconf).withStartDisabled().build(), visCacheSize);
  }
}