/**
 * Builds the configuration used when reading sample data: strips every existing
 * {@code table.sampler}-prefixed property from the given site configuration, then overlays the
 * table properties of the sampler currently held in {@code samplerRef}.
 *
 * @param siteConf base site configuration to copy from
 * @return a new configuration reflecting the active sampler
 */
private AccumuloConfiguration createSampleConfig(AccumuloConfiguration siteConf) {
  // Drop all TABLE_SAMPLER-prefixed entries so stale sampler settings cannot leak through.
  ConfigurationCopy sampleConf = new ConfigurationCopy(Iterables.filter(siteConf,
      input -> !input.getKey().startsWith(Property.TABLE_SAMPLER.getKey())));
  // Re-apply the properties describing the sampler that is currently in effect.
  Map<String,String> samplerProps = samplerRef.get().getFirst().toTablePropertiesMap();
  samplerProps.forEach(sampleConf::set);
  return sampleConf;
}
@Override
public boolean shouldCompact(Entry<FileRef,DataFileValue> file, MajorCompactionRequest request) {
  // Before gatherInformation() has run, answer based solely on whether a sampler is
  // configured for the table at all.
  if (!gatherCalled) {
    SamplerConfigurationImpl samplerConf = SamplerConfigurationImpl
        .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));
    return samplerConf != null;
  }
  // After gathering: compact only when sampling is configured and this file lacks sample data.
  return samplingConfigured && !filesWithSample.contains(file.getKey());
}
}
@Override
public Map<String,Set<Text>> getLocalityGroups(String tableName)
    throws AccumuloException, TableNotFoundException {
  // Parse the locality groups out of the table's current properties.
  AccumuloConfiguration tableConf = new ConfigurationCopy(this.getProperties(tableName));
  Map<String,Set<ByteSequence>> rawGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
  // Convert each group's ByteSequence column families into Text for the public API.
  Map<String,Set<Text>> result = new HashMap<>();
  rawGroups.forEach((groupName, families) -> {
    Set<Text> textFamilies = new HashSet<>();
    for (ByteSequence family : families) {
      textFamilies.add(new Text(family.toArray()));
    }
    result.put(groupName, textFamilies);
  });
  return result;
}
/**
 * Validates the locality group configuration contained in {@code config}.
 *
 * @param config table property entries to validate
 * @throws LocalityGroupConfigurationError if the configured locality groups are invalid
 */
public static void checkLocalityGroups(Iterable<Entry<String,String>> config)
    throws LocalityGroupConfigurationError {
  ConfigurationCopy copy = new ConfigurationCopy(config);
  // Parsing the groups performs the validation; only attempt it when the property is set.
  if (copy.get(Property.TABLE_LOCALITY_GROUPS) != null) {
    getLocalityGroups(copy);
  }
}
@Override
public SamplerConfiguration getSamplerConfiguration(String tableName)
    throws TableNotFoundException, AccumuloException {
  AccumuloConfiguration tableConf = new ConfigurationCopy(this.getProperties(tableName));
  SamplerConfigurationImpl impl = SamplerConfigurationImpl.newSamplerConfig(tableConf);
  // A null impl means no sampler is configured for this table.
  return impl == null ? null : impl.toSamplerConfiguration();
}
@Override
void gatherInformation(MajorCompactionRequest request) {
  gatherCalled = true;
  SamplerConfigurationImpl samplerConf = SamplerConfigurationImpl
      .newSamplerConfig(new ConfigurationCopy(request.getTableProperties()));
  if (samplerConf == null) {
    // No sampler configured for the table; nothing to inspect.
    samplingConfigured = false;
    return;
  }
  // Record which of the compaction's input files already contain sample data.
  filesWithSample = new HashSet<>();
  for (FileRef file : request.getFiles().keySet()) {
    try (FileSKVIterator reader = request.openReader(file)) {
      if (reader.getSample(samplerConf) != null) {
        filesWithSample.add(file);
      }
    } catch (IOException e) {
      // Surface I/O failures as unchecked since the overridden signature declares none.
      throw new UncheckedIOException(e);
    }
  }
}
/**
 * This helper method provides an AccumuloConfiguration object constructed from the Accumulo
 * defaults, and overridden with Accumulo properties that have been stored in the Job's
 * configuration.
 *
 * @param implementingClass
 *          the class whose name will be used as a prefix for the property configuration key
 * @param conf
 *          the Hadoop configuration object to configure
 * @since 1.6.0
 */
public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass,
    Configuration conf) {
  String prefix = enumToConfKey(implementingClass, Opts.ACCUMULO_PROPERTIES) + ".";
  ConfigurationCopy acuConf = new ConfigurationCopy(DefaultConfiguration.getInstance());
  for (Entry<String,String> entry : conf) {
    // Only entries stored under this class's property prefix are Accumulo overrides.
    if (!entry.getKey().startsWith(prefix)) {
      continue;
    }
    String propKey = entry.getKey().substring(prefix.length());
    Property knownProp = Property.getPropertyByKey(propKey);
    if (knownProp != null) {
      acuConf.set(knownProp, entry.getValue());
    } else if (Property.isValidTablePropertyKey(propKey)) {
      // Per-table properties are not enumerated in Property but are still legal.
      acuConf.set(propKey, entry.getValue());
    } else {
      throw new IllegalArgumentException("Unknown accumulo file property " + propKey);
    }
  }
  return acuConf;
}
/**
 * Resolves {@code dir} to a qualified path on its volume's filesystem and validates it:
 * it must exist, be a directory, and — for the failure directory — be empty.
 *
 * @param dir directory string supplied by the caller
 * @param kind label used in error messages (e.g. the kind of import)
 * @param type directory role; "failure" triggers the emptiness check
 * @return the qualified path
 */
private Path checkPath(String dir, String kind, String type)
    throws IOException, AccumuloException, AccumuloSecurityException {
  Map<String,String> sysProps = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration sysConf = new ConfigurationCopy(sysProps);
  FileSystem fs =
      VolumeConfiguration.getVolume(dir, context.getHadoopConf(), sysConf).getFileSystem();
  // A scheme separator means the path is already qualified; otherwise qualify it.
  Path path = dir.contains(":") ? new Path(dir) : fs.makeQualified(new Path(dir));
  try {
    if (!fs.getFileStatus(path).isDirectory()) {
      throw new AccumuloException(
          kind + " import " + type + " directory " + dir + " is not a directory!");
    }
  } catch (FileNotFoundException fnf) {
    throw new AccumuloException(
        kind + " import " + type + " directory " + dir + " does not exist!");
  }
  if (type.equals("failure")) {
    // The failure directory must start out empty so results are unambiguous.
    FileStatus[] contents = fs.listStatus(path);
    if (contents != null && contents.length != 0) {
      throw new AccumuloException("Bulk import failure directory " + path + " is not empty");
    }
  }
  return path;
}
/**
 * Creates an iterator that scans an offline table directly from its files. Positions the
 * iterator at the first tablet that has data; wraps any checked failure as a RuntimeException.
 */
public OfflineIterator(ScannerOptions options, ClientContext context,
    Authorizations authorizations, Text table, Range range) {
  this.options = new ScannerOptions(options);
  this.context = context;
  this.range = range;
  // When specific columns were fetched, tighten the range to just those columns.
  if (!this.options.fetchedColumns.isEmpty()) {
    this.range = range.bound(this.options.fetchedColumns.first(),
        this.options.fetchedColumns.last());
  }
  this.tableId = Table.ID.of(table.toString());
  this.authorizations = authorizations;
  this.readers = new ArrayList<>();
  try {
    config = new ConfigurationCopy(context.instanceOperations().getSiteConfiguration());
    // Advance past empty tablets until one with data is found (or tablets run out).
    nextTablet();
    while (iter != null && !iter.hasTop()) {
      nextTablet();
    }
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
@Override
public void load()
    throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
  Table.ID tableId = Tables.getTableId(context, tableName);
  AccumuloConfiguration sysConf =
      new ConfigurationCopy(context.instanceOperations().getSystemConfiguration());
  FileSystem fs =
      VolumeConfiguration.getVolume(dir, context.getHadoopConf(), sysConf).getFileSystem();
  Path srcPath = checkPath(fs, dir);
  // Use the supplied load plan when present, otherwise derive the mapping from the files.
  SortedMap<KeyExtent,Bulk.Files> mappings = plan == null
      ? computeMappingFromFiles(fs, tableId, srcPath) : computeMappingFromPlan(fs, tableId, srcPath);
  BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);
  // Kick off the bulk-import FATE operation: table id, source dir, and the setTime flag.
  List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.getUtf8()),
      ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
      ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
  new TableOperationsImpl(context).doBulkFateOperation(args, tableName);
}
Collections.sort(vals); ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance()); acuconf.set(Property.TABLE_BLOOM_ENABLED, "true"); acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR,
ConfigurationCopy tableCC = new ConfigurationCopy(DefaultConfiguration.getInstance()); opts.tableConfig.forEach(tableCC::set); this.tableConf = tableCC; : new ConfigurationCopy(tableConf); try { blockCacheManager = BlockCacheManagerFactory.getClientInstance(cc);
@Override
public Collection<Summary> read() throws IOException {
  SummarizerFactory factory = new SummarizerFactory();
  // Overlay the caller-supplied table properties on the default configuration.
  ConfigurationCopy tableConf = new ConfigurationCopy(DefaultConfiguration.getInstance());
  config.forEach(tableConf::set);
  RFileSource[] sources = in.getSources();
  try {
    SummaryCollection merged = new SummaryCollection();
    CryptoService cryptoService = CryptoServiceFactory.newInstance(tableConf, ClassloaderType.JAVA);
    for (RFileSource source : sources) {
      SummaryReader fileSummary = SummaryReader.load(in.getFileSystem().getConf(),
          source.getInputStream(), source.getLength(), summarySelector, factory, cryptoService);
      SummaryCollection sc = fileSummary
          .getSummaries(Collections.singletonList(new Gatherer.RowRange(startRow, endRow)));
      merged.merge(sc, factory);
    }
    return merged.getSummaries();
  } finally {
    // Always release the underlying streams, even when summary loading fails.
    for (RFileSource source : sources) {
      source.getInputStream().close();
    }
  }
}
@Override
public RFileWriter build() throws IOException {
  FileOperations fileops = FileOperations.getInstance();
  AccumuloConfiguration writerConf = DefaultConfiguration.getInstance();
  // Merge table, summarizer, and sampler properties over the defaults.
  HashMap<String,String> overrides = new HashMap<>();
  overrides.putAll(tableConfig);
  overrides.putAll(summarizerProps);
  overrides.putAll(samplerProps);
  if (!overrides.isEmpty()) {
    writerConf = new ConfigurationCopy(Iterables.concat(writerConf, overrides.entrySet()));
  }
  CryptoService cs = CryptoServiceFactory.newInstance(writerConf, ClassloaderType.JAVA);
  if (out.getOutputStream() == null) {
    // No caller-supplied stream: write directly to the path on its filesystem.
    return new RFileWriter(fileops.newWriterBuilder()
        .forFile(out.path.toString(), out.getFileSystem(), out.getConf(), cs)
        .withTableConfiguration(writerConf).withStartDisabled().build(), visCacheSize);
  }
  // Caller supplied a stream; wrap it if it is not already an FSDataOutputStream.
  FSDataOutputStream fsdo;
  if (out.getOutputStream() instanceof FSDataOutputStream) {
    fsdo = (FSDataOutputStream) out.getOutputStream();
  } else {
    fsdo = new FSDataOutputStream(out.getOutputStream(), new FileSystem.Statistics("foo"));
  }
  return new RFileWriter(fileops.newWriterBuilder()
      .forOutputStream(".rf", fsdo, out.getConf(), cs).withTableConfiguration(writerConf)
      .withStartDisabled().build(), visCacheSize);
}
/**
 * Entry point for the merge tool: parses arguments, resolves the goal size (falling back to
 * the table's split threshold), and runs the merge.
 *
 * @param args command-line arguments
 * @throws MergeException if anything fails during the merge
 */
public void start(String[] args) throws MergeException {
  Opts opts = new Opts();
  opts.parseArgs(Merge.class.getName(), args);
  try (AccumuloClient client = opts.createClient()) {
    if (!client.tableOperations().exists(opts.getTableName())) {
      System.err.println("table " + opts.getTableName() + " does not exist");
      return;
    }
    // Default the goal size to the table's split threshold when unset or non-positive.
    if (opts.goalSize == null || opts.goalSize < 1) {
      AccumuloConfiguration tableConf =
          new ConfigurationCopy(client.tableOperations().getProperties(opts.getTableName()));
      opts.goalSize = tableConf.getAsBytes(Property.TABLE_SPLIT_THRESHOLD);
    }
    message("Merging tablets in table %s to %d bytes", opts.getTableName(), opts.goalSize);
    mergomatic(client, opts.getTableName(), opts.begin, opts.end, opts.goalSize, opts.force);
  } catch (Exception ex) {
    throw new MergeException(ex);
  }
}
/**
 * Returns the table configuration, overlaid with any write parameters supplied by the
 * compaction plan (block sizes, compression type, replication). Returns {@code base}
 * unchanged when the plan has no write parameters.
 */
protected AccumuloConfiguration createTableConfiguration(TableConfiguration base,
    CompactionPlan plan) {
  if (plan == null || plan.writeParameters == null) {
    return base;
  }
  WriteParameters params = plan.writeParameters;
  ConfigurationCopy overridden = new ConfigurationCopy(base);
  // Only positive / non-default values override the base configuration.
  if (params.getHdfsBlockSize() > 0) {
    overridden.set(Property.TABLE_FILE_BLOCK_SIZE, String.valueOf(params.getHdfsBlockSize()));
  }
  if (params.getBlockSize() > 0) {
    overridden.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE,
        String.valueOf(params.getBlockSize()));
  }
  if (params.getIndexBlockSize() > 0) {
    overridden.set(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX,
        String.valueOf(params.getIndexBlockSize()));
  }
  if (params.getCompressType() != null) {
    overridden.set(Property.TABLE_FILE_COMPRESSION_TYPE, params.getCompressType());
  }
  if (params.getReplication() != 0) {
    overridden.set(Property.TABLE_FILE_REPLICATION, String.valueOf(params.getReplication()));
  }
  return overridden;
}
@Override
public AccumuloConfiguration getSiteConfiguration() {
  // Load accumulo-site.xml from the server's conf dir and layer it over the defaults.
  Configuration siteXml = new Configuration(false);
  siteXml.addResource(new Path(serverAccumuloConfDir, "accumulo-site.xml"));
  return new ConfigurationCopy(
      Iterables.concat(AccumuloConfiguration.getDefaultConfiguration(), siteXml));
}
}
// Snapshot of the table's current properties as an AccumuloConfiguration.
// NOTE(review): assumes getProperties(tableName) yields Iterable<Entry<String,String>>
// accepted by the ConfigurationCopy constructor — confirm against TableOperations.
AccumuloConfiguration acuTableConf = new ConfigurationCopy(
    context.tableOperations().getProperties(tableName));
@Override
public AccumuloConfiguration getSiteConfiguration() {
  // Default configuration overlaid with this instance's site configuration entries.
  return new ConfigurationCopy(Iterables.concat(AccumuloConfiguration.getDefaultConfiguration(),
      config.getSiteConfig().entrySet()));
}
}
/**
 * Builds a Connector to the Accumulo instance named in the options, overriding the instance's
 * DFS URI with the configured Hadoop default filesystem before connecting.
 *
 * @param options connection settings (instance name, zookeepers, credentials, fs URI)
 * @return an authenticated Connector
 */
protected Connector createAccumuloConnection(BackupRestoreOptionsBase options)
    throws AccumuloSecurityException, AccumuloException {
  Instance instance =
      new ZooKeeperInstance(options.getAccumuloInstanceName(), options.getZookeeperServers());
  // Point the instance at the desired HDFS before creating the connector.
  ConfigurationCopy instanceConf = new ConfigurationCopy(instance.getConfiguration());
  instanceConf.set(Property.INSTANCE_DFS_URI, options.getHadoopFsDefaultFS());
  instance.setConfiguration(instanceConf);
  AuthenticationToken token = new PasswordToken(options.getAccumuloPassword());
  return instance.getConnector(options.getAccumuloUserName(), token);
}