/**
 * Returns the shared {@link DefaultConfiguration}, creating it lazily on first call.
 * Synchronized so concurrent callers always observe the same single instance.
 */
public synchronized DefaultConfiguration getDefaultConfiguration() {
  if (defaultConfig != null) {
    return defaultConfig;
  }
  defaultConfig = DefaultConfiguration.getInstance();
  return defaultConfig;
}
// NOTE(review): fragment — this line continues a method signature begun above this view;
// `defaults` snapshots the compiled-in configuration, presumably for comparison against the
// supplied client configuration — confirm at the caller.
final org.apache.accumulo.core.client.ClientConfiguration config) { final AccumuloConfiguration defaults = DefaultConfiguration.getInstance();
/**
 * Creates a {@link CryptoService} backed by the compiled-in default Accumulo configuration,
 * loaded through the plain JVM classloader.
 */
public static CryptoService newDefaultInstance() {
  DefaultConfiguration defaults = DefaultConfiguration.getInstance();
  return newInstance(defaults, ClassloaderType.JAVA);
}
}
/**
 * Creates a Writer whose index block size is taken from the default value of
 * {@code Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX}, with no sampler configuration.
 *
 * @param bfw underlying BCFile writer that receives the blocks
 * @param blockSize compressed data block size in bytes
 * @throws IOException if the delegate constructor fails to initialize the writer
 */
public Writer(BCFile.Writer bfw, int blockSize) throws IOException { this(bfw, blockSize, (int) DefaultConfiguration.getInstance() .getAsBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX), null, null); }
public static org.apache.accumulo.server.fs.VolumeManager getLocal(String localBasePath) throws IOException { AccumuloConfiguration accConf = DefaultConfiguration.getInstance(); Configuration hadoopConf = new Configuration(); Volume defaultLocalVolume = VolumeConfiguration.create(FileSystem.getLocal(hadoopConf), localBasePath); // The default volume gets placed in the map, but local filesystem is only used for testing // purposes return new VolumeManagerImpl(Collections.singletonMap(DEFAULT, defaultLocalVolume), defaultLocalVolume, accConf, hadoopConf); }
private static void exportConfig(ServerContext context, Table.ID tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws AccumuloException, AccumuloSecurityException, IOException { DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance(); Map<String,String> siteConfig = context.instanceOperations().getSiteConfiguration(); Map<String,String> systemConfig = context.instanceOperations().getSystemConfiguration(); TableConfiguration tableConfig = context.getServerConfFactory().getTableConfiguration(tableID); OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8); // only put props that are different than defaults and higher level configurations zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE)); for (Entry<String,String> prop : tableConfig) { if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) { Property key = Property.getPropertyByKey(prop.getKey()); if (key == null || !defaultConfig.get(key).equals(prop.getValue())) { if (!prop.getValue().equals(siteConfig.get(prop.getKey())) && !prop.getValue().equals(systemConfig.get(prop.getKey()))) { osw.append(prop.getKey() + "=" + prop.getValue() + "\n"); } } } } osw.flush(); } }
/**
 * Builds an {@code AccumuloConfiguration} from the compiled-in Accumulo defaults, overlaid
 * with any Accumulo properties previously stored in the Job's Hadoop configuration under the
 * key prefix derived from {@code implementingClass}.
 *
 * @param implementingClass
 *          the class whose name prefixes the property configuration keys
 * @param conf
 *          the Hadoop configuration object holding the stored properties
 * @return the defaults plus all stored overrides
 * @throws IllegalArgumentException if a stored key is neither a known property nor a valid
 *           table property key
 * @since 1.6.0
 */
public static AccumuloConfiguration getAccumuloConfiguration(Class<?> implementingClass,
    Configuration conf) {
  String prefix = enumToConfKey(implementingClass, Opts.ACCUMULO_PROPERTIES) + ".";
  ConfigurationCopy result = new ConfigurationCopy(DefaultConfiguration.getInstance());
  for (Entry<String,String> entry : conf) {
    String key = entry.getKey();
    if (!key.startsWith(prefix)) {
      continue;
    }
    String propString = key.substring(prefix.length());
    Property prop = Property.getPropertyByKey(propString);
    if (prop != null) {
      result.set(prop, entry.getValue());
    } else if (Property.isValidTablePropertyKey(propString)) {
      result.set(propString, entry.getValue());
    } else {
      throw new IllegalArgumentException("Unknown accumulo file property " + propString);
    }
  }
  return result;
}
/**
 * Command-line entry point: creates an empty RFile at each path supplied in the options,
 * using the default table configuration and the requested compression codec.
 *
 * @param args command-line arguments parsed into {@code Opts}
 */
public static void main(String[] args) throws Exception {
  Configuration hadoopConf = new Configuration();
  Opts opts = new Opts();
  opts.parseArgs(CreateEmpty.class.getName(), args);
  for (String file : opts.files) {
    Path path = new Path(file);
    log.info("Writing to file '{}'", path);
    FileSKVWriter writer = new RFileOperations().newWriterBuilder()
        .forFile(file, path.getFileSystem(hadoopConf), hadoopConf,
            CryptoServiceFactory.newDefaultInstance())
        .withTableConfiguration(DefaultConfiguration.getInstance())
        .withCompression(opts.codec).build();
    // Close immediately without writing anything: an empty file is the desired output.
    writer.close();
  }
}
// Sort the expected values, then build a table configuration on top of the defaults with
// bloom filters enabled (the functor assignment continues on the next line).
Collections.sort(vals); ConfigurationCopy acuconf = new ConfigurationCopy(DefaultConfiguration.getInstance()); acuconf.set(Property.TABLE_BLOOM_ENABLED, "true"); acuconf.set(Property.TABLE_BLOOM_KEY_FUNCTOR,
// The default configuration backs the crypto setup; `opts.cryptoClass` names the service
// implementation, with the factory default presumably used as the fallback instance —
// TODO confirm ConfigurationTypeHelper.getClassInstance's fallback semantics.
AccumuloConfiguration aconf = DefaultConfiguration.getInstance(); CryptoService cryptoService = ConfigurationTypeHelper.getClassInstance(null, opts.cryptoClass, CryptoService.class, CryptoServiceFactory.newDefaultInstance());
// Overlay user-supplied table properties on the defaults; in the else-branch (no overrides)
// the shared default instance is used directly — assumed safe to share since it is the
// global singleton, confirm it is treated as read-only by callers.
ConfigurationCopy tableCC = new ConfigurationCopy(DefaultConfiguration.getInstance()); opts.tableConfig.forEach(tableCC::set); this.tableConf = tableCC; } else { this.tableConf = DefaultConfiguration.getInstance();
/**
 * Configures and starts the distributed replication work queue. The configured delay and
 * period are honored only when both differ from the compiled-in defaults; otherwise the
 * queue's built-in timing is used.
 *
 * @throws RuntimeException wrapping any ZooKeeper or interruption failure during startup
 */
@Override
public void run() {
  DefaultConfiguration defaultConf = DefaultConfiguration.getInstance();
  long defaultDelay = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long defaultPeriod = defaultConf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  long delay = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_DELAY);
  long period = conf.getTimeInMillis(Property.REPLICATION_WORK_PROCESSOR_PERIOD);
  try {
    DistributedWorkQueue workQueue;
    // NOTE(review): custom timing only takes effect when BOTH delay and period differ from
    // the defaults ("&&"); if overriding either value alone should apply, this wants "||".
    if (defaultDelay != delay && defaultPeriod != period) {
      log.debug("Configuration DistributedWorkQueue with delay and period of {} and {}", delay,
          period);
      workQueue = new DistributedWorkQueue(
          context.getZooKeeperRoot() + ReplicationConstants.ZOO_WORK_QUEUE, conf, delay, period);
    } else {
      log.debug("Configuring DistributedWorkQueue with default delay and period");
      workQueue = new DistributedWorkQueue(
          context.getZooKeeperRoot() + ReplicationConstants.ZOO_WORK_QUEUE, conf);
    }
    workQueue.startProcessing(new ReplicationProcessor(context, conf, fs), executor);
  } catch (KeeperException e) {
    throw new RuntimeException(e);
  } catch (InterruptedException e) {
    // Restore the interrupt flag so code further up the stack can observe the interruption;
    // the original multi-catch swallowed the thread's interrupted status.
    Thread.currentThread().interrupt();
    throw new RuntimeException(e);
  }
}
}
/**
 * Reads and merges summary data from every RFile source, restricted to the configured row
 * range and summary selector. Every source input stream is closed before returning, even on
 * failure.
 *
 * @return the merged summaries across all sources
 * @throws IOException if loading or reading any source fails
 */
@Override
public Collection<Summary> read() throws IOException {
  SummarizerFactory factory = new SummarizerFactory();
  ConfigurationCopy conf = new ConfigurationCopy(DefaultConfiguration.getInstance());
  config.forEach(conf::set);
  RFileSource[] sources = in.getSources();
  try {
    SummaryCollection merged = new SummaryCollection();
    CryptoService cservice = CryptoServiceFactory.newInstance(conf, ClassloaderType.JAVA);
    for (RFileSource source : sources) {
      SummaryReader reader = SummaryReader.load(in.getFileSystem().getConf(),
          source.getInputStream(), source.getLength(), summarySelector, factory, cservice);
      SummaryCollection sc = reader.getSummaries(
          Collections.singletonList(new Gatherer.RowRange(startRow, endRow)));
      merged.merge(sc, factory);
    }
    return merged.getSummaries();
  } finally {
    for (RFileSource source : sources) {
      source.getInputStream().close();
    }
  }
}
// Seed the map with every compiled-in default so later layers only need to record overrides.
for (Entry<String,String> defaultEntry : DefaultConfiguration.getInstance()) { defaults.put(defaultEntry.getKey(), defaultEntry.getValue());
// Snapshot the three configuration layers (compiled-in defaults, site, system) for later use.
defaultConfig = DefaultConfiguration.getInstance(); siteConfig = context.instanceOperations().getSiteConfiguration(); systemConfig = context.instanceOperations().getSystemConfiguration();
/**
 * Builds the RFileWriter, layering the user-supplied table, summarizer, and sampler
 * properties on top of the default configuration. Output goes either to the caller-provided
 * stream or to the caller-provided path, whichever was configured.
 *
 * @return a writer created with start disabled
 * @throws IOException if the underlying file writer cannot be created
 */
@Override
public RFileWriter build() throws IOException {
  FileOperations fileops = FileOperations.getInstance();
  AccumuloConfiguration conf = DefaultConfiguration.getInstance();
  HashMap<String,String> userProps = new HashMap<>();
  userProps.putAll(tableConfig);
  userProps.putAll(summarizerProps);
  userProps.putAll(samplerProps);
  if (!userProps.isEmpty()) {
    conf = new ConfigurationCopy(Iterables.concat(conf, userProps.entrySet()));
  }
  CryptoService cs = CryptoServiceFactory.newInstance(conf, ClassloaderType.JAVA);
  if (out.getOutputStream() == null) {
    // Path-based output.
    return new RFileWriter(fileops.newWriterBuilder()
        .forFile(out.path.toString(), out.getFileSystem(), out.getConf(), cs)
        .withTableConfiguration(conf).withStartDisabled().build(), visCacheSize);
  }
  // Stream-based output: coerce the raw stream into an FSDataOutputStream if needed.
  FSDataOutputStream fsdo = out.getOutputStream() instanceof FSDataOutputStream
      ? (FSDataOutputStream) out.getOutputStream()
      : new FSDataOutputStream(out.getOutputStream(), new FileSystem.Statistics("foo"));
  return new RFileWriter(
      fileops.newWriterBuilder().forOutputStream(".rf", fsdo, out.getConf(), cs)
          .withTableConfiguration(conf).withStartDisabled().build(),
      visCacheSize);
}
/**
 * Copies every property whose key passes {@code filter} into {@code props}: the compiled-in
 * defaults first, then the XML-backed configuration so its entries override matching default
 * keys.
 */
@Override
public void getProperties(Map<String,String> props, Predicate<String> filter) {
  for (Entry<String,String> entry : DefaultConfiguration.getInstance()) {
    if (filter.apply(entry.getKey())) {
      props.put(entry.getKey(), entry.getValue());
    }
  }
  for (Entry<String,String> entry : xml) {
    if (filter.apply(entry.getKey())) {
      props.put(entry.getKey(), entry.getValue());
    }
  }
}
/**
 * Looks the property up in the XML-backed configuration first, falling back to the
 * compiled-in default when it is absent there.
 */
@Override
public String get(Property property) {
  String xmlValue = xml.get(property.getKey());
  return xmlValue != null ? xmlValue : DefaultConfiguration.getInstance().get(property);
}
};
// NOTE(review): this line closes a call begun above this view; `ext` is the new-file
// extension that FileOperations derives from the default configuration.
replicationTableDefaultTabletDir); String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
/**
 * Command-line entry point: prints disk usage for the requested tables using the default
 * configuration. Statement order is significant: the volume manager is obtained before the
 * arguments are parsed, matching the original behavior.
 *
 * @param args command-line arguments parsed into {@code Opts}
 */
public static void main(String[] args) throws Exception {
  VolumeManager volMgr = VolumeManagerImpl.get();
  Opts opts = new Opts();
  opts.parseArgs(TableDiskUsage.class.getName(), args);
  Connector connector = opts.getConnector();
  org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(
      DefaultConfiguration.getInstance(), opts.tables, volMgr, connector, false);
}