// Writes the table's effective configuration into the export zip as key=value
// lines. Only table-scoped properties that differ from the defaults AND from
// the site/system level values are emitted, so the export captures just the
// table-specific overrides.
//
// NOTE(review): osw is flushed rather than closed -- closing it would close
// the underlying zip stream; presumably the caller owns zipOut/dataOut.
private static void exportConfig(ServerContext context, Table.ID tableID,
    ZipOutputStream zipOut, DataOutputStream dataOut)
    throws AccumuloException, AccumuloSecurityException, IOException {
  DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  Map<String,String> siteConfig = context.instanceOperations().getSiteConfiguration();
  Map<String,String> systemConfig = context.instanceOperations().getSystemConfiguration();
  TableConfiguration tableConfig = context.getServerConfFactory().getTableConfiguration(tableID);

  OutputStreamWriter osw = new OutputStreamWriter(dataOut, UTF_8);

  // only put props that are different than defaults and higher level configurations
  zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_TABLE_CONFIG_FILE));
  for (Entry<String,String> prop : tableConfig) {
    if (prop.getKey().startsWith(Property.TABLE_PREFIX.getKey())) {
      // key is null when the property has no Property enum entry
      // (e.g. user-defined iterator properties); those are always exported
      Property key = Property.getPropertyByKey(prop.getKey());
      if (key == null || !defaultConfig.get(key).equals(prop.getValue())) {
        if (!prop.getValue().equals(siteConfig.get(prop.getKey()))
            && !prop.getValue().equals(systemConfig.get(prop.getKey()))) {
          osw.append(prop.getKey() + "=" + prop.getValue() + "\n");
        }
      }
    }
  }

  osw.flush();
}
// NOTE(review): this closing brace appears to end the enclosing class
}
/**
 * Validates a bulk-import directory and returns its (qualified) {@link Path}.
 *
 * @param dir directory string supplied by the caller; qualified against the
 *        resolved volume's filesystem unless it already contains a scheme (":")
 * @param kind label used in error messages (e.g. the import flavor)
 * @param type directory role; when {@code "failure"} the directory must be empty
 * @return the resolved path
 * @throws AccumuloException if the path is missing, not a directory, or a
 *         non-empty failure directory
 */
private Path checkPath(String dir, String kind, String type)
    throws IOException, AccumuloException, AccumuloSecurityException {
  // Build a configuration snapshot from the live system properties to resolve the volume.
  Map<String,String> sysProps = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration config = new ConfigurationCopy(sysProps);
  FileSystem fileSystem =
      VolumeConfiguration.getVolume(dir, context.getHadoopConf(), config).getFileSystem();

  // A scheme-qualified path is taken verbatim; otherwise qualify it on this filesystem.
  Path resolved = dir.contains(":") ? new Path(dir) : fileSystem.makeQualified(new Path(dir));

  try {
    if (!fileSystem.getFileStatus(resolved).isDirectory()) {
      throw new AccumuloException(
          kind + " import " + type + " directory " + dir + " is not a directory!");
    }
  } catch (FileNotFoundException fnf) {
    throw new AccumuloException(
        kind + " import " + type + " directory " + dir + " does not exist!");
  }

  // Failure directories must start out empty so imported-failure files are unambiguous.
  if (type.equals("failure")) {
    FileStatus[] contents = fileSystem.listStatus(resolved);
    if (contents != null && contents.length != 0) {
      throw new AccumuloException("Bulk import failure directory " + resolved + " is not empty");
    }
  }

  return resolved;
}
/**
 * Executes the bulk load: resolves the table, computes the tablet-to-file
 * mapping (from an explicit plan when one was provided, otherwise by examining
 * the files), persists that mapping next to the source files, and then starts
 * the bulk-import FATE operation.
 */
@Override
public void load()
    throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
  Table.ID tableId = Tables.getTableId(context, tableName);

  // Resolve the source volume/filesystem using a snapshot of the system configuration.
  Map<String,String> sysProps = context.instanceOperations().getSystemConfiguration();
  AccumuloConfiguration sysConf = new ConfigurationCopy(sysProps);
  FileSystem fs =
      VolumeConfiguration.getVolume(dir, context.getHadoopConf(), sysConf).getFileSystem();
  Path srcPath = checkPath(fs, dir);

  // With no explicit plan, derive the mapping from the files themselves.
  SortedMap<KeyExtent,Bulk.Files> mappings = (plan == null)
      ? computeMappingFromFiles(fs, tableId, srcPath)
      : computeMappingFromPlan(fs, tableId, srcPath);
  BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);

  // Arguments for the FATE op: table id, source dir, and whether to set timestamps.
  List<ByteBuffer> args = Arrays.asList(
      ByteBuffer.wrap(tableId.getUtf8()),
      ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
      ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
  new TableOperationsImpl(context).doBulkFateOperation(args, tableName);
}
final Map<String,String> properties = client.instanceOperations().getSystemConfiguration(); final Map<String,String> peers = new HashMap<>(); final String definedPeersPrefix = Property.REPLICATION_PEERS.getKey();
shellState.getAccumuloClient().instanceOperations().getSystemConfiguration() .get(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + classpath); .getSystemConfiguration();
.putAll(shellState.getAccumuloClient().instanceOperations().getSystemConfiguration()); .getSystemConfiguration().entrySet(); if (tableName != null) { acuconf = shellState.getAccumuloClient().tableOperations().getProperties(tableName);
try { final Map<String,String> properties = shellState.getAccumuloClient() .instanceOperations().getSystemConfiguration(); final String table = properties.get(Property.TRACE_TABLE.getKey()); final String user = shellState.getAccumuloClient().whoami();
systemConfig = context.instanceOperations().getSystemConfiguration(); if (opts.allConfiguration || opts.users) { localUsers = Lists.newArrayList(context.securityOperations().listLocalUsers());
/**
 * Captures the current values of the tserver scan/compaction tuning properties
 * before the test runs, so they can be restored afterwards.
 */
@Before
public void alterConfig() throws Exception {
  InstanceOperations instanceOps = getConnector().instanceOperations();
  Map<String,String> currentConfig = instanceOps.getSystemConfiguration();
  scanMaxOpenFiles = currentConfig.get(Property.TSERV_SCAN_MAX_OPENFILES.getKey());
  majcConcurrent = currentConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
  majcThreadMaxOpen = currentConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
}
/**
 * Proxy passthrough: returns the system configuration visible to the
 * authenticated caller identified by {@code login}.
 */
@Override
public Map<String,String> getSystemConfiguration(ByteBuffer login)
    throws org.apache.accumulo.proxy.thrift.AccumuloException,
    org.apache.accumulo.proxy.thrift.AccumuloSecurityException, TException {
  try {
    return getConnector(login).instanceOperations().getSystemConfiguration();
  } catch (Exception e) {
    // handleException translates to the thrift exception types; presumably it
    // always rethrows, making the return below unreachable -- verify.
    handleException(e);
    return null;
  }
}
/**
 * Shrinks the tserver session idle timeout for the test, remembering the
 * original value for restoration, and waits out the previous timeout so the
 * new value is in effect.
 */
@Before
public void reduceSessionIdle() throws Exception {
  InstanceOperations instanceOps = getConnector().instanceOperations();
  String idleKey = Property.TSERV_SESSION_MAXIDLE.getKey();
  sessionIdle = instanceOps.getSystemConfiguration().get(idleKey);
  instanceOps.setProperty(idleKey, getMaxIdleTimeString());
  log.info("Waiting for existing session idle time to expire");
  Thread.sleep(AccumuloConfiguration.getTimeInMillis(sessionIdle));
  log.info("Finished waiting");
}
@Before public void checkProperty() throws Exception { Connector conn = getConnector(); // TABLE_VOLUME_CHOOSER is a valid property that can be updated in ZK, whereas the crypto // properties are not. // This lets us run this test more generically rather than forcibly needing to update some // property in accumulo-site.xml origPropValue = conn.instanceOperations().getSystemConfiguration() .get(Property.TABLE_VOLUME_CHOOSER.getKey()); conn.instanceOperations().setProperty(Property.TABLE_VOLUME_CHOOSER.getKey(), FairVolumeChooser.class.getName()); }
/**
 * Reads the optional {@code timeout.factor} system property (keeping the
 * field's default on parse failure), names the two test tables, and speeds up
 * major compactions after saving the current delay for later restoration.
 */
@Before
public void getTimeoutFactor() throws Exception {
  // Read once; Integer.parseInt throws NumberFormatException for null/garbage,
  // which leaves the field at its default value.
  String factorValue = System.getProperty("timeout.factor");
  try {
    timeoutFactor = Integer.parseInt(factorValue);
  } catch (NumberFormatException e) {
    log.warn("Could not parse property value for 'timeout.factor' as integer: " + factorValue);
  }
  Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);

  String[] names = getUniqueNames(2);
  REG_TABLE_NAME = names[0];
  PRE_SPLIT_TABLE_NAME = names[1];

  Connector conn = getConnector();
  String delayKey = Property.TSERV_MAJC_DELAY.getKey();
  tservMajcDelay = conn.instanceOperations().getSystemConfiguration().get(delayKey);
  conn.instanceOperations().setProperty(delayKey, "10ms");
}
/**
 * Saves the current major-compaction delay, then shortens it to 100ms for the
 * test. On standalone clusters, waits out the old delay so the change is live.
 */
@Before
public void updateMajcDelay() throws Exception {
  Connector conn = getConnector();
  String delayKey = Property.TSERV_MAJC_DELAY.getKey();
  majcDelay = conn.instanceOperations().getSystemConfiguration().get(delayKey);
  conn.instanceOperations().setProperty(delayKey, "100ms");
  if (getClusterType() == ClusterType.STANDALONE) {
    Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
  }
}
@Before public void updateMajcDelay() throws Exception { Connector c = getConnector(); majcDelay = c.instanceOperations().getSystemConfiguration() .get(Property.TSERV_MAJC_DELAY.getKey()); c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s"); if (getClusterType() == ClusterType.STANDALONE) { // Gotta wait for the cluster to get out of the default sleep value Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay)); } }
/**
 * Post-test cleanup: brings the trace table back online (it may have been
 * taken offline by the test) when a cluster is available.
 */
@After
public void onlineTraceTable() throws Exception {
  if (cluster != null) {
    Connector conn = getConnector();
    String traceTableName =
        conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
    if (conn.tableOperations().exists(traceTableName)) {
      conn.tableOperations().online(traceTableName, true);
    }
  }
}
/**
 * Pre-test setup: takes the trace table offline (if it exists) so the test
 * runs without it.
 */
@Before
public void offlineTraceTable() throws Exception {
  Connector conn = getConnector();
  String traceTableName =
      conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
  if (conn.tableOperations().exists(traceTableName)) {
    conn.tableOperations().offline(traceTableName, true);
  }
}
/**
 * Ensures the major-compaction delay is 1s for the test, restarting all
 * tablet servers only when the value actually had to change. The previous
 * value is saved for restoration.
 */
@Before
public void alterConfig() throws Exception {
  Connector conn = getConnector();
  String delayKey = Property.TSERV_MAJC_DELAY.getKey();
  majcDelay = conn.instanceOperations().getSystemConfiguration().get(delayKey);
  if (!"1s".equals(majcDelay)) {
    conn.instanceOperations().setProperty(delayKey, "1s");
    // Bounce the tservers so the new delay takes effect.
    getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
    getClusterControl().startAllServers(ServerType.TABLET_SERVER);
  }
}
/**
 * On standalone clusters only: saves the current major-compaction tuning
 * properties, applies aggressive values for the test, and restarts the tablet
 * servers so the changes take effect. Other cluster types are left untouched.
 */
@Before
public void alterConfig() throws Exception {
  if (ClusterType.STANDALONE != getClusterType()) {
    return;
  }
  InstanceOperations instanceOps = getConnector().instanceOperations();
  Map<String,String> currentConfig = instanceOps.getSystemConfiguration();

  // Remember originals so an @After hook can restore them.
  majcThreadMaxOpen = currentConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
  majcDelay = currentConfig.get(Property.TSERV_MAJC_DELAY.getKey());
  majcMaxConcurrent = currentConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());

  instanceOps.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "4");
  instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1");
  instanceOps.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");

  getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
  getClusterControl().startAllServers(ServerType.TABLET_SERVER);
}
/**
 * For non-mini clusters: saves the current compaction delay and memory limit,
 * applies test-friendly values, and restarts the tablet servers so the changes
 * take effect. Mini clusters are configured elsewhere and skipped here.
 */
@Before
public void alterConfig() throws Exception {
  if (ClusterType.MINI == getClusterType()) {
    return;
  }
  InstanceOperations instanceOps = getConnector().instanceOperations();
  Map<String,String> currentConfig = instanceOps.getSystemConfiguration();

  // Remember originals so an @After hook can restore them.
  majcDelay = currentConfig.get(Property.TSERV_MAJC_DELAY.getKey());
  maxMem = currentConfig.get(Property.TSERV_MAXMEM.getKey());

  instanceOps.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "0");
  instanceOps.setProperty(Property.TSERV_MAXMEM.getKey(), "50K");

  getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
  getClusterControl().startAllServers(ServerType.TABLET_SERVER);
}