Refine search
/**
 * Initializes the Accumulo connection from YCSB properties:
 * "accumulo.columnFamily", "accumulo.instanceName", "accumulo.zooKeepers",
 * "accumulo.username", "accumulo.password", and the legacy "accumulo.pcFlag".
 *
 * @throws DBException when the Accumulo connector cannot be created
 */
@Override
public void init() throws DBException {
  final Properties props = getProperties();

  colFam = new Text(props.getProperty("accumulo.columnFamily"));
  colFamBytes = colFam.toString().getBytes(UTF_8);

  inst = new ZooKeeperInstance(new ClientConfiguration()
      .withInstance(props.getProperty("accumulo.instanceName"))
      .withZkHosts(props.getProperty("accumulo.zooKeepers")));

  try {
    String principal = props.getProperty("accumulo.username");
    AuthenticationToken token = new PasswordToken(props.getProperty("accumulo.password"));
    connector = inst.getConnector(principal, token);
  } catch (AccumuloException | AccumuloSecurityException e) {
    // Wrap but preserve the cause so callers can diagnose the failure.
    throw new DBException(e);
  }

  // The ZK-based producer/consumer mode was removed; warn anyone still setting the flag.
  if (!"none".equals(props.getProperty("accumulo.pcFlag", "none"))) {
    System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. "
        + "Please see YCSB issue #416 for work on adding a general solution to coordinated work.");
  }
}
/**
 * Constructs an instance handle from the default client configuration,
 * overridden with the given instance name and ZooKeeper host list.
 *
 * @param instanceName
 *          The name of specific accumulo instance. This is set at initialization time.
 * @param zooKeepers
 *          A comma separated list of zoo keeper server locations. Each location can contain an
 *          optional port, of the format host:port.
 */
public ZooKeeperInstance(String instanceName, String zooKeepers) {
  this(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zooKeepers));
}
/**
 * Translates new-style connection {@link Properties} into a legacy
 * {@link org.apache.accumulo.core.client.ClientConfiguration}. Keys with a
 * known legacy equivalent (looked up in {@code propsConf}) are renamed; all
 * other keys are copied through unchanged. Supplying an SSL keystore path
 * additionally enables RPC SSL client authentication.
 */
@SuppressWarnings("deprecation")
public static org.apache.accumulo.core.client.ClientConfiguration toClientConf(
    Properties properties) {
  org.apache.accumulo.core.client.ClientConfiguration config =
      org.apache.accumulo.core.client.ClientConfiguration.create();
  for (Object keyObj : properties.keySet()) {
    String key = (String) keyObj;
    String value = properties.getProperty(key);
    String legacyKey = propsConf.get(key);
    // Fall back to the original key when no legacy mapping exists.
    config.setProperty(legacyKey == null ? key : legacyKey, value);
    if (key.equals(ClientProperty.SSL_KEYSTORE_PATH.getKey())) {
      // A client keystore implies client-auth SSL on the RPC channel.
      config.setProperty(
          org.apache.accumulo.core.client.ClientConfiguration.ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
          "true");
    }
  }
  return config;
}
/**
 * Configures a {@link org.apache.accumulo.core.client.ZooKeeperInstance} for this job.
 *
 * @param job
 *          the Hadoop job instance to be configured
 * @param instanceName
 *          the Accumulo instance name
 * @param zooKeepers
 *          a comma-separated list of zookeeper servers
 * @since 1.5.0
 * @deprecated since 1.6.0
 */
@Deprecated
public static void setZooKeeperInstance(Job job, String instanceName, String zooKeepers) {
  org.apache.accumulo.core.client.ClientConfiguration clientConf =
      org.apache.accumulo.core.client.ClientConfiguration.create()
          .withInstance(instanceName)
          .withZkHosts(zooKeepers);
  setZooKeeperInstance(job, clientConf);
}
/**
 * Opens a {@link Connector} to the metrics Accumulo instance described by the
 * given Hadoop {@link Configuration} (ZooKeepers, instance name, user, password).
 *
 * @throws AccumuloException on a general connection failure
 * @throws AccumuloSecurityException when the credentials are rejected
 */
public static Connector metricsConnection(Configuration c)
    throws AccumuloException, AccumuloSecurityException {
  final String zookeepers = c.get(MetricsConfig.ZOOKEEPERS);
  final String instanceName = c.get(MetricsConfig.INSTANCE);
  final String user = c.get(MetricsConfig.USER);
  final String password = c.get(MetricsConfig.PASS);

  ClientConfiguration clientConf =
      ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers);
  return new ZooKeeperInstance(clientConf).getConnector(user, new PasswordToken(password));
}
ExportTask(String instanceName, String zookeepers, String user, String password, String table) throws TableNotFoundException, AccumuloException, AccumuloSecurityException { ZooKeeperInstance zki = new ZooKeeperInstance( new ClientConfiguration().withInstance(instanceName).withZkHosts(zookeepers)); // TODO need to close batch writer Connector conn = zki.getConnector(user, new PasswordToken(password)); try { bw = conn.createBatchWriter(table, new BatchWriterConfig()); } catch (TableNotFoundException tnfe) { try { conn.tableOperations().create(table); } catch (TableExistsException e) { // nothing to do } bw = conn.createBatchWriter(table, new BatchWriterConfig()); } }
BatchWriter writer = Connections.warehouseConnection(conf).createBatchWriter(conf.get(MetricsConfig.ERRORS_TABLE, MetricsConfig.DEFAULT_ERRORS_TABLE), bwConfig); BatchScanner scanner = Connections.metricsConnection(conf).createBatchScanner(conf.get(MetricsConfig.ERRORS_TABLE, MetricsConfig.DEFAULT_ERRORS_TABLE), Authorizations.EMPTY, 8); job.setNumReduceTasks(1); PasswordToken warehousePW = new PasswordToken(conf.get(MetricsConfig.WAREHOUSE_PASSWORD, "")); ClientConfiguration zkConfig = ClientConfiguration.loadDefault().withInstance(conf.get(MetricsConfig.WAREHOUSE_INSTANCE)) .withZkHosts(conf.get(MetricsConfig.WAREHOUSE_ZOOKEEPERS)); ZooKeeperInstance instance = new ZooKeeperInstance(zkConfig); Connector connector = instance.getConnector(conf.get(MetricsConfig.WAREHOUSE_USERNAME), warehousePW); AccumuloInputFormat.setConnectorInfo(job, conf.get(MetricsConfig.WAREHOUSE_USERNAME), warehousePW); AccumuloInputFormat.setInputTableName(job, conf.get(MetricsConfig.ERRORS_TABLE, MetricsConfig.DEFAULT_ERRORS_TABLE)); AccumuloInputFormat.setScanAuthorizations(job, connector.securityOperations().getUserAuthorizations(conf.get(MetricsConfig.WAREHOUSE_USERNAME))); job.setInputFormatClass(AccumuloInputFormat.class); job.setOutputFormatClass(AccumuloOutputFormat.class);
/**
 * Constructs an instance handle by UUID, layering the id, hosts, and timeout
 * over the default client configuration.
 *
 * @param instanceId
 *          The UUID that identifies the accumulo instance you want to connect to.
 * @param zooKeepers
 *          A comma separated list of zoo keeper server locations. Each location can contain an
 *          optional port, of the format host:port.
 * @param sessionTimeout
 *          zoo keeper session time out in milliseconds.
 * @deprecated since 1.6.0; Use {@link #ZooKeeperInstance(ClientConfiguration)} instead.
 */
@Deprecated
public ZooKeeperInstance(UUID instanceId, String zooKeepers, int sessionTimeout) {
  this(ClientConfiguration.loadDefault().withInstance(instanceId).withZkHosts(zooKeepers)
      .withZkTimeout(sessionTimeout));
}
ClientConfiguration clientConf = ClientConfiguration.loadDefault().withInstance(args[3]) .withZkHosts(args[4]); if (clientConf.hasSasl()) { if (!conn.securityOperations().hasSystemPermission(conn.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) { log.error( + " MapReduce without distributing the user's credentials."); throw new IllegalStateException( conn.whoami() + " does not have permission to obtain a delegation token"); AccumuloInputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1])); AccumuloOutputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1]));
ClientConfiguration zkConfig = ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zookeepers); Instance instance = (instanceName != null ? new ZooKeeperInstance(zkConfig) : HdfsZooInstance.getInstance()); Connector connector = instance.getConnector(username, new PasswordToken(password)); connector.tableOperations().addSplits(tableName, splits); BatchWriter w = connector.createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(1, TimeUnit.SECONDS).setMaxMemory(100000L) .setMaxWriteThreads(4)); try {
String secTableName, systemUserName, tableUserName, secNamespaceName; ClientConfiguration clientConf = ClientConfiguration.loadDefault(); if (clientConf.hasSasl()) { throw new IllegalStateException( "Security module currently cannot support Kerberos/SASL instances"); secNamespaceName = String.format("securityNs_%s", hostname); if (conn.tableOperations().exists(secTableName)) conn.tableOperations().delete(secTableName); Set<String> users = conn.securityOperations().listLocalUsers(); if (users.contains(tableUserName)) conn.securityOperations().dropLocalUser(tableUserName); conn.securityOperations().dropLocalUser(systemUserName); PasswordToken sysUserPass = new PasswordToken("sysUser"); conn.securityOperations().createLocalUser(systemUserName, sysUserPass); WalkingSecurity.get(state, env).createUser(systemUserName, sysUserPass); WalkingSecurity.get(state, env).changePassword(tableUserName, new PasswordToken(new byte[0]));
protected void setTableIdsAndConfigs() throws IOException { ZooKeeperInstance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(conf.get(INSTANCE_NAME)) .withZkHosts(conf.get(ZOOKEEPERS))); Connector connector = null; tableConfigs = new HashMap<>(); Iterable<String> localityGroupTables = Splitter.on(",").split(conf.get(CONFIGURE_LOCALITY_GROUPS, "")); try { connector = instance.getConnector(conf.get(USERNAME), new PasswordToken(Base64.decodeBase64(conf.get(PASSWORD)))); tableIds = connector.tableOperations().tableIdMap(); Set<String> compressionTableBlackList = getCompressionTableBlackList(conf); String compressionType = getCompressionType(conf); for (String tableName : tableIds.keySet()) { ConfigurationCopy tableConfig = new ConfigurationCopy(connector.tableOperations().getProperties(tableName)); tableConfig.set(Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), (compressionTableBlackList.contains(tableName) ? Compression.COMPRESSION_NONE : compressionType)); if (Iterables.contains(localityGroupTables, tableName)) { Map<String,Set<Text>> localityGroups = connector.tableOperations().getLocalityGroups(tableName);
apacheConf.setProperty("instance.name", accumuloConf.getInstanceName()); apacheConf.setProperty("instance.zookeeper.host", accumuloConf.getZookeepers()); final ClientConfiguration aconf = new ClientConfiguration(Collections.singletonList(apacheConf)); final Instance instance = new ZooKeeperInstance(aconf); connector = instance.getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword())); bwConfig = new BatchWriterConfig(); bwConfig.setMaxLatency(getTimeInMillis(accumuloConf.getWrite().getLatency()), TimeUnit.MILLISECONDS); defaultAgeOffMilliSec = this.getAgeOffForMetric(MetricAgeOffIterator.DEFAULT_AGEOFF_KEY); final Map<String, String> tableIdMap = connector.tableOperations().tableIdMap(); if (!tableIdMap.containsKey(metricsTable)) { try { LOG.info("Creating table " + metricsTable); connector.tableOperations().create(metricsTable); } catch (final TableExistsException ex) { try { LOG.info("Creating table " + metaTable); connector.tableOperations().create(metaTable); } catch (final TableExistsException ex) {
public static void main(String[] args) throws Exception { try (ConfigurableApplicationContext ctx = new SpringApplicationBuilder(SpringBootstrap.class) .bannerMode(Mode.OFF).web(false).run(args)) { Configuration conf = ctx.getBean(Configuration.class); final BaseConfiguration apacheConf = new BaseConfiguration(); Accumulo accumuloConf = conf.getAccumulo(); apacheConf.setProperty("instance.name", accumuloConf.getInstanceName()); apacheConf.setProperty("instance.zookeeper.host", accumuloConf.getZookeepers()); final ClientConfiguration aconf = new ClientConfiguration(Collections.singletonList(apacheConf)); final Instance instance = new ZooKeeperInstance(aconf); Connector con = instance.getConnector(accumuloConf.getUsername(), new PasswordToken(accumuloConf.getPassword())); Scanner s = con.createScanner(conf.getMetaTable(), con.securityOperations().getUserAuthorizations(con.whoami())); try { s.setRange(new Range(Meta.METRIC_PREFIX, true, Meta.TAG_PREFIX, false)); for (Entry<Key, Value> e : s) { System.out.println(e.getKey().getRow().toString().substring(Meta.METRIC_PREFIX.length())); } } finally { s.close(); } } } }
ClientConfiguration clientConf = ClientConfiguration.create().withInstance(args[3]) .withZkHosts(args[4]); if (clientConf.hasSasl()) { if (!conn.securityOperations().hasSystemPermission(conn.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) { log.error( + " MapReduce without distributing the user's credentials."); throw new IllegalStateException( conn.whoami() + " does not have permission to obtain a delegation token"); token = new PasswordToken(args[1]);
public void init(Properties scaleProps, Properties testProps, int numTabletServers) throws AccumuloException, AccumuloSecurityException { this.scaleProps = scaleProps; this.testProps = testProps; this.numTabletServers = numTabletServers; // get properties to create connector String instanceName = this.scaleProps.getProperty("INSTANCE_NAME"); String zookeepers = this.scaleProps.getProperty("ZOOKEEPERS"); String user = this.scaleProps.getProperty("USER"); String password = this.scaleProps.getProperty("PASSWORD"); System.out.println(password); conn = new ZooKeeperInstance( ClientConfiguration.create().withInstance(instanceName).withZkHosts(zookeepers)) .getConnector(user, new PasswordToken(password)); }
/**
 * Opens a connection to the Accumulo cluster described by {@code conn}:
 * builds a ZooKeeper-backed instance, authenticates with the configured
 * user and password, and captures the scan {@link Authorizations}
 * ({@link Authorizations#EMPTY} when none are configured).
 *
 * @param conn connection settings (instance name, hosts, session timeout,
 *     credentials, authorizations)
 * @throws AccumuloException on a general connection failure
 * @throws AccumuloSecurityException when authentication is rejected
 */
public AccumuloConnection(ConnectionProperties conn)
    throws AccumuloException, AccumuloSecurityException {
  this.conn = conn;
  ClientConfiguration cconfig = new ClientConfiguration()
      .withInstance(conn.getInstanceName())
      .withZkHosts(conn.getHost())
      .withZkTimeout(conn.getSessionTimeOut());
  this.instance = new ZooKeeperInstance(cconfig);
  principal = conn.getUser();
  token = new PasswordToken(conn.getPass());
  this.connector = this.instance.getConnector(this.conn.getUser(), token);

  String[] sAuth = conn.getAuthorizations();
  if (sAuth != null && sAuth.length > 0) {
    this.auth = new Authorizations(sAuth);
  } else {
    this.auth = Authorizations.EMPTY;
  }
  if (log.isDebugEnabled()) {
    log.debug("!!!WHOAMI=" + this.connector.whoami());
  }
}
/**
 * Configures a {@link ZooKeeperInstance} for this job.
 *
 * @param job
 *          the Hadoop job instance to be configured
 * @param instanceName
 *          the Accumulo instance name
 * @param zooKeepers
 *          a comma-separated list of zookeeper servers
 * @since 1.5.0
 * @deprecated since 1.6.0; Use {@link #setZooKeeperInstance(Job, ClientConfiguration)} instead.
 */
@Deprecated
public static void setZooKeeperInstance(Job job, String instanceName, String zooKeepers) {
  ClientConfiguration clientConf =
      new ClientConfiguration().withInstance(instanceName).withZkHosts(zooKeepers);
  setZooKeeperInstance(job, clientConf);
}
Connector conn = getConnector(); String table = getUniqueNames(1)[0]; conn.tableOperations().create(table); insertData(table, currentTimeMillis()); clientConf.setProperty(prop, clusterClientConf.get(prop.getKey())); for (int i = 0; i < 10000; i += 1000) splitsToAdd.add(new Text(String.format("%09d", i))); conn.tableOperations().addSplits(table, splitsToAdd); sleepUninterruptibly(500, TimeUnit.MILLISECONDS); // wait for splits to be propagated Collection<Text> actualSplits = conn.tableOperations().listSplits(table); List<InputSplit> splits = inputFormat.getSplits(job); assertEquals(actualSplits.size() + 1, splits.size()); // No ranges set on the job so it'll start
Instance instance = new ZooKeeperInstance(ClientConfiguration.loadDefault().withInstance(instanceName).withZkHosts(zooKeepers)); TableOperations tops = instance.getConnector(credentials.getPrincipal(), credentials.getToken()).tableOperations(); Map<String,String> tableIds = Tables.getNameToIdMap(instance); FileStatus[] tableDirs = fs.globStatus(new Path(mapFilesDir, "*"));