/** Returns an iterator over the key/value entries by delegating to the wrapped {@code encapsulated} configuration. */
@Override public Iterator<Map.Entry<String, String>> iterator() { return encapsulated.iterator(); }
/** Returns an iterator over the key/value entries by delegating to the wrapped {@code encapsulated} configuration. */
@Override public Iterator<Map.Entry<String, String>> iterator() { return encapsulated.iterator(); }
/** Returns an iterator over the key/value entries by delegating to the underlying {@code conf}. */
@Override public Iterator<Map.Entry<String, String>> iterator() { return conf.iterator(); }
/** Returns an iterator over the key/value entries by delegating to the underlying {@code c}. */
@Override public Iterator<Map.Entry<String,String>> iterator() { return c.iterator(); }
/**
 * Copies every key/value pair from the given {@link Configuration} into a new
 * {@link Properties} object.
 *
 * @param conf the configuration whose entries are copied
 * @return a freshly created {@code Properties} holding all entries of {@code conf}
 */
public static Properties getProperties(Configuration conf) {
  Properties props = new Properties();
  for (Iterator<Map.Entry<String, String>> it = conf.iterator(); it.hasNext(); ) {
    Map.Entry<String, String> entry = it.next();
    props.setProperty(entry.getKey(), entry.getValue());
  }
  return props;
}
/**
 * Put all configuration properties in a given {@link Configuration} object into a given
 * {@link Properties} object.
 *
 * @param configuration the given {@link Configuration} object
 * @param properties the given {@link Properties} object
 */
public static void putConfigurationIntoProperties(Configuration configuration, Properties properties) {
  Iterator<Entry<String, String>> entries = configuration.iterator();
  while (entries.hasNext()) {
    Entry<String, String> entry = entries.next();
    properties.put(entry.getKey(), entry.getValue());
  }
}
private Configuration prepareConf(String providerName) { Configuration newConf = new Configuration(); Iterator<Map.Entry<String, String>> entries = conf.iterator(); String providerKey = MAPPING_PROVIDER_CONFIG_PREFIX + "." + providerName; while (entries.hasNext()) { Map.Entry<String, String> entry = entries.next(); String key = entry.getKey(); // get a property like "hadoop.security.group.mapping.provider.PROVIDER-X.ldap.url" if (key.startsWith(providerKey) && !key.equals(providerKey)) { // restore to be the one like "hadoop.security.group.mapping.ldap.url" // so that can be used by original provider. key = key.replace(".provider." + providerName, ""); newConf.set(key, entry.getValue()); } } return newConf; } }
/**
 * Extracts all configuration entries whose key starts with {@code META_PREFIX} into a
 * {@link ColumnFileMetaData}, stripping the prefix from each key and storing the value
 * as UTF-8 bytes.
 *
 * @param configuration the configuration to scan for metadata entries
 * @return the collected file metadata
 */
static ColumnFileMetaData filterMetadata(final Configuration configuration) {
  final ColumnFileMetaData meta = new ColumnFileMetaData();
  for (Iterator<Entry<String, String>> it = configuration.iterator(); it.hasNext(); ) {
    Entry<String, String> entry = it.next();
    if (entry.getKey().startsWith(META_PREFIX)) {
      meta.put(entry.getKey().substring(META_PREFIX.length()),
          entry.getValue().getBytes(MetaData.UTF8));
    }
  }
  return meta;
}
}
public static Properties extractKafkaConfigToProperties(Configuration configuration) { Set<String> configNames = new HashSet<String>(); try { configNames = ConsumerConfig.configNames(); } catch (Exception e) { // the Kafka configNames api is supported on 0.10.1.0+, in case NoSuchMethodException which is an Error, not Exception String[] configNamesArray = ("metric.reporters, metadata.max.age.ms, partition.assignment.strategy, reconnect.backoff.ms," + "sasl.kerberos.ticket.renew.window.factor, max.partition.fetch.bytes, bootstrap.servers, ssl.keystore.type," + " enable.auto.commit, sasl.mechanism, interceptor.classes, exclude.internal.topics, ssl.truststore.password," + " client.id, ssl.endpoint.identification.algorithm, max.poll.records, check.crcs, request.timeout.ms, heartbeat.interval.ms," + " auto.commit.interval.ms, receive.buffer.bytes, ssl.truststore.type, ssl.truststore.location, ssl.keystore.password, fetch.min.bytes," + " fetch.max.bytes, send.buffer.bytes, max.poll.interval.ms, value.deserializer, group.id, retry.backoff.ms," + " ssl.secure.random.implementation, sasl.kerberos.kinit.cmd, sasl.kerberos.service.name, sasl.kerberos.ticket.renew.jitter, ssl.trustmanager.algorithm, ssl.key.password, fetch.max.wait.ms, sasl.kerberos.min.time.before.relogin, connections.max.idle.ms, session.timeout.ms, metrics.num.samples, key.deserializer, ssl.protocol, ssl.provider, ssl.enabled.protocols, ssl.keystore.location, ssl.cipher.suites, security.protocol, ssl.keymanager.algorithm, metrics.sample.window.ms, auto.offset.reset").split(","); configNames.addAll(Arrays.asList(configNamesArray)); } Properties result = new Properties(); for (Iterator<Map.Entry<String, String>> it = configuration.iterator(); it.hasNext();) { Map.Entry<String, String> entry = it.next(); String key = entry.getKey(); String value = entry.getValue(); if (configNames.contains(key)) { result.put(key, value); } } return result; }
@Override public Iterator<Map.Entry<String, String>> iterator() { Map<String, String> ret = new HashMap<>(); // add in reverse order so that oldest get overridden. if (!configs.isEmpty()) { for (int i = configs.size() - 1; i >= 0; i--) { ImmutableConfigMap map = configs.get(i); Iterator<Map.Entry<String, String>> iter = map.iterator(); while (iter.hasNext()) { Map.Entry<String, String> entry = iter.next(); ret.put(entry.getKey(), entry.getValue()); } } } // add mutations to this CompoundConfiguration last. if (mutableConf != null) { Iterator<Map.Entry<String, String>> miter = mutableConf.iterator(); while (miter.hasNext()) { Map.Entry<String, String> entry = miter.next(); ret.put(entry.getKey(), entry.getValue()); } } return UnmodifiableIterator.unmodifiableIterator(ret.entrySet().iterator()); }
public static void dumpConfig(Configuration originalConf, StringBuilder sb) { Set<String> hiddenSet = getHiddenSet(originalConf); sb.append("Values omitted for security reason if present: ").append(hiddenSet).append("\n"); Configuration conf = new Configuration(originalConf); stripConfigurations(conf, hiddenSet); Iterator<Map.Entry<String, String>> configIter = conf.iterator(); List<Map.Entry<String, String>> configVals = new ArrayList<>(); while(configIter.hasNext()) { configVals.add(configIter.next()); } Collections.sort(configVals, new Comparator<Map.Entry<String, String>>() { @Override public int compare(Map.Entry<String, String> ent, Map.Entry<String, String> ent2) { return ent.getKey().compareTo(ent2.getKey()); } }); for(Map.Entry<String, String> entry : configVals) { //use get() to make sure variable substitution works if(entry.getKey().toLowerCase().contains("path")) { StringTokenizer st = new StringTokenizer(conf.get(entry.getKey()), File.pathSeparator); sb.append(entry.getKey()).append("=\n"); while(st.hasMoreTokens()) { sb.append(" ").append(st.nextToken()).append(File.pathSeparator).append('\n'); } } else { sb.append(entry.getKey()).append('=').append(conf.get(entry.getKey())).append('\n'); } } }
/**
 * Mirrors every entry of the master configuration into the target configuration.
 *
 * @param masterConf the source configuration to read entries from
 * @param conf the destination configuration that receives every key/value pair
 */
private static void populateFromMasterConfig(Configuration masterConf, Configuration conf) {
  for (Iterator<Entry<String, String>> iter = masterConf.iterator(); iter.hasNext(); ) {
    Entry<String, String> entry = iter.next();
    conf.set(entry.getKey(), entry.getValue());
  }
}
/** * addJobConfToEnvironment is mostly shamelessly copied from hadoop streaming. Added additional * check on environment variable length */ void addJobConfToEnvironment(Configuration conf, Map<String, String> env) { Iterator<Map.Entry<String, String>> it = conf.iterator(); while (it.hasNext()) { Map.Entry<String, String> en = it.next(); String name = en.getKey(); if (!blackListed(conf, name)) { // String value = (String)en.getValue(); // does not apply variable // expansion String value = conf.get(name); // does variable expansion name = safeEnvVarName(name); boolean truncate = conf .getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false); value = safeEnvVarValue(value, name, truncate); env.put(name, value); } } }
/** * addJobConfToEnvironment is mostly shamelessly copied from hadoop streaming. Added additional * check on environment variable length */ void addJobConfToEnvironment(Configuration conf, Map<String, String> env) { Iterator<Map.Entry<String, String>> it = conf.iterator(); while (it.hasNext()) { Map.Entry<String, String> en = it.next(); String name = en.getKey(); if (!blackListed(conf, name)) { // String value = (String)en.getValue(); // does not apply variable // expansion String value = conf.get(name); // does variable expansion name = safeEnvVarName(name); boolean truncate = conf .getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false); value = safeEnvVarValue(value, name, truncate); env.put(name, value); } } }
/**
 * Prints every configuration entry as {@code key=value} to stdout, but only when the
 * JVM was started with {@code -Ddev=true}.
 *
 * @param config the configuration to print
 */
public static void printConfiguration(Configuration config) {
  if (!Boolean.getBoolean("dev")) {
    return;
  }
  Iterator<Entry<String, String>> entries = config.iterator();
  while (entries.hasNext()) {
    Entry<String, String> entry = entries.next();
    System.out.println(entry.getKey() + "=" + entry.getValue());
  }
}
public ReadOnlyProps getProps() { // Ensure that HBase RPC time out value is at least as large as our thread time out for query. int threadTimeOutMS = config.getInt(THREAD_TIMEOUT_MS_ATTRIB, DEFAULT_THREAD_TIMEOUT_MS); int hbaseRPCTimeOut = config.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); if (threadTimeOutMS > hbaseRPCTimeOut) { config.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, threadTimeOutMS); } return new ReadOnlyProps(config.iterator()); }
/**
 * Returns a read-only view of the configuration layered over the given defaults.
 *
 * @param defaultProps fallback properties consulted for keys absent from {@code config}
 */
public ReadOnlyProps getProps(ReadOnlyProps defaultProps) { return new ReadOnlyProps(defaultProps, config.iterator()); }
public static Properties combineProperties(Properties props, final Configuration conf, Set<String> withoutTheseProps) { Iterator<Map.Entry<String, String>> iterator = conf.iterator(); Properties copy = deepCopy(props); if (iterator != null) { while (iterator.hasNext()) { Map.Entry<String, String> entry = iterator.next(); // set the property from config only if props doesn't have it already if (copy.getProperty(entry.getKey()) == null && !withoutTheseProps.contains(entry.getKey())) { copy.setProperty(entry.getKey(), entry.getValue()); } } } return copy; }
/**
 * Converts a Hadoop {@link Configuration} into an Apache Commons configuration by
 * copying every key/value entry.
 *
 * @param hadoopConfiguration the source Hadoop configuration
 * @return a {@code BaseConfiguration} holding the same key/value pairs
 */
public static org.apache.commons.configuration.Configuration makeApacheConfiguration(final Configuration hadoopConfiguration) {
  final BaseConfiguration apacheConfiguration = new BaseConfiguration();
  // Disable delimiter parsing so comma-containing values survive as single strings.
  apacheConfiguration.setDelimiterParsingDisabled(true);
  final Iterator<Map.Entry<String, String>> entries = hadoopConfiguration.iterator();
  while (entries.hasNext()) {
    final Map.Entry<String, String> entry = entries.next();
    apacheConfiguration.setProperty(entry.getKey(), entry.getValue());
  }
  return apacheConfiguration;
}
/**
 * Creates the index-rebuild schedule task, reading its batch sizes and thresholds from
 * the coprocessor environment's configuration.
 *
 * @param env the region coprocessor environment supplying the configuration
 * @param onlyTheseTables optional whitelist of tables to rebuild; {@code null} means all
 */
public BuildIndexScheduleTask(RegionCoprocessorEnvironment env, List<String> onlyTheseTables) {
  // Defensive copy: the table filter cannot be mutated after construction.
  this.onlyTheseTables = (onlyTheseTables == null) ? null : ImmutableList.copyOf(onlyTheseTables);
  this.env = env;
  Configuration conf = env.getConfiguration();
  this.rebuildIndexBatchSize = conf.getLong(
      QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, HConstants.LATEST_TIMESTAMP);
  this.configuredBatches = conf.getLong(
      QueryServices.INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE, 10);
  this.indexDisableTimestampThreshold = conf.getLong(
      QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD,
      QueryServicesOptions.DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD);
  this.pendingDisableThreshold = conf.getLong(
      QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
      QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
  this.props = new ReadOnlyProps(env.getConfiguration().iterator());
}