/**
 * Returns the value mapped to {@code k}, or {@code defaultString} when the key is absent.
 *
 * @param k config key to look up
 * @param defaultString value returned when {@code k} is not present
 * @return the mapped value, or the supplied default
 */
public String get(String k, String defaultString) {
  return containsKey(k) ? get(k) : defaultString;
}
/**
 * Collects the names of all configured resources: every key under
 * {@code RESOURCE_PREFIX} ending in {@code PATH_SUFFIX}, with the suffix stripped.
 *
 * @return the resource names, in config-key iteration order
 */
public List<String> getResourceNames() {
  Config resources = config.subset(RESOURCE_PREFIX, true);
  List<String> names = new ArrayList<String>();
  for (String resourceKey : resources.keySet()) {
    if (resourceKey.endsWith(PATH_SUFFIX)) {
      int trimmedLength = resourceKey.length() - PATH_SUFFIX.length();
      names.add(resourceKey.substring(0, trimmedLength));
    }
  }
  return names;
}
/**
 * Logs every key/value pair of the given config at INFO level,
 * one tab-indented {@code key = value} entry per line.
 *
 * @param config the settings to dump
 */
private void logAllSettings(Config config) {
  StringBuilder sb = new StringBuilder();
  sb.append("Elasticsearch System settings: ").append("\n");
  for (Map.Entry<String, String> setting : config.entrySet()) {
    sb.append('\t')
        .append(setting.getKey())
        .append(" = ")
        .append(setting.getValue())
        .append("\n");
  }
  LOGGER.info(sb.toString());
}
}
/**
 * Returns the list value for {@code k}.
 *
 * @param k config key to look up
 * @return the list mapped to {@code k}
 * @throws ConfigException if the key is not present
 */
public List<String> getList(String k) {
  if (containsKey(k)) {
    return getList(k, null);
  }
  throw new ConfigException("Missing key " + k + ".");
}
// Builds RocksDB Options from store config: write buffer size, compression codec,
// block size / block cache, and compaction style, using the listed literals as defaults
// when the corresponding keys are absent.
// NOTE(review): this method appears truncated in this view — the switch statements and
// method body are never closed, and `blockCacheSize` is not defined here (presumably
// computed via getBlockCacheSize below) — confirm against the full source file.
public static Options options(Config storeConfig, int numTasksForContainer) { Options options = new Options(); Long writeBufSize = storeConfig.getLong("container.write.buffer.size.bytes", 32 * 1024 * 1024); String compressionInConfig = storeConfig.get(ROCKSDB_COMPRESSION, "snappy"); switch (compressionInConfig) { case "snappy": int blockSize = storeConfig.getInt(ROCKSDB_BLOCK_SIZE_BYTES, 4096); BlockBasedTableConfig tableOptions = new BlockBasedTableConfig(); tableOptions.setBlockCacheSize(blockCacheSize).setBlockSize(blockSize); String compactionStyleInConfig = storeConfig.get(ROCKSDB_COMPACTION_STYLE, "universal"); switch (compactionStyleInConfig) { case "universal": options.setMaxWriteBufferNumber(storeConfig.getInt(ROCKSDB_NUM_WRITE_BUFFERS, 3)); options.setCreateIfMissing(true); options.setErrorIfExists(false); options.setMaxLogFileSize(storeConfig.getLong(ROCKSDB_MAX_LOG_FILE_SIZE_BYTES, 64 * 1024 * 1024L)); options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
/**
 * Asserts that the generated intermediate-stream config for {@code streamId} carries
 * the expected system, priority, delete-committed-messages flag, physical name,
 * intermediate flag, and default offset.
 *
 * @param streamId logical id of the intermediate stream under test
 * @param physicalName expected physical stream name
 * @param config full job config to validate against
 */
private void validateIntermediateStreamConfigure(String streamId, String physicalName, Config config) {
  Config streamProps = config.subset(String.format("streams.%s.", streamId), true);
  assertEquals("intermediate-system", streamProps.get("samza.system"));
  assertEquals(String.valueOf(Integer.MAX_VALUE), streamProps.get("samza.priority"));
  assertEquals("true", streamProps.get("samza.delete.committed.messages"));
  assertEquals(physicalName, streamProps.get("samza.physical.name"));
  assertEquals("true", streamProps.get("samza.intermediate"));
  assertEquals("oldest", streamProps.get("samza.offset.default"));
}
public ConfigManager(Config config) { //get rm address and port if (!config.containsKey(rmAddressOpt) || !config.containsKey(rmPortOpt)) { throw new IllegalArgumentException("Missing config: the config file does not contain the rm host or port."); } String rmAddress = config.get(rmAddressOpt); int rmPort = config.getInt(rmPortOpt); //get job name and id; if (!config.containsKey(JobConfig.JOB_NAME())) { throw new IllegalArgumentException("Missing config: the config does not contain the job name"); } jobName = config.get(JobConfig.JOB_NAME()); jobID = config.getInt(JobConfig.JOB_ID(), 1); //set polling interval if (config.containsKey(pollingIntervalOpt)) { long pollingInterval = config.getLong(pollingIntervalOpt); if (pollingInterval <= 0) { throw new IllegalArgumentException("polling interval cannot be a negative value"); } this.interval = pollingInterval; } else { this.interval = defaultPollingInterval; } this.config = config; this.coordinatorStreamConsumer = new CoordinatorStreamSystemConsumer(config, new MetricsRegistryMap()); this.yarnUtil = new YarnUtil(rmAddress, rmPort); }
/**
 * Returns the subset of the config whose keys start with {@code "<systemName>."}.
 *
 * @param systemName the system whose settings should be extracted
 * @return the system-scoped config subset
 */
private Config fetchSystemConfigs(String systemName) {
  String systemPrefix = systemName + ".";
  return config.subset(systemPrefix);
}
}
/**
 * Builds a resolver from the given config; the changelog store prefix is the
 * configured metadata topic prefix followed by {@code "_"}, or empty when the
 * prefix itself is empty.
 *
 * @param config resolver configuration
 */
public ConfigBasedIOResolver(Config config) {
  this.config = config;
  String topicPrefix = config.get(CFG_METADATA_TOPIC_PREFIX, DEFAULT_METADATA_TOPIC_PREFIX);
  this.changeLogStorePrefix = topicPrefix.isEmpty() ? topicPrefix : topicPrefix + "_";
}
/**
 * Reads the test-app settings from the job config: the required
 * {@code app.messageCount}, plus the output topic/system with defaults.
 *
 * @param context job context providing the config
 * @throws Exception per the interface contract
 */
@Override
public void init(Context context) throws Exception {
  Config jobConfig = context.getJobContext().getConfig();
  expectedMessageCount = jobConfig.getInt("app.messageCount");
  outputTopic = jobConfig.get("app.outputTopic", "output");
  outputSystem = jobConfig.get("app.outputSystem", "test-system");
}
/**
 * Extracts the unique monitor names from the config: each key's first component
 * (split on {@code MONITOR_CONFIG_KEY_SEPARATOR}) names the monitor it configures.
 *
 * @param config contains all the configuration that are defined for the monitors
 * @return a unique collection of monitor names for which configuration has been defined
 */
private static Set<String> getMonitorNames(Config config) {
  Set<String> names = new HashSet<>();
  for (String configKey : config.keySet()) {
    String[] components = StringUtils.split(configKey, MONITOR_CONFIG_KEY_SEPARATOR);
    // A config key must have at least one component ahead of the separator.
    Preconditions.checkState(components.length != 0);
    names.add(components[0]);
  }
  return names;
}
/**
 * Creates a disk quota policy from config: a no-op policy when no watermark
 * entries are configured, otherwise a watermark policy built from the
 * {@code container.disk.quota.policy.<i>.*} triples.
 *
 * @param config the job config holding the policy entries
 * @return the configured disk quota policy
 */
@Override
public DiskQuotaPolicy create(Config config) {
  final int entryCount = config.getInt(POLICY_COUNT_KEY, 0);
  if (entryCount == 0) {
    log.info("Using a no throttling disk quota policy because policy entry count was missing or set to zero ({})", POLICY_COUNT_KEY);
    return new NoThrottlingDiskQuotaPolicy();
  }

  // Presize to entryCount and use the diamond operator (matching the file's style)
  // instead of the redundant explicit type argument.
  final List<WatermarkDiskQuotaPolicy.Entry> entries = new ArrayList<>(entryCount);
  for (int i = 0; i < entryCount; ++i) {
    // Each entry is a (low, high, workFactor) triple; getDouble with no default
    // makes a missing key fail fast here rather than later.
    final double lowWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.lowWaterMark", i));
    final double highWaterMark = config.getDouble(String.format("container.disk.quota.policy.%d.highWaterMark", i));
    final double workFactor = config.getDouble(String.format("container.disk.quota.policy.%d.workFactor", i));
    entries.add(new WatermarkDiskQuotaPolicy.Entry(lowWaterMark, highWaterMark, workFactor));
  }
  return new WatermarkDiskQuotaPolicy(entries);
}
}
/**
 * Computes the job model for this single processor: starts the system admins,
 * reads stream metadata (cached, 5s TTL), builds single-processor grouper
 * metadata, and always stops the admins afterwards.
 *
 * @return the computed job model
 */
@Override
public JobModel getJobModel() {
  SystemAdmins admins = new SystemAdmins(config);
  StreamMetadataCache metadataCache = new StreamMetadataCache(admins, 5000, SystemClock.instance());
  admins.start();
  try {
    String processorId = Integer.toString(config.getInt(JobConfig.PROCESSOR_ID()));
    GrouperMetadata grouperMetadata = new GrouperMetadataImpl(
        ImmutableMap.of(String.valueOf(processorId), locationId),
        Collections.emptyMap(),
        Collections.emptyMap(),
        Collections.emptyMap());
    return JobModelManager.readJobModel(this.config, Collections.emptyMap(), metadataCache, grouperMetadata);
  } finally {
    // Stop the admins even when metadata lookup or model computation throws.
    admins.stop();
  }
}
private Map<String, Serde> validateAndGetDeserializedSerdes(Config jobConfig, int numSerdes) { Config serializers = jobConfig.subset("serializers.registry.", true); // make sure that the serializers deserialize correctly SerializableSerde<Serde> serializableSerde = new SerializableSerde<>(); assertEquals(numSerdes, serializers.size()); return serializers.entrySet().stream().collect(Collectors.toMap( e -> e.getKey().replace(SerializerConfig.SERIALIZED_INSTANCE_SUFFIX(), ""), e -> serializableSerde.fromBytes(Base64.getDecoder().decode(e.getValue().getBytes())) )); }
/**
 * Instantiates a plugin: scopes the static config to the plugin's domain, reads
 * the factory class name from {@code CFG_FACTORY}, reflectively creates the
 * factory, and invokes it with the scoped config.
 *
 * @param pluginName human-readable plugin name (used for logging)
 * @param plugin plugin id substituted into the domain format
 * @param staticConfig full static config
 * @param pluginDomainFormat format string producing the plugin's config domain
 * @param factoryInvoker turns (factory, scoped config) into the plugin instance
 * @return the created plugin instance
 */
public static <T> T initializePlugin(String pluginName, String plugin, Config staticConfig,
    String pluginDomainFormat, BiFunction<Object, Config, T> factoryInvoker) {
  Config pluginConfig = staticConfig.subset(String.format(pluginDomainFormat, plugin));

  String factoryName = pluginConfig.getOrDefault(CFG_FACTORY, "");
  Validate.notEmpty(factoryName, String.format("Factory is not set for %s", plugin));

  Object factory = ReflectionUtils.createInstance(factoryName);
  Validate.notNull(factory, String.format("Factory creation failed for %s", plugin));

  LOG.info("Instantiating {} using factory {} with props {}", pluginName, factoryName, pluginConfig);
  return factoryInvoker.apply(factory, pluginConfig);
}
/**
 * Overlays SAMZA_-prefixed environment variables onto the config: each matching
 * env var is renamed and takes precedence over the file value. Rejects any config
 * key that would only be overridden after lowercasing (mixed-case ambiguity).
 *
 * @param config the original config
 * @param env the process environment
 * @return a merged config with environment values taking precedence
 * @throws IllegalArgumentException on a mixed-case override conflict
 */
protected Config rewrite(Config config, Map<String, String> env) {
  Map<String, String> envConfig = new HashMap<>();
  env.forEach((envKey, value) -> {
    if (!envKey.startsWith(SAMZA_KEY_PREFIX)) {
      return;
    }
    String key = renameKey(envKey);
    if (config.containsKey(key)) {
      LOGGER.info(String.format("Overriding property from environment: %s=%s", key, value));
    } else {
      LOGGER.info(String.format("Setting property from environment: %s=%s", key, value));
    }
    envConfig.put(key, value);
  });

  // A key that matches only after lowercasing would silently fail to override;
  // fail loudly instead.
  for (Map.Entry<String, String> entry : config.entrySet()) {
    String key = entry.getKey();
    if (!envConfig.containsKey(key) && envConfig.containsKey(key.toLowerCase())) {
      throw new IllegalArgumentException(String.format(
          "Can't override property from environment with mixed case: %s", key));
    }
  }

  // Later maps win, so envConfig overrides config.
  return new MapConfig(Arrays.asList(config, envConfig));
}
public TestAvroSystemConsumer(String systemName, Config config) { numMessages = config.getInt(String.format("systems.%s.%s", systemName, CFG_NUM_MESSAGES), DEFAULT_NUM_EVENTS); includeNullForeignKeys = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_FOREIGN_KEYS), false); includeNullSimpleRecords = config.getBoolean(String.format("systems.%s.%s", systemName, CFG_INCLUDE_NULL_SIMPLE_RECORDS), false); sleepBetweenPollsMs = config.getLong(String.format("systems.%s.%s", systemName, CFG_SLEEP_BETWEEN_POLLS_MS), 0); }
/**
 * Instantiates the REST resources enabled by config: a jobs resource when a job
 * proxy factory is configured, and a tasks resource when a task proxy factory is.
 *
 * @param config the server config
 * @return the enabled resource instances (possibly empty)
 */
@Override
public List<? extends Object> getResourceInstances(Config config) {
  List<Object> enabled = new ArrayList<>();
  if (config.containsKey(JobsResourceConfig.CONFIG_JOB_PROXY_FACTORY)) {
    enabled.add(new JobsResource(new JobsResourceConfig(config)));
  }
  if (config.containsKey(TaskResourceConfig.CONFIG_TASK_PROXY_FACTORY)) {
    enabled.add(new TasksResource(new TaskResourceConfig(config)));
  }
  return enabled;
}
}
/**
 * Splits the container's configured block cache budget evenly across its tasks.
 *
 * @param storeConfig store config holding {@code container.cache.size.bytes}
 *        (default 100 MiB)
 * @param numTasksForContainer number of tasks sharing the cache; assumed positive
 * @return the per-task cache size in bytes
 */
public static Long getBlockCacheSize(Config storeConfig, int numTasksForContainer) {
  long containerCacheBytes = storeConfig.getLong("container.cache.size.bytes", 100 * 1024 * 1024L);
  return containerCacheBytes / numTasksForContainer;
}
}