/**
 * Stores an integer-valued setting under {@code key} in the shared config object.
 *
 * @param key   property name to write
 * @param value integer value to store (autoboxed by the configuration API)
 */
public static void setConfigInteger(String key, int value) {
  config.setProperty(key, value);
}
/**
 * Stores a boolean-valued setting under {@code key} in the shared config object.
 *
 * @param key   property name to write
 * @param value boolean value to store (autoboxed by the configuration API)
 */
public static void setConfigBoolean(String key, boolean value) {
  config.setProperty(key, value);
}
/**
 * Stores a string-valued setting under {@code key} in the shared config object.
 *
 * @param key   property name to write
 * @param value string value to store
 */
public static void setConfigString(String key, String value) {
  config.setProperty(key, value);
}
/**
 * Records the min and max values observed for {@code column} into the given
 * metadata properties, under the column-scoped MIN_VALUE / MAX_VALUE keys.
 *
 * @param properties metadata properties to mutate
 * @param column     column whose value range is being recorded
 * @param minValue   smallest observed value, serialized as a string
 * @param maxValue   largest observed value, serialized as a string
 */
public static void addColumnMinMaxValueInfo(PropertiesConfiguration properties, String column,
    String minValue, String maxValue) {
  String minKey = getKeyFor(column, MIN_VALUE);
  String maxKey = getKeyFor(column, MAX_VALUE);
  properties.setProperty(minKey, minValue);
  properties.setProperty(maxKey, maxValue);
}
private void createMetadataFile(File currentDir, File v3Dir) throws ConfigurationException { File v2MetadataFile = new File(currentDir, V1Constants.MetadataKeys.METADATA_FILE_NAME); File v3MetadataFile = new File(v3Dir, V1Constants.MetadataKeys.METADATA_FILE_NAME); final PropertiesConfiguration properties = new PropertiesConfiguration(v2MetadataFile); // update the segment version properties.setProperty(V1Constants.MetadataKeys.Segment.SEGMENT_VERSION, SegmentVersion.v3.toString()); properties.save(v3MetadataFile); }
/**
 * Rewrites metadata.properties for a converted segment: optionally overrides
 * the table name, and marks every converted column as dictionary-less with the
 * sentinel bits-per-element value of -1.
 *
 * @param segmentDir segment directory containing metadata.properties
 * @param columns    columns that were converted (dictionary removed)
 * @param tableName  new table name to record in the metadata; skipped if null
 * @throws IOException            on file access errors
 * @throws ConfigurationException if the properties cannot be loaded or saved
 */
private void updateMetadata(File segmentDir, String[] columns, String tableName)
    throws IOException, ConfigurationException {
  File metadataFile = new File(segmentDir, V1Constants.MetadataKeys.METADATA_FILE_NAME);
  PropertiesConfiguration metadata = new PropertiesConfiguration(metadataFile);
  if (tableName != null) {
    metadata.setProperty(V1Constants.MetadataKeys.Segment.TABLE_NAME, tableName);
  }
  for (String column : columns) {
    String dictionaryKey =
        V1Constants.MetadataKeys.Column.getKeyFor(column, V1Constants.MetadataKeys.Column.HAS_DICTIONARY);
    String bitsPerElementKey =
        V1Constants.MetadataKeys.Column.getKeyFor(column, V1Constants.MetadataKeys.Column.BITS_PER_ELEMENT);
    metadata.setProperty(dictionaryKey, false);
    metadata.setProperty(bitsPerElementKey, -1);
  }
  metadata.save();
}
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.configuration.PropertiesConfigurationLayout;

/**
 * Loads a .properties file while preserving its layout (comments, blank lines,
 * key ordering), sets one property, and writes the file back in place.
 *
 * <p>Usage: {@code java PropertiesReader <basename>} — reads and rewrites
 * {@code <basename>.properties}.
 */
public class PropertiesReader {

  public static void main(String[] args) throws ConfigurationException, IOException {
    File file = new File(args[0] + ".properties");
    PropertiesConfiguration config = new PropertiesConfiguration();
    PropertiesConfigurationLayout layout = new PropertiesConfigurationLayout(config);

    // .properties files are ISO-8859-1 by specification; the original relied on
    // the platform default charset. Close the reader deterministically.
    try (Reader reader =
        new InputStreamReader(new FileInputStream(file), StandardCharsets.ISO_8859_1)) {
      layout.load(reader);
    }

    config.setProperty("test", "testValue");

    // BUG FIX: the original called propsFile.newWriter() on an undefined
    // variable `propsFile` (compile error). Write back to the same file that
    // was read, closing the writer via try-with-resources.
    try (Writer writer =
        new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.ISO_8859_1)) {
      layout.save(writer);
    }
  }
}
// Builds the per-thread SecorConfig: loads the properties file named by the
// -Dconfig system property, then overlays every system property on top so
// command-line -D flags override file-provided values.
// NOTE(review): block is the tail of an anonymous ThreadLocal<SecorConfig>
// subclass whose declaration is outside this view — trailing "};" closes it.
@Override
protected SecorConfig initialValue() {
  // Load the default configuration file first
  Properties systemProperties = System.getProperties();
  String configProperty = systemProperties.getProperty("config");
  PropertiesConfiguration properties;
  try {
    properties = new PropertiesConfiguration(configProperty);
  } catch (ConfigurationException e) {
    // Wrap with the offending path and preserve the cause for diagnosis.
    throw new RuntimeException("Error loading configuration from " + configProperty, e);
  }
  // System properties win over values loaded from the file.
  for (final Map.Entry<Object, Object> entry : systemProperties.entrySet()) {
    properties.setProperty(entry.getKey().toString(), entry.getValue());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Configuration: {}", ConfigurationUtils.toString(properties));
  }
  return new SecorConfig(properties);
}
};
// Record the segment's dimension and metric column lists in the metadata.
_segmentProperties.setProperty(V1Constants.MetadataKeys.Segment.DIMENSIONS, dimensionColumns);
_segmentProperties.setProperty(V1Constants.MetadataKeys.Segment.METRICS, metricColumns);
// Read the raw value for key k; it must be present and non-blank.
String s = configuration.getString(k);
Preconditions.checkArgument(StringUtils.isNotBlank(s),"Invalid Configuration: key %s has null empty value",k);
// Rewrite the value in place as an absolute path resolved against the
// configuration's parent directory, so relative paths work regardless of CWD.
configuration.setProperty(k,getAbsolutePath(configParent,s));
// Store the application secret under its well-known key in the default
// configuration (presumably a freshly generated secret — confirm at caller).
defaultConfiguration.setProperty(NinjaConstant.applicationSecret, secret);
/**
 * Verifies that a "partitions" property round-trips through
 * {@code ColumnPartitionMetadata.extractPartitions()} unchanged.
 */
@Test
public void testPartitionsConfig() {
  PropertiesConfiguration metadataProperties = new PropertiesConfiguration();
  metadataProperties.setProperty("partitions", PARTITIONS);
  Set<Integer> extracted =
      ColumnPartitionMetadata.extractPartitions(metadataProperties.getList("partitions"));
  assertEquals(extracted, PARTITIONS);
}
/**
 * Verifies that the legacy "partitionRanges" string format is still parsed
 * into the same partition set by
 * {@code ColumnPartitionMetadata.extractPartitions()}.
 */
@Test
public void testLegacyPartitionRangesConfig() {
  PropertiesConfiguration metadataProperties = new PropertiesConfiguration();
  metadataProperties.setProperty("partitionRanges", LEGACY_PARTITION_RANGES_STRING);
  Set<Integer> extracted =
      ColumnPartitionMetadata.extractPartitions(metadataProperties.getList("partitionRanges"));
  assertEquals(extracted, PARTITIONS);
}
}
// Thread pool sizing: 60 worker threads, 20 runner threads.
conf.setProperty(ResourceManager.QUERY_WORKER_CONFIG_KEY, 60);
conf.setProperty(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 20);
// Per-query / per-table resource limits: 50% of threads per query, hard cap
// of 60 and soft cap of 40 threads per table.
conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 60);
conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 40);
// At most 10 queries may wait in the queue per scheduler group.
conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 10);
// Mark the converted column as raw-encoded: no dictionary, and the
// bits-per-element sentinel used for raw indexes.
_convertedProperties.setProperty(
    V1Constants.MetadataKeys.Column.getKeyFor(columnName, V1Constants.MetadataKeys.Column.HAS_DICTIONARY), false);
_convertedProperties.setProperty(
    V1Constants.MetadataKeys.Column.getKeyFor(columnName, V1Constants.MetadataKeys.Column.BITS_PER_ELEMENT),
    BITS_PER_ELEMENT_FOR_RAW_INDEX);
/**
 * Asserts that a ZookeeperConnector configured with the given Zookeeper path
 * and consumer group "secor_cg" derives the expected committed-offset group path.
 *
 * @param zookeeperPath      value for the kafka.zookeeper.path property
 * @param expectedOffsetPath expected committed offset group path
 */
protected void verify(String zookeeperPath, String expectedOffsetPath) {
  PropertiesConfiguration configProperties = new PropertiesConfiguration();
  configProperties.setProperty("kafka.zookeeper.path", zookeeperPath);
  configProperties.setProperty("secor.kafka.group", "secor_cg");
  ZookeeperConnector connector = new ZookeeperConnector();
  connector.setConfig(new SecorConfig(configProperties));
  Assert.assertEquals(expectedOffsetPath, connector.getCommittedOffsetGroupPath());
}
}
// Currently disabled. Exercises the scheduler's out-of-capacity path: with a
// tiny hard limit and a 1-deep pending queue, a second submission for the same
// group should be rejected with SERVER_OUT_OF_CAPACITY_ERROR.
@Test(enabled = false)
public void testOutOfCapacityResponse() throws Exception {
  PropertiesConfiguration conf = new PropertiesConfiguration();
  conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 5);
  conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 1);
  TestPriorityScheduler scheduler = TestPriorityScheduler.create(conf);
  scheduler.start();
  List<ListenableFuture<byte[]>> results = new ArrayList<>();
  results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
  TestSchedulerGroup group = TestPriorityScheduler.groupFactory.groupMap.get("1");
  // Exhaust group "1": reserve more threads than the hard limit and fill the
  // single pending slot, so the next submit cannot be accepted.
  group.addReservedThreads(10);
  group.addLast(createQueryRequest("1", metrics));
  results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
  // The second result must carry the out-of-capacity error in its metadata.
  DataTable dataTable = DataTableFactory.getDataTable(results.get(1).get());
  assertTrue(dataTable.getMetadata()
      .containsKey(DataTable.EXCEPTION_METADATA_KEY + QueryException.SERVER_OUT_OF_CAPACITY_ERROR.getErrorCode()));
  scheduler.stop();
}
// NOTE(review): fragment — the method signature preceding this throws clause
// is outside this view.
throws InterruptedException, ExecutionException, IOException, BrokenBarrierException {
  // Resource policy: 50% of threads per query, hard cap 40 / soft cap 20
  // threads per table.
  PropertiesConfiguration conf = new PropertiesConfiguration();
  conf.setProperty(ResourceLimitPolicy.THREADS_PER_QUERY_PCT, 50);
  conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 40);
  conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 20);
  // Synchronize test and scheduler threads via a 2-party barrier
  // (presumably paired with an await elsewhere — confirm against the rest of
  // the test).
  useBarrier = true;
  startupBarrier = new CyclicBarrier(2);
// Verifies shutdown behavior: queries submitted around a scheduler stop()
// should complete with SERVER_SCHEDULER_DOWN_ERROR rather than hang.
@Test
public void testStartStopQueries() throws ExecutionException, InterruptedException, IOException {
  TestPriorityScheduler scheduler = TestPriorityScheduler.create();
  scheduler.start();
  // NOTE(review): this conf is built but never passed to the scheduler above —
  // looks vestigial; confirm against TestPriorityScheduler.create().
  PropertiesConfiguration conf = new PropertiesConfiguration();
  conf.setProperty(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 5);
  conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 5);
  List<ListenableFuture<byte[]>> results = new ArrayList<>();
  results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
  TestSchedulerGroup group = TestPriorityScheduler.groupFactory.groupMap.get("1");
  // Saturate group "1" so the second submission stays pending when we stop.
  group.addReservedThreads(10);
  group.addLast(createQueryRequest("1", metrics));
  results.add(scheduler.submit(createServerQueryRequest("1", metrics)));
  scheduler.stop();
  // Give the queue's wakeup loop time to observe the shutdown before asserting.
  long queueWakeTimeMicros = ((MultiLevelPriorityQueue) scheduler.getQueue()).getWakeupTimeMicros();
  long sleepTimeMs = queueWakeTimeMicros >= 1000 ? queueWakeTimeMicros / 1000 + 10 : 10;
  Thread.sleep(sleepTimeMs);
  // At least one of the submitted queries must report the scheduler-down error.
  int hasServerShuttingDownError = 0;
  for (ListenableFuture<byte[]> result : results) {
    DataTable table = DataTableFactory.getDataTable(result.get());
    hasServerShuttingDownError += table.getMetadata()
        .containsKey(DataTable.EXCEPTION_METADATA_KEY + QueryException.SERVER_SCHEDULER_DOWN_ERROR.getErrorCode())
        ? 1 : 0;
  }
  assertTrue(hasServerShuttingDownError > 0);
}
// Verifies MultiLevelPriorityQueue capacity enforcement: with a 2-deep pending
// queue and the group's thread budget exhausted, a third put() for the same
// group must throw OutOfCapacityException.
@Test
public void testPutOutOfCapacity() throws OutOfCapacityException {
  PropertiesConfiguration conf = new PropertiesConfiguration();
  conf.setProperty(MultiLevelPriorityQueue.MAX_PENDING_PER_GROUP_KEY, 2);
  ResourceManager rm = new UnboundedResourceManager(conf);
  MultiLevelPriorityQueue queue = createQueue(conf, rm);
  queue.put(createQueryRequest(groupOne, metrics));
  // Exhaust the group's thread budget so subsequent puts can only wait.
  groupFactory.groupMap.get(groupOne).addReservedThreads(rm.getTableThreadsHardLimit());
  // we should still be able to add one more waiting query
  queue.put(createQueryRequest(groupOne, metrics));
  // this assert is to test that above call to put() is not the one
  // throwing exception
  assertTrue(true);
  // it should throw now
  try {
    queue.put(createQueryRequest(groupOne, metrics));
  } catch (OutOfCapacityException e) {
    assertTrue(true);
    return;
  }
  // Reached only if the third put() did not throw — fail the test.
  assertTrue(false);
}