.addClusteringColumn("d", DataType.cint()) .withOptions() .compactStorage());
@Test(groups = "unit") public void should_create_table_with_compact_storage() throws Exception { // When SchemaStatement statement = createTable("test") .addPartitionKey("id", DataType.bigint()) .addClusteringColumn("col1", DataType.uuid()) .addClusteringColumn("col2", DataType.uuid()) .addColumn("name", DataType.text()) .withOptions() .compactStorage(); // Then assertThat(statement.getQueryString()) .isEqualTo( "\n\tCREATE TABLE test(\n\t\t" + "id bigint,\n\t\t" + "col1 uuid,\n\t\t" + "col2 uuid,\n\t\t" + "name text,\n\t\t" + "PRIMARY KEY(id, col1, col2))\n\t" + "WITH COMPACT STORAGE"); }
@Test(
    groups = "unit",
    expectedExceptions = IllegalStateException.class,
    expectedExceptionsMessageRegExp =
        "Cannot create table 'test' with compact storage and static columns '\\[stat\\]'")
public void should_fail_creating_table_with_static_columns_and_compact_storage()
    throws Exception {
  // COMPACT STORAGE is incompatible with static columns: generating the query
  // string for such a definition must throw IllegalStateException whose
  // message names the offending static column(s), as asserted by the
  // expectedExceptions/expectedExceptionsMessageRegExp attributes above.
  createTable("test")
      .addPartitionKey("pk", DataType.bigint())
      .addClusteringColumn("cluster", DataType.uuid())
      .addStaticColumn("stat", DataType.text())
      .withOptions()
      .compactStorage()
      .getQueryString();
}
}
.clusteringOrder("col1", Direction.ASC) .clusteringOrder("col2", Direction.DESC) .compactStorage() .bloomFilterFPChance(0.01) .caching(Caching.ROWS_ONLY)
.clusteringOrder(RANGE_TOKEN_COLUMN, SchemaBuilder.Direction.ASC) .clusteringOrder(IS_START_TOKEN_COLUMN, SchemaBuilder.Direction.ASC) .compactStorage() .defaultTimeToLive(TTL));
.clusteringOrder(RANGE_TOKEN_COLUMN, SchemaBuilder.Direction.ASC) .clusteringOrder(IS_START_TOKEN_COLUMN, SchemaBuilder.Direction.ASC) .compactStorage() .defaultTimeToLive(TTL));
.caching(SchemaBuilder.Caching.KEYS_ONLY) .compactionOptions(getCompaction(appendHeavyReadLight)) .compactStorage() .compressionOptions(getCompression(tableMetadata.getExplicitCompressionBlockSizeKB())) .dcLocalReadRepairChance(0.1)
@Test(groups = "unit") public void should_create_table_with_compact_storage() throws Exception { // When SchemaStatement statement = createTable("test") .addPartitionKey("id", DataType.bigint()) .addClusteringColumn("col1", DataType.uuid()) .addClusteringColumn("col2", DataType.uuid()) .addColumn("name", DataType.text()) .withOptions() .compactStorage(); // Then assertThat(statement.getQueryString()) .isEqualTo( "\n\tCREATE TABLE test(\n\t\t" + "id bigint,\n\t\t" + "col1 uuid,\n\t\t" + "col2 uuid,\n\t\t" + "name text,\n\t\t" + "PRIMARY KEY(id, col1, col2))\n\t" + "WITH COMPACT STORAGE"); }
private static void initializeTable(final Session session, final String keyspaceName, final String tableName, final Configuration configuration, final boolean allowCompactStorage) { final Options createTable = createTable(keyspaceName, tableName) .ifNotExists() .addPartitionKey(KEY_COLUMN_NAME, DataType.blob()) .addClusteringColumn(COLUMN_COLUMN_NAME, DataType.blob()) .addColumn(VALUE_COLUMN_NAME, DataType.blob()) .withOptions() .compressionOptions(compressionOptions(configuration)) .compactionOptions(compactionOptions(configuration)); // COMPACT STORAGE is allowed on Cassandra 2 or earlier // when COMPACT STORAGE is allowed, the default is to enable it final boolean useCompactStorage = (allowCompactStorage && configuration.has(CF_COMPACT_STORAGE)) ? configuration.get(CF_COMPACT_STORAGE) : allowCompactStorage; session.execute(useCompactStorage ? createTable.compactStorage() : createTable); }
@Test(
    groups = "unit",
    expectedExceptions = IllegalStateException.class,
    expectedExceptionsMessageRegExp =
        "Cannot create table 'test' with compact storage and static columns '\\[stat\\]'")
public void should_fail_creating_table_with_static_columns_and_compact_storage()
    throws Exception {
  // COMPACT STORAGE is incompatible with static columns: generating the query
  // string for such a definition must throw IllegalStateException whose
  // message names the offending static column(s), as asserted by the
  // expectedExceptions/expectedExceptionsMessageRegExp attributes above.
  createTable("test")
      .addPartitionKey("pk", DataType.bigint())
      .addClusteringColumn("cluster", DataType.uuid())
      .addStaticColumn("stat", DataType.text())
      .withOptions()
      .compactStorage()
      .getQueryString();
}
}
.clusteringOrder("col1", Direction.ASC) .clusteringOrder("col2", Direction.DESC) .compactStorage() .bloomFilterFPChance(0.01) .caching(Caching.ROWS_ONLY)
.caching(SchemaBuilder.Caching.KEYS_ONLY) .compactionOptions(getCompaction(appendHeavyReadLight)) .compactStorage() .compressionOptions(getCompression(tableMetadata.getExplicitCompressionBlockSizeKB())) .dcLocalReadRepairChance(0.1)