/**
 * Returns the names of all column families in the given keyspace, as extracted by
 * {@code nameGetter} from each {@link CfDef}.
 *
 * <p>Before reading the schema, waits for all reachable nodes to agree on schema
 * versions so we do not observe a half-propagated schema.
 *
 * @throws InsufficientConsistencyException if a quorum of nodes could not agree
 *         on schema versions in time
 * @throws TException on Thrift transport or protocol failure
 */
private Set<String> getTableNames(CassandraClient client, String keyspace, Function<CfDef, String> nameGetter)
        throws TException {
    try {
        CassandraKeyValueServices.waitForSchemaVersions(
                config, client, "before making a call to get all table names.");
    } catch (IllegalStateException e) {
        throw new InsufficientConsistencyException("Could not reach a quorum of nodes agreeing on schema versions "
                + "before making a call to get all table names.", e);
    }

    KsDef keyspaceDefinition = client.describe_keyspace(keyspace);
    return keyspaceDefinition.getCf_defs()
            .stream()
            .map(nameGetter)
            .collect(Collectors.toSet());
}
}
/**
 * Verifies that the replication factor currently set on the configured keyspace
 * matches the replication factor requested in the AtlasDB config, across all
 * datacenters known to the cluster.
 *
 * @throws TException on Thrift transport or protocol failure
 */
static void currentRfOnKeyspaceMatchesDesiredRf(CassandraClient client, CassandraKeyValueServiceConfig config)
        throws TException {
    KsDef keyspaceDefinition = client.describe_keyspace(config.getKeyspaceOrThrow());
    Set<String> datacenters = sanityCheckDatacenters(client, config);
    sanityCheckReplicationFactor(keyspaceDefinition, config, datacenters);
}
/**
 * Checks whether the configured keyspace already exists on the given host.
 * Returns {@code true} if describe_keyspace succeeds (also waiting for schema
 * agreement before answering), and {@code false} if the keyspace is not found.
 *
 * <p>NOTE(review): the {@link CassandraClient} obtained here is neither closed nor
 * returned to a pool in this method — presumably getClientInternal hands out a
 * connection whose lifecycle is managed elsewhere; confirm there is no leak.
 * Also note that Thrift failures other than NotFoundException propagate as
 * TException rather than being treated as "does not exist".
 */
private static boolean keyspaceAlreadyExists(InetSocketAddress host, CassandraKeyValueServiceConfig config) throws TException { try { CassandraClient client = CassandraClientFactory.getClientInternal(host, config); client.describe_keyspace(config.getKeyspaceOrThrow()); CassandraKeyValueServices.waitForSchemaVersions(config, client, "while checking if schemas diverged on startup"); return true; } catch (NotFoundException e) { return false; } }
/**
 * Lowers the read consistency level to ONE when it is safe to do so: the cluster
 * must have exactly one datacenter, the keyspace's current replication factor in
 * that datacenter must equal the configured replication factor and be 2, and the
 * config must declare that the cluster meets normal consistency guarantees.
 *
 * <p>Best-effort: if the cluster cannot be queried, the (safe) existing
 * consistency level is kept.
 */
private void lowerConsistencyWhenSafe() {
    Set<String> dcs;
    Map<String, String> strategyOptions;
    try {
        dcs = clientPool.runWithRetry(client -> CassandraVerifier.sanityCheckDatacenters(
                client,
                config));
        KsDef ksDef = clientPool.runWithRetry(client ->
                client.describe_keyspace(config.getKeyspaceOrThrow()));
        strategyOptions = Maps.newHashMap(ksDef.getStrategy_options());

        if (dcs.size() == 1) {
            String dc = dcs.iterator().next();
            if (strategyOptions.get(dc) != null) {
                int currentRf = Integer.parseInt(strategyOptions.get(dc));
                // Flattened from two nested ifs; the combined condition is identical.
                if (currentRf == config.replicationFactor()
                        && currentRf == 2
                        && config.clusterMeetsNormalConsistencyGuarantees()) {
                    log.info("Setting Read Consistency to ONE, as cluster has only one datacenter at RF2.");
                    readConsistency = ConsistencyLevel.ONE;
                    rangeLoader.setConsistencyLevel(readConsistency);
                }
            }
        }
    } catch (TException e) {
        // Fix: this exception was previously swallowed silently. Since staying at the
        // higher consistency level is safe, we log instead of rethrowing.
        log.warn("Unable to determine whether the read consistency level can be lowered; "
                + "leaving the current consistency level in place.", e);
    }
}
/**
 * Drops the given tables (column families) from the keyspace, if they exist.
 * For each existing table this truncates then drops the column family while
 * waiting for schema agreement around the mutation, and deletes all of the
 * table's metadata rows. Tables that do not exist are logged and skipped.
 * Any failure is rethrown as an AtlasDB dependency exception.
 */
void dropTables(final Set<TableReference> tablesToDrop) { try { clientPool.runWithRetry(client -> { KsDef ks = client.describe_keyspace(config.getKeyspaceOrThrow()); Set<TableReference> existingTables = Sets.newHashSet(); existingTables.addAll(ks.getCf_defs().stream() .map(CassandraKeyValueServices::tableReferenceFromCfDef) .collect(Collectors.toList())); for (TableReference table : tablesToDrop) { CassandraVerifier.sanityCheckTableName(table); if (existingTables.contains(table)) { CassandraKeyValueServices.runWithWaitingForSchemas( () -> truncateThenDrop(table, client), config, client, "dropping the column family for table " + table + " in a call to drop tables"); cassandraTableMetadata.deleteAllMetadataRowsForTable(table); } else { log.warn("Ignored call to drop a table ({}) that did not exist.", LoggingArgs.tableRef(table)); } } return null; }); } catch (Exception e) { throw Throwables.unwrapAndThrowAtlasDbDependencyException(e); } }
client = CassandraClientFactory.getClientInternal(host, config); try { client.describe_keyspace(config.getKeyspaceOrThrow()); } catch (NotFoundException e) { return; // don't care to check for ring consistency when we're not even fully initialized
client.describe_keyspace(config.getKeyspaceOrThrow()).getCf_defs());
/**
 * Asserts that the column family backing the internal test table has the
 * expected {@code gc_grace_seconds} value on the cluster side.
 *
 * @throws TException on Thrift transport or protocol failure
 */
private void assertThatGcGraceSecondsIs(CassandraKeyValueService kvs, int gcGraceSeconds) throws TException {
    List<CfDef> knownCfs = kvs.getClientPool().runWithRetry(client ->
            client.describe_keyspace(CASSANDRA.getConfig().getKeyspaceOrThrow()).getCf_defs());
    List<CfDef> matchingCfs = knownCfs.stream()
            .filter(cf -> cf.getName().equals(getInternalTestTableName()))
            .collect(Collectors.toList());
    // Exactly one column family must correspond to the test table.
    CfDef clusterSideCf = Iterables.getOnlyElement(matchingCfs);
    assertThat(clusterSideCf.gc_grace_seconds, equalTo(gcGraceSeconds));
}
private static void updateExistingKeyspace(CassandraClientPool clientPool, CassandraKeyValueServiceConfig config) throws TException { clientPool.runWithRetry((FunctionCheckedException<CassandraClient, Void, TException>) client -> { KsDef originalKsDef = client.describe_keyspace(config.getKeyspaceOrThrow()); // there was an existing keyspace // check and make sure it's definition is up to date with our config KsDef modifiedKsDef = originalKsDef.deepCopy(); checkAndSetReplicationFactor( client, modifiedKsDef, config); if (!modifiedKsDef.equals(originalKsDef)) { // Can't call system_update_keyspace to update replication factor if CfDefs are set modifiedKsDef.setCf_defs(ImmutableList.of()); client.system_update_keyspace(modifiedKsDef); CassandraKeyValueServices.waitForSchemaVersions(config, client, "after updating the existing keyspace"); } return null; }); }
/**
 * Rewrites the keyspace definition to use NetworkTopologyStrategy with the given
 * replication factor for datacenter "dc1".
 *
 * @throws TException on Thrift transport or protocol failure
 */
private void changeReplicationFactor(int replicationFactor) throws TException {
    clientPool.run((FunctionCheckedException<CassandraClient, Void, TException>) client -> {
        KsDef updatedKsDef = client.describe_keyspace(CASSANDRA.getConfig().getKeyspaceOrThrow()).deepCopy();
        updatedKsDef.setStrategy_class(CassandraConstants.NETWORK_STRATEGY);
        updatedKsDef.setStrategy_options(ImmutableMap.of("dc1", Integer.toString(replicationFactor)));
        // CfDefs must be cleared before a system_update_keyspace that changes replication.
        updatedKsDef.setCf_defs(ImmutableList.of());
        client.system_update_keyspace(updatedKsDef);
        return null;
    });
}
/**
 * Verifies that a CfDef generated locally for a table matches the CfDef the
 * cluster reports after the table is created — i.e. that serialization to and
 * deserialization from the database round-trips column family metadata.
 *
 * <p>Unwraps the Cassandra KVS from a TableSplittingKeyValueService when the
 * test runs against Scylla; any other KVS type is a test-setup error.
 */
@Test public void testCfEqualityChecker() throws TException { CassandraKeyValueServiceImpl kvs; if (keyValueService instanceof CassandraKeyValueService) { kvs = (CassandraKeyValueServiceImpl) keyValueService; } else if (keyValueService instanceof TableSplittingKeyValueService) { // scylla tests KeyValueService delegate = ((TableSplittingKeyValueService) keyValueService).getDelegate(NEVER_SEEN); assertTrue("The nesting of Key Value Services has apparently changed", delegate instanceof CassandraKeyValueService); kvs = (CassandraKeyValueServiceImpl) delegate; } else { throw new IllegalArgumentException("Can't run this cassandra-specific test against a non-cassandra KVS"); } kvs.createTable(NEVER_SEEN, getMetadata()); List<CfDef> knownCfs = kvs.getClientPool().runWithRetry(client -> client.describe_keyspace(CASSANDRA.getConfig().getKeyspaceOrThrow()).getCf_defs()); CfDef clusterSideCf = Iterables.getOnlyElement(knownCfs.stream() .filter(cf -> cf.getName().equals(getInternalTestTableName())) .collect(Collectors.toList())); assertTrue("After serialization and deserialization to database, Cf metadata did not match.", ColumnFamilyDefinitions.isMatchingCf(kvs.getCfForTable(NEVER_SEEN, getMetadata(), FOUR_DAYS_IN_SECONDS), clusterSideCf)); }
/**
 * Returns the names of all column families in the given keyspace, as extracted
 * by {@code nameGetter} from each {@link CfDef}. Waits for schema-version
 * agreement first, converting a timeout (IllegalStateException) into an
 * {@link InsufficientConsistencyException}.
 */
private Set<String> getTableNames(CassandraClient client, String keyspace, Function<CfDef, String> nameGetter) throws TException { try { CassandraKeyValueServices .waitForSchemaVersions(config, client, "before making a call to get all table names."); } catch (IllegalStateException e) { throw new InsufficientConsistencyException("Could not reach a quorum of nodes agreeing on schema versions " + "before making a call to get all table names.", e); } KsDef ks = client.describe_keyspace(keyspace); return ks.getCf_defs().stream() .map(nameGetter) .collect(Collectors.toSet()); } }
/**
 * Returns whether the configured keyspace already exists on the given host:
 * {@code true} if describe_keyspace succeeds (after waiting for schema
 * agreement), {@code false} on NotFoundException.
 *
 * <p>NOTE(review): the client obtained from getClientInternal is not visibly
 * closed or pooled here — confirm its lifecycle is handled by the factory.
 */
private static boolean keyspaceAlreadyExists(InetSocketAddress host, CassandraKeyValueServiceConfig config) throws TException { try { CassandraClient client = CassandraClientFactory.getClientInternal(host, config); client.describe_keyspace(config.getKeyspaceOrThrow()); CassandraKeyValueServices.waitForSchemaVersions(config, client, "while checking if schemas diverged on startup"); return true; } catch (NotFoundException e) { return false; } }
/**
 * Verifies that the replication factor currently set on the configured keyspace
 * matches the replication factor requested in the AtlasDB config, across all
 * datacenters known to the cluster.
 */
static void currentRfOnKeyspaceMatchesDesiredRf(CassandraClient client, CassandraKeyValueServiceConfig config) throws TException { KsDef ks = client.describe_keyspace(config.getKeyspaceOrThrow()); Set<String> dcs = sanityCheckDatacenters(client, config); sanityCheckReplicationFactor(ks, config, dcs); }
/**
 * Lowers the read consistency level to ONE when it is safe to do so: the cluster
 * must have exactly one datacenter, the keyspace's current replication factor in
 * that datacenter must equal the configured replication factor and be 2, and the
 * config must declare that the cluster meets normal consistency guarantees.
 *
 * <p>Best-effort: if the cluster cannot be queried, the (safe) existing
 * consistency level is kept.
 */
private void lowerConsistencyWhenSafe() {
    Set<String> dcs;
    Map<String, String> strategyOptions;
    try {
        dcs = clientPool.runWithRetry(client -> CassandraVerifier.sanityCheckDatacenters(
                client,
                config));
        KsDef ksDef = clientPool.runWithRetry(client ->
                client.describe_keyspace(config.getKeyspaceOrThrow()));
        strategyOptions = Maps.newHashMap(ksDef.getStrategy_options());

        if (dcs.size() == 1) {
            String dc = dcs.iterator().next();
            if (strategyOptions.get(dc) != null) {
                int currentRf = Integer.parseInt(strategyOptions.get(dc));
                // Flattened from two nested ifs; the combined condition is identical.
                if (currentRf == config.replicationFactor()
                        && currentRf == 2
                        && config.clusterMeetsNormalConsistencyGuarantees()) {
                    log.info("Setting Read Consistency to ONE, as cluster has only one datacenter at RF2.");
                    readConsistency = ConsistencyLevel.ONE;
                    rangeLoader.setConsistencyLevel(readConsistency);
                }
            }
        }
    } catch (TException e) {
        // Fix: this exception was previously swallowed silently. Since staying at the
        // higher consistency level is safe, we log instead of rethrowing.
        log.warn("Unable to determine whether the read consistency level can be lowered; "
                + "leaving the current consistency level in place.", e);
    }
}
/**
 * Drops the given tables (column families) from the keyspace, if they exist.
 * For each existing table this truncates then drops the column family while
 * waiting for schema agreement around the mutation, and deletes all of the
 * table's metadata rows. Tables that do not exist are logged and skipped.
 * Any failure is rethrown as an AtlasDB dependency exception.
 */
void dropTables(final Set<TableReference> tablesToDrop) { try { clientPool.runWithRetry(client -> { KsDef ks = client.describe_keyspace(config.getKeyspaceOrThrow()); Set<TableReference> existingTables = Sets.newHashSet(); existingTables.addAll(ks.getCf_defs().stream() .map(CassandraKeyValueServices::tableReferenceFromCfDef) .collect(Collectors.toList())); for (TableReference table : tablesToDrop) { CassandraVerifier.sanityCheckTableName(table); if (existingTables.contains(table)) { CassandraKeyValueServices.runWithWaitingForSchemas( () -> truncateThenDrop(table, client), config, client, "dropping the column family for table " + table + " in a call to drop tables"); cassandraTableMetadata.deleteAllMetadataRowsForTable(table); } else { log.warn("Ignored call to drop a table ({}) that did not exist.", LoggingArgs.tableRef(table)); } } return null; }); } catch (Exception e) { throw Throwables.unwrapAndThrowAtlasDbDependencyException(e); } }
client = CassandraClientFactory.getClientInternal(host, config); try { client.describe_keyspace(config.getKeyspaceOrThrow()); } catch (NotFoundException e) { return; // don't care to check for ring consistency when we're not even fully initialized
client.describe_keyspace(config.getKeyspaceOrThrow()).getCf_defs());
/**
 * Brings an already-existing keyspace's definition in line with the AtlasDB
 * config (currently: the replication factor). Issues a schema update only when
 * the definition actually changed — clearing CfDefs first, since
 * system_update_keyspace cannot change replication while they are set — and
 * then waits for schema agreement.
 */
private static void updateExistingKeyspace(CassandraClientPool clientPool, CassandraKeyValueServiceConfig config) throws TException { clientPool.runWithRetry((FunctionCheckedException<CassandraClient, Void, TException>) client -> { KsDef originalKsDef = client.describe_keyspace(config.getKeyspaceOrThrow()); // there was an existing keyspace // check and make sure it's definition is up to date with our config KsDef modifiedKsDef = originalKsDef.deepCopy(); checkAndSetReplicationFactor( client, modifiedKsDef, config); if (!modifiedKsDef.equals(originalKsDef)) { // Can't call system_update_keyspace to update replication factor if CfDefs are set modifiedKsDef.setCf_defs(ImmutableList.of()); client.system_update_keyspace(modifiedKsDef); CassandraKeyValueServices.waitForSchemaVersions(config, client, "after updating the existing keyspace"); } return null; }); }