@Override
public synchronized void storeUpperLimit(final long limit) {
    DebugLogger.logger.debug("[PUT] Storing upper limit of {}.", limit);
    clientPool.runWithRetry(new FunctionCheckedException<CassandraClient, Void, RuntimeException>() {
        @GuardedBy("CassandraTimestampBoundStore.this")
        @Override
        public Void apply(CassandraClient client) {
            cas(client, currentLimit, limit);
            return null;
        }
    });
}
private boolean doesConfigReplicationFactorMatchWithCluster() {
    return clientPool.runWithRetry(client -> {
        try {
            CassandraVerifier.currentRfOnKeyspaceMatchesDesiredRf(client, config);
            return true;
        } catch (Exception e) {
            log.warn("The config and Cassandra cluster do not agree on the replication factor.", e);
            return false;
        }
    });
}
private void runTruncateInternal(Collection<TableReference> tablesToTruncate) throws TException {
    clientPool.runWithRetry(client -> runTruncateOnClient(tablesToTruncate, client));
}
private Set<String> getExistingLowerCased(String keyspace) throws TException {
    return clientPool.runWithRetry(client -> getExistingLowerCased(client, keyspace));
}
void createTables(Map<TableReference, byte[]> tableRefToMetadata) {
    try {
        clientPool.runWithRetry(client -> {
            for (Map.Entry<TableReference, byte[]> entry : tableRefToMetadata.entrySet()) {
                CassandraKeyValueServices.runWithWaitingForSchemas(
                        () -> createTable(entry.getKey(), entry.getValue(), client),
                        config,
                        client,
                        "adding the column family for table " + entry.getKey() + " in a call to create tables");
            }
            return null;
        });
    } catch (Exception e) {
        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
    }
}
Set<String> getExisting() {
    String keyspace = config.getKeyspaceOrThrow();
    try {
        return clientPool.runWithRetry(new FunctionCheckedException<CassandraClient, Set<String>, Exception>() {
            @Override
            public Set<String> apply(CassandraClient client) throws Exception {
                return getExisting(client, keyspace);
            }

            @Override
            public String toString() {
                return "describe_keyspace(" + keyspace + ")";
            }
        });
    } catch (Exception e) {
        throw Throwables.throwUncheckedException(e);
    }
}
private void lowerConsistencyWhenSafe() {
    Set<String> dcs;
    Map<String, String> strategyOptions;
    try {
        dcs = clientPool.runWithRetry(client -> CassandraVerifier.sanityCheckDatacenters(client, config));
        KsDef ksDef = clientPool.runWithRetry(client -> client.describe_keyspace(config.getKeyspaceOrThrow()));
        strategyOptions = Maps.newHashMap(ksDef.getStrategy_options());

        if (dcs.size() == 1) {
            String dc = dcs.iterator().next();
            if (strategyOptions.get(dc) != null) {
                int currentRf = Integer.parseInt(strategyOptions.get(dc));
                if (currentRf == config.replicationFactor()) {
                    if (currentRf == 2 && config.clusterMeetsNormalConsistencyGuarantees()) {
                        log.info("Setting Read Consistency to ONE, as cluster has only one datacenter at RF2.");
                        readConsistency = ConsistencyLevel.ONE;
                        rangeLoader.setConsistencyLevel(readConsistency);
                    }
                }
            }
        }
    } catch (TException e) {
        return;
    }
}
private String getUniqueReachableSchemaVersionOrThrow() throws TException {
    Map<String, List<String>> schemaVersions = getTestKvs().getClientPool().runWithRetry(
            CassandraClient::describe_schema_versions);
    return Iterables.getOnlyElement(
            schemaVersions.keySet().stream()
                    .filter(schema -> !schema.equals(CassandraKeyValueServices.VERSION_UNREACHABLE))
                    .collect(Collectors.toList()));
}
        throws KeyAlreadyExistsException {
    try {
        Optional<KeyAlreadyExistsException> failure = clientPool.runWithRetry(client -> {
            for (Entry<Cell, byte[]> e : values.entrySet()) {
                CheckAndSetRequest request = CheckAndSetRequest.newCell(tableRef, e.getKey(), e.getValue());
void dropTables(final Set<TableReference> tablesToDrop) {
    try {
        clientPool.runWithRetry(client -> {
            KsDef ks = client.describe_keyspace(config.getKeyspaceOrThrow());
            Set<TableReference> existingTables = Sets.newHashSet();
            existingTables.addAll(ks.getCf_defs().stream()
                    .map(CassandraKeyValueServices::tableReferenceFromCfDef)
                    .collect(Collectors.toList()));

            for (TableReference table : tablesToDrop) {
                CassandraVerifier.sanityCheckTableName(table);
                if (existingTables.contains(table)) {
                    CassandraKeyValueServices.runWithWaitingForSchemas(
                            () -> truncateThenDrop(table, client),
                            config,
                            client,
                            "dropping the column family for table " + table + " in a call to drop tables");
                    cassandraTableMetadata.deleteAllMetadataRowsForTable(table);
                } else {
                    log.warn("Ignored call to drop a table ({}) that did not exist.", LoggingArgs.tableRef(table));
                }
            }
            return null;
        });
    } catch (Exception e) {
        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
    }
}
private void putMetadataAndMaybeAlterTables(
        boolean possiblyNeedToPerformSettingsChanges,
        Map<Cell, byte[]> newMetadata,
        Collection<CfDef> updatedCfs) {
    try {
        clientPool.runWithRetry(client -> {
            if (possiblyNeedToPerformSettingsChanges) {
                for (CfDef cf : updatedCfs) {
                    client.system_update_column_family(cf);
                }
                CassandraKeyValueServices.waitForSchemaVersions(config, client,
                        schemaChangeDescriptionForPutMetadataForTables(updatedCfs));
            }
            // Done with actual schema mutation, push the metadata
            put(AtlasDbConstants.DEFAULT_METADATA_TABLE, newMetadata, System.currentTimeMillis());
            return null;
        });
    } catch (Exception e) {
        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
    }
}
final Collection<CfDef> updatedCfs = Lists.newArrayListWithExpectedSize(metadataForTables.size());
List<CfDef> knownCfs = clientPool.runWithRetry(client ->
        client.describe_keyspace(config.getKeyspaceOrThrow()).getCf_defs());
public void checkAndSet(final CheckAndSetRequest request) throws CheckAndSetException {
    try {
        CheckAndSetResult<ByteString> casResult = clientPool.runWithRetry(
                client -> checkAndSetRunner.executeCheckAndSet(client, request));
        if (!casResult.successful()) {
private void assertThatGcGraceSecondsIs(CassandraKeyValueService kvs, int gcGraceSeconds) throws TException {
    List<CfDef> knownCfs = kvs.getClientPool().runWithRetry(client ->
            client.describe_keyspace(CASSANDRA.getConfig().getKeyspaceOrThrow()).getCf_defs());
    CfDef clusterSideCf = Iterables.getOnlyElement(knownCfs.stream()
            .filter(cf -> cf.getName().equals(getInternalTestTableName()))
            .collect(Collectors.toList()));
    assertThat(clusterSideCf.gc_grace_seconds, equalTo(gcGraceSeconds));
}
clientPool().runWithRetry(client -> {
    BoundData boundData = getCurrentBoundData(client);
    byte[] currentBound = boundData.bound();
private static void updateExistingKeyspace(CassandraClientPool clientPool, CassandraKeyValueServiceConfig config)
        throws TException {
    clientPool.runWithRetry((FunctionCheckedException<CassandraClient, Void, TException>) client -> {
        KsDef originalKsDef = client.describe_keyspace(config.getKeyspaceOrThrow());
        // There was an existing keyspace; check and make sure its definition is up to date with our config.
        KsDef modifiedKsDef = originalKsDef.deepCopy();
        checkAndSetReplicationFactor(client, modifiedKsDef, config);

        if (!modifiedKsDef.equals(originalKsDef)) {
            // Can't call system_update_keyspace to update replication factor if CfDefs are set
            modifiedKsDef.setCf_defs(ImmutableList.of());
            client.system_update_keyspace(modifiedKsDef);
            CassandraKeyValueServices.waitForSchemaVersions(config, client, "after updating the existing keyspace");
        }
        return null;
    });
}
return clientPool().runWithRetry(client -> {
    BoundData boundData = getCurrentBoundData(client);
    byte[] currentBound = boundData.bound();
@Override
public void deleteRange(final TableReference tableRef, final RangeRequest range) {
    if (range.equals(RangeRequest.all())) {
        try {
            cassandraTableTruncator.truncateTables(ImmutableSet.of(tableRef));
        } catch (AtlasDbDependencyException e) {
            log.info("Tried to make a deleteRange({}, RangeRequest.all())"
                            + " into a more garbage-cleanup friendly truncate(), but this failed.",
                    LoggingArgs.tableRef(tableRef), e);
            super.deleteRange(tableRef, range);
        }
    } else if (isForSingleRow(range.getStartInclusive(), range.getEndExclusive())) {
        try {
            long timestamp = mutationTimestampProvider.getRemoveTimestamp();
            byte[] row = range.getStartInclusive();
            clientPool.runWithRetry(client -> {
                client.remove("deleteRange", tableRef, row, timestamp, DELETE_CONSISTENCY);
                return null;
            });
        } catch (UnavailableException e) {
            throw new InsufficientConsistencyException(
                    "Deleting requires all Cassandra nodes to be up and available.", e);
        } catch (TException e) {
            throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
        }
    } else {
        super.deleteRange(tableRef, range);
    }
}
@Test
public void testCfEqualityChecker() throws TException {
    CassandraKeyValueServiceImpl kvs;
    if (keyValueService instanceof CassandraKeyValueService) {
        kvs = (CassandraKeyValueServiceImpl) keyValueService;
    } else if (keyValueService instanceof TableSplittingKeyValueService) { // scylla tests
        KeyValueService delegate = ((TableSplittingKeyValueService) keyValueService).getDelegate(NEVER_SEEN);
        assertTrue("The nesting of Key Value Services has apparently changed",
                delegate instanceof CassandraKeyValueService);
        kvs = (CassandraKeyValueServiceImpl) delegate;
    } else {
        throw new IllegalArgumentException("Can't run this cassandra-specific test against a non-cassandra KVS");
    }

    kvs.createTable(NEVER_SEEN, getMetadata());

    List<CfDef> knownCfs = kvs.getClientPool().runWithRetry(client ->
            client.describe_keyspace(CASSANDRA.getConfig().getKeyspaceOrThrow()).getCf_defs());
    CfDef clusterSideCf = Iterables.getOnlyElement(knownCfs.stream()
            .filter(cf -> cf.getName().equals(getInternalTestTableName()))
            .collect(Collectors.toList()));

    assertTrue("After serialization and deserialization to database, Cf metadata did not match.",
            ColumnFamilyDefinitions.isMatchingCf(
                    kvs.getCfForTable(NEVER_SEEN, getMetadata(), FOUR_DAYS_IN_SECONDS), clusterSideCf));
}
private void putDummyValueAtCellAndTimestamp(
        TableReference tableReference, Cell cell, long atlasTimestamp, long cassandraTimestamp) throws TException {
    CassandraKeyValueServiceImpl ckvs = (CassandraKeyValueServiceImpl) keyValueService;
    ckvs.getClientPool().runWithRetry(input -> {
        CqlQuery cqlQuery = CqlQuery.builder()
                .safeQueryFormat("INSERT INTO \"%s\".\"%s\" (key, column1, column2, value)"
                        + " VALUES (%s, %s, %s, %s) USING TIMESTAMP %s;")
                .addArgs(
                        SafeArg.of("keyspace", CASSANDRA.getConfig().getKeyspaceOrThrow()),
                        LoggingArgs.internalTableName(tableReference),
                        UnsafeArg.of("row", convertBytesToHexString(cell.getRowName())),
                        UnsafeArg.of("column", convertBytesToHexString(cell.getColumnName())),
                        SafeArg.of("atlasTimestamp", ~atlasTimestamp),
                        UnsafeArg.of("value", convertBytesToHexString(PtBytes.toBytes("testtesttest"))),
                        SafeArg.of("cassandraTimestamp", cassandraTimestamp))
                .build();
        return input.execute_cql3_query(cqlQuery, Compression.NONE, ConsistencyLevel.QUORUM);
    });
}