@Override
public synchronized void storeUpperLimit(final long limit) {
    DebugLogger.logger.debug("[PUT] Storing upper limit of {}.", limit);
    clientPool.runWithRetry(new FunctionCheckedException<CassandraClient, Void, RuntimeException>() {
        @GuardedBy("CassandraTimestampBoundStore.this")
        @Override
        public Void apply(CassandraClient client) {
            cas(client, currentLimit, limit);
            return null;
        }
    });
}
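Since other call sites in this section pass lambdas straight to clientPool.run and runWithRetry, the same write can also be sketched in lambda form. This is only an illustration under that assumption; it drops the @GuardedBy annotation, which may be why the anonymous class is used above.

    // Sketch only: equivalent lambda form of the call above. The @GuardedBy annotation
    // cannot be attached to a lambda, so the anonymous class carries information this loses.
    clientPool.runWithRetry(client -> {
        cas(client, currentLimit, limit);
        return null;
    });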
private ClusterAvailabilityStatus getStatusByRunningOperationsOnEachHost() {
    int countUnreachableNodes = 0;
    for (InetSocketAddress host : clientPool.getCurrentPools().keySet()) {
        try {
            clientPool.runOnHost(host, CassandraVerifier.healthCheck);
            if (!partitionerIsValid(host)) {
                return ClusterAvailabilityStatus.TERMINAL;
            }
        } catch (Exception e) {
            countUnreachableNodes++;
        }
    }
    return getNodeAvailabilityStatus(countUnreachableNodes);
}
TokenBackedBasicResultsPage<Entry<Cell, Value>, byte[]> page(final byte[] startCol) throws Exception {
    return clientPool.runWithRetryOnHost(host, new FunctionCheckedException<
            CassandraClient,
            TokenBackedBasicResultsPage<Entry<Cell, Value>, byte[]>,
private boolean partitionerIsValid(InetSocketAddress host) {
    try {
        clientPool.runOnHost(host, clientPool.getValidatePartitioner());
        return true;
    } catch (Exception e) {
        return false;
    }
}
public List<KeySlice> getRows(String kvsMethodName, KeyRange keyRange, SlicePredicate slicePredicate) {
    InetSocketAddress host = clientPool.getRandomHostForKey(keyRange.getStart_key());
    return clientPool.runWithRetryOnHost(
            host,
            new FunctionCheckedException<CassandraClient, List<KeySlice>, RuntimeException>() {
                @Override
                public List<KeySlice> apply(CassandraClient client) {
                    try {
                        return queryRunner.run(client, tableRef, () -> client.get_range_slices(
                                kvsMethodName, tableRef, slicePredicate, keyRange, consistency));
                    } catch (UnavailableException e) {
                        throw new InsufficientConsistencyException("get_range_slices requires " + consistency
                                + " Cassandra nodes to be up and available.", e);
                    } catch (Exception e) {
                        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
                    }
                }

                @Override
                public String toString() {
                    return "get_range_slices(" + tableRef + ")";
                }
            });
}
private void verifyNumberOfAttemptsOnHost(
        InetSocketAddress host, CassandraClientPool cassandraClientPool, int numAttempts) {
    Mockito.verify(cassandraClientPool.getCurrentPools().get(host), Mockito.times(numAttempts))
            .runWithPooledResource(
                    Mockito.<FunctionCheckedException<CassandraClient, Object, RuntimeException>>any());
}
private void runNoopOnHost(InetSocketAddress host, CassandraClientPool pool) {
    pool.runOnHost(host, noOp());
}
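The noOp() helper itself is not shown in this section, nor is the runNoopWithRetryOnHost counterpart used by the rolling-restart test further down. A minimal sketch of what they might look like, assuming noOp() simply returns a task that ignores the client:

    // Hypothetical implementations, for illustration only.
    private FunctionCheckedException<CassandraClient, Void, Exception> noOp() {
        return new FunctionCheckedException<CassandraClient, Void, Exception>() {
            @Override
            public Void apply(CassandraClient client) {
                return null; // do nothing; useful for exercising pool routing and blacklisting
            }

            @Override
            public String toString() {
                return "no-op";
            }
        };
    }

    private void runNoopWithRetryOnHost(InetSocketAddress host, CassandraClientPool pool) {
        pool.runWithRetryOnHost(host, noOp());
    }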
/**
 * Performs non-destructive cleanup when the KVS is no longer needed.
 */
@Override
public void close() {
    clientPool.shutdown();
    super.close();
}
private InetSocketAddress getHostForRow(byte[] row) {
    return clientPool.getRandomHostForKey(row);
}
/**
 * This is a request from pbrown / FDEs; basically it's a pain to do DB surgery to get out
 * of failed patch upgrades, the majority of which requires schema mutations; they would find
 * it preferable to stop before starting the actual patch upgrade / setting APPLYING state.
 */
static void warnUserInInitializationIfClusterAlreadyInInconsistentState(
        CassandraClientPool clientPool, CassandraKeyValueServiceConfig config) {
    try {
        clientPool.run(client -> {
            waitForSchemaVersions(config, client, " during an initialization check");
            return null;
        });
    } catch (Exception e) {
        log.warn("Failed to retrieve current Cassandra cluster schema status.", e);
    }
}
void inPool(CassandraClientPool cassandraClientPool) {
    CassandraClientPoolingContainer container = mock(CassandraClientPoolingContainer.class);
    when(container.getHost()).thenReturn(address);
    try {
        OngoingStubbing<Object> stubbing = when(container.runWithPooledResource(
                Mockito.<FunctionCheckedException<CassandraClient, Object, Exception>>any()));
        for (Exception ex : exceptions) {
            stubbing = stubbing.thenThrow(ex);
        }
        if (returnsValue) {
            stubbing.thenReturn("Response");
        }
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    cassandraClientPool.getCurrentPools().put(address, container);
}
private static CassandraKeyValueService createOrShutdownClientPool(
        MetricsManager metricsManager,
        CassandraKeyValueServiceConfig config,
        CassandraClientPool clientPool,
        Optional<LeaderConfig> leaderConfig,
        CassandraMutationTimestampProvider mutationTimestampProvider,
        Logger log,
        boolean initializeAsync) {
    try {
        return createAndInitialize(
                metricsManager, config, clientPool, leaderConfig, mutationTimestampProvider, log, initializeAsync);
    } catch (Exception e) {
        log.warn("Error occurred in creating Cassandra KVS. Now attempting to shut down client pool...", e);
        try {
            clientPool.shutdown();
            log.info("Cassandra client pool shut down.");
        } catch (RuntimeException internalException) {
            log.info("An error occurred whilst shutting down the Cassandra client pool", internalException);
            throw internalException;
        }
        throw Throwables.rewrapAndThrowUncheckedException(e);
    }
}
static <V> Map<InetSocketAddress, List<V>> partitionByHost(
        CassandraClientPool clientPool, Iterable<V> iterable, Function<V, byte[]> keyExtractor) {
    // Ensure that the same key goes to the same partition. This is important when writing multiple columns
    // to the same row, since this is normally a single write in cassandra, whereas splitting the columns
    // into different requests results in multiple writes.
    ListMultimap<ByteBuffer, V> partitionedByKey = ArrayListMultimap.create();
    for (V value : iterable) {
        partitionedByKey.put(ByteBuffer.wrap(keyExtractor.apply(value)), value);
    }
    ListMultimap<InetSocketAddress, V> valuesByHost = ArrayListMultimap.create();
    for (ByteBuffer key : partitionedByKey.keySet()) {
        InetSocketAddress host = clientPool.getRandomHostForKey(key.array());
        valuesByHost.putAll(host, partitionedByKey.get(key));
    }
    return Multimaps.asMap(valuesByHost);
}
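As a rough illustration of how the per-host map is typically consumed (a hedged sketch, not the actual call site; the Cell-to-row-name key extractor and the empty write lambda are assumptions):

    // Sketch: fan a batch of writes out to the hosts that own their keys, retrying per host.
    Map<InetSocketAddress, List<Map.Entry<Cell, Value>>> entriesByHost =
            partitionByHost(clientPool, values, entry -> entry.getKey().getRowName());
    for (Map.Entry<InetSocketAddress, List<Map.Entry<Cell, Value>>> batch : entriesByHost.entrySet()) {
        clientPool.runWithRetryOnHost(batch.getKey(), client -> {
            // write batch.getValue() through this client (details omitted)
            return null;
        });
    }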
private boolean doesConfigReplicationFactorMatchWithCluster() {
    return clientPool.runWithRetry(client -> {
        try {
            CassandraVerifier.currentRfOnKeyspaceMatchesDesiredRf(client, config);
            return true;
        } catch (Exception e) {
            log.warn("The config and Cassandra cluster do not agree on the replication factor.", e);
            return false;
        }
    });
}
        final Iterable<Map.Entry<Cell, Value>> values,
        boolean overrideTimestamps) throws Exception {
    clientPool.runWithRetryOnHost(host, new FunctionCheckedException<CassandraClient, Void, Exception>() {
        @Override
@Test
public void resilientToRollingRestarts() {
    CassandraClientPool cassandraClientPool = clientPoolWithServersInCurrentPool(ImmutableSet.of(HOST_1, HOST_2));
    AtomicReference<InetSocketAddress> downHost = new AtomicReference<>(HOST_1);
    cassandraClientPool.getCurrentPools().values().forEach(pool ->
            setConditionalTimeoutFailureForHost(pool, container -> container.getHost().equals(downHost.get())));

    runNoopWithRetryOnHost(HOST_1, cassandraClientPool);
    assertThat(blacklist.contains(HOST_1), is(true));

    downHost.set(HOST_2);
    runNoopWithRetryOnHost(HOST_2, cassandraClientPool);
    assertThat(blacklist.contains(HOST_1), is(false));
}