/**
 * Human-readable description of this container for logs: the target host plus the
 * keyspace, SSL and socket-timeout settings it was configured with.
 */
@Override
public String toString() {
    // add(String, Object) is used for every entry; the SSL config falls back to a marker string.
    Object sslDescription =
            config.sslConfiguration().isPresent() ? config.sslConfiguration().get() : "unspecified";
    return MoreObjects.toStringHelper(getClass())
            .add("host", this.host)
            .add("keyspace", config.getKeyspaceOrThrow())
            .add("usingSsl", config.usingSsl())
            .add("sslConfiguration", sslDescription)
            .add("socketTimeoutMillis", config.socketTimeoutMillis())
            .add("socketQueryTimeoutMillis", config.socketQueryTimeoutMillis())
            .toString();
}
/**
 * A quorum requires strictly fewer than ceil(RF / 2) nodes to be unreachable; with
 * {@code (rf + 1) / 2} as the integer-arithmetic form of that threshold, quorum is
 * available while the unreachable count stays below it.
 */
private boolean isQuorumAvailable(int countUnreachableNodes) {
    int quorumThreshold = (config.replicationFactor() + 1) / 2;
    return countUnreachableNodes < quorumThreshold;
}
/**
 * Periodic pool maintenance: re-evaluates the blacklist against the live pools, then
 * resets the server set either from the cluster's token ranges (when auto-refresh is
 * enabled) or from the static config, and finally logs pool state for debugging.
 */
private synchronized void refreshPool() {
    blacklist.checkAndUpdate(cassandra.getPools());

    if (!config.autoRefreshNodes()) {
        // Static topology: always mirror the configured server list.
        setServersInPoolTo(config.servers());
    } else {
        // Dynamic topology: ask the cluster which servers currently own token ranges.
        setServersInPoolTo(cassandra.refreshTokenRangesAndGetServers());
    }

    cassandra.debugLogStateOfPool();
}
/**
 * Sanity-checks rack topology for a single-DC cluster: if there are fewer rack entries than the
 * replication factor while there are more hosts than the replication factor, the rack
 * configuration is almost certainly wrong and replica placement would not give the expected
 * failure tolerance. Logs an error, or throws when topology checks are enforced.
 */
private static void checkMoreRacksThanRfOrFewerHostsThanRf(
        CassandraKeyValueServiceConfig config, Set<String> hosts, Multimap<String, String> dcRack) {
    int replicationFactor = config.replicationFactor();
    boolean fewerRacksThanRf = dcRack.values().size() < replicationFactor;
    boolean moreHostsThanRf = hosts.size() > replicationFactor;
    if (fewerRacksThanRf && moreHostsThanRf) {
        logErrorOrThrow("The cassandra cluster only has one DC, "
                + "and is set up with less racks than the desired number of replicas, "
                + "and there are more hosts than the replication factor. "
                + "It is very likely that your rack configuration is incorrect and replicas "
                + "would not be placed correctly for the failure tolerance you want. "
                + "If you fully understand how NetworkTopology replica placement strategy will be placing "
                + "your replicas, feel free to set the 'ignoreNodeTopologyChecks' KVS config option.",
                config.ignoreNodeTopologyChecks());
    }
}
// Connection-pool tuning for the Cassandra client pool (Apache Commons Pool 2).
GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
poolConfig.setMinIdle(config.poolSize());
poolConfig.setMaxIdle(config.maxConnectionBurstSize());
poolConfig.setMaxTotal(config.maxConnectionBurstSize());
// Borrowers wait at most the socket timeout before failing to get a connection.
poolConfig.setMaxWaitMillis(config.socketTimeoutMillis());
// FIX: this statement had been truncated to a bare dangling expression
// ("TimeUnit.MILLISECONDS.convert(...));"), which is not valid Java — the
// setMinEvictableIdleTimeMillis(...) call head is restored so idle connections are
// actually eligible for eviction after the configured idle timeout.
poolConfig.setMinEvictableIdleTimeMillis(
        TimeUnit.MILLISECONDS.convert(config.idleConnectionTimeoutSeconds(), TimeUnit.SECONDS));
int timeBetweenEvictionsSeconds = config.timeBetweenConnectionEvictionRunsSeconds();
// Random jitter (up to min(half-interval, 10s)) de-synchronises eviction runs across pools.
int delta = ThreadLocalRandom.current().nextInt(Math.min(timeBetweenEvictionsSeconds / 2, 10));
poolConfig.setTimeBetweenEvictionRunsMillis(
        TimeUnit.MILLISECONDS.convert(timeBetweenEvictionsSeconds + delta, TimeUnit.SECONDS));
// Negative value means "test ceil(1/|n|) of idle connections per run" in commons-pool2.
poolConfig.setNumTestsPerEvictionRun(-(int) (1.0 / config.proportionConnectionsToCheckPerEvictionRun()));
poolConfig.setTestWhileIdle(true);
@Test
public void hostsAreNotRemovedOrAddedWhenRefreshIsDisabled() {
    // Given: two statically configured servers and node auto-refresh turned off.
    when(config.servers()).thenReturn(ImmutableSet.of(HOST_1, HOST_2));
    when(config.autoRefreshNodes()).thenReturn(false);

    // The cluster only reports HOST_1, but the pool must still mirror the static config.
    setCassandraServersTo(HOST_1);
    createClientPool();
    assertThat(poolServers).containsExactlyInAnyOrder(HOST_1, HOST_2);

    // Even after the cluster grows and a refresh interval elapses, the pool is unchanged.
    setCassandraServersTo(HOST_1, HOST_2, HOST_3);
    deterministicExecutor.tick(config.poolRefreshIntervalSeconds(), TimeUnit.SECONDS);
    assertThat(poolServers).containsExactlyInAnyOrder(HOST_1, HOST_2);
}
/**
 * Returns a checked-function wrapper that asks a Cassandra client for the token-range
 * layout of the configured keyspace. The keyspace is resolved lazily, at invocation time.
 */
public static FunctionCheckedException<CassandraClient, List<TokenRange>, Exception> getDescribeRing(
        CassandraKeyValueServiceConfig config) {
    // getKeyspaceOrThrow() is deliberately evaluated inside the lambda so any
    // missing-keyspace failure surfaces when the function is applied, not when built.
    return client -> client.describe_ring(config.getKeyspaceOrThrow());
}
}
// NOTE(review): this chunk appears truncated/garbled by extraction — the log.error(...) call is
// missing its trailing arguments and closing parenthesis, several braces are unbalanced, and the
// transport/protocol construction between the SSL branch and `new Cassandra.Client(protocol)` is
// absent. Left byte-identical; recover the full method from version control before editing.
// What IS visible: opens a TSocket with the configured connect timeout, enables TCP keep-alive and
// sets SO_TIMEOUT to the query timeout, optionally wraps the socket for SSL (explicit
// sslConfiguration wins over the per-address factory cache), and logs in with configured
// credentials — closing the transport if login throws.
private static Cassandra.Client getRawClient(InetSocketAddress addr, CassandraKeyValueServiceConfig config) throws TException { TSocket thriftSocket = new TSocket(addr.getHostString(), addr.getPort(), config.socketTimeoutMillis()); thriftSocket.open(); try { thriftSocket.getSocket().setKeepAlive(true); thriftSocket.getSocket().setSoTimeout(config.socketQueryTimeoutMillis()); } catch (SocketException e) { log.error("Couldn't set socket keep alive for host {}", if (config.usingSsl()) { boolean success = false; try { final SSLSocketFactory factory; if (config.sslConfiguration().isPresent()) { factory = SslSocketFactories.createSslSocketFactory(config.sslConfiguration().get()); } else { factory = sslSocketFactories.getUnchecked(addr); Cassandra.Client client = new Cassandra.Client(protocol); if (config.credentials().isPresent()) { try { login(client, config.credentials().get()); } catch (TException e) { client.getOutputProtocol().getTransport().close();
@Before
public void setup() {
    // Mocked config with the timing knobs the pool and blacklist read.
    config = mock(CassandraKeyValueServiceConfig.class);
    when(config.poolRefreshIntervalSeconds()).thenReturn(POOL_REFRESH_INTERVAL_SECONDS);
    when(config.timeBetweenConnectionEvictionRunsSeconds()).thenReturn(TIME_BETWEEN_EVICTION_RUNS_SECONDS);
    when(config.unresponsiveHostBackoffTimeSeconds()).thenReturn(UNRESPONSIVE_HOST_BACKOFF_SECONDS);

    // The Blacklist is constructed AFTER its backoff stub so it reads the stubbed value.
    blacklist = new Blacklist(config);

    // addPool/removePool mutate the in-test poolServers set; getPools reflects it as a
    // host -> mocked container map.
    doAnswer(invocation -> poolServers.add(getInvocationAddress(invocation)))
            .when(cassandra).addPool(any());
    doAnswer(invocation -> poolServers.remove(getInvocationAddress(invocation)))
            .when(cassandra).removePool(any());
    doAnswer(invocation -> poolServers.stream()
                    .collect(Collectors.toMap(x -> x, x -> mock(CassandraClientPoolingContainer.class))))
            .when(cassandra).getPools();

    when(config.socketTimeoutMillis()).thenReturn(1);
}
/**
 * Best-effort optimisation: when the cluster has exactly one datacenter at the configured
 * replication factor, and that factor is 2 with normal consistency guarantees, reads can be
 * safely downgraded to ConsistencyLevel.ONE. Any thrift failure aborts silently — this is a
 * pure optimisation, never required for correctness.
 */
private void lowerConsistencyWhenSafe() {
    Set<String> dcs;
    Map<String, String> strategyOptions;
    try {
        dcs = clientPool.runWithRetry(client ->
                CassandraVerifier.sanityCheckDatacenters(client, config));
        KsDef ksDef = clientPool.runWithRetry(client ->
                client.describe_keyspace(config.getKeyspaceOrThrow()));
        strategyOptions = Maps.newHashMap(ksDef.getStrategy_options());

        // Guard clauses: only a single-DC cluster whose actual RF matches the configured RF
        // is eligible for the downgrade.
        if (dcs.size() != 1) {
            return;
        }
        String dc = dcs.iterator().next();
        String rawReplicationFactor = strategyOptions.get(dc);
        if (rawReplicationFactor == null) {
            return;
        }
        int currentRf = Integer.parseInt(rawReplicationFactor);
        if (currentRf != config.replicationFactor()) {
            return;
        }
        if (currentRf == 2 && config.clusterMeetsNormalConsistencyGuarantees()) {
            log.info("Setting Read Consistency to ONE, as cluster has only one datacenter at RF2.");
            readConsistency = ConsistencyLevel.ONE;
            rangeLoader.setConsistencyLevel(readConsistency);
        }
    } catch (TException e) {
        // Intentionally swallowed: failing to lower consistency is harmless.
        return;
    }
}
/**
 * Builds the keyspace definition used when creating a fresh keyspace:
 * NetworkTopologyStrategy with the configured replication factor applied to every
 * datacenter the cluster reports, and durable writes enabled.
 *
 * @throws TException if the datacenter sanity check fails against the cluster
 */
static KsDef createKsDefForFresh(CassandraClient client, CassandraKeyValueServiceConfig config)
        throws TException {
    KsDef ksDef = new KsDef(config.getKeyspaceOrThrow(), CassandraConstants.NETWORK_STRATEGY, ImmutableList.of());
    Set<String> dcs = sanityCheckDatacenters(client, config);
    // Maps.asMap is a lazy view; the same RF string is served for every datacenter key.
    String replicationFactor = String.valueOf(config.replicationFactor());
    ksDef.setStrategy_options(Maps.asMap(dcs, unused -> replicationFactor));
    ksDef.setDurable_writes(true);
    return ksDef;
}
/**
 * Creates a raw thrift client and binds it to the configured keyspace, closing the
 * underlying transport (and rethrowing) if keyspace selection fails.
 */
private static Cassandra.Client getRawClientWithKeyspace(InetSocketAddress addr, CassandraKeyValueServiceConfig config)
        throws Exception {
    Client ret = getRawClient(addr, config);
    try {
        ret.set_keyspace(config.getKeyspaceOrThrow());
        String sslSuffix = config.usingSsl() ? " over SSL" : "";
        String userSuffix = config.credentials().isPresent()
                ? " as user " + config.credentials().get().username()
                : "";
        log.debug("Created new client for {}/{}{}{}",
                SafeArg.of("address", CassandraLogHelper.host(addr)),
                UnsafeArg.of("keyspace", config.getKeyspaceOrThrow()),
                SafeArg.of("usingSsl", sslSuffix),
                UnsafeArg.of("usernameConfig", userSuffix));
        return ret;
    } catch (Exception e) {
        // Don't leak the socket when keyspace binding (or logging-arg evaluation) fails.
        ret.getOutputProtocol().getTransport().close();
        throw e;
    }
}
@Test
public void preservesOtherPropertiesOnResolvedConfigWithNamespace() {
    // Resolving a missing keyspace from the namespace must not disturb unrelated settings.
    CassandraKeyValueServiceConfig newConfig = CassandraAtlasDbFactory.preprocessKvsConfig(
            CONFIG_WITHOUT_KEYSPACE, Optional::empty, Optional.of(KEYSPACE));
    assertThat(newConfig.servers()).isEqualTo(SERVERS);
    assertThat(newConfig.replicationFactor()).isEqualTo(1);
}
/**
 * SimpleStrategy replica placement is datacenter-unaware, so seeing more than one
 * datacenter is a misconfiguration: log an error, or throw when topology checks are enforced.
 */
private static void checkOneDatacenter(CassandraKeyValueServiceConfig config, Set<String> datacenters) {
    boolean multipleDatacenters = datacenters.size() > 1;
    if (multipleDatacenters) {
        logErrorOrThrow(SIMPLE_PARTITIONING_ERROR_MSG, config.ignoreNodeTopologyChecks());
    }
}
/**
 * Pool factory hook: builds an instrumented, keyspace-bound client for this factory's
 * address, wrapping any failure in a ClientCreationFailedException that names the target.
 */
@Override
public CassandraClient create() throws Exception {
    try {
        return instrumentClient(getRawClientWithKeyspace(addr, config));
    } catch (Exception e) {
        String sslSuffix = config.usingSsl() ? " over SSL" : "";
        String message =
                String.format("Failed to construct client for %s/%s", addr, config.getKeyspaceOrThrow()) + sslSuffix;
        throw new ClientCreationFailedException(message, e);
    }
}
/**
 * Seeds the pool from the statically configured servers, sorted by their string form so
 * the initial pool order is deterministic across runs.
 */
public void cacheInitialCassandraHosts() {
    Comparator<InetSocketAddress> byStringForm = Comparator.comparing(InetSocketAddress::toString);
    cassandraHosts = config.servers().stream()
            .sorted(byStringForm)
            .collect(Collectors.toList());
    cassandraHosts.forEach(this::addPool);
}
/**
 * Immutables validation hook: at least one server, every server carries an explicit
 * positive port, and the per-eviction-run check proportion lies in (0.01, 1].
 */
@Value.Check
protected final void check() {
    Preconditions.checkState(!servers().isEmpty(), "'servers' must have at least one entry");
    servers().forEach(addr -> Preconditions.checkState(
            addr.getPort() > 0, "each server must specify a port ([host]:[port])"));
    double evictionCheckProportion = proportionConnectionsToCheckPerEvictionRun();
    Preconditions.checkArgument(
            evictionCheckProportion > 0.01 && evictionCheckProportion <= 1,
            "'proportionConnectionsToCheckPerEvictionRun' must be between 0.01 and 1");
}
}
@VisibleForTesting InetSocketAddress getAddressForHost(String host) throws UnknownHostException { if (config.addressTranslation().containsKey(host)) { return config.addressTranslation().get(host); } InetAddress resolvedHost = InetAddress.getByName(host); Set<InetSocketAddress> allKnownHosts = Sets.union(currentPools.keySet(), config.servers()); for (InetSocketAddress address : allKnownHosts) { if (Objects.equals(address.getAddress(), resolvedHost)) { return address; } } Set<Integer> allKnownPorts = allKnownHosts.stream() .map(InetSocketAddress::getPort) .collect(Collectors.toSet()); if (allKnownPorts.size() == 1) { // if everyone is on one port, try and use that return new InetSocketAddress(resolvedHost, Iterables.getOnlyElement(allKnownPorts)); } else { throw new UnknownHostException("Couldn't find the provided host in server list or current servers"); } }
/**
 * Sweep read-thread count, preferring the live-reloadable runtime value via
 * {@code chooseConfig} with the install-time {@code config.sweepReadThreads()} as fallback.
 */
@Override public Integer sweepReadThreads() { return chooseConfig(CassandraKeyValueServiceRuntimeConfig::sweepReadThreads, config.sweepReadThreads()); }