.withClusterName("TestCluster") .addContactPointsWithPorts(ImmutableList.of( new InetSocketAddress(HOST, PORT)))
.withClusterName(cassandraConfig.getClusterName()) .addContactPoints(cassandraConfig.getHosts().split(",")) .withMaxSchemaAgreementWaitSeconds(45)
/**
 * Create the Cassandra {@code Cluster} from the injected configuration.
 *
 * <p>Runs once after dependency injection. JMX reporting, metrics, credentials, and SSL are
 * each applied only when the corresponding flag is set.
 */
@PostConstruct
public void init() {
    Cluster.Builder builder =
            Cluster.builder()
                    .addContactPointsWithPorts(parseNodeList(nodeList))
                    .withClusterName(clusterName)
                    .withSocketOptions(clientOptions.getSocketOptions())
                    .withQueryOptions(clientOptions.getQueryOptions())
                    .withCompression(parseCompression(compression));
    if (!useJmx) {
        builder.withoutJMXReporting();
    }
    if (disableMetrics) {
        builder.withoutMetrics();
    }
    if (useCredentials) {
        builder.withCredentials(username, password);
    }
    if (useSsl) {
        builder.withSSL();
    }
    cluster = builder.build();
}
/**
 * Initialize the client.
 *
 * <p>Copies cluster coordinates and consistency levels out of {@code configs}, connects to
 * Cassandra, ensures the keyspace and column family exist, and prepares the read/write
 * statements used by the plugin.
 *
 * @param dataGenerator source of the payloads written by this plugin
 */
@Override
public void init(DataGenerator dataGenerator) {
    this.dataGenerator = dataGenerator;
    this.ClusterName = configs.getCluster();
    logger.info("Cassandra Cluster: " + ClusterName);
    this.ClusterContactPoint = configs.getHost();
    this.KeyspaceName = configs.getKeyspace();
    this.TableName = configs.getCfname();
    // Consistency levels are configured as strings (e.g. "ONE", "QUORUM");
    // valueOf throws IllegalArgumentException on an unknown level name.
    this.WriteConsistencyLevel = ConsistencyLevel.valueOf(configs.getWriteConsistencyLevel());
    this.ReadConsistencyLevel = ConsistencyLevel.valueOf(configs.getReadConsistencyLevel());
    cluster = Cluster.builder()
            .withClusterName(ClusterName)
            .addContactPoint(ClusterContactPoint)
            .build();
    session = cluster.connect();
    // Create the keyspace/column family if they do not already exist.
    upsertKeyspace(this.session);
    upsertCF(this.session);
    // "_id" is quoted in CQL to preserve its case-sensitive column name.
    writePstmt = session.prepare("INSERT INTO "+ TableName +" (\"_id\", name) VALUES (?, ?)");
    readPstmt = session.prepare("SELECT * From "+ TableName +" Where \"_id\" = ?");
    logger.info("Initialized ElassandraCassJavaDriverPlugin");
}
/**
 * Initialize the client.
 *
 * <p>Copies cluster coordinates and consistency levels out of {@code configs}, connects to
 * Cassandra, ensures the keyspace and column family exist, and prepares the read/write
 * statements used by the plugin.
 *
 * @param dataGenerator source of the payloads written by this plugin
 */
@Override
public void init(DataGenerator dataGenerator) {
    this.dataGenerator = dataGenerator;
    this.ClusterName = configs.getCluster();
    logger.info("Cassandra Cluster: " + ClusterName);
    this.ClusterContactPoint = configs.getHost();
    this.KeyspaceName = configs.getKeyspace();
    this.TableName = configs.getCfname();
    // Consistency levels are configured as strings (e.g. "ONE", "QUORUM");
    // valueOf throws IllegalArgumentException on an unknown level name.
    this.WriteConsistencyLevel = ConsistencyLevel.valueOf(configs.getWriteConsistencyLevel());
    this.ReadConsistencyLevel = ConsistencyLevel.valueOf(configs.getReadConsistencyLevel());
    cluster = Cluster.builder()
            .withClusterName(ClusterName)
            .addContactPoint(ClusterContactPoint)
            .build();
    session = cluster.connect();
    // Create the keyspace/column family if they do not already exist.
    upsertKeyspace(this.session);
    upsertCF(this.session);
    // "_id" is quoted in CQL to preserve its case-sensitive column name.
    writePstmt = session.prepare("INSERT INTO "+ TableName +" (\"_id\", name) VALUES (?, ?)");
    readPstmt = session.prepare("SELECT * From "+ TableName +" Where \"_id\" = ?");
    logger.info("Initialized ElassandraCassJavaDriverPlugin");
}
/**
 * Creates a {@link CassandraDataContext} backed by a driver {@code Cluster} assembled from
 * the supplied connection properties (hosts, port, cluster name, credentials, keyspace).
 */
@Override
public DataContext create(DataContextProperties properties, ResourceFactoryRegistry resourceFactoryRegistry)
        throws UnsupportedDataContextPropertiesException, ConnectionException {
    final Map<String, Object> map = properties.toMap();
    final Builder clusterBuilder = Cluster.builder();

    // "hostname" may hold a comma-separated list of contact points.
    final String hostname = properties.getHostname();
    if (!Strings.isNullOrEmpty(hostname)) {
        clusterBuilder.addContactPoints(hostname.split(","));
    }
    if (properties.getPort() != null) {
        clusterBuilder.withPort(properties.getPort());
    }
    // Optional free-form property carrying the cluster name.
    if (map.containsKey("cluster-name")) {
        clusterBuilder.withClusterName((String) map.get("cluster-name"));
    }
    // Credentials are only applied when both parts are present.
    if (properties.getUsername() != null && properties.getPassword() != null) {
        clusterBuilder.withCredentials(properties.getUsername(), properties.getPassword());
    }

    final Cluster cluster = clusterBuilder.build();
    // The keyspace may come from the property map or fall back to the database name.
    final String keySpace = getString(map.get("keyspace"), properties.getDatabaseName());
    return new CassandraDataContext(cluster, keySpace, properties.getTableDefs());
}
private void ensureClusterCreation() throws InterruptedException { // Retry the connection until we either connect or timeout Cluster.Builder cassandraClusterBuilder = Cluster.builder(); Cluster cluster = cassandraClusterBuilder.addContactPoint(CLUSTER_HOST).withClusterName(CLUSTER_NAME_TEST).withPort(CLUSTER_PORT) .build(); int retryCount = 0; while (retryCount < MAX_RETRIES) { try { Session cassSession = cluster.connect(); if (cassSession.getState().getConnectedHosts().size() > 0) { cassSession.close(); return; } } catch (Exception e) { Thread.sleep(1000); } retryCount++; } throw new SkipException("Unable to connect to embedded Cassandra after " + MAX_RETRIES + " seconds."); } }
/**
 * Creates a {@link CassandraDataContext} backed by a driver cluster assembled from the
 * supplied connection properties.
 *
 * @param properties connection settings: hostname(s), port, credentials, keyspace
 * @param resourceFactoryRegistry not used by this factory
 * @return a DataContext over the configured Cassandra keyspace
 */
@Override
public DataContext create(DataContextProperties properties, ResourceFactoryRegistry resourceFactoryRegistry)
        throws UnsupportedDataContextPropertiesException, ConnectionException {
    final Map<String, Object> map = properties.toMap();
    final Builder clusterBuilder = Cluster.builder();
    // "hostname" may hold a comma-separated list of contact points.
    final String hostname = properties.getHostname();
    if (!Strings.isNullOrEmpty(hostname)) {
        clusterBuilder.addContactPoints(hostname.split(","));
    }
    if (properties.getPort() != null) {
        clusterBuilder.withPort(properties.getPort());
    }
    // Optional free-form property carrying the cluster name.
    if (map.containsKey("cluster-name")) {
        clusterBuilder.withClusterName((String) map.get("cluster-name"));
    }
    // Credentials are only applied when both parts are present.
    if (properties.getUsername() != null && properties.getPassword() != null) {
        clusterBuilder.withCredentials(properties.getUsername(), properties.getPassword());
    }
    final Cluster cluster = clusterBuilder.build();
    // The keyspace may come from the property map or fall back to the database name.
    final String keySpace = getString(map.get("keyspace"), properties.getDatabaseName());
    return new CassandraDataContext(cluster, keySpace, properties.getTableDefs());
}
@Override public void startComponent() { if (cluster == null) { // Configure and build up the Cassandra cluster. cluster = Cluster.builder() .withClusterName(clusterName) .withPort(port) .withRetryPolicy(DefaultRetryPolicy.INSTANCE) // TokenAware requires query has routing info (e.g. BoundStatement with all PK value bound). .withLoadBalancingPolicy(new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build())) .addContactPoints(contactPoints.toArray(new String[contactPoints.size()])) .build(); // Register any codecs. cluster.getConfiguration().getCodecRegistry() .register(new CassandraEnumCodec<>(AccessMode.class, AccessMode.getValueMap())) .register(new CassandraEnumCodec<>(Direction.class, Direction.getValueMap())) .register(new CassandraEnumCodec<>(SourceEntity.Type.class, SourceEntity.Type.getValueMap())); // Create a session. manager = new MappingManager(cluster.connect()); } }
private void ensureClusterCreation() throws InterruptedException { // Retry the connection until we either connect or timeout Cluster.Builder cassandraClusterBuilder = Cluster.builder(); Cluster cluster = cassandraClusterBuilder.addContactPoint(CLUSTER_HOST).withClusterName(CLUSTER_NAME_TEST).withPort(CLUSTER_PORT) .build(); int retryCount = 0; while (retryCount < MAX_RETRIES) { try { Session cassSession = cluster.connect(); if (cassSession.getState().getConnectedHosts().size() > 0) { cassSession.close(); return; } } catch (Exception e) { Thread.sleep(1000); } retryCount++; } throw new SkipException("Unable to connect to embedded Cassandra after " + MAX_RETRIES + " seconds."); } }
/**
 * Assembles a {@code Cluster.Builder} from the endpoint, credential, and load-balancing
 * settings held by this component; the caller is responsible for invoking {@code build()}.
 *
 * @return a configured, not-yet-built cluster builder
 * @throws Exception if the configured load balancing policy name cannot be resolved
 */
protected Cluster.Builder createClusterBuilder() throws Exception {
    CassandraLoadBalancingPolicies cassLoadBalancingPolicies = new CassandraLoadBalancingPolicies();
    Cluster.Builder clusterBuilder = Cluster.builder();
    // "hosts" is a comma-separated list of contact points.
    for (String host : hosts.split(",")) {
        clusterBuilder.addContactPoint(host);
    }
    // Builder methods return the builder itself, so reassignment is unnecessary.
    if (port != null) {
        clusterBuilder.withPort(port);
    }
    if (clusterName != null) {
        clusterBuilder.withClusterName(clusterName);
    }
    boolean hasCredentials = username != null && !username.isEmpty() && password != null;
    if (hasCredentials) {
        clusterBuilder.withCredentials(username, password);
    }
    boolean hasLoadBalancingPolicy = loadBalancingPolicy != null && !loadBalancingPolicy.isEmpty();
    if (hasLoadBalancingPolicy) {
        clusterBuilder.withLoadBalancingPolicy(
                cassLoadBalancingPolicies.getLoadBalancingPolicy(loadBalancingPolicy));
    }
    return clusterBuilder;
}
@BeforeClass public static void startCassandra() throws Exception { //Start the Embedded Cassandra Service cassandra.start(); final SocketOptions socketOptions = new SocketOptions(); // Setting this to 0 disables read timeouts. socketOptions.setReadTimeoutMillis(0); // This defaults to 5 s. Increase to a minute. socketOptions.setConnectTimeoutMillis(60 * 1000); cluster = Cluster.builder() .addContactPoint(CASSANDRA_HOST) .withClusterName("beam") .withSocketOptions(socketOptions) .build(); session = cluster.connect(); createCassandraData(); }
/**
 * Starts the embedded Cassandra service, connects a client session with generous timeouts,
 * and seeds the test data before any test in the class runs.
 */
@BeforeClass
public static void startCassandra() throws Exception {
    //Start the Embedded Cassandra Service
    cassandra.start();
    final SocketOptions socketOptions = new SocketOptions();
    // Setting this to 0 disables read timeouts.
    socketOptions.setReadTimeoutMillis(0);
    // This defaults to 5 s. Increase to a minute.
    socketOptions.setConnectTimeoutMillis(60 * 1000);
    cluster = Cluster.builder()
            .addContactPoint(CASSANDRA_HOST)
            .withClusterName("beam")
            .withSocketOptions(socketOptions)
            .build();
    session = cluster.connect();
    // Seed the keyspace/tables used by the tests.
    createCassandraData();
}
/**
 * Builds and caches a {@code Cluster} for the given endpoint, using token-aware round-robin
 * load balancing and a fixed-size local connection pool.
 *
 * @param clName cluster name reported to the driver
 * @param contactPoint initial node to contact
 * @param connections core and max connections per local host
 * @param port native protocol port
 * @param username optional credential; ignored unless password is also non-null
 * @param password optional credential; ignored unless username is also non-null
 * @return the freshly built cluster (also stored in the {@code cluster} field)
 */
@Override
public Cluster registerCluster(String clName, String contactPoint, int connections,
        int port, String username, String password) {
    // Core == max connections, so the local pool never resizes.
    PoolingOptions poolingOpts =
            new PoolingOptions()
                    .setConnectionsPerHost(HostDistance.LOCAL, connections, connections)
                    .setMaxRequestsPerConnection(HostDistance.LOCAL, 32768);
    Cluster.Builder clusterBuilder =
            Cluster.builder()
                    .withClusterName(clName)
                    .withPort(port)
                    .withPoolingOptions(poolingOpts)
                    // Token-aware routing wrapped around plain round-robin.
                    .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()))
                    .addContactPoint(contactPoint);
    boolean hasCredentials = (username != null) && (password != null);
    if (hasCredentials) {
        clusterBuilder.withCredentials(username, password);
    }
    cluster = clusterBuilder.build();
    return cluster;
}
/**
 * Builds and caches a {@code Cluster} for the given endpoint, using token-aware round-robin
 * load balancing and a fixed-size local connection pool.
 *
 * @param clName cluster name reported to the driver
 * @param contactPoint initial node to contact
 * @param connections core and max connections per local host
 * @param port native protocol port
 * @param username optional credential; ignored unless password is also non-null
 * @param password optional credential; ignored unless username is also non-null
 * @return the freshly built cluster (also stored in the {@code cluster} field)
 */
@Override
public Cluster registerCluster(String clName, String contactPoint, int connections,
        int port, String username, String password) {
    // Core == max connections, so the local pool never resizes.
    PoolingOptions poolingOpts = new PoolingOptions()
            .setConnectionsPerHost(HostDistance.LOCAL, connections, connections)
            .setMaxRequestsPerConnection(HostDistance.LOCAL, 32768);
    Cluster.Builder clusterBuilder = Cluster.builder()
            .withClusterName(clName)
            .addContactPoint(contactPoint)
            .withPoolingOptions(poolingOpts)
            .withPort(port)
            // Token-aware routing wrapped around plain round-robin.
            .withLoadBalancingPolicy(
                    new TokenAwarePolicy(
                            new RoundRobinPolicy()
                    )
            );
    // Credentials are optional; only applied when both are supplied.
    if ((username != null) && (password != null)) {
        clusterBuilder = clusterBuilder.withCredentials(username, password);
    }
    cluster = clusterBuilder.build();
    return cluster;
}
/**
 * Builds a Cassandra {@code Cluster} for the entity store from the active configuration:
 * cluster name, contact points, and credentials, plus any adjustments applied by
 * {@code customConfiguration}.
 */
@Override
public Cluster build( CassandraEntityStoreConfiguration config )
{
    this.config = config;
    Collection<InetSocketAddress> connectionPoints = cassandraConnectionPoints();
    String clusterName = clusterName( config );
    Cluster.Builder builder =
        Cluster.builder()
               .withClusterName( clusterName )
               .addContactPointsWithPorts( connectionPoints )
               .withCredentials( username(), password() );
    // Give extensions a chance to adjust the builder before building.
    return customConfiguration( builder ).build();
}
@VisibleForTesting void build() { builder = Cluster.builder() .withClusterName("Scalar Cluster") .addContactPoints(config.getContactPoints().toArray(new String[0])) .withPort( config.getContactPort() == 0 ? DEFAULT_CASSANDRA_PORT : config.getContactPort()) // .withCompression ? // .withPoolingOptions ? .withRetryPolicy(DefaultRetryPolicy.INSTANCE) .withLoadBalancingPolicy( new TokenAwarePolicy(DCAwareRoundRobinPolicy.builder().build())); }
/**
 * Connects to the FlareBot Cassandra cluster described by the given configuration.
 *
 * <p>Required config keys: {@code cassandra.username} and {@code cassandra.password};
 * {@code cassandra.nodes} optionally lists contact-point IPs.
 *
 * @param config bot configuration holding the Cassandra connection settings
 * @throws IllegalStateException if the username or password is missing from the config
 */
public CassandraController(JSONConfig config) {
    // Fail fast with a clear message instead of the opaque NoSuchElementException that an
    // unchecked Optional.get() would throw when a credential key is absent.
    if (!config.getString("cassandra.username").isPresent()
            || !config.getString("cassandra.password").isPresent()) {
        throw new IllegalStateException(
                "Cassandra credentials missing: both cassandra.username and cassandra.password must be set");
    }
    Cluster.Builder builder = Cluster.builder().withClusterName("FlareBot Nodes")
            .withCredentials(config.getString("cassandra.username").get(),
                    config.getString("cassandra.password").get())
            .withPoolingOptions(new PoolingOptions()
                    .setConnectionsPerHost(HostDistance.LOCAL, 2, 4)
                    .setConnectionsPerHost(HostDistance.REMOTE, 2, 4));
    config.getArray("cassandra.nodes")
            .ifPresent(array -> array.forEach(ip -> builder.addContactPoint(ip.getAsString())));
    cluster = builder.build();
    session = cluster.connect();
}
/**
 * Applies the optional cluster name from the topology configuration to the builder.
 * Leaves the builder untouched when no name is configured.
 */
private void configureOther() {
    final String nameConfiguration = (String) configuration.get(TRIDENT_CASSANDRA_CLUSTER_NAME);
    if (StringUtils.isEmpty(nameConfiguration)) {
        return;
    }
    builder = builder.withClusterName(nameConfiguration);
}
/**
 * Spins up three cluster handles against the CCM test cluster: two regular clusters whose
 * schema-change listeners are compared, and one spied cluster with schema metadata disabled.
 * Also creates the two keyspaces the tests operate on.
 *
 * <p>NOTE(review): reaches into driver internals ({@code cluster.manager.controlConnection})
 * to spy on the control connection; this relies on test-package access to driver internals.
 */
@BeforeClass(groups = "short")
public void setup() throws InterruptedException {
    Cluster.Builder builder = Cluster.builder()
            .addContactPoints(getContactPoints())
            .withPort(ccm().getBinaryPort())
            // Debouncing is disabled so schema events are delivered immediately in tests.
            .withQueryOptions(nonDebouncingQueryOptions());
    cluster1 = builder.build();
    cluster2 = builder.build();
    // Third cluster with schema metadata disabled, wrapped in a spy for verification.
    schemaDisabledCluster = spy(
            Cluster.builder()
                    .addContactPoints(getContactPoints())
                    .withPort(ccm().getBinaryPort())
                    .withClusterName("schema-disabled")
                    .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false))
                    .build());
    schemaDisabledSession = schemaDisabledCluster.connect();
    // Swap in a spied control connection so interactions can be verified.
    schemaDisabledControlConnection = spy(schemaDisabledCluster.manager.controlConnection);
    schemaDisabledCluster.manager.controlConnection = schemaDisabledControlConnection;
    session1 = cluster1.connect();
    cluster2.init();
    cluster1.register(listener1 = mock(SchemaChangeListener.class));
    cluster2.register(listener2 = mock(SchemaChangeListener.class));
    listeners = Lists.newArrayList(listener1, listener2);
    schemaDisabledCluster.register(schemaDisabledListener = mock(SchemaChangeListener.class));
    // Registration itself must notify the listener exactly once.
    verify(schemaDisabledListener, times(1)).onRegister(schemaDisabledCluster);
    // One lowercase and one case-sensitive keyspace to exercise identifier handling.
    execute(CREATE_KEYSPACE, "lowercase");
    execute(CREATE_KEYSPACE, "\"CaseSensitive\"");
}