/**
 * Converts a Cassandra-partitioner {@code Range<Token>} into the driver's {@link TokenRange}
 * representation, going through the token factory's canonical string form.
 */
private TokenRange rangeToTokenRange(Range<Token> range) {
  TokenFactory factory = this.partitioner.getTokenFactory();
  Metadata driverMetadata = this.session.metadata();
  // The driver can only parse tokens from their string representation.
  String leftBound = factory.toString(range.left);
  String rightBound = factory.toString(range.right);
  return driverMetadata.newTokenRange(
      driverMetadata.newToken(leftBound), driverMetadata.newToken(rightBound));
}
/**
 * Checks that {@link Metadata#newToken} applied to a serialized partition key yields the same
 * token value that Cassandra computes server-side via {@code token()}.
 */
@Test(groups = "short")
public void should_create_token_from_partition_key() {
  // Token computed by Cassandra itself for the partition key i = 1.
  Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one();
  Token serverSideToken = row.getToken(0);
  ProtocolVersion version =
      cluster().getConfiguration().getProtocolOptions().getProtocolVersion();
  // The driver must derive the identical token from the serialized key bytes.
  assertThat(cluster().getMetadata().newToken(TypeCodec.cint().serialize(1, version)))
      .isEqualTo(serverSideToken);
}
@Test(groups = "short") public void should_expose_token_and_range_creation_methods() { Metadata metadata = cluster().getMetadata(); // Pick a random range TokenRange range = metadata.getTokenRanges().iterator().next(); Token start = metadata.newToken(range.getStart().toString()); Token end = metadata.newToken(range.getEnd().toString()); assertThat(metadata.newTokenRange(start, end)).isEqualTo(range); }
/**
 * Ensures that calling {@link Metadata#newToken(String)} on a Cluster that has schema metadata
 * disabled will throw an {@link IllegalStateException}.
 *
 * @jira_ticket JAVA-858
 * @since 2.0.11
 */
@Test(groups = "short", expectedExceptions = IllegalStateException.class)
public void should_throw_illegal_state_exception_on_newToken_with_metadata_disabled() {
  Cluster metadataDisabledCluster =
      Cluster.builder()
          .addContactPoints(getContactPoints())
          .withPort(ccm().getBinaryPort())
          .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false))
          .build();
  try {
    metadataDisabledCluster.init();
    // Without metadata the driver does not know the partitioner, so this must fail.
    metadataDisabledCluster.getMetadata().newToken("0x00");
  } finally {
    metadataDisabledCluster.close();
  }
}
/**
 * Returns the driver token range that contains the token of the given partition key.
 * The Hadoop-side token is bridged to the driver's token type via its canonical string form.
 *
 * @throws RuntimeException if no known range contains the key's token.
 */
public TokenRange getRange(ByteBuffer key) {
  Token hadoopToken = partitioner.getToken(key);
  String tokenString = partitioner.getTokenFactory().toString(hadoopToken);
  com.datastax.driver.core.Token driverToken = metadata.newToken(tokenString);
  for (TokenRange candidate : rangeMap.keySet()) {
    if (candidate.contains(driverToken)) {
      return candidate;
    }
  }
  throw new RuntimeException("Invalid token information returned by describe_ring: " + rangeMap);
}
/**
 * Looks up the driver token range owning the given partition key.
 * The partitioner's token is converted to the driver's token type through its string form.
 *
 * @throws RuntimeException if the key's token falls outside every known range.
 */
public TokenRange getRange(ByteBuffer key) {
  Token localToken = partitioner.getToken(key);
  com.datastax.driver.core.Token driverToken =
      metadata.newToken(partitioner.getTokenFactory().toString(localToken));
  for (TokenRange owned : rangeMap.keySet()) {
    if (owned.contains(driverToken)) {
      return owned;
    }
  }
  throw new RuntimeException("Invalid token information returned by describe_ring: " + rangeMap);
}
/**
 * Maps a partitioner-level {@code Range<Token>} onto a driver {@link TokenRange} by
 * serializing each bound to its string form and re-parsing it through the driver metadata.
 */
private TokenRange rangeToTokenRange(Range<Token> range) {
  Metadata driverMetadata = this.session.metadata();
  TokenFactory factory = this.partitioner.getTokenFactory();
  return driverMetadata.newTokenRange(
      driverMetadata.newToken(factory.toString(range.left)),
      driverMetadata.newToken(factory.toString(range.right)));
}
/**
 * Finds which known token range contains the given partition key.
 * Bridges the partitioner's token to the driver's representation via its string form.
 *
 * @throws RuntimeException if no range in {@code rangeMap} contains the token.
 */
public TokenRange getRange(ByteBuffer key) {
  Token partitionerToken = partitioner.getToken(key);
  com.datastax.driver.core.Token driverToken =
      metadata.newToken(partitioner.getTokenFactory().toString(partitionerToken));
  for (TokenRange range : rangeMap.keySet()) {
    if (range.contains(driverToken)) {
      return range;
    }
  }
  throw new RuntimeException("Invalid token information returned by describe_ring: " + rangeMap);
}
/**
 * Returns an Rx transformer that groups incoming bound statements by the token range that owns
 * their routing key, packs each group into batch statements, executes them, and emits the size
 * of each executed batch.
 */
private Observable.Transformer<BoundStatement, Integer> applyMicroBatching() {
    return tObservable -> tObservable
        .groupBy(b -> {
            // Group statements by the token range owning their partition key, so each
            // batch targets a single replica set.
            ByteBuffer routingKey = b.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, codecRegistry);
            Token token = metadata.newToken(routingKey);
            for (TokenRange tokenRange : session.getCluster().getMetadata().getTokenRanges()) {
                if (tokenRange.contains(token)) {
                    return tokenRange;
                }
            }
            log.warn("Unable to find any Cassandra node to insert token " + token.toString());
            // Fallback: file the statement under an arbitrary range rather than dropping it.
            return session.getCluster().getMetadata().getTokenRanges().iterator().next();
        })
        // Turn each per-range group into batch statements.
        .flatMap(g -> g.compose(new BoundBatchStatementTransformer()))
        .flatMap(batch -> rxSession
            .execute(batch)
            .compose(applyInsertRetryPolicy())
            // Emit the number of statements contained in the executed batch.
            .map(resultSet -> batch.size())
        );
}
/**
 * Builds an Rx transformer that micro-batches bound statements: statements are grouped by the
 * token range owning their routing key, each group is batched and executed, and the batch size
 * is emitted per executed batch.
 */
private Observable.Transformer<BoundStatement, Integer> applyMicroBatching() {
    return tObservable -> tObservable
        .groupBy(b -> {
            // Determine the owning token range from the statement's routing key so that
            // statements batched together go to the same replica set.
            ByteBuffer routingKey = b.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, codecRegistry);
            Token token = metadata.newToken(routingKey);
            for (TokenRange tokenRange : session.getCluster().getMetadata().getTokenRanges()) {
                if (tokenRange.contains(token)) {
                    return tokenRange;
                }
            }
            log.warn("Unable to find any Cassandra node to insert token " + token.toString());
            // No owning range found: fall back to an arbitrary range instead of dropping data.
            return session.getCluster().getMetadata().getTokenRanges().iterator().next();
        })
        // Convert each per-range group into batch statements.
        .flatMap(g -> g.compose(new BoundBatchStatementTransformer()))
        .flatMap(batch -> rxSession
            .execute(batch)
            .compose(applyInsertRetryPolicy())
            // Report how many statements the executed batch carried.
            .map(resultSet -> batch.size())
        );
}
@Test(groups = "short") public void should_expose_token_and_range_creation_methods() { Metadata metadata = cluster().getMetadata(); // Pick a random range TokenRange range = metadata.getTokenRanges().iterator().next(); Token start = metadata.newToken(range.getStart().toString()); Token end = metadata.newToken(range.getEnd().toString()); assertThat(metadata.newTokenRange(start, end)).isEqualTo(range); }
/**
 * Verifies that the token the driver builds from a serialized partition key via
 * {@link Metadata#newToken} equals the one Cassandra reports through {@code token()}.
 */
@Test(groups = "short")
public void should_create_token_from_partition_key() {
  Metadata metadata = cluster().getMetadata();
  // Ask Cassandra for the token of partition key i = 1.
  Row row = session().execute("SELECT token(i) FROM foo WHERE i = 1").one();
  Token serverToken = row.getToken(0);
  ProtocolVersion protocolVersion =
      cluster().getConfiguration().getProtocolOptions().getProtocolVersion();
  // Serialize the same key locally and make sure the driver derives the same token.
  assertThat(metadata.newToken(TypeCodec.cint().serialize(1, protocolVersion)))
      .isEqualTo(serverToken);
}
/**
 * Returns an iterator over the keys in the given key range, restricted to the given column
 * slice. Key bounds are bound as tokens on the prepared ranged-keys query.
 *
 * @param query the key/slice range to scan
 * @param txh the transaction supplying the read consistency level
 * @return an iterator over the matching keys
 * @throws BackendException if the store does not support ordered scans, or the query fails
 */
@Override
public KeyIterator getKeys(final KeyRangeQuery query, final StoreTransaction txh) throws BackendException {
    // Ranged key scans only make sense when tokens preserve key order.
    if (!this.storeManager.getFeatures().hasOrderedScan()) {
        throw new PermanentBackendException("This operation is only allowed when the byteorderedpartitioner is used.");
    }
    final Metadata metadata = this.session.getCluster().getMetadata();
    return Try.of(() -> new CQLResultSetKeyIterator(
        query,
        this.getter,
        this.session.execute(this.getKeysRanged.bind()
            // Key bounds are bound as tokens so the server scans the ring range directly.
            .setToken(KEY_START_BINDING, metadata.newToken(query.getKeyStart().asByteBuffer()))
            .setToken(KEY_END_BINDING, metadata.newToken(query.getKeyEnd().asByteBuffer()))
            .setBytes(SLICE_START_BINDING, query.getSliceStart().asByteBuffer())
            .setBytes(SLICE_END_BINDING, query.getSliceEnd().asByteBuffer())
            // Page size and consistency come from the store configuration / transaction.
            .setFetchSize(this.storeManager.getPageSize())
            .setConsistencyLevel(getTransaction(txh).getReadConsistencyLevel()))))
        // Translate any driver-level failure into a BackendException.
        .getOrElseThrow(EXCEPTION_MAPPER);
}
/**
 * Ensures that calling {@link Metadata#newToken(String)} on a Cluster that has schema metadata
 * disabled will throw an {@link IllegalStateException}.
 *
 * @jira_ticket JAVA-858
 * @since 2.0.11
 */
@Test(groups = "short", expectedExceptions = IllegalStateException.class)
public void should_throw_illegal_state_exception_on_newToken_with_metadata_disabled() {
  Cluster noMetadataCluster =
      Cluster.builder()
          .addContactPoints(getContactPoints())
          .withPort(ccm().getBinaryPort())
          .withQueryOptions(nonDebouncingQueryOptions().setMetadataEnabled(false))
          .build();
  try {
    noMetadataCluster.init();
    // The driver cannot parse a token without partitioner metadata; expect a failure here.
    noMetadataCluster.getMetadata().newToken("0x00");
  } finally {
    noMetadataCluster.close();
  }
}