/**
 * Unwraps a driver result object: if {@code value} is a {@link ResultSet} or a
 * {@code Result}, materializes it into its list of rows via {@code all()};
 * any other value is passed through unchanged.
 */
private static Object resultSet(final Object value) {
    if (value instanceof ResultSet) {
        return ((ResultSet) value).all();
    }
    if (value instanceof Result) {
        return ((Result) value).all();
    }
    return value;
}
@Override public Map<String, String> buildResultsCQL( final ResultSet resultSet ) { final Map<String, String> results = new HashMap<>(); resultSet.all().forEach( row -> { @SuppressWarnings("unchecked") List<Object> keys = (List) deserializeMapEntryKey(row.getBytes("key")); String value = (String)DataType.text().deserialize( row.getBytes("value"), ProtocolVersion.NEWEST_SUPPORTED ); // the actual string key value is the last element results.put((String)keys.get(keys.size() -1), value); }); return results; } }
/**
 * Returns the names of all queues by scanning the queues table and
 * projecting the queue-name column.
 */
@Override
public List<String> getListOfQueues() {

    logger.trace( "getListOfQueues " );

    // full scan of the queues table; only the name column is read back
    final Statement selectAll = QueryBuilder.select().all().from( TABLE_QUEUES );
    final ResultSet resultSet = cassandraClient.getApplicationSession().execute( selectAll );

    return resultSet.all()
            .stream()
            .map( row -> row.getString( COLUMN_QUEUE_NAME ) )
            .collect( Collectors.toList() );
}
/**
 * Legacy partition-key discovery: issues one DISTINCT partition-key query per
 * combination of the supplied filter prefixes and concatenates all rows.
 *
 * @param table the Cassandra table whose partition keys are queried
 * @param filterPrefixes one candidate-value set per leading partition-key column
 * @return all rows returned across every prefix combination
 */
private Iterable<Row> queryPartitionKeysLegacyWithMultipleQueries(CassandraTable table, List<Set<Object>> filterPrefixes)
{
    CassandraTableHandle tableHandle = table.getTableHandle();
    List<CassandraColumnHandle> partitionKeyColumns = table.getPartitionKeyColumns();

    // one query per element of the cartesian product of the prefix value sets
    Set<List<Object>> filterCombinations = Sets.cartesianProduct(filterPrefixes);

    ImmutableList.Builder<Row> rowList = ImmutableList.builder();
    for (List<Object> combination : filterCombinations) {
        Select partitionKeys = CassandraCqlUtils.selectDistinctFrom(tableHandle, partitionKeyColumns);
        addWhereClause(partitionKeys.where(), partitionKeyColumns, combination);
        // ResultSet.all() never returns null (empty list for no rows), so the
        // previous null/isEmpty guard was dead code; addAll of an empty list is a no-op
        rowList.addAll(execute(partitionKeys).all());
    }
    return rowList.build();
}
/**
 * Looks up every token UUID stored for the given principal key.
 *
 * @param principalKeyBuffer serialized principal row key; must not be null
 * @return the token UUIDs found (possibly empty, never null)
 */
@Override
public List<UUID> getTokensForPrincipal(ByteBuffer principalKeyBuffer){

    Preconditions.checkNotNull(principalKeyBuffer, "principal key bytebuffer cannot be null");

    // token UUIDs live in column1 of the principal-tokens wide row
    final Statement query = QueryBuilder
            .select()
            .column("column1")
            .from(PRINCIPAL_TOKENS_TABLE)
            .where(QueryBuilder.eq("key", principalKeyBuffer));

    final List<Row> tokenRows = session.execute(query).all();
    final List<UUID> tokenUUIDs = new ArrayList<>(tokenRows.size());
    for (Row tokenRow : tokenRows) {
        tokenUUIDs.add(tokenRow.getUUID("column1"));
    }

    logger.trace("getTokensForPrincipal, token UUIDs: {}", tokenUUIDs);

    return tokenUUIDs;
}
/**
 * Smoke-tests the session by reading the server release version from
 * {@code system.local}; fails if anything other than exactly one row returns.
 */
private static void checkConnectivity(CassandraSession session)
{
    List<Row> versionRows = session.execute("SELECT release_version FROM system.local").all();
    // system.local always holds a single row describing this node
    assertEquals(versionRows.size(), 1);
    String releaseVersion = versionRows.get(0).getString(0);
    log.info("Cassandra version: %s", releaseVersion);
}
/**
 * Fetches distinct partition keys with a single query, expressing the filter
 * prefixes as IN clauses rather than one query per combination.
 */
private Iterable<Row> queryPartitionKeysWithInClauses(CassandraTable table, List<Set<Object>> filterPrefixes)
{
    CassandraTableHandle handle = table.getTableHandle();
    List<CassandraColumnHandle> keyColumns = table.getPartitionKeyColumns();

    Select distinctKeys = CassandraCqlUtils.selectDistinctFrom(handle, keyColumns);
    addWhereInClauses(distinctKeys.where(), keyColumns, filterPrefixes);

    return execute(distinctKeys).all();
}
/**
 * Reads Cassandra's {@code system.size_estimates} entries for one table.
 *
 * @param keyspaceName keyspace to look up
 * @param tableName table to look up
 * @return one {@link SizeEstimate} per token range row (possibly empty)
 */
@Override
public List<SizeEstimate> getSizeEstimates(String keyspaceName, String tableName)
{
    checkSizeEstimatesTableExist();
    Statement statement = select("range_start", "range_end", "mean_partition_size", "partitions_count")
            .from(SYSTEM, SIZE_ESTIMATES)
            .where(eq("keyspace_name", keyspaceName))
            .and(eq("table_name", tableName));

    ResultSet result = executeWithSession(session -> session.execute(statement));
    ImmutableList.Builder<SizeEstimate> estimates = ImmutableList.builder();
    // ResultSet is Iterable<Row>; iterating it directly avoids materializing an
    // intermediate List via all() before copying into the builder
    for (Row row : result) {
        estimates.add(new SizeEstimate(
                row.getString("range_start"),
                row.getString("range_end"),
                row.getLong("mean_partition_size"),
                row.getLong("partitions_count")));
    }
    return estimates.build();
}
/**
 * Reads the stored counter value for a queue/message-type pair.
 *
 * @return the counter value, or null when no counter row exists
 * @throws QakkaRuntimeException if more than one counter row is found
 */
private Long retrieveCounterFromStorage( String queueName, DatabaseQueueMessage.Type type ) {

    Statement query = QueryBuilder.select().from( TABLE_MESSAGE_COUNTERS )
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ) )
            .and( QueryBuilder.eq( COLUMN_MESSAGE_TYPE, type.toString()) );

    List<Row> counterRows = cassandraClient.getQueueMessageSession().execute( query ).all();

    if ( counterRows.isEmpty() ) {
        // no counter has been written yet for this queue/type
        return null;
    }
    if ( counterRows.size() > 1 ) {
        throw new QakkaRuntimeException(
                "Multiple rows for counter " + queueName + " type " + type );
    }
    return counterRows.get( 0 ).getLong( COLUMN_COUNTER_VALUE );
}
List<Task> tasks = new ArrayList<>(); List<Row> rows = resultSet.all(); if (rows.size() == 0) { LOGGER.info("Workflow {} not found in datastore", workflowId);
/**
 * Reads the stored counter value for a queue / shard-type / shard-id triple.
 *
 * @return the counter value, or null when no counter row exists
 * @throws QakkaRuntimeException if more than one counter row is found
 */
Long retrieveCounterFromStorage( String queueName, Shard.Type type, long shardId ) {

    Statement query = QueryBuilder.select().from( TABLE_COUNTERS )
            .where( QueryBuilder.eq( COLUMN_QUEUE_NAME, queueName ) )
            .and( QueryBuilder.eq( COLUMN_SHARD_TYPE, type.toString()) )
            .and( QueryBuilder.eq( COLUMN_SHARD_ID, shardId ) );

    List<Row> counterRows = cassandraClient.getQueueMessageSession().execute( query ).all();

    if ( counterRows.isEmpty() ) {
        // counter row not yet created for this shard
        return null;
    }
    if ( counterRows.size() > 1 ) {
        throw new QakkaRuntimeException(
                "Multiple rows for counter " + queueName + " type " + type + " shardId " + shardId );
    }
    return counterRows.get( 0 ).getLong( COLUMN_COUNTER_VALUE );
}
/**
 * Inserts {@code rowsCount} rows with distinct keys and clustering columns
 * into {@code table}, then asserts the resulting row count.
 *
 * @param session session used for inserts and the verification count
 * @param table target schema/table
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
public static void insertIntoTableClusteringKeys(CassandraSession session, SchemaTableName table, int rowsCount)
{
    // plain int loop variable avoids the Integer autoboxing of the original;
    // string concatenation renders it identically to Integer.toString()
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    // COUNT(*) always yields exactly one row, so one() is the idiomatic accessor
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).one().getLong(0), rowsCount);
}
/**
 * Inserts nine rows with two partition-key columns and three clustering
 * columns into {@code table}, then asserts the resulting row count.
 *
 * @param session session used for inserts and the verification count
 * @param table target schema/table
 */
public static void insertIntoTableMultiPartitionClusteringKeys(CassandraSession session, SchemaTableName table)
{
    // plain int loop variable avoids the Integer autoboxing of the original;
    // string concatenation renders it identically to Integer.toString()
    for (int rowNumber = 1; rowNumber < 10; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("partition_one", "partition_one_" + rowNumber)
                .value("partition_two", "partition_two_" + rowNumber)
                .value("clust_one", "clust_one")
                .value("clust_two", "clust_two_" + rowNumber)
                .value("clust_three", "clust_three_" + rowNumber);
        session.execute(insert);
    }
    // COUNT(*) always yields exactly one row, so one() is the idiomatic accessor
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).one().getLong(0), 9);
}
/**
 * Loads all stored properties for the given token.
 *
 * @param tokenUUID token identifier; must not be null
 * @return property-name to deserialized-value map (possibly empty)
 * @throws RuntimeException if a stored property value fails to deserialize
 */
@Override
public Map<String, Object> getTokenInfo(UUID tokenUUID){

    Preconditions.checkNotNull(tokenUUID, "token UUID is required");

    // serialize each known property name so it can match the column1 values
    final List<ByteBuffer> serializedProps = new ArrayList<>();
    for (Object prop : TOKEN_PROPERTIES) {
        serializedProps.add(DataType.serializeValue(prop, ProtocolVersion.NEWEST_SUPPORTED));
    }

    final ByteBuffer serializedKey = DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED);

    final Statement query = QueryBuilder.select().all().from(TOKENS_TABLE)
            .where(QueryBuilder.eq("key", serializedKey))
            .and(QueryBuilder.in("column1", serializedProps))
            .setConsistencyLevel(cassandraConfig.getDataStaxReadCl());

    final Map<String, Object> tokenInfo = new HashMap<>();
    // ResultSet is iterable; each row is one (property name, value) pair
    for (Row row : session.execute(query)) {

        final String name = (String)DataType.text()
                .deserialize(row.getBytes("column1"), ProtocolVersion.NEWEST_SUPPORTED);

        final Object value = deserializeColumnValue(name, row.getBytes("value"));
        if (value == null){
            throw new RuntimeException("error deserializing token info for property: "+name);
        }
        tokenInfo.put(name, value);
    }

    logger.trace("getTokenInfo, info: {}", tokenInfo);

    return tokenInfo;
}
/**
 * Inserts {@code rowsCount} rows sharing one partition key but with
 * increasing clustering values (suitable for inequality-predicate tests),
 * then asserts the resulting row count.
 *
 * @param session session used for inserts and the verification count
 * @param table target schema/table
 * @param date base timestamp; clust_three = date + rowNumber * 10 millis
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
public static void insertIntoTableClusteringKeysInequality(CassandraSession session, SchemaTableName table, Date date, int rowsCount)
{
    // plain int loop variable avoids the Integer autoboxing of the original
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key_1")
                .value("clust_one", "clust_one")
                .value("clust_two", rowNumber)
                .value("clust_three", date.getTime() + rowNumber * 10);
        session.execute(insert);
    }
    // COUNT(*) always yields exactly one row, so one() is the idiomatic accessor
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).one().getLong(0), rowsCount);
}
@Override public Result<AuditLog> getAuditLogs( UUID messageId ) { Statement query = QueryBuilder.select().all().from(TABLE_AUDIT_LOG) .where( QueryBuilder.eq( COLUMN_MESSAGE_ID, messageId ) ); ResultSet rs = cassandraClient.getApplicationSession().execute( query ); final List<AuditLog> auditLogs = rs.all().stream().map( row -> new AuditLog( AuditLog.Action.valueOf( row.getString( COLUMN_ACTION )), AuditLog.Status.valueOf( row.getString( COLUMN_STATUS )), row.getString( COLUMN_QUEUE_NAME ), row.getString( COLUMN_REGION ), row.getUUID( COLUMN_MESSAGE_ID ), row.getUUID( COLUMN_QUEUE_MESSAGE_ID ), row.getLong( COLUMN_TRANSFER_TIME ) ) ).collect( Collectors.toList() ); return new Result<AuditLog>() { @Override public PagingState getPagingState() { return null; // no paging } @Override public List<AuditLog> getEntities() { return auditLogs; } }; }
/**
 * Populates {@code table} with {@code rowsCount} rows exercising every
 * supported Cassandra column type, then asserts the resulting row count.
 *
 * @param session session used for inserts and the verification count
 * @param table target schema/table
 * @param date value written to the typetimestamp column of every row
 * @param rowsCount number of rows to insert (row numbers start at 1)
 */
private static void insertTestData(CassandraSession session, SchemaTableName table, Date date, int rowsCount)
{
    // plain int loop variable avoids the Integer autoboxing of the original;
    // concatenation and String.format render it identically
    for (int rowNumber = 1; rowNumber <= rowsCount; rowNumber++) {
        Insert insert = QueryBuilder.insertInto(table.getSchemaName(), table.getTableName())
                .value("key", "key " + rowNumber)
                .value("typeuuid", UUID.fromString(String.format("00000000-0000-0000-0000-%012d", rowNumber)))
                .value("typeinteger", rowNumber)
                .value("typelong", rowNumber + 1000L)
                .value("typebytes", ByteBuffer.wrap(Ints.toByteArray(rowNumber)).asReadOnlyBuffer())
                .value("typetimestamp", date)
                .value("typeansi", "ansi " + rowNumber)
                .value("typeboolean", rowNumber % 2 == 0)
                // NOTE(review): BigDecimal(double) is normally discouraged, but the
                // produced (inexact) value is part of the existing test fixture, so
                // it is kept to preserve behavior
                .value("typedecimal", new BigDecimal(Math.pow(2, rowNumber)))
                .value("typedouble", Math.pow(4, rowNumber))
                .value("typefloat", (float) Math.pow(8, rowNumber))
                .value("typeinet", InetAddresses.forString("127.0.0.1"))
                .value("typevarchar", "varchar " + rowNumber)
                .value("typevarint", BigInteger.TEN.pow(rowNumber))
                .value("typetimeuuid", UUID.fromString(String.format("d2177dd0-eaa2-11de-a572-001b779c76e%d", rowNumber)))
                .value("typelist", ImmutableList.of("list-value-1" + rowNumber, "list-value-2" + rowNumber))
                .value("typemap", ImmutableMap.of(rowNumber, rowNumber + 1L, rowNumber + 2, rowNumber + 3L))
                .value("typeset", ImmutableSet.of(false, true));
        session.execute(insert);
    }
    // COUNT(*) always yields exactly one row, so one() is the idiomatic accessor
    assertEquals(session.execute("SELECT COUNT(*) FROM " + table).one().getLong(0), rowsCount);
}
}
/**
 * Loads the next page of active shard rows for this queue/region/type and
 * replaces the current iterator with it. Paging position: when no page has
 * been fetched yet (nextStart == 0) we start just after lastShardId if one
 * was supplied, else from shard id 0; afterwards we continue past nextStart.
 */
private void advance(){

    Clause queueNameClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_QUEUE_NAME, queueName);
    Clause regionClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_REGION, region);
    Clause activeClause = QueryBuilder.eq( ShardSerializationImpl.COLUMN_ACTIVE, 1);

    Clause shardIdClause;
    if (nextStart != 0L) {
        // already paging: continue strictly after the last page's end
        shardIdClause = QueryBuilder.gt( ShardSerializationImpl.COLUMN_SHARD_ID, nextStart );
    }
    else if (lastShardId.isPresent()) {
        // first page with an explicit resume point
        shardIdClause = QueryBuilder.gt( ShardSerializationImpl.COLUMN_SHARD_ID, lastShardId.get() );
    }
    else {
        // first page from the very beginning
        shardIdClause = QueryBuilder.gte( ShardSerializationImpl.COLUMN_SHARD_ID, 0L );
    }

    Statement query = QueryBuilder.select().all()
            .from(ShardSerializationImpl.getTableName(shardType))
            .where(queueNameClause)
            .and(regionClause)
            .and(activeClause)
            .and(shardIdClause)
            .limit(PAGE_SIZE);

    List<Row> rows = cassandraClient.getQueueMessageSession().execute(query).all();
    currentIterator = getIteratorFromRows(rows);
}
List<Row> rows = cassandraClient.getQueueMessageSession().execute(query).all();
/** Ensures that a custom payload is propagated throughout pages. */
@Test(groups = "short")
public void should_echo_custom_payload_when_paginating() throws Exception {
    // two rows under the same partition key so the query spans two pages
    session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'a')");
    session().execute("INSERT INTO t1 (c1, c2) VALUES (1, 'b')");

    // fetch size of 1 forces a background fetch per row
    Statement paged = new SimpleStatement("SELECT c2 FROM t1 where c1 = 1");
    paged.setFetchSize(1);
    paged.setOutgoingPayload(payload1);

    ResultSet resultSet = session().execute(paged);
    // drain every page; each fetch should echo the payload back
    resultSet.all();
    assertThat(resultSet.getAllExecutionInfo()).extracting("incomingPayload").containsOnly(payload1);
}