/**
 * Builds the CQL statement used to look up the workflow_id for a particular
 * task_id in the "task_lookup" table.
 *
 * @return CQL query string with a single bind marker for the task id
 */
public String getSelectTaskFromLookupTableStatement() {
    // single-column projection keyed by task id; caller binds the marker
    return QueryBuilder
            .select(WORKFLOW_ID_KEY)
            .from(keyspace, TABLE_TASK_LOOKUP)
            .where(eq(TASK_ID_KEY, bindMarker()))
            .getQueryString();
}
private V readValueCQL(CacheScope scope, K key, TypeReference typeRef){ Preconditions.checkNotNull(scope, "scope is required"); Preconditions.checkNotNull(key, "key is required"); final String rowKeyString = scope.getApplication().getUuid().toString(); final int bucket = BUCKET_LOCATOR.getCurrentBucket(rowKeyString); // determine column name based on K key to string final String columnName = key.toString(); final Clause inKey = QueryBuilder.eq("key", getPartitionKey(scope, rowKeyString, bucket) ); final Clause inColumn = QueryBuilder.eq("column1", DataType.text().serialize(columnName, ProtocolVersion.NEWEST_SUPPORTED) ); final Statement statement = QueryBuilder.select().all().from(SCOPED_CACHE_TABLE) .where(inKey) .and(inColumn) .setConsistencyLevel(cassandraConfig.getDataStaxReadCl()); final ResultSet resultSet = session.execute(statement); final com.datastax.driver.core.Row row = resultSet.one(); if (row == null){ if(logger.isDebugEnabled()){ logger.debug("Cache value not found for key {}", key ); } return null; } try { return MAPPER.readValue(row.getBytes("value").array(), typeRef); } catch (IOException ioe) { logger.error("Unable to read cached value", ioe); throw new RuntimeException("Unable to read cached value", ioe); } }
/**
 * Fetches the token-range size estimates for the given table from
 * system.size_estimates (one row per token range).
 *
 * @param keyspaceName keyspace the table belongs to
 * @param tableName    table to fetch estimates for
 * @return immutable list of size estimates, possibly empty
 */
@Override
public List<SizeEstimate> getSizeEstimates(String keyspaceName, String tableName) {
    checkSizeEstimatesTableExist();

    Statement query = select("range_start", "range_end", "mean_partition_size", "partitions_count")
            .from(SYSTEM, SIZE_ESTIMATES)
            .where(eq("keyspace_name", keyspaceName))
            .and(eq("table_name", tableName));

    ResultSet rows = executeWithSession(session -> session.execute(query));

    ImmutableList.Builder<SizeEstimate> builder = ImmutableList.builder();
    for (Row row : rows.all()) {
        builder.add(new SizeEstimate(
                row.getString("range_start"),
                row.getString("range_end"),
                row.getLong("mean_partition_size"),
                row.getLong("partitions_count")));
    }
    return builder.build();
}
/**
 * Loads map entries for a batch of keys with a single CQL IN query and hands
 * the raw result set to the supplied builder for materialization.
 *
 * @param scope   map scope used to derive each entry's partition key
 * @param keys    entry keys to fetch
 * @param builder converts the driver result set into the caller's result type
 * @return whatever the builder produces from the result set
 */
private <T> T getValuesCQL(
        final MapScope scope, final Collection<String> keys, final ResultsBuilderCQL<T> builder) {

    // serialize every requested key into its partition-key bytes
    final List<ByteBuffer> partitionKeys = new ArrayList<>(keys.size());
    for (final String key : keys) {
        partitionKeys.add(getMapEntryPartitionKey(scope, key));
    }

    final Statement query = QueryBuilder.select().all().from(MAP_ENTRIES_TABLE)
            .where(QueryBuilder.in("key", partitionKeys));

    return builder.buildResultsCQL(session.execute(query));
}
selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions();
/**
 * Returns a lazily-evaluated iterable over every subscription stored under the
 * single well-known subscription row; the query runs each time the iterable is
 * iterated.
 */
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptions", absolute = true)
@Override
public Iterable<OwnedSubscription> getAllSubscriptions() {
    return () -> {
        Statement query = select(subscriptionNameColumn(), subscriptionColumn())
                .from(CF_NAME)
                .where(eq(rowkeyColumn(), ROW_KEY))
                .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)
                .setFetchSize(200);

        ResultSet resultSet = _keyspace.getCqlSession().execute(query);

        return StreamSupport.stream(resultSet.spliterator(), false)
                .map(this::rowToOwnedSubscription)
                .iterator();
    };
}
/** Verifies that CONTAINS on an indexed list column matches the expected row. */
@Test(groups = "short")
public void should_handle_contains_on_list_with_index() {
    PreparedStatement preparedByBuyer =
        session()
            .prepare(
                select("id", "description", "buyers")
                    .from("products")
                    .where(contains("buyers", bindMarker("buyer"))));

    Row matched = session().execute(preparedByBuyer.bind().setInt("buyer", 4)).one();

    assertThat(matched).isNotNull();
    assertThat(matched.getInt("id")).isEqualTo(38471);
    assertThat(matched.getList("buyers", Integer.class)).contains(4);
}
.execute( select("a", "b", "e", count("b"), max("e")) .from(table) .where(eq("b", 2)) .groupBy("a", "b") .allowFiltering())) .containsExactly(row(1, 2, 6, 2L, 12), row(2, 2, 6, 1L, 6)); .execute( select("a", "b", "e", count("b"), max("e")) .from(table) .groupBy("a", "b") .execute( select("a", "b", "e", count("b"), max("e")) .from(table) .from(table) assertThat(session().execute(select("a", count("a")).distinct().from(table).groupBy("a"))) .containsExactly(row(1, 1L), row(2, 1L), row(4, 1L)); session().execute(select("a", count("a")).distinct().from(table).groupBy("a").limit(2))) .containsExactly(row(1, 1L), row(2, 1L)); .from(table) .where(eq("a", 1)) .groupBy("a", "b", "c"))) .containsExactly(row(1, 2, 6, 1L, 6), row(1, 2, 12, 1L, 12), row(1, 4, 12, 2L, 24));
select = select().all().from("foo").where(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); assertEquals(select.toString(), query); select().all().from("foo").where().and(eq("k", 4)).and(gt("c", "a")).and(lte("c", "z")); assertEquals(select.toString(), query); .from("foo") .where(in("a", InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.3"))) .from("foo") select = select("a").from("foo").where(in("k")); assertEquals(select.toString(), query); select = select("a").from("foo").where(in("k", bindMarker())); assertEquals(select.toString(), query); select = select("a").distinct().from("foo").where(eq("k", 1)); assertEquals(select.toString(), query); select = select("a", "b").distinct().from("foo").where(eq("k", 1)); assertEquals(select.toString(), query); select = select().countAll().from("foo"); assertEquals(select.toString(), query); select = select().all().from("foo").where(gt("k", 42)).limit(42); assertEquals(select.toString(), query);
selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); Select selectStmt = selectBuilder.from(table); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>();
/**
 * Removes a transfer-log entry identified by (queueName, dest, messageId),
 * failing if no such entry exists.
 *
 * NOTE(review): the {@code source} parameter is accepted but never used in either
 * the lookup or the delete — confirm against the interface contract whether it
 * should participate in the key.
 *
 * @param queueName queue the entry belongs to
 * @param source    source region (currently unused here)
 * @param dest      destination region of the transfer
 * @param messageId message id of the transfer
 * @throws QakkaException when no matching transfer-log entry is found
 */
@Override
public void removeTransferLog(String queueName, String source, String dest, UUID messageId)
        throws QakkaException {

    // Existence check only: limit(1) avoids pulling every matching row back
    // just to learn that at least one exists.
    Statement query = QueryBuilder.select().all().from(TABLE_TRANSFER_LOG)
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, queueName))
            .and(QueryBuilder.eq(COLUMN_DEST_REGION, dest))
            .and(QueryBuilder.eq(COLUMN_MESSAGE_ID, messageId))
            .limit(1);

    ResultSet rs = cassandraClient.getApplicationSession().execute(query);
    if (rs.getAvailableWithoutFetching() == 0) {
        throw new QakkaException(
                "Transfer log entry not found for queueName=" + queueName
                        + " dest=" + dest
                        + " messageId=" + messageId);
    }

    Statement deleteQuery = QueryBuilder.delete().from(TABLE_TRANSFER_LOG)
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, queueName))
            .and(QueryBuilder.eq(COLUMN_DEST_REGION, dest))
            .and(QueryBuilder.eq(COLUMN_MESSAGE_ID, messageId));
    cassandraClient.getApplicationSession().execute(deleteQuery);
}
/** Checks that a custom payload set on a built statement is echoed back by the server. */
@Test(groups = "short")
public void should_echo_custom_payload_when_building_statement() throws Exception {
    Statement query = select("c2").from("t1").where(eq("c1", 1)).setOutgoingPayload(payload1);

    ResultSet resultSet = session().execute(query);

    Map<String, ByteBuffer> echoed = resultSet.getExecutionInfo().getIncomingPayload();
    assertThat(echoed).isEqualTo(payload1);
}
@Test(groups = "short") public void should_support_per_partition_limit() throws Exception { assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2))) .contains( row(0, 0, 0), assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(6))) .hasSize(6); assertThat(session().execute(select().all().from("test_ppl").perPartitionLimit(2).limit(5))) .contains(row(0, 0, 0), row(0, 1, 1), row(1, 0, 0), row(1, 1, 1), row(2, 0, 0)); .execute(select().all().from("test_ppl").where(in("a", 2, 3)).perPartitionLimit(2))) .contains(row(2, 0, 0), row(2, 1, 1), row(3, 0, 0), row(3, 1, 1)); assertThat( .from("test_ppl") .where(in("a", 2, 3)) .perPartitionLimit(bindMarker()) .from("test_ppl") .where(in("a", 1, 2, 3)) .perPartitionLimit(bindMarker()) .from("test_ppl") .from("test_ppl") .from("test_ppl")
/** Verifies that CONTAINS KEY on an indexed map column matches the expected row. */
@Test(groups = "short")
public void should_handle_contains_key_on_map_with_index() {
    PreparedStatement preparedByFeature =
        session()
            .prepare(
                select("id", "description", "features_keys")
                    .from("products")
                    .where(containsKey("features_keys", bindMarker("feature"))));

    Row matched =
        session().execute(preparedByFeature.bind().setString("feature", "refresh-rate")).one();

    assertThat(matched).isNotNull();
    assertThat(matched.getInt("id")).isEqualTo(34134);
    assertThat(matched.getMap("features_keys", String.class, String.class))
        .containsEntry("refresh-rate", "400hz");
  }
}
/**
 * Builds a COUNT(*) query over the organization-members table that tests
 * whether the given user is a member of the given organization.
 *
 * @param organizationId organization UUID as a string
 * @param userId         user UUID as a string
 * @return the built count statement (not executed here)
 */
private Statement createQueryToSeeIfMemberOfOrg(String organizationId, String userId) {
    final UUID organizationUuid = UUID.fromString(organizationId);
    final UUID memberUuid = UUID.fromString(userId);

    return queryBuilder
            .select()
            .countAll()
            .from(Organizations.TABLE_NAME_MEMBERS)
            .where(eq(ORG_ID, organizationUuid))
            .and(eq(USER_ID, memberUuid));
}
/** Verifies that CONTAINS on an indexed set column matches every expected row. */
@Test(groups = "short")
public void should_handle_contains_on_set_with_index() {
    PreparedStatement preparedByCategory =
        session()
            .prepare(
                select("id", "description", "categories")
                    .from("products")
                    .where(contains("categories", bindMarker("category"))));

    ResultSet rs = session().execute(preparedByCategory.bind().setString("category", "hdtv"));

    assertThat(rs.getAvailableWithoutFetching()).isEqualTo(2);
    for (Row matched : rs) {
        assertThat(matched.getSet("categories", String.class)).contains("hdtv");
    }
}
.execute( createTable(table) .addPartitionKey("k", DataType.text()) .addColumn("n", DataType.text())); session() .execute( String.format( "CREATE CUSTOM INDEX on %s (n) USING 'org.apache.cassandra.index.sasi.SASIIndex';", table)); session().execute(insertInto(table).value("k", "a").value("cc", 0).value("n", "Hello World")); session().execute(insertInto(table).value("k", "a").value("cc", 1).value("n", "Goodbye World")); session().execute(insertInto(table).value("k", "b").value("cc", 2).value("n", "Hello Moon")); BuiltStatement query = select("n").from(table).where(like("n", "Hello%")); ResultSet r = session().execute(query); assertThat(r.getAvailableWithoutFetching()).isEqualTo(2); assertThat(r.all()) .extracting( new Extractor<Row, String>() {
/**
 * Materializes up to {@code maxRows} rows of the given table as a DataSet backed
 * by a lazily-consumed result-set iterator.
 *
 * NOTE(review): cassandraCluster.connect() appears to open a Session on every call
 * and nothing here closes it — closing eagerly would break the lazily-paged
 * iterator handed to CassandraDataSet, but repeated calls may leak sessions.
 * Confirm whether connect() returns a shared/cached session, or hoist a single
 * session to the enclosing object.
 */
@Override
protected DataSet materializeMainSchemaTable(Table table, List<Column> columns, int maxRows) {
    final Select query = QueryBuilder.select().all().from(keySpaceName, table.getName());
    if (limitMaxRowsIsSet(maxRows)) {
        // only constrain the query when the caller requested a row cap
        query.limit(maxRows);
    }
    final ResultSet resultSet = cassandraCluster.connect().execute(query);
    // hand over the iterator, not the materialized rows — paging stays lazy
    final Iterator<Row> response = resultSet.iterator();
    return new CassandraDataSet(response, columns);
}
/**
 * Counts the analyses rows stored for the given account.
 *
 * @param accountId account identifier; matched by its string form
 * @return the count wrapped in an OptionalLong, or empty when the query
 *         returns no row at all
 */
@Override
public OptionalLong analysesCount(Identifier accountId) {
    Statement countQuery = QueryBuilder.select().countAll()
            .from(TABLE_NAME)
            .where(eq("account_id", accountId.toString()));

    // one() yields the single aggregate row, or null when the result is empty
    Row countRow = connector.session().execute(countQuery).one();
    return countRow == null ? OptionalLong.empty() : OptionalLong.of(countRow.getLong(0));
}
@Override public Result<TransferLog> getAllTransferLogs(PagingState pagingState, int fetchSize ) { Statement query = QueryBuilder.select().all().from(TABLE_TRANSFER_LOG); ResultSet rs = cassandraClient.getApplicationSession().execute( query ); final PagingState newPagingState = rs.getExecutionInfo().getPagingState(); int numReturned = rs.getAvailableWithoutFetching(); for ( int i=0; i<numReturned; i++ ) { Row row = rs.one(); TransferLog tlog = new TransferLog( row.getString( COLUMN_QUEUE_NAME ),