Refine search
/**
 * Retrieve the Cassandra release version.
 *
 * @param session must not be {@literal null}.
 * @return the release {@link Version}.
 * @throws IllegalStateException if {@code system.local} unexpectedly yields no row.
 */
public static Version getReleaseVersion(Session session) {

    Assert.notNull(session, "Session must not be null");

    ResultSet resultSet = session.execute("SELECT release_version FROM system.local;");
    Row row = resultSet.one();

    // system.local should always contain exactly one row for the connected node,
    // but guard explicitly instead of failing with an unhelpful NullPointerException.
    if (row == null) {
        throw new IllegalStateException("Cannot obtain release_version from system.local");
    }

    return Version.parse(row.getString(0));
}
}
/**
 * Resolves the workflow id that owns the given task, using the task-lookup table.
 *
 * @param taskId id of the task (must be a valid UUID string)
 * @return the owning workflow id, or {@code null} when no mapping exists
 * @throws ApplicationException with code BACKEND_ERROR when the lookup fails
 */
@VisibleForTesting
String lookupWorkflowIdFromTaskId(String taskId) {
    try {
        UUID taskUuid = UUID.fromString(taskId);
        Row lookupRow = session.execute(selectTaskLookupStatement.bind(taskUuid)).one();
        if (lookupRow == null) {
            return null;
        }
        return lookupRow.getUUID(WORKFLOW_ID_KEY).toString();
    } catch (Exception e) {
        Monitors.error(CLASS_NAME, "lookupWorkflowIdFromTaskId");
        String errorMsg = String.format("Failed to lookup workflowId from taskId: %s", taskId);
        LOGGER.error(errorMsg, e);
        throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, errorMsg, e);
    }
}
}
/**
 * Fetches the release version reported by the connected Cassandra node.
 *
 * @return the parsed cluster version
 * @throws PrestoException when the version row cannot be retrieved
 */
@Override
public VersionNumber getCassandraVersion() {
    ResultSet rs = executeWithSession(session -> session.execute("select release_version from system.local"));
    Row row = rs.one();
    if (row != null) {
        return VersionNumber.parse(row.getString("release_version"));
    }
    // No row means we could not talk to the cluster properly.
    throw new PrestoException(CASSANDRA_VERSION_ERROR,
            "The cluster version is not available. "
                    + "Please make sure that the Cassandra cluster is up and running, "
                    + "and that the contact points are specified correctly.");
}
/**
 * Loads the per-workflow metadata (task and partition totals).
 *
 * @param workflowId id of the workflow (must be a valid UUID string)
 * @return the workflow metadata
 * @throws ApplicationException with code NOT_FOUND when no such workflow exists
 */
@VisibleForTesting
WorkflowMetadata getWorkflowMetadata(String workflowId) {
    Row totalsRow = session.execute(selectTotalStatement.bind(UUID.fromString(workflowId))).one();
    recordCassandraDaoRequests("getWorkflowMetadata");
    if (totalsRow == null) {
        throw new ApplicationException(ApplicationException.Code.NOT_FOUND,
                String.format("Workflow with id: %s not found in data store", workflowId));
    }
    WorkflowMetadata metadata = new WorkflowMetadata();
    metadata.setTotalTasks(totalsRow.getInt(TOTAL_TASKS_KEY));
    metadata.setTotalPartitions(totalsRow.getInt(TOTAL_PARTITIONS_KEY));
    return metadata;
}
@Override public Task getTask(String taskId) { try { String workflowId = lookupWorkflowIdFromTaskId(taskId); if (workflowId == null) { return null; } // TODO: implement for query against multiple shards ResultSet resultSet = session.execute(selectTaskStatement.bind(UUID.fromString(workflowId), DEFAULT_SHARD_ID, taskId)); return Optional.ofNullable(resultSet.one()) .map(row -> { Task task = readValue(row.getString(PAYLOAD_KEY), Task.class); recordCassandraDaoRequests("getTask", task.getTaskType(), task.getWorkflowType()); recordCassandraDaoPayloadSize("getTask", toJson(task).length(), task.getTaskType(), task.getWorkflowType()); return task; }) .orElse(null); } catch (Exception e) { Monitors.error(CLASS_NAME, "getTask"); String errorMsg = String.format("Error getting task by id: %s", taskId); LOGGER.error(errorMsg, e); throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, errorMsg); } }
/**
 * Loads the body (payload and content type) of a queue message.
 *
 * @param messageId id of the message whose data should be loaded
 * @return the message body, or {@code null} when no data row exists
 */
@Override
public DatabaseQueueMessageBody loadMessageData(final UUID messageId) {

    logger.trace("loadMessageData {}", messageId);

    final Statement query = QueryBuilder.select()
            .from(TABLE_MESSAGE_DATA)
            .where(QueryBuilder.eq(COLUMN_MESSAGE_ID, messageId));

    final Row dataRow = cassandraClient.getApplicationSession().execute(query).one();
    if (dataRow == null) {
        return null;
    }

    return new DatabaseQueueMessageBody(
            dataRow.getBytes(COLUMN_MESSAGE_DATA),
            dataRow.getString(COLUMN_CONTENT_TYPE));
}
ResultSet resultSet; if (includeTasks) { resultSet = session.execute(selectWorkflowWithTasksStatement.bind(UUID.fromString(workflowId), DEFAULT_SHARD_ID)); List<Task> tasks = new ArrayList<>(); resultSet = session.execute(selectWorkflowStatement.bind(UUID.fromString(workflowId))); workflow = Optional.ofNullable(resultSet.one()) .map(row -> { Workflow wf = readValue(row.getString(PAYLOAD_KEY), Workflow.class);
private V readValueCQL(CacheScope scope, K key, TypeReference typeRef){ Preconditions.checkNotNull(scope, "scope is required"); Preconditions.checkNotNull(key, "key is required"); final String rowKeyString = scope.getApplication().getUuid().toString(); final int bucket = BUCKET_LOCATOR.getCurrentBucket(rowKeyString); // determine column name based on K key to string final String columnName = key.toString(); final Clause inKey = QueryBuilder.eq("key", getPartitionKey(scope, rowKeyString, bucket) ); final Clause inColumn = QueryBuilder.eq("column1", DataType.text().serialize(columnName, ProtocolVersion.NEWEST_SUPPORTED) ); final Statement statement = QueryBuilder.select().all().from(SCOPED_CACHE_TABLE) .where(inKey) .and(inColumn) .setConsistencyLevel(cassandraConfig.getDataStaxReadCl()); final ResultSet resultSet = session.execute(statement); final com.datastax.driver.core.Row row = resultSet.one(); if (row == null){ if(logger.isDebugEnabled()){ logger.debug("Cache value not found for key {}", key ); } return null; } try { return MAPPER.readValue(row.getBytes("value").array(), typeRef); } catch (IOException ioe) { logger.error("Unable to read cached value", ioe); throw new RuntimeException("Unable to read cached value", ioe); } }
@Override public Session get(final Builder builder) { ResultSet rs = session .execute(new BoundStatement(selectSQL.apply(tableName)).bind(builder.sessionId())); return Optional.ofNullable(rs.one()) .map(row -> { long createdAt = row.getTimestamp(CREATED_AT).getTime(); long accessedAt = row.getTimestamp(ACCESSED_AT).getTime(); long savedAt = row.getTimestamp(SAVED_AT).getTime(); Map<String, String> attributes = row.getMap(ATTRIBUTES, String.class, String.class); Session session = builder .accessedAt(accessedAt) .createdAt(createdAt) .savedAt(savedAt) .set(attributes) .build(); // touch ttl if (timeout > 0) { save(session); } return session; }) .orElse(null); }
/**
 * Fetches the raw stored bytes for a map entry.
 *
 * @param scope            map scope identifying the owning map
 * @param key              entry key
 * @param consistencyLevel read consistency level to apply
 * @return the stored bytes, or {@code null} when the entry does not exist
 */
private ByteBuffer getValueCQL(MapScope scope, String key, final ConsistencyLevel consistencyLevel) {

    final Statement query = QueryBuilder.select().all().from(MAP_ENTRIES_TABLE)
            .where(QueryBuilder.in("key", getMapEntryPartitionKey(scope, key)))
            .setConsistencyLevel(consistencyLevel);

    final com.datastax.driver.core.Row entryRow = session.execute(query).one();
    if (entryRow == null) {
        return null;
    }
    return entryRow.getBytes("value");
}
/**
 * Creates the given table if it does not already exist.
 *
 * @param tableDefinition  table to create (carries keyspace, name, and CQL)
 * @param forceCheckSchema when {@code true}, check existence by querying the
 *                         system schema tables directly instead of trusting the
 *                         driver's cached cluster metadata
 * @throws Exception if the existence check or the CREATE statement fails
 */
private void createTable(TableDefinition tableDefinition, boolean forceCheckSchema) throws Exception {

    boolean exists;
    if(!forceCheckSchema){
        // Fast path: consult the driver's locally cached cluster metadata.
        exists = dataStaxCluster.getClusterSession().getCluster()
            .getMetadata()
            .getKeyspace(CQLUtils.quote( tableDefinition.getKeyspace() ) )
            .getTable( tableDefinition.getTableName() ) != null;
    }else{
        // Authoritative path: query the schema tables on the server.
        // NOTE(review): system.schema_columnfamilies is the pre-3.0 schema table —
        // presumably this targets Cassandra 2.x; verify against the supported versions.
        exists = dataStaxCluster.getClusterSession()
            .execute("select * from system.schema_columnfamilies where keyspace_name='"+tableDefinition.getKeyspace()
                +"' and columnfamily_name='"+CQLUtils.unquote(tableDefinition.getTableName())+"'").one() != null;
    }

    if( exists ){
        logger.info("Not creating table {}, it already exists.", tableDefinition.getTableName());
        return;
    }

    String CQL = tableDefinition.getTableCQL(cassandraFig, TableDefinition.ACTION.CREATE);
    if (logger.isDebugEnabled()) {
        logger.debug(CQL);
    }

    // Route the CREATE through the session bound to the matching keyspace.
    if ( tableDefinition.getKeyspace().equals( cassandraFig.getApplicationKeyspace() )) {
        dataStaxCluster.getApplicationSession().execute( CQL );
    } else {
        dataStaxCluster.getApplicationLocalSession().execute( CQL );
    }

    logger.info("Created table: {} in keyspace {}", tableDefinition.getTableName(), tableDefinition.getKeyspace());
}
/** * Execute CQL that will create the keyspace if it doesn't exist and alter it if it does. * @throws Exception * @param forceCheck */ @Override public synchronized void createApplicationKeyspace(boolean forceCheck) throws Exception { boolean exists; if(!forceCheck) { // this gets info from client's metadata exists = getClusterSession().getCluster().getMetadata() .getKeyspace(CQLUtils.quote(cassandraConfig.getApplicationKeyspace())) != null; }else{ exists = getClusterSession() .execute("select * from system.schema_keyspaces where keyspace_name = '"+cassandraConfig.getApplicationKeyspace()+"'") .one() != null; } if(exists){ logger.info("Not creating keyspace {}, it already exists.", cassandraConfig.getApplicationKeyspace()); return; } final String createApplicationKeyspace = String.format( "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = %s", CQLUtils.quote( cassandraConfig.getApplicationKeyspace()), CQLUtils.getFormattedReplication( cassandraConfig.getStrategy(), cassandraConfig.getStrategyOptions()) ); getClusterSession().execute(createApplicationKeyspace); waitForSchemaAgreement(); logger.info("Created keyspace: {}", cassandraConfig.getApplicationKeyspace()); }
/**
 * Loads a queue definition by name.
 *
 * @param name queue name
 * @return the queue definition, or {@code null} when no such queue exists
 */
@Override
public DatabaseQueue getQueue(String name) {

    logger.trace( "getQueue " + name );

    final Statement query = QueryBuilder.select().all().from(TABLE_QUEUES)
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, name));

    final Row queueRow = cassandraClient.getApplicationSession().execute(query).one();
    if (queueRow == null) {
        return null;
    }

    return new DatabaseQueue(
            queueRow.getString(COLUMN_QUEUE_NAME),
            queueRow.getString(COLUMN_REGIONS),
            queueRow.getString(COLUMN_DEFAULT_DESTINATIONS),
            queueRow.getLong(COLUMN_DEFAULT_DELAY_MS),
            queueRow.getInt(COLUMN_RETRY_COUNT),
            queueRow.getInt(COLUMN_HANDLING_TIMEOUT_SEC),
            queueRow.getString(COLUMN_DEAD_LETTER_QUEUE));
}
ResultSet rs = cassandraClient.getApplicationSession().execute( query ); final PagingState newPagingState = rs.getExecutionInfo().getPagingState(); int numReturned = rs.getAvailableWithoutFetching(); for ( int i=0; i<numReturned; i++ ) { Row row = rs.one(); TransferLog tlog = new TransferLog( row.getString( COLUMN_QUEUE_NAME ),
/**
 * Loads the persisted state of the given shard (matched by queue name, region,
 * active flag, and shard id) from the table for the shard's type.
 *
 * @param shard shard whose identifying fields drive the lookup
 * @return the stored shard (including its pointer), or {@code null} when absent
 */
public Shard loadShard(final Shard shard) {

    final Statement query = QueryBuilder.select().from(getTableName(shard.getType()))
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, shard.getQueueName()))
            .and(QueryBuilder.eq(COLUMN_REGION, shard.getRegion()))
            .and(QueryBuilder.eq(COLUMN_ACTIVE, 1))
            .and(QueryBuilder.eq(COLUMN_SHARD_ID, shard.getShardId()));

    final Row shardRow = cassandraClient.getQueueMessageSession().execute(query).one();
    if (shardRow == null) {
        return null;
    }

    return new Shard(
            shardRow.getString(COLUMN_QUEUE_NAME),
            shardRow.getString(COLUMN_REGION),
            shard.getType(),
            shardRow.getLong(COLUMN_SHARD_ID),
            shardRow.getUUID(COLUMN_POINTER));
}
/**
 * Verifies that when the timestamp generator yields no value (Long.MIN_VALUE),
 * the server assigns its own write timestamp, which must be at or after the
 * client-side time captured just before the insert.
 */
@Test(groups = "short")
public void should_use_server_side_timestamp_if_none_specified() {
    timestampFromGenerator = Long.MIN_VALUE;
    // writetime() is reported in microseconds, hence the * 1000
    long lowerBoundMicros = System.currentTimeMillis() * 1000;

    session().execute("INSERT INTO foo (k, v) VALUES (1, 1)");

    long writeTime = session().execute("SELECT writeTime(v) FROM foo WHERE k = 1").one().getLong(0);
    assertTrue(writeTime >= lowerBoundMicros);
}