/**
 * Records a metric name in the in-memory set of new metrics and queues an insert
 * into the string index under the well-known metric-names row.
 *
 * @param metricName name of the metric to index
 */
public void addMetricName(String metricName)
{
	m_newMetrics.add(metricName);

	BoundStatement insert = new BoundStatement(m_clusterConnection.psStringIndexInsert);
	insert.setBytesUnsafe(0, ByteBuffer.wrap(ROW_KEY_METRIC_NAMES.getBytes(UTF_8)));
	insert.setString(1, metricName);
	insert.setConsistencyLevel(m_consistencyLevel);

	metricNamesBatch.add(insert);
}
/**
 * Queues an idempotent delete of a single data point column.
 *
 * @param rowKey     row key identifying the metric/partition the point lives in
 * @param columnTime column name (offset time) of the point to delete
 * @throws IOException declared for interface compatibility
 */
public void deleteDataPoint(DataPointsRowKey rowKey, int columnTime) throws IOException
{
	// Column names are stored as raw 4-byte big-endian ints.
	ByteBuffer column = ByteBuffer.allocate(4);
	column.putInt(columnTime);
	column.rewind();

	BoundStatement delete = new BoundStatement(m_clusterConnection.psDataPointsDelete);
	delete.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));
	delete.setBytesUnsafe(1, column);
	delete.setConsistencyLevel(m_consistencyLevel);
	delete.setIdempotent(true); // safe to retry on timeout

	addBoundStatement(delete);
}
private List<Long> createQueryKeyList(ClusterConnection cluster, String metricName, long startTime, long endTime) { List<Long> ret = new ArrayList<>(); if (cluster.psRowKeyTimeQuery != null) //cluster may be old { BoundStatement statement = new BoundStatement(cluster.psRowKeyTimeQuery); statement.setString(0, metricName); statement.setTimestamp(1, new Date(CassandraDatastore.calculateRowTime(startTime))); statement.setTimestamp(2, new Date(endTime)); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); //printHosts(m_loadBalancingPolicy.newQueryPlan(m_keyspace, statement)); ResultSet rows = cluster.execute(statement); while (!rows.isExhausted()) { ret.add(rows.one().getTimestamp(0).getTime()); } } return ret; }
/**
 * Queries the string index for {@code key} on every cluster and returns the merged,
 * de-duplicated set of values.
 *
 * @param key index row to read
 * @return distinct strings found across all clusters
 * @throws DatastoreException if any of the per-cluster queries fails
 */
private Iterable<String> queryStringIndex(final String key) throws DatastoreException
{
	List<ResultSetFuture> futures = queryClusters((cluster) -> {
		BoundStatement boundStatement = new BoundStatement(cluster.psStringIndexQuery);
		boundStatement.setBytesUnsafe(0, serializeString(key));
		boundStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
		return cluster.executeAsync(boundStatement);
	});

	ListenableFuture<List<ResultSet>> allResults = Futures.allAsList(futures);

	Set<String> ret = new HashSet<>();
	try
	{
		for (ResultSet resultSet : allResults.get())
		{
			while (!resultSet.isExhausted())
				ret.add(resultSet.one().getString(0));
		}
	}
	catch (InterruptedException e)
	{
		// Fix: the original broad catch swallowed the thread's interrupt status;
		// restore it so callers up the stack can still observe the interruption.
		Thread.currentThread().interrupt();
		throw new DatastoreException("CQL Query failure", e);
	}
	catch (Exception e)
	{
		throw new DatastoreException("CQL Query failure", e);
	}
	return ret;
}
// NOTE(review): fragment of a larger method — negStatement/posStatement/statement are
// declared outside this view. Looks like a row-key scan split into a negative-time half
// and a positive-time half around zero, plus two other query variants — TODO confirm
// against the enclosing method.
negStatement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName));
setStartEndKeys(negStatement, metricName, startTime, -1L); // negative half: [startTime, -1]
negStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
posStatement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName));
setStartEndKeys(posStatement, metricName, 0L, endTime); // positive half: [0, endTime]
posStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
statement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName));
setStartEndKeys(statement, metricName, startTime, endTime); // single full-range variant
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
statement.setString(0, metricName);
statement.setTimestamp(1, new Date(keyTime)); // row-key-time variant of the query
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
/**
 * Queues an idempotent insert of one data point value with a TTL.
 *
 * @param rowKey     row key identifying the metric/partition
 * @param columnTime column name (offset time) for the point
 * @param dataPoint  the value to serialize and store
 * @param ttl        time-to-live, in seconds, bound to the insert
 * @throws IOException if serializing the data point value fails
 */
public void addDataPoint(DataPointsRowKey rowKey, int columnTime, DataPoint dataPoint, int ttl) throws IOException
{
	KDataOutput valueBuffer = new KDataOutput();
	dataPoint.writeValueToBuffer(valueBuffer);

	// Column names are stored as raw 4-byte big-endian ints.
	ByteBuffer column = ByteBuffer.allocate(4);
	column.putInt(columnTime);
	column.rewind();

	BoundStatement insert = new BoundStatement(m_clusterConnection.psDataPointsInsert);
	insert.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));
	insert.setBytesUnsafe(1, column);
	insert.setBytesUnsafe(2, ByteBuffer.wrap(valueBuffer.getBytes()));
	insert.setInt(3, ttl);
	insert.setLong(4, m_now);
	insert.setConsistencyLevel(m_consistencyLevel);
	insert.setIdempotent(true); // safe to retry on timeout

	addBoundStatement(insert);
}
/**
 * Lists all service keys stored for the given service.
 *
 * @throws DatastoreException if the feature is unavailable on this Cassandra version
 */
@Override
public Iterable<String> listServiceKeys(String service) throws DatastoreException
{
	if (m_metaCluster.psServiceIndexListServiceKeys == null)
	{
		throw new DatastoreException("List Service Keys is not available on this version of Cassandra.");
	}

	BoundStatement query = new BoundStatement(m_metaCluster.psServiceIndexListServiceKeys);
	query.setString(0, service);
	query.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());

	List<String> serviceKeys = new ArrayList<>();
	ResultSet resultSet = m_metaCluster.execute(query);
	while (!resultSet.isExhausted())
		serviceKeys.add(resultSet.one().getString(0));

	return serviceKeys;
}
@Override public Iterable<String> listKeys(String service, String serviceKey) throws DatastoreException { List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeys); statement.setString(0, service); statement.setString(1, serviceKey); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
// NOTE(review): tail of a larger lambda — boundStatement is created above this view.
// Binds a prefix range: [prefix, serializeEndString(prefix)) — presumably the
// end-of-prefix sentinel; verify against serializeEndString.
boundStatement.setBytesUnsafe(1, serializeString(prefix));
boundStatement.setBytesUnsafe(2, serializeEndString(prefix));
boundStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
return cluster.executeAsync(boundStatement);
});
/**
 * Reads a single (service, serviceKey, key) entry from the service index.
 *
 * @return the stored value plus its time column, or null when no row exists
 */
@Override
public ServiceKeyValue getValue(String service, String serviceKey, String key) throws DatastoreException
{
	BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexGet);
	statement.setString(0, service);
	statement.setString(1, serviceKey);
	statement.setString(2, key);
	statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());
	ResultSet resultSet = m_metaCluster.execute(statement);
	Row row = resultSet.one();
	// NOTE(review): in the DataStax 3.x driver Row.getTime() returns a CQL `time`
	// (nanoseconds since midnight); wrapping it in new Date(millis) looks suspect —
	// confirm the column type (timestamp vs time) before relying on this Date.
	if (row != null)
		return new ServiceKeyValue(row.getString(0), new Date(row.getTime(1)));
	return null;
}
/**
 * Deletes the data point columns of one row that fall inside [start, end] on every
 * cluster. Clusters that support the range-delete prepared statement get a direct
 * range delete; older clusters fall back to querying the row and deleting point by
 * point via DeletingCallback.
 */
private void deletePartialRow(DataPointsRowKey rowKey, long start, long end) throws DatastoreException
{
	queryClusters((cluster) -> {
		if (cluster.psDataPointsDeleteRange != null)
		{
			BoundStatement statement = new BoundStatement(cluster.psDataPointsDeleteRange);
			statement.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));
			// Column names are raw 4-byte ints: bind the start column...
			ByteBuffer b = ByteBuffer.allocate(4);
			b.putInt(getColumnName(rowKey.getTimestamp(), start));
			b.rewind();
			statement.setBytesUnsafe(1, b);
			// ...and the end column of the range.
			b = ByteBuffer.allocate(4);
			b.putInt(getColumnName(rowKey.getTimestamp(), end));
			b.rewind();
			statement.setBytesUnsafe(2, b);
			// NOTE(review): this delete runs at the READ consistency level while other
			// writes in this file use the write level — confirm whether intentional.
			statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
			cluster.executeAsync(statement);
		}
		else
		{
			//note, with multiple old clusters this query could be done multiple times
			DatastoreMetricQuery deleteQuery = new QueryMetric(start, end, 0, rowKey.getMetricName());
			cqlQueryWithRowKeys(deleteQuery, new DeletingCallback(deleteQuery.getName()),
					Collections.singletonList(rowKey).iterator());
		}
		return null;
	});
}
/**
 * Stores (or overwrites) a value under (service, serviceKey, key) in the service index.
 */
@Override
public void setValue(String service, String serviceKey, String key, String value) throws DatastoreException
{
	BoundStatement insert = new BoundStatement(m_metaCluster.psServiceIndexInsert);
	insert.setString(0, service);
	insert.setString(1, serviceKey);
	insert.setString(2, key);
	insert.setString(3, value);
	insert.setConsistencyLevel(m_metaCluster.getWriteConsistencyLevel());

	m_metaCluster.execute(insert);
}
@Override public Iterable<String> listKeys(String service, String serviceKey, String keyStartsWith) throws DatastoreException { String begin = keyStartsWith; String end = keyStartsWith + Character.MAX_VALUE; List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeysPrefix); statement.setString(0, service); statement.setString(1, serviceKey); statement.setString(2, begin); statement.setString(3, end); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
// NOTE(review): fragment — boundStatement and query are declared outside this view.
boundStatement.setInt(3, query.getLimit()); // cap the number of rows returned
boundStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
@Override public void deleteKey(String service, String serviceKey, String key) throws DatastoreException { BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexDeleteKey); statement.setString(0, service); statement.setString(1, serviceKey); statement.setString(2, key); statement.setConsistencyLevel(m_metaCluster.getWriteConsistencyLevel()); m_metaCluster.execute(statement); // Update modification time statement = new BoundStatement(m_metaCluster.psServiceIndexInsertModifiedTime); statement.setString(0, service); statement.setString(1, serviceKey); m_metaCluster.execute(statement); }
// NOTE(review): several disjoint statement runs from the middle of a larger lambda —
// statement/cluster/rowKey/deleteQuery are all declared outside this view, and
// `statement` appears to be rebound between runs. Comments below are best-effort;
// confirm against the enclosing method.
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.execute(statement);
// Deletes keyed by (metric name, serialized row key).
statement.setBytesUnsafe(0, serializeString(rowKey.getMetricName()));
statement.setBytesUnsafe(1, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.execute(statement);
// Variant that also binds data type and tags.
statement.setString(2, rowKey.getDataType());
statement.setMap(3, rowKey.getTags());
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.execute(statement);
// Variant keyed by metric name and row timestamp.
statement.setString(0, rowKey.getMetricName());
statement.setTimestamp(1, new Date(rowKey.getTimestamp()));
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.execute(statement);
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.executeAsync(statement);
// Removes the metric from the metric-names string index.
statement.setBytesUnsafe(0, serializeString(ROW_KEY_METRIC_NAMES));
statement.setBytesUnsafe(1, serializeString(deleteQuery.getName()));
statement.setConsistencyLevel(cluster.getReadConsistencyLevel());
cluster.executeAsync(statement);
return null;
/**
 * Prepares and executes {@code statement} on the given session, optionally overriding
 * the consistency and serial-consistency levels, then returns the first recorded
 * prepared-statement execution from node 1's activity log, asserting its text matches.
 */
public PreparedStatementExecution executePrepared(
    Session session, String statement, ConsistencyLevel level, ConsistencyLevel serialLevel) {
  PreparedStatement prepared = session.prepare(statement);
  BoundStatement bound = prepared.bind();
  if (level != null) {
    bound.setConsistencyLevel(level);
  }
  if (serialLevel != null) {
    bound.setSerialConsistencyLevel(serialLevel);
  }
  session.execute(bound);

  List<PreparedStatementExecution> executions =
      sCluster.node(1).activityClient().retrievePreparedStatementExecutions();
  PreparedStatementExecution execution = executions.get(0);
  assertTrue(execution.getPreparedStatementText().equals(statement));
  return execution;
}
@Test(groups = "short") public void should_execute_prepared_statements() throws Exception { // Simple calls to all versions of the execute/executeAsync methods for prepared statements // Note: the goal is only to exercice the Session methods, PreparedStatementTest have better // prepared statement tests. String key = "execute_prepared_test"; ResultSet rs = session() .execute( String.format( Locale.US, "INSERT INTO %s (k, t, i, f) VALUES ('%s', '%s', %d, %f)", TABLE2, key, "foo", 42, 24.03f)); assertThat(rs.isExhausted()).isTrue(); PreparedStatement p = session().prepare(String.format(TestUtils.SELECT_ALL_FORMAT + " WHERE k = ?", TABLE2)); BoundStatement bs = p.bind(key); // executePrepared checkExecuteResultSet(session().execute(bs), key); checkExecuteResultSet(session().execute(bs.setConsistencyLevel(ConsistencyLevel.ONE)), key); // executePreparedAsync checkExecuteResultSet(session().executeAsync(bs).getUninterruptibly(), key); checkExecuteResultSet( session().executeAsync(bs.setConsistencyLevel(ConsistencyLevel.ONE)).getUninterruptibly(), key); }
// NOTE(review): single-statement fragment. Bridges two distinct ConsistencyLevel enums
// (this.consistencyLevel vs the driver's) by round-tripping through the constant name;
// valueOf throws IllegalArgumentException if the enum names ever diverge — TODO confirm
// both enums stay in sync.
boundStatement.setConsistencyLevel(ConsistencyLevel.valueOf(this.consistencyLevel.name()));
/**
 * Creates a new {@code BoundStatement} from the provided prepared statement.
 *
 * <p>All bindable values start out UNSET, and the prepared statement's per-statement
 * options (consistency levels, tracing, retry policy, payload, idempotence) are copied
 * onto this instance when they were explicitly set on the prepared statement.
 *
 * @param statement the prepared statement from which to create a {@code BoundStatement}.
 */
public BoundStatement(PreparedStatement statement) {
  this.statement = statement;
  // One value slot per bind variable; UNSET sentinels distinguish "never bound" from null.
  this.wrapper = new DataWrapper(this, statement.getVariables().size());
  for (int i = 0; i < wrapper.values.length; i++) {
    wrapper.values[i] = UNSET;
  }
  // Only propagate options the prepared statement explicitly carries, so unset ones
  // continue to fall through to cluster-wide defaults.
  if (statement.getConsistencyLevel() != null)
    this.setConsistencyLevel(statement.getConsistencyLevel());
  if (statement.getSerialConsistencyLevel() != null)
    this.setSerialConsistencyLevel(statement.getSerialConsistencyLevel());
  if (statement.isTracing()) this.enableTracing();
  if (statement.getRetryPolicy() != null) this.setRetryPolicy(statement.getRetryPolicy());
  if (statement.getOutgoingPayload() != null)
    this.setOutgoingPayload(statement.getOutgoingPayload());
  else
    // propagate incoming payload as outgoing payload, if no outgoing payload has been explicitly
    // set
    this.setOutgoingPayload(statement.getIncomingPayload());
  this.codecRegistry = statement.getCodecRegistry();
  // isIdempotent() is a tri-state Boolean; only copy when explicitly set.
  if (statement.isIdempotent() != null) {
    this.setIdempotent(statement.isIdempotent());
  }
}