/** Removes the session row identified by {@code id} from the backing table. */
@Override
public void delete(final String id)
{
    final BoundStatement delete = new BoundStatement(deleteSQL.apply(tableName));
    session.execute(delete.bind(id));
}
/**
 * Records {@code metricName} in the in-memory new-metrics set and queues an
 * insert into the string index under the metric-names row key.
 */
public void addMetricName(String metricName)
{
    m_newMetrics.add(metricName);

    BoundStatement insert = new BoundStatement(m_clusterConnection.psStringIndexInsert);
    insert.setBytesUnsafe(0, ByteBuffer.wrap(ROW_KEY_METRIC_NAMES.getBytes(UTF_8)));
    insert.setString(1, metricName);
    insert.setConsistencyLevel(m_consistencyLevel);

    metricNamesBatch.add(insert);
}
/**
 * Queues a delete of a single data-point column.
 *
 * @param rowKey     row the column lives in
 * @param columnTime encoded column timestamp within the row
 * @throws IOException if the row key fails to serialize
 */
public void deleteDataPoint(DataPointsRowKey rowKey, int columnTime) throws IOException
{
    BoundStatement delete = new BoundStatement(m_clusterConnection.psDataPointsDelete);
    delete.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));

    // Column name is the 4-byte big-endian encoding of columnTime.
    ByteBuffer columnName = ByteBuffer.allocate(4);
    columnName.putInt(columnTime);
    columnName.rewind();
    delete.setBytesUnsafe(1, columnName);

    delete.setConsistencyLevel(m_consistencyLevel);
    delete.setIdempotent(true); // a delete is safe to retry

    addBoundStatement(delete);
}
/** Persists the given session (id, timestamps, attributes) into the table. */
@Override
public void save(final Session session)
{
    BoundStatement insert = new BoundStatement(insertSQL.apply(tableName));
    this.session.execute(insert.bind(
            session.id(),
            new Date(session.createdAt()),
            new Date(session.accessedAt()),
            new Date(session.savedAt()),
            session.attributes()));
}
/**
 * Returns the last-modified time for (service, serviceKey), derived from the
 * stored timeuuid, or epoch (new Date(0L)) when no modification row exists.
 *
 * @throws DatastoreException declared for interface compatibility
 */
@Override
public Date getServiceKeyLastModifiedTime(String service, String serviceKey) throws DatastoreException
{
    BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexModificationTime);
    statement.setString(0, service);
    statement.setString(1, serviceKey);
    // Fix: this was the only service-index read that never set a consistency
    // level; align it with getValue/listKeys/listServiceKeys.
    statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());

    ResultSet resultSet = m_metaCluster.execute(statement);
    Row row = resultSet.one();
    if (row != null)
        return new Date(UUIDs.unixTimestamp(row.getUUID(0))); // timeuuid -> epoch millis

    return new Date(0L); // no modification recorded
}
private List<Long> createQueryKeyList(ClusterConnection cluster, String metricName, long startTime, long endTime) { List<Long> ret = new ArrayList<>(); if (cluster.psRowKeyTimeQuery != null) //cluster may be old { BoundStatement statement = new BoundStatement(cluster.psRowKeyTimeQuery); statement.setString(0, metricName); statement.setTimestamp(1, new Date(CassandraDatastore.calculateRowTime(startTime))); statement.setTimestamp(2, new Date(endTime)); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); //printHosts(m_loadBalancingPolicy.newQueryPlan(m_keyspace, statement)); ResultSet rows = cluster.execute(statement); while (!rows.isExhausted()) { ret.add(rows.one().getTimestamp(0).getTime()); } } return ret; }
/**
 * Fans a string-index lookup for {@code key} out to every cluster in parallel
 * and merges the distinct string values found.
 *
 * @throws DatastoreException if any cluster's query fails
 */
private Iterable<String> queryStringIndex(final String key) throws DatastoreException
{
    List<ResultSetFuture> futures = queryClusters((cluster) ->
    {
        BoundStatement query = new BoundStatement(cluster.psStringIndexQuery);
        query.setBytesUnsafe(0, serializeString(key));
        query.setConsistencyLevel(cluster.getReadConsistencyLevel());
        return cluster.executeAsync(query);
    });

    Set<String> merged = new HashSet<String>();
    try
    {
        for (ResultSet resultSet : Futures.allAsList(futures).get())
        {
            while (!resultSet.isExhausted())
                merged.add(resultSet.one().getString(0));
        }
    }
    catch (Exception e)
    {
        throw new DatastoreException("CQL Query failure", e);
    }

    return merged;
}
// Fragment (no enclosing method visible): builds row-key-index lookups for one
// metric.  The first two statements split the key range at zero into a negative
// half [startTime, -1] and a positive half [0, endTime]; the third binds the
// full [startTime, endTime] range; the final statements bind a row-key lookup
// for a single key time.  NOTE(review): `statement` is declared twice in this
// excerpt, so these lines cannot all belong to one scope as shown — they are
// almost certainly drawn from separate branches/methods of the original file.
BoundStatement negStatement = new BoundStatement(cluster.psRowKeyIndexQuery); negStatement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName)); setStartEndKeys(negStatement, metricName, startTime, -1L); BoundStatement posStatement = new BoundStatement(cluster.psRowKeyIndexQuery); posStatement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName)); setStartEndKeys(posStatement, metricName, 0L, endTime); BoundStatement statement = new BoundStatement(cluster.psRowKeyIndexQuery); statement.setBytesUnsafe(0, CassandraDatastore.serializeString(metricName)); setStartEndKeys(statement, metricName, startTime, endTime); BoundStatement statement = new BoundStatement(cluster.psRowKeyQuery); statement.setString(0, metricName); statement.setTimestamp(1, new Date(keyTime));
@Override public void deleteKey(String service, String serviceKey, String key) throws DatastoreException { BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexDeleteKey); statement.setString(0, service); statement.setString(1, serviceKey); statement.setString(2, key); statement.setConsistencyLevel(m_metaCluster.getWriteConsistencyLevel()); m_metaCluster.execute(statement); // Update modification time statement = new BoundStatement(m_metaCluster.psServiceIndexInsertModifiedTime); statement.setString(0, service); statement.setString(1, serviceKey); m_metaCluster.execute(statement); }
@Override public Iterable<String> listKeys(String service, String serviceKey) throws DatastoreException { List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeys); statement.setString(0, service); statement.setString(1, serviceKey); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
/**
 * Queues an insert of one data point.
 *
 * @param rowKey     destination row
 * @param columnTime encoded column timestamp within the row
 * @param dataPoint  value to serialize and store
 * @param ttl        column TTL in seconds
 * @throws IOException if the value or row key fails to serialize
 */
public void addDataPoint(DataPointsRowKey rowKey, int columnTime, DataPoint dataPoint, int ttl) throws IOException
{
    KDataOutput valueBuffer = new KDataOutput();
    dataPoint.writeValueToBuffer(valueBuffer);

    BoundStatement insert = new BoundStatement(m_clusterConnection.psDataPointsInsert);
    insert.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey));

    // Column name is the 4-byte big-endian encoding of columnTime.
    ByteBuffer columnName = ByteBuffer.allocate(4);
    columnName.putInt(columnTime);
    columnName.rewind();
    insert.setBytesUnsafe(1, columnName);

    insert.setBytesUnsafe(2, ByteBuffer.wrap(valueBuffer.getBytes()));
    insert.setInt(3, ttl);
    insert.setLong(4, m_now);
    insert.setConsistencyLevel(m_consistencyLevel);
    insert.setIdempotent(true); // an insert with a fixed timestamp is safe to retry

    addBoundStatement(insert);
}
/**
 * Lists every service key stored for {@code service}.
 *
 * @throws DatastoreException if the cluster is too old to support this query
 */
@Override
public Iterable<String> listServiceKeys(String service) throws DatastoreException
{
    if (m_metaCluster.psServiceIndexListServiceKeys == null)
    {
        throw new DatastoreException("List Service Keys is not available on this version of Cassandra.");
    }

    BoundStatement query = new BoundStatement(m_metaCluster.psServiceIndexListServiceKeys);
    query.setString(0, service);
    query.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());

    List<String> serviceKeys = new ArrayList<>();
    ResultSet resultSet = m_metaCluster.execute(query);
    while (!resultSet.isExhausted())
        serviceKeys.add(resultSet.one().getString(0));

    return serviceKeys;
}
@Override public Session get(final Builder builder) { ResultSet rs = session .execute(new BoundStatement(selectSQL.apply(tableName)).bind(builder.sessionId())); return Optional.ofNullable(rs.one()) .map(row -> { long createdAt = row.getTimestamp(CREATED_AT).getTime(); long accessedAt = row.getTimestamp(ACCESSED_AT).getTime(); long savedAt = row.getTimestamp(SAVED_AT).getTime(); Map<String, String> attributes = row.getMap(ATTRIBUTES, String.class, String.class); Session session = builder .accessedAt(accessedAt) .createdAt(createdAt) .savedAt(savedAt) .set(attributes) .build(); // touch ttl if (timeout > 0) { save(session); } return session; }) .orElse(null); }
/**
 * Reads one service-index entry, or {@code null} when the key does not exist.
 *
 * NOTE(review): column 1 is read with getTime(), which for a CQL `time` column
 * yields nanoseconds since midnight rather than epoch millis - confirm the
 * schema actually stores an epoch-millis value in that column.
 */
@Override
public ServiceKeyValue getValue(String service, String serviceKey, String key) throws DatastoreException
{
    BoundStatement query = new BoundStatement(m_metaCluster.psServiceIndexGet);
    query.setString(0, service);
    query.setString(1, serviceKey);
    query.setString(2, key);
    query.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());

    Row row = m_metaCluster.execute(query).one();
    if (row == null)
        return null;

    return new ServiceKeyValue(row.getString(0), new Date(row.getTime(1)));
}
// Fragment (truncated): a prefix-filtered overload of queryStringIndex.  The
// method body's opening brace and the surrounding cluster loop/lambda that
// defines `cluster` are not visible here; only the statement bindings remain.
// Binds the index key at position 0 and the serialized prefix at position 1 of
// the prefix-query prepared statement.
private Iterable<String> queryStringIndex(final String key, final String prefix) throws DatastoreException BoundStatement boundStatement = new BoundStatement(cluster.psStringIndexPrefixQuery); boundStatement.setBytesUnsafe(0, serializeString(key)); boundStatement.setBytesUnsafe(1, serializeString(prefix));
private void deletePartialRow(DataPointsRowKey rowKey, long start, long end) throws DatastoreException { queryClusters((cluster) -> { if (cluster.psDataPointsDeleteRange != null) { BoundStatement statement = new BoundStatement(cluster.psDataPointsDeleteRange); statement.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey)); ByteBuffer b = ByteBuffer.allocate(4); b.putInt(getColumnName(rowKey.getTimestamp(), start)); b.rewind(); statement.setBytesUnsafe(1, b); b = ByteBuffer.allocate(4); b.putInt(getColumnName(rowKey.getTimestamp(), end)); b.rewind(); statement.setBytesUnsafe(2, b); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); cluster.executeAsync(statement); } else { //note, with multiple old clusters this query could be done multiple times DatastoreMetricQuery deleteQuery = new QueryMetric(start, end, 0, rowKey.getMetricName()); cqlQueryWithRowKeys(deleteQuery, new DeletingCallback(deleteQuery.getName()), Collections.singletonList(rowKey).iterator()); } return null; }); }
/** Writes (service, serviceKey, key) -> value into the service index. */
@Override
public void setValue(String service, String serviceKey, String key, String value) throws DatastoreException
{
    BoundStatement insert = new BoundStatement(m_metaCluster.psServiceIndexInsert);
    insert.setString(0, service);
    insert.setString(1, serviceKey);
    insert.setString(2, key);
    insert.setString(3, value);
    insert.setConsistencyLevel(m_metaCluster.getWriteConsistencyLevel());
    m_metaCluster.execute(insert);
}
// Fragment (dangling else-branches, no enclosing if/method visible): selects
// one of four prepared data-point queries - ascending vs descending order,
// with vs without a row limit.  The conditions deciding between asc/desc and
// limit/no-limit are outside this excerpt.
boundStatement = new BoundStatement(cluster.psDataPointsQueryAscLimit); else boundStatement = new BoundStatement(cluster.psDataPointsQueryDescLimit); boundStatement = new BoundStatement(cluster.psDataPointsQueryAsc); else boundStatement = new BoundStatement(cluster.psDataPointsQueryDesc);
@Override public Iterable<String> listKeys(String service, String serviceKey, String keyStartsWith) throws DatastoreException { String begin = keyStartsWith; String end = keyStartsWith + Character.MAX_VALUE; List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeysPrefix); statement.setString(0, service); statement.setString(1, serviceKey); statement.setString(2, begin); statement.setString(3, end); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
// Fragment (no enclosing method visible): delete statements for purging a
// metric - the data-points row, its row-key-index entry, its row_keys and
// row_key_time_index entries, the whole row-key-index row, and the metric's
// name in the string index.  NOTE(review): `statement` is declared twice and
// several statements are built but never executed in this excerpt, so these
// lines must come from more than one method in the original file.  Also note
// the deletes that do set a consistency level use getReadConsistencyLevel() -
// confirm that is intentional rather than the write level.
BoundStatement statement = new BoundStatement(cluster.psDataPointsDeleteRow); statement.setBytesUnsafe(0, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey)); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); statement = new BoundStatement(cluster.psRowKeyIndexDelete); statement.setBytesUnsafe(0, serializeString(rowKey.getMetricName())); statement.setBytesUnsafe(1, DATA_POINTS_ROW_KEY_SERIALIZER.toByteBuffer(rowKey)); cluster.execute(statement); statement = new BoundStatement(cluster.psRowKeyDelete); statement.setString(0, rowKey.getMetricName()); statement.setTimestamp(1, new Date(rowKey.getTimestamp())); statement = new BoundStatement(cluster.psRowKeyTimeDelete); statement.setString(0, rowKey.getMetricName()); statement.setTimestamp(1, new Date(rowKey.getTimestamp())); BoundStatement statement = new BoundStatement(cluster.psRowKeyIndexDeleteRow); statement.setBytesUnsafe(0, serializeString(deleteQuery.getName())); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); statement = new BoundStatement(cluster.psStringIndexDelete); statement.setBytesUnsafe(0, serializeString(ROW_KEY_METRIC_NAMES)); statement.setBytesUnsafe(1, serializeString(deleteQuery.getName()));