/**
 * Reports whether the underlying Cassandra result set has no rows left to read.
 *
 * @return {@code true} once every row of {@code resultSet} has been consumed
 * @throws IOException declared for interface compatibility; not thrown by this implementation
 */
@Override
public boolean reachedEnd() throws IOException
{
	return resultSet.isExhausted();
}
/**
 * Advances the cursor to the next row of the result set.
 * Updates {@code currentRow} and bumps {@code count} when a row is available.
 *
 * @return {@code true} if the cursor moved to a new row, {@code false} when exhausted
 */
@Override
public boolean advanceNextPosition()
{
	// Guard clause: nothing left to read.
	if (rs.isExhausted())
	{
		return false;
	}

	currentRow = rs.one();
	count++;
	return true;
}
/**
 * Returns whether another row key is available, pre-fetching it into
 * {@code m_nextKey} as a side effect. Walks the current result set and, when it
 * is drained, advances through the remaining result sets in {@code m_resultSets}.
 * When everything is exhausted, reports the accumulated raw row-key count.
 *
 * @return {@code true} if a next key was found
 */
@Override public boolean hasNext()
{
	// A key already fetched by a previous call is still pending.
	if (m_nextKey != null)
		return true;

	// Keep pulling until a key is produced or both the current result set and
	// the iterator of further result sets are drained.
	while (m_currentResultSet != null && (!m_currentResultSet.isExhausted() || m_resultSets.hasNext()))
	{
		m_nextKey = nextKeyFromIterator(m_currentResultSet);
		if (m_nextKey != null)
			break;
		// Current result set produced nothing; move on to the next one if any.
		if (m_resultSets.hasNext())
			m_currentResultSet = m_resultSets.next();
	}

	if (m_nextKey == null)
	{
		//todo make this a common atomic value
		ThreadReporter.addDataPoint(CassandraDatastore.RAW_ROW_KEY_COUNT, m_rawRowKeyCount);
	}

	return (m_nextKey != null);
}
/**
 * Looks up every value stored under the given key in the string index, issuing
 * the query to all configured clusters in parallel and merging the results.
 *
 * @param key index key to look up
 * @return de-duplicated set of strings found across all clusters
 * @throws DatastoreException if any cluster query fails or the calling thread is interrupted
 */
private Iterable<String> queryStringIndex(final String key) throws DatastoreException
{
	// Fire the same bound query at every cluster asynchronously.
	List<ResultSetFuture> futures = queryClusters((cluster) ->
	{
		BoundStatement boundStatement = new BoundStatement(cluster.psStringIndexQuery);
		boundStatement.setBytesUnsafe(0, serializeString(key));
		boundStatement.setConsistencyLevel(cluster.getReadConsistencyLevel());
		return cluster.executeAsync(boundStatement);
	});

	ListenableFuture<List<ResultSet>> listListenableFuture = Futures.allAsList(futures);

	Set<String> ret = new HashSet<>();
	try
	{
		for (ResultSet resultSet : listListenableFuture.get())
		{
			while (!resultSet.isExhausted())
			{
				Row row = resultSet.one();
				ret.add(row.getString(0));
			}
		}
	}
	catch (InterruptedException e)
	{
		// Restore the interrupt flag so callers can observe the interruption;
		// the previous broad catch silently discarded it.
		Thread.currentThread().interrupt();
		throw new DatastoreException("CQL Query failure", e);
	}
	catch (Exception e)
	{
		throw new DatastoreException("CQL Query failure", e);
	}

	return ret;
}
private List<Long> createQueryKeyList(ClusterConnection cluster, String metricName, long startTime, long endTime) { List<Long> ret = new ArrayList<>(); if (cluster.psRowKeyTimeQuery != null) //cluster may be old { BoundStatement statement = new BoundStatement(cluster.psRowKeyTimeQuery); statement.setString(0, metricName); statement.setTimestamp(1, new Date(CassandraDatastore.calculateRowTime(startTime))); statement.setTimestamp(2, new Date(endTime)); statement.setConsistencyLevel(cluster.getReadConsistencyLevel()); //printHosts(m_loadBalancingPolicy.newQueryPlan(m_keyspace, statement)); ResultSet rows = cluster.execute(statement); while (!rows.isExhausted()) { ret.add(rows.one().getTimestamp(0).getTime()); } } return ret; }
while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>();
@Override public Iterable<String> listKeys(String service, String serviceKey) throws DatastoreException { List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeys); statement.setString(0, service); statement.setString(1, serviceKey); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
while (!iterator.isExhausted())
/**
 * Lists all service keys stored for the given service.
 *
 * @param service service name
 * @return service keys in query order
 * @throws DatastoreException if the underlying Cassandra version does not
 *         support this operation or the query fails
 */
@Override
public Iterable<String> listServiceKeys(String service) throws DatastoreException
{
	// The prepared statement is absent on Cassandra versions that cannot serve this query.
	if (m_metaCluster.psServiceIndexListServiceKeys == null)
	{
		throw new DatastoreException("List Service Keys is not available on this version of Cassandra.");
	}

	BoundStatement query = new BoundStatement(m_metaCluster.psServiceIndexListServiceKeys);
	query.setString(0, service);
	query.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel());

	List<String> serviceKeys = new ArrayList<>();
	ResultSet resultSet = m_metaCluster.execute(query);
	while (!resultSet.isExhausted())
	{
		serviceKeys.add(resultSet.one().getString(0));
	}

	return serviceKeys;
}
while (!resultSet.isExhausted())
if (rs.isExhausted()) { return Status.NOT_FOUND;
@Override public Iterable<String> listKeys(String service, String serviceKey, String keyStartsWith) throws DatastoreException { String begin = keyStartsWith; String end = keyStartsWith + Character.MAX_VALUE; List<String> ret = new ArrayList<>(); BoundStatement statement = new BoundStatement(m_metaCluster.psServiceIndexListKeysPrefix); statement.setString(0, service); statement.setString(1, serviceKey); statement.setString(2, begin); statement.setString(3, end); statement.setConsistencyLevel(m_metaCluster.getReadConsistencyLevel()); ResultSet resultSet = m_metaCluster.execute(statement); while (!resultSet.isExhausted()) { String key = resultSet.one().getString(0); if (key != null) { // The last row for the primary key doesn't get deleted and has a null key and isExhausted still return false. So check for null ret.add(key); } } return ret; }
if (result.isExhausted()) return; dataPointFactory = m_kairosDataPointFactory.getFactoryForDataStoreType(m_rowKey.getDataType()); while (!result.isExhausted())
/**
 * Returns an opaque token representing the current paging position of the
 * result set, or {@code null} when there is no further page to fetch.
 *
 * @return serialized paging state, or {@code null} if absent or the result set is exhausted
 */
@Override protected String pageState()
{
	PagingState page = this.results.getExecutionInfo().getPagingState();
	// No driver paging state, or a fully consumed result set, means no next page.
	if (page == null || this.results.isExhausted())
	{
		return null;
	}
	return page.toString();
}
}
@Test(groups = "short") public void singleUpdateTest() { session().execute("TRUNCATE test"); session().execute("INSERT INTO test (k1, k2, v) VALUES (1, 1, 1)"); ResultSet rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 2"); assertFalse(rs.wasApplied()); // Ensure that reading the status does not consume a row: assertFalse(rs.isExhausted()); rs = session().execute("UPDATE test SET v = 3 WHERE k1 = 1 AND k2 = 1 IF v = 1"); assertTrue(rs.wasApplied()); assertFalse(rs.isExhausted()); // Non-conditional statement rs = session().execute("UPDATE test SET v = 4 WHERE k1 = 1 AND k2 = 1"); assertTrue(rs.wasApplied()); }
/**
 * Asserts that the result set contains exactly one row carrying the expected
 * key and the fixed fixture values.
 *
 * @param rs  result set to check; consumed by this call
 * @param key expected value of the "k" column
 */
static void checkExecuteResultSet(ResultSet rs, String key)
{
	assertThat(rs.isExhausted()).isFalse();

	Row onlyRow = rs.one();

	// Consuming the single row must drain the result set.
	assertThat(rs.isExhausted()).isTrue();

	assertThat(onlyRow.getString("k")).isEqualTo(key);
	assertThat(onlyRow.getString("t")).isEqualTo("foo");
	assertThat(onlyRow.getInt("i")).isEqualTo(42);
	assertThat(onlyRow.getFloat("f")).isEqualTo(24.03f, offset(0.1f));
}
assertFalse(rs.isExhausted()); assertEquals(rs.getAvailableWithoutFetching(), 5 - (i % 5)); assertEquals(rs.one().getInt(0), i); assertTrue(rs.isExhausted()); assertTrue(rs.isFullyFetched());
/**
 * Checks Cassandra health by querying the node's release version from the
 * system.local table. Reports UP either way; the version is attached as a
 * detail when a row is returned.
 *
 * @param builder health builder to populate
 * @throws Exception if the CQL query fails
 */
@Override
protected void doHealthCheck(Health.Builder builder) throws Exception
{
	Select select = QueryBuilder.select("release_version").from("system", "local");
	ResultSet results = this.cassandraOperations.getCqlOperations()
			.queryForResultSet(select);

	if (!results.isExhausted())
	{
		// A row came back: include the reported version as a health detail.
		builder.up().withDetail("version", results.one().getString(0));
		return;
	}

	builder.up();
}
/**
 * Verifies that null values can be inserted through a prepared statement both
 * by passing null directly to {@code bind()} and via {@code setString(..., null)}.
 */
@Test(groups = "short")
public void prepareWithNullValuesTest() throws Exception
{
	PreparedStatement prepared = session().prepare("INSERT INTO " + SIMPLE_TABLE2 + "(k, v) VALUES (?, ?)");

	// Null passed positionally to bind().
	session().execute(prepared.bind("prepWithNull1", null));

	// Null assigned through the named setter.
	BoundStatement bound = prepared.bind();
	bound.setString("k", "prepWithNull2");
	bound.setString("v", null);
	session().execute(bound);

	ResultSet resultSet =
		session()
			.execute(
				"SELECT * FROM " + SIMPLE_TABLE2 + " WHERE k IN ('prepWithNull1', 'prepWithNull2')");
	Row firstRow = resultSet.one();
	Row secondRow = resultSet.one();
	assertTrue(resultSet.isExhausted());

	assertEquals(firstRow.getString("k"), "prepWithNull1");
	assertEquals(firstRow.getString("v"), null);
	assertEquals(secondRow.getString("k"), "prepWithNull2");
	assertEquals(secondRow.getString("v"), null);
}
@Test(groups = "short") public void simpleBatchTest() { try { PreparedStatement st = session().prepare("INSERT INTO test (k, v) VALUES (?, ?)"); BatchStatement batch = new BatchStatement(); batch.add(new SimpleStatement("INSERT INTO test (k, v) VALUES (?, ?)", "key1", 0)); batch.add(st.bind("key1", 1)); batch.add(st.bind("key2", 0)); assertEquals(3, batch.size()); session().execute(batch); ResultSet rs = session().execute("SELECT * FROM test"); Row r; r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 0); r = rs.one(); assertEquals(r.getString("k"), "key1"); assertEquals(r.getInt("v"), 1); r = rs.one(); assertEquals(r.getString("k"), "key2"); assertEquals(r.getInt("v"), 0); assertTrue(rs.isExhausted()); } catch (UnsupportedFeatureException e) { // This is expected when testing the protocol v1 assertEquals( cluster().getConfiguration().getProtocolOptions().getProtocolVersion(), ProtocolVersion.V1); } }