/**
 * Converts a Cassandra row into a map of entity property names to values.
 *
 * @param row the row to deserialize; may be null
 * @return the deserialized properties, or null when the row or its column slice is absent
 */
public static Map<String, Object> deserializeEntityProperties( Row<UUID, String, ByteBuffer> row ) {
    if ( row == null ) {
        return null;
    }
    final ColumnSlice<String, ByteBuffer> slice = row.getColumnSlice();
    return slice == null ? null : deserializeEntityProperties( slice.getColumns(), true, false );
}
/** * This method intentionally swallows ordered execution issues. For some reason, our Time UUID ordering does * not agree with the cassandra comparator as our micros get very close * @param query * @param <K> * @param <UUID> * @param <V> * @return */ protected static <K, UUID, V> List<HColumn<UUID, V>> swallowOrderedExecution( final SliceQuery<K, UUID, V> query ) { try { return query.execute().get().getColumns(); } catch ( HInvalidRequestException e ) { //invalid request. Occasionally we get order issues when there shouldn't be, disregard them. final Throwable invalidRequestException = e.getCause(); if ( invalidRequestException instanceof InvalidRequestException //we had a range error && ( ( InvalidRequestException ) invalidRequestException ).getWhy().contains( "range finish must come after start in the order of traversal" )) { return Collections.emptyList(); } throw e; } }
// Execute the slice query and unwrap the result into its raw column list.
QueryResult<ColumnSlice<N, V>> r = q.execute(); ColumnSlice<N, V> slice = r.get(); List<HColumn<N, V>> results = slice.getColumns();
// Materialize the columns of the previously fetched slice.
List<HColumn<N, V>> results = slice.getColumns();
/**
 * Loads the messages with the given ids via a single multiget and returns them
 * sorted to match the order in which the ids were requested.
 *
 * @param messageIds ids of the messages to fetch
 * @param reversed requested ordering flag (not used by this implementation — TODO confirm intent)
 * @return the deserialized, non-null messages in requested-id order
 */
protected List<Message> loadMessages( Collection<UUID> messageIds, boolean reversed ) {

    Rows<UUID, String, ByteBuffer> messageResults =
            createMultigetSliceQuery( ko, ue, se, be )
                    .setColumnFamily( MESSAGE_PROPERTIES.getColumnFamily() )
                    .setKeys( messageIds )
                    .setRange( null, null, false, ALL_COUNT )
                    .execute().get();

    List<Message> messages = new ArrayList<Message>( messageIds.size() );

    for ( Row<UUID, String, ByteBuffer> messageRow : messageResults ) {
        Message deserialized = deserializeMessage( messageRow.getColumnSlice().getColumns() );
        if ( deserialized != null ) {
            messages.add( deserialized );
        }
    }

    // Multiget results come back in arbitrary order; restore the caller's requested order.
    Collections.sort( messages, new RequestedOrderComparator( messageIds ) );

    return messages;
}
/**
 * Returns the names of every counter recorded in the counter dictionary of the
 * given queue.
 *
 * @param queuePath path of the queue whose counter names are wanted
 * @return the set of counter names; empty when the queue has none
 * @throws Exception on query failure
 */
@Override
public Set<String> getQueueCounterNames( String queuePath ) throws Exception {
    Keyspace ko = cass.getApplicationKeyspace( applicationId );

    SliceQuery<String, String, ByteBuffer> q = createSliceQuery( ko, se, se, be );
    q.setColumnFamily( QueuesCF.QUEUE_DICTIONARIES.toString() );
    // Row key is the queue id combined with the counters-dictionary name.
    q.setKey( CassandraPersistenceUtils.key( getQueueId( queuePath ), DICTIONARY_COUNTERS ).toString() );
    q.setRange( null, null, false, ALL_COUNT );

    Set<String> names = new HashSet<String>();
    for ( HColumn<String, ByteBuffer> counterColumn : q.execute().get().getColumns() ) {
        names.add( counterColumn.getName() );
    }
    return names;
}
/**
 * Reports whether the given consumer still has any in-flight transactions for the
 * given queue. Only a single column is requested since existence is all that matters.
 *
 * @param queueId the queue being consumed
 * @param consumerId the consuming client
 * @return true when at least one outstanding transaction column exists
 */
public boolean hasOutstandingTransactions( UUID queueId, UUID consumerId ) {
    SliceQuery<ByteBuffer, UUID, UUID> timeoutQuery = createSliceQuery( ko, be, ue, ue );
    timeoutQuery.setColumnFamily( CONSUMER_QUEUE_TIMEOUTS.getColumnFamily() );
    timeoutQuery.setKey( getQueueClientTransactionKey( queueId, consumerId ) );
    timeoutQuery.setRange( null, null, false, 1 );
    return !timeoutQuery.execute().get().getColumns().isEmpty();
}
// Execute the raw-bytes slice query and unwrap the result into its column list.
QueryResult<ColumnSlice<ByteBuffer, ByteBuffer>> r = q.execute(); ColumnSlice<ByteBuffer, ByteBuffer> slice = r.get(); List<HColumn<ByteBuffer, ByteBuffer>> results = slice.getColumns();
/**
 * Returns up to {@code limit} queues that the given subscriber queue is subscribed to,
 * starting after {@code firstSubscriptionQueuePath} when supplied.
 *
 * Fix: the original computed {@code Math.min(limit, columns.size())} BEFORE the
 * {@code columns != null} guard, so the guard was dead code — a null result would have
 * thrown an NPE before ever reaching it. All uses of {@code columns} now sit inside
 * the guard.
 *
 * @param subscriberQueuePath path of the subscribing queue
 * @param firstSubscriptionQueuePath exclusive starting path for paging, or null for the start
 * @param limit maximum number of subscriptions to return
 * @return the subscription page; {@code more} is set when further results remain
 */
@Override
public QueueSet getSubscriptions( String subscriberQueuePath, String firstSubscriptionQueuePath, int limit ) {

    UUID subscriberQueueId = getQueueId( subscriberQueuePath );
    Keyspace ko = cass.getApplicationKeyspace( applicationId );

    // When a starting path is supplied, the range begins at that column, which is skipped below.
    if ( firstSubscriptionQueuePath != null ) {
        limit += 1;
    }

    // Fetch one extra column so we can detect whether more results remain.
    List<HColumn<String, UUID>> columns =
            createSliceQuery( ko, ue, se, ue ).setKey( subscriberQueueId )
                    .setColumnFamily( QUEUE_SUBSCRIPTIONS.getColumnFamily() )
                    .setRange( normalizeQueuePath( firstSubscriptionQueuePath ), null, false, limit + 1 )
                    .execute().get().getColumns();

    QueueSet queues = new QueueSet();

    if ( columns != null ) {
        int count = Math.min( limit, columns.size() );

        // Skip index 0 when it is the supplied starting path itself.
        for ( int i = firstSubscriptionQueuePath != null ? 1 : 0; i < count; i++ ) {
            HColumn<String, UUID> column = columns.get( i );
            queues.addQueue( column.getName(), column.getValue() );
        }

        if ( columns.size() > limit ) {
            queues.setMore( true );
        }
    }
    return queues;
}
/**
 * Returns up to {@code limit} queues subscribed to the given publisher queue,
 * starting after {@code firstSubscriberQueuePath} when supplied.
 *
 * Fix: the original computed {@code Math.min(limit, columns.size())} BEFORE the
 * {@code columns != null} guard, so the guard was dead code — a null result would have
 * thrown an NPE before ever reaching it. All uses of {@code columns} now sit inside
 * the guard.
 *
 * @param publisherQueuePath path of the publishing queue
 * @param firstSubscriberQueuePath exclusive starting path for paging, or null for the start
 * @param limit maximum number of subscribers to return
 * @return the subscriber page; {@code more} is set when further results remain
 */
@Override
public QueueSet getSubscribers( String publisherQueuePath, String firstSubscriberQueuePath, int limit ) {

    UUID publisherQueueId = getQueueId( publisherQueuePath );
    Keyspace ko = cass.getApplicationKeyspace( applicationId );

    // When a starting path is supplied, the range begins at that column, which is skipped below.
    if ( firstSubscriberQueuePath != null ) {
        limit += 1;
    }

    // Fetch one extra column so we can detect whether more results remain.
    List<HColumn<String, UUID>> columns =
            createSliceQuery( ko, ue, se, ue ).setKey( publisherQueueId )
                    .setColumnFamily( QUEUE_SUBSCRIBERS.getColumnFamily() )
                    .setRange( normalizeQueuePath( firstSubscriberQueuePath ), null, false, limit + 1 )
                    .execute().get().getColumns();

    QueueSet queues = new QueueSet();

    if ( columns != null ) {
        int count = Math.min( limit, columns.size() );

        // Skip index 0 when it is the supplied starting path itself.
        for ( int i = firstSubscriberQueuePath != null ? 1 : 0; i < count; i++ ) {
            HColumn<String, UUID> column = columns.get( i );
            queues.addQueue( column.getName(), column.getValue() );
        }

        if ( columns.size() > limit ) {
            queues.setMore( true );
        }
    }
    return queues;
}
/**
 * Fetches all property columns of a single message row and deserializes them.
 *
 * @param messageId id of the message to load
 * @return the result of {@code deserializeMessage} over the row's columns
 */
@Override
public Message getMessage( UUID messageId ) {
    SliceQuery<UUID, String, ByteBuffer> propertyQuery =
            createSliceQuery( cass.getApplicationKeyspace( applicationId ), ue, se, be );
    propertyQuery.setColumnFamily( MESSAGE_PROPERTIES.getColumnFamily() );
    propertyQuery.setKey( messageId );
    propertyQuery.setRange( null, null, false, ALL_COUNT );

    List<HColumn<String, ByteBuffer>> columns = propertyQuery.execute().get().getColumns();
    return deserializeMessage( columns );
}
/**
 * Fetches all property columns of a single queue row and deserializes them.
 *
 * @param queuePath path of the queue (unused by this implementation — TODO confirm intent)
 * @param queueId row key of the queue to load
 * @return the result of {@code deserializeQueue} over the row's columns
 */
public Queue getQueue( String queuePath, UUID queueId ) {
    SliceQuery<UUID, String, ByteBuffer> propertyQuery =
            createSliceQuery( cass.getApplicationKeyspace( applicationId ), ue, se, be );
    propertyQuery.setColumnFamily( QUEUE_PROPERTIES.getColumnFamily() );
    propertyQuery.setKey( queueId );
    propertyQuery.setRange( null, null, false, ALL_COUNT );

    List<HColumn<String, ByteBuffer>> columns = propertyQuery.execute().get().getColumns();
    return deserializeQueue( columns );
}
// Range over the dynamic composite from entryName up to the greater-than-equality bound —
// presumably matching every entry prefixed by entryName; confirm against DynamicComposite docs.
.setRange( DynamicComposite.toByteBuffer( entryName ), setGreaterThanEqualityFlag( new DynamicComposite( entryName ) ).serialize(), false, INDEX_ENTRY_LIST_COUNT ).execute().get().getColumns();
// Decode each column name: plain string for a declared dictionary, otherwise the first
// component of a DynamicComposite. (Fragment — the enclosing loop continues past this view.)
if ( results != null ) { values = new HashMap<String, Object>(); for ( HColumn<ByteBuffer, ByteBuffer> result : results.getColumns() ) { String name = entityHasDictionary ? string( result.getName() ) : DynamicComposite.fromByteBuffer( result.getName() ).get( 0, se );
// Slice the property index row keyed by queue id + timestamp shard + property name —
// NOTE(review): shard semantics of current_ts_shard not visible here; confirm with caller.
createSliceQuery( ko, be, be, be ).setColumnFamily( PROPERTY_INDEX.getColumnFamily() ) .setKey( bytebuffer( key( queueId, current_ts_shard, slice.getPropertyName() ) ) ) .setRange( start, finish, false, DEFAULT_SEARCH_COUNT ).execute().get().getColumns();
/**
 * Copies the name/value pairs of the queried column slice into a fresh mutable map.
 *
 * @param queryResult an executed slice query result
 * @return map of column name to column value
 */
private Map<String, String> getResults( QueryResult<ColumnSlice<String, String>> queryResult ) {
    final Map<String, String> byName = Maps.newHashMap();
    for ( HColumn<String, String> column : queryResult.get().getColumns() ) {
        byName.put( column.getName(), column.getValue() );
    }
    return byName;
}
// Executes the wrapped sub-slice query — which must target exactly one column — and
// re-wraps the result as a single-column QueryResult, preserving the underlying
// execution time and host while substituting this query as the result's origin.
// Returns a null column payload when the slice came back empty.
@Override public QueryResult<HColumn<N, V>> execute() { Assert.isTrue(subSliceQuery.getColumnNames().size() == 1, "There should be exactly one column name set. Call setColumn"); QueryResult<ColumnSlice<N, V>> r = subSliceQuery.execute(); ColumnSlice<N, V> slice = r.get(); List<HColumn<N,V>> columns = slice.getColumns(); HColumn<N, V> column = columns.size() == 0 ? null : columns.get(0); return new QueryResultImpl<HColumn<N,V>>( new ExecutionResult<HColumn<N,V>>(column, r.getExecutionTimeNano(), r.getHostUsed()), this); } }
// Slice the publisher's property index row over [start, finish), honoring the
// requested traversal direction and count from the query slice.
.setColumnFamily( PROPERTY_INDEX.getColumnFamily() ) .setKey( bytebuffer( key( publisherQueueId, slice.getPropertyName() ) ) ) .setRange( start, finish, slice.isReversed(), count ).execute().get().getColumns();
// Paged-column iterator advance check. Lazily runs the first query; when the current
// page is exhausted AND it was full (columns == count), fetches the next page via
// refresh() — a short final page means the source is drained. The while loop then
// skips ahead past filter-rejected columns, refreshing again if skipping exhausts a
// full page. Side effect: may advance the underlying iterator and issue queries, so
// calling hasNext() is not free — NOTE(review): behavior when refresh() yields an
// empty page is delegated to iterator.hasNext() on the fresh page.
@Override public boolean hasNext() { if (iterator == null) { iterator = Iterators.peekingIterator(query.execute().get().getColumns().iterator()); } else if (!iterator.hasNext() && columns == count) { // only need to do another query if maximum columns were retrieved refresh(); } while(filter != null && iterator != null && iterator.hasNext() && !filter.accept(iterator.peek())) { next(); if(!iterator.hasNext() && columns == count) { refresh(); } } return iterator.hasNext(); }
/** Verifies that a keys-only range slice returns rows without columns, and that naming columns restores them. */
@Test
public void testKeysOnlyPredicate() {
    RangeSlicesQuery<String, String, Long> rangeSlicesQuery =
            HFactory.createRangeSlicesQuery( keyspace, se, se, le );

    // Keys-only: rows come back with their key but zero columns attached.
    QueryResult<OrderedRows<String, String, Long>> result =
            rangeSlicesQuery.setColumnFamily( cf ).setKeys( "", "" ).setReturnKeysOnly().execute();
    Row<String, String, Long> firstRow = result.get().iterator().next();
    assertNotNull( firstRow.getKey() );
    assertEquals( 0, firstRow.getColumnSlice().getColumns().size() );

    // Re-running with two explicit column names returns exactly those two columns.
    result = rangeSlicesQuery.setColumnNames( "birthyear", "birthmonth" ).setRowCount( 5 ).execute();
    firstRow = result.get().iterator().next();
    assertNotNull( firstRow.getKey() );
    assertEquals( 2, firstRow.getColumnSlice().getColumns().size() );
}