/**
 * Fetches a slice of columns for every supplied key in one thrift
 * multiget_slice round trip.
 *
 * <p>When {@code workingSlicePredicate} is null, falls back to the active
 * predicate restricted to the declared column names.
 *
 * @param keys the row keys to read
 * @param workingSlicePredicate optional predicate; may be null
 * @return the raw thrift result keyed by serialized row key
 */
private ExecutionResult<Map<ByteBuffer, List<ColumnOrSuperColumn>>> multigetSliceInternal(
    final Iterable<K> keys, final HSlicePredicate<N> workingSlicePredicate) {
  return ((ExecutingKeyspace) keyspace).doExecuteOperation(
      new Operation<Map<ByteBuffer, List<ColumnOrSuperColumn>>>(OperationType.READ) {
        @Override
        public Map<ByteBuffer, List<ColumnOrSuperColumn>> execute(Cassandra.Client cassandra)
            throws HectorException {
          try {
            // Materialize the Iterable so the serializer can work on a List.
            List<K> materializedKeys = new ArrayList<K>();
            for (K singleKey : keys) {
              materializedKeys.add(singleKey);
            }
            return cassandra.multiget_slice(
                keySerializer.toBytesList(materializedKeys),
                columnParent,
                workingSlicePredicate == null
                    ? activeSlicePredicate.setColumnNames(columnValueSerializers.keySet()).toThrift()
                    : workingSlicePredicate.toThrift(),
                ThriftConverter.consistencyLevel(
                    consistencyLevelPolicy.get(operationType, columnFamily)));
          } catch (Exception e) {
            throw exceptionsTranslator.translate(e);
          }
        }
      });
}
/**
 * Deletes the columns defined in the HSuperColumn. If there are no HColumns
 * attached, we delete the whole thing.
 *
 * <p>Convenience overload that stamps the deletion with a clock generated by
 * the keyspace and delegates to the clock-taking overload.
 *
 * @param key the row key to delete from
 * @param cf  the column family name
 * @param sc  the super column whose sub-columns (or entirety) are deleted
 * @return this mutator, for call chaining
 */
public <SN,N,V> Mutator<K> addSubDelete(K key, String cf, HSuperColumn<SN,N,V> sc) {
  return addSubDelete(key, cf, sc, keyspace.createClock());
}
/**
 * Executes the given callback against this keyspace with no explicit
 * override for the second argument (delegates with null).
 *
 * @param koc the operation to run against the keyspace
 * @return the execution result produced by the callback
 * @throws HectorException if the underlying operation fails
 */
public <T> ExecutionResult<T> doExecute(KeyspaceOperationCallback<T> koc) throws HectorException {
  return doExecute(koc, null);
}
/** Identifies this mutator by the keyspace it writes to. */
@Override
public String toString() {
  StringBuilder description = new StringBuilder("Mutator(");
  description.append(keyspace.toString());
  description.append(")");
  return description.toString();
}
// Translate the raw (thrift/transport) exception into Hector's exception
// hierarchy before propagating it to the caller.
throw keyspace.getExceptionsTranslator().translate(ex);
/**
 * Creates a Keyspace with the given consistency level, fail over policy
 * and user credentials. For a reference to the consistency level, please
 * refer to http://wiki.apache.org/cassandra/API.
 *
 * @param keyspace the keyspace name
 * @param cluster the cluster whose connection manager backs the keyspace
 * @param consistencyLevelPolicy the consistency level policy applied to operations
 * @param failoverPolicy the fail over policy applied to operations
 * @param credentials user credentials for authenticating against the cluster
 * @return a new ExecutingKeyspace configured with the supplied settings
 */
public static Keyspace createKeyspace(String keyspace, Cluster cluster,
    ConsistencyLevelPolicy consistencyLevelPolicy, FailoverPolicy failoverPolicy,
    Map<String, String> credentials) {
  return new ExecutingKeyspace(keyspace, cluster.getConnectionManager(),
      consistencyLevelPolicy, failoverPolicy, credentials);
}
/** Identifies this mutator by the keyspace it writes to. */
@Override
public String toString() {
  return String.format("Mutator(%s)", keyspace.toString());
}
// NOTE(review): fragment of an anonymous class passed to an enclosing call;
// the surrounding expression (closed by "}), this);") starts outside this chunk.
@Override
public V execute(Client cassandra) throws HectorException {
  Object queryResult = null;
  try {
    // Run the CQL statement, optionally GZIP-compressing the query payload.
    CqlResult result = cassandra.execute_cql_query(query,
        useCompression ? Compression.GZIP : Compression.NONE);
    if (log.isDebugEnabled()) {
      log.debug("Found CqlResult: {}", result);
    }
    switch (result.getType()) {
      case VOID:
        // Statement produced no result; queryResult stays null.
        break;
      case INT:
        queryResult = result.getNum();
        //TODO this may be not correct as there seems to be no way to obtain affected columns/rows
        break;
      default:
        // Row-returning statements are not supported here; callers must use CqlQuery.
        throw new IllegalArgumentException(String.format(
            "query returned result rows. use %s instead", CqlQuery.class));
    }
  } catch (Exception ex) {
    // Translate any failure into Hector's exception hierarchy.
    throw keyspace.getExceptionsTranslator().translate(ex);
  }
  // Unchecked cast: V is expected by callers to be compatible with the
  // Integer-or-null result — TODO confirm at call sites.
  return (V) queryResult;
}
}), this);
/**
 * Creates a Keyspace with the given consistency level and fail over policy,
 * reusing the cluster's own credentials. For a reference to the consistency
 * level, please refer to http://wiki.apache.org/cassandra/API.
 *
 * @param keyspace the keyspace name
 * @param cluster the cluster whose connection manager and credentials are used
 * @param consistencyLevelPolicy the consistency level policy applied to operations
 * @param failoverPolicy the fail over policy applied to operations
 * @return a new ExecutingKeyspace configured with the supplied settings
 */
public static Keyspace createKeyspace(String keyspace, Cluster cluster,
    ConsistencyLevelPolicy consistencyLevelPolicy, FailoverPolicy failoverPolicy) {
  Map<String, String> clusterCredentials = cluster.getCredentials();
  return new ExecutingKeyspace(keyspace, cluster.getConnectionManager(),
      consistencyLevelPolicy, failoverPolicy, clusterCredentials);
}
/**
 * Reads a single row's slice via thrift get_slice and wraps it in a
 * one-entry map keyed by the serialized row key.
 *
 * @param key the row key to read
 * @param workingSlicePredicate the predicate to apply
 *        (NOTE(review): dereferenced unconditionally — NPEs if null; confirm callers)
 * @return the slice result keyed by serialized key
 */
private ExecutionResult<Map<ByteBuffer, List<ColumnOrSuperColumn>>> sliceInternal(
    final K key, final HSlicePredicate<SN> workingSlicePredicate) {
  return ((ExecutingKeyspace) keyspace).doExecuteOperation(
      new Operation<Map<ByteBuffer, List<ColumnOrSuperColumn>>>(OperationType.READ) {
        @Override
        public Map<ByteBuffer, List<ColumnOrSuperColumn>> execute(Cassandra.Client cassandra)
            throws HectorException {
          // Insertion-ordered map, matching the original single-key contract.
          Map<ByteBuffer, List<ColumnOrSuperColumn>> resultByKey =
              new LinkedHashMap<ByteBuffer, List<ColumnOrSuperColumn>>();
          try {
            ByteBuffer serializedKey = keySerializer.toByteBuffer(key);
            List<ColumnOrSuperColumn> slice = cassandra.get_slice(
                serializedKey,
                columnParent,
                workingSlicePredicate.toThrift(),
                ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
            resultByKey.put(serializedKey, slice);
          } catch (Exception e) {
            throw exceptionsTranslator.translate(e);
          }
          return resultByKey;
        }
      });
}
/**
 * Runs the counter-slice read and wraps the raw thrift counter columns
 * into a typed CounterSlice result.
 */
@Override
public QueryResult<CounterSlice<N>> execute() {
  KeyspaceOperationCallback<CounterSlice<N>> callback =
      new KeyspaceOperationCallback<CounterSlice<N>>() {
        @Override
        public CounterSlice<N> doInKeyspace(KeyspaceService ks) throws HectorException {
          ColumnParent parent = new ColumnParent(columnFamilyName);
          List<CounterColumn> rawColumns =
              ks.getCounterSlice(keySerializer.toByteBuffer(key), parent, getPredicate());
          return new CounterSliceImpl<N>(rawColumns, columnNameSerializer);
        }
      };
  return new QueryResultImpl<CounterSlice<N>>(keyspace.doExecute(callback, consistency), this);
}
/**
 * {@inheritDoc}
 *
 * <p>Delegates with null column name and serializer — presumably deleting the
 * entire row at {@code key}; confirm against the five-argument overload. The
 * clock is generated by the keyspace. The type parameter {@code N} is unused
 * here but required by the interface signature.
 */
@Override
public <N> Mutator<K> addDeletion(K key, String cf) {
  addDeletion(key, cf, null, null, keyspace.createClock());
  return this;
}
/** Identifies this throttling mutator by the keyspace it writes to. */
@Override
public String toString() {
  StringBuilder repr = new StringBuilder();
  repr.append("ThrottlingMutator(");
  repr.append(keyspace.toString());
  repr.append(")");
  return repr.toString();
}
// Translate the raw (thrift/transport) exception into Hector's exception
// hierarchy before propagating it to the caller.
throw keyspace.getExceptionsTranslator().translate(ex);
/**
 * Creates a Keyspace with the given consistency level, fail over policy
 * and user credentials. For a reference to the consistency level, please
 * refer to http://wiki.apache.org/cassandra/API.
 *
 * @param keyspace the keyspace name
 * @param cluster the cluster whose connection manager backs the keyspace
 * @param consistencyLevelPolicy the consistency level policy applied to operations
 * @param failoverPolicy the fail over policy applied to operations
 * @param credentials user credentials for authenticating against the cluster
 * @return a new ExecutingKeyspace configured with the supplied settings
 */
public static Keyspace createKeyspace(String keyspace, Cluster cluster,
    ConsistencyLevelPolicy consistencyLevelPolicy, FailoverPolicy failoverPolicy,
    Map<String, String> credentials) {
  return new ExecutingKeyspace(keyspace, cluster.getConnectionManager(),
      consistencyLevelPolicy, failoverPolicy, credentials);
}
/**
 * Fetches a slice of columns for each of the given keys in one thrift
 * multiget_slice round trip against the supplied column parent.
 *
 * @param keys the row keys to read
 * @param workingColumnParent the column parent to read from
 * @param workingSlicePredicate the predicate to apply (must not be null)
 * @return the raw thrift result keyed by serialized row key
 */
private ExecutionResult<Map<ByteBuffer, List<ColumnOrSuperColumn>>> multigetSliceInternal(
    final List<K> keys, final ColumnParent workingColumnParent,
    final HSlicePredicate<SN> workingSlicePredicate) {
  return ((ExecutingKeyspace) keyspace).doExecuteOperation(
      new Operation<Map<ByteBuffer, List<ColumnOrSuperColumn>>>(OperationType.READ) {
        @Override
        public Map<ByteBuffer, List<ColumnOrSuperColumn>> execute(Cassandra.Client cassandra)
            throws HectorException {
          try {
            List<ByteBuffer> serializedKeys = keySerializer.toBytesList(keys);
            return cassandra.multiget_slice(
                serializedKeys,
                workingColumnParent,
                workingSlicePredicate.toThrift(),
                ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
          } catch (Exception e) {
            throw exceptionsTranslator.translate(e);
          }
        }
      });
}
/**
 * Runs the column-slice read and wraps the raw thrift columns into a
 * typed ColumnSlice result.
 */
@Override
public QueryResult<ColumnSlice<N, V>> execute() {
  KeyspaceOperationCallback<ColumnSlice<N, V>> callback =
      new KeyspaceOperationCallback<ColumnSlice<N, V>>() {
        @Override
        public ColumnSlice<N, V> doInKeyspace(KeyspaceService ks) throws HectorException {
          ColumnParent parent = new ColumnParent(columnFamilyName);
          List<Column> rawColumns =
              ks.getSlice(keySerializer.toByteBuffer(key), parent, getPredicate());
          return new ColumnSliceImpl<N, V>(rawColumns, columnNameSerializer, valueSerializer);
        }
      };
  return new QueryResultImpl<ColumnSlice<N, V>>(keyspace.doExecute(callback, consistency), this);
}
/**
 * {@inheritDoc}
 *
 * <p>Convenience overload that stamps the deletion with a clock generated by
 * the keyspace, then returns this mutator for call chaining.
 */
@Override
public <N> Mutator<K> addDeletion(K key, String cf, N columnName, Serializer<N> nameSerializer) {
  addDeletion(key, cf, columnName, nameSerializer, keyspace.createClock());
  return this;
}
/** Identifies this mutator by the keyspace it writes to. */
@Override
public String toString() {
  return new StringBuilder("Mutator(").append(keyspace.toString()).append(')').toString();
}
// Translate the raw (thrift/transport) exception into Hector's exception
// hierarchy before propagating it to the caller.
throw keyspace.getExceptionsTranslator().translate(ex);