@Override
public Map<ByteBuffer, SuperColumn> multigetSuperColumn(List<ByteBuffer> keys, ColumnPath columnPath,
        boolean reversed, int size) throws HectorException {
    //valideSuperColumnPath(columnPath);
    // only can get supercolumn by multigetSuperSlice
    ColumnParent clp = new ColumnParent(columnPath.getColumn_family());
    clp.setSuper_column(columnPath.getSuper_column());
    SliceRange sr = new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), reversed, size);
    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(sr);
    Map<ByteBuffer, List<SuperColumn>> sclist = multigetSuperSlice(keys, clp, sp);
    if (sclist == null || sclist.isEmpty()) {
        return Collections.emptyMap();
    }
    Map<ByteBuffer, SuperColumn> result = new HashMap<ByteBuffer, SuperColumn>(keys.size() * 2);
    for (Map.Entry<ByteBuffer, List<SuperColumn>> entry : sclist.entrySet()) {
        List<SuperColumn> sclistByKey = entry.getValue();
        if (sclistByKey.size() > 0) {
            result.put(entry.getKey(), sclistByKey.get(0));
        }
    }
    return result;
}
public Map<ByteBuffer, List<SuperColumn>> multigetSuperSlice(List<ByteBuffer> keys, ColumnParent columnParent,
        SlicePredicate predicate) throws HectorException {
    try {
        Map<ByteBuffer, List<ColumnOrSuperColumn>> cfmap = cassandra.multiget_slice(
                keys, columnParent, predicate, getThriftCl(OperationType.READ));
        Map<ByteBuffer, List<SuperColumn>> result = new HashMap<ByteBuffer, List<SuperColumn>>();
        if (!columnParent.isSetSuper_column()) {
            // No super column set: multiget_slice returns whole SuperColumns.
            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : cfmap.entrySet()) {
                result.put(entry.getKey(), getSuperColumnList(entry.getValue()));
            }
        } else {
            // A super column was set: multiget_slice returns its sub-columns,
            // so wrap them back into a single SuperColumn per key.
            for (Map.Entry<ByteBuffer, List<ColumnOrSuperColumn>> entry : cfmap.entrySet()) {
                SuperColumn spc = new SuperColumn(ByteBuffer.wrap(columnParent.getSuper_column()),
                        getColumnList(entry.getValue()));
                ArrayList<SuperColumn> spclist = new ArrayList<SuperColumn>(1);
                spclist.add(spc);
                result.put(entry.getKey(), spclist);
            }
        }
        return result;
    } catch (Exception e) {
        // xtrans: Hector's Thrift-to-HectorException translator (declaration elided in this snippet)
        throw xtrans.translate(e);
    }
}
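// Illustration (not from the original sources): the two methods above hinge on whether
// ColumnParent carries a super_column. A minimal, self-contained sketch of the two
// addressing shapes, assuming only the cassandra-thrift generated classes:
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.ColumnParent;

public class ParentShapes {
    public static void main(String[] args) throws Exception {
        // Addressing the column family itself: multiget_slice returns
        // whole SuperColumns (the !isSetSuper_column() branch above).
        ColumnParent wholeRow = new ColumnParent("Super1");

        // Addressing one super column: multiget_slice returns its
        // sub-columns, which the else branch re-wraps per key.
        ColumnParent oneSuperColumn = new ColumnParent("Super1");
        oneSuperColumn.setSuper_column(ByteBuffer.wrap("sc1".getBytes("UTF-8")));

        System.out.println(wholeRow.isSetSuper_column());       // false
        System.out.println(oneSuperColumn.isSetSuper_column()); // true
    }
}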
public LucandraTermInfo[] loadFilteredTerms(Term term, List<ByteBuffer> docNums) throws IOException {
    long start = System.currentTimeMillis();
    ColumnParent parent = new ColumnParent();
    parent.setColumn_family(CassandraUtils.termVecColumnFamily);
    ByteBuffer key;
    try {
        key = CassandraUtils.hashKeyBytes(indexName.getBytes("UTF-8"), CassandraUtils.delimeterBytes,
                term.field().getBytes("UTF-8"), CassandraUtils.delimeterBytes, term.text().getBytes("UTF-8"));
    } catch (UnsupportedEncodingException e2) {
        throw new RuntimeException("JVM doesn't support UTF-8", e2);
    }
    ReadCommand rc = new SliceByNamesReadCommand(CassandraUtils.keySpace, key, parent, docNums);
    List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, rc);
    LucandraTermInfo[] termInfo = null;
    if (rows != null && rows.size() > 0 && rows.get(0) != null && rows.get(0).cf != null) {
        termInfo = TermCache.convertTermInfo(rows.get(0).cf.getSortedColumns());
    }
    long end = System.currentTimeMillis();
    if (logger.isDebugEnabled()) {
        // The ternary must be parenthesized: '+' binds tighter than '?:'.
        logger.debug("loadFilteredTerms: " + term + "(" + (termInfo == null ? 0 : termInfo.length) + ") took "
                + (end - start) + "ms");
    }
    return termInfo;
}
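// Aside (not from the original source): the debug line above needs parentheses around the
// ternary because string concatenation binds tighter than '?:'. A two-line demonstration:
public class TernaryPrecedence {
    public static void main(String[] args) {
        Object x = null;
        // Parses as ("x: " + x) == null ? "null" : "set"  -->  prints "set"
        System.out.println("unparenthesized: " + ("x: " + x == null ? "null" : "set"));
        // With parentheses the ternary sees only x  -->  prints "x: null"
        System.out.println("parenthesized: x: " + (x == null ? "null" : "set"));
    }
}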
@Override
public Integer doInKeyspace(KeyspaceService ks) throws HectorException {
    ColumnParent columnParent = new ColumnParent(columnFamily);
    columnParent.setSuper_column(superNameSerializer.toByteBuffer(superColumnName));
    Integer count = ks.getCount(keySerializer.toByteBuffer(key), columnParent, slicePredicate.toThrift());
    return count;
}
}, consistency), this);
public void read(org.apache.thrift.protocol.TProtocol iprot, MultiSliceRequest struct)
        throws org.apache.thrift.TException {
    org.apache.thrift.protocol.TField schemeField;
    iprot.readStructBegin();
    while (true) {
        schemeField = iprot.readFieldBegin();
        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
            break;
        }
        switch (schemeField.id) {
            case 1: // KEY
                if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
                    struct.key = iprot.readBinary();
                    struct.setKeyIsSet(true);
                } else {
                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                }
                break;
            case 2: // COLUMN_PARENT
                if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
                    struct.column_parent = new ColumnParent();
                    struct.column_parent.read(iprot);
                    struct.setColumn_parentIsSet(true);
                } else {
                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                }
                break;
            case 6: // CONSISTENCY_LEVEL
                if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
                    struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32());
                    struct.setConsistency_levelIsSet(true);
                } else {
                    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                }
                break;
            default:
                // Remaining fields (elided in this snippet) and unknown fields are skipped.
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
        }
        iprot.readFieldEnd();
    }
    iprot.readStructEnd();
    struct.validate();
}
public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnParent struct)
        throws org.apache.thrift.TException {
    struct.validate();
    oprot.writeStructBegin(STRUCT_DESC);
    if (struct.column_family != null) {
        oprot.writeFieldBegin(COLUMN_FAMILY_FIELD_DESC);
        oprot.writeString(struct.column_family);
        oprot.writeFieldEnd();
    }
    if (struct.super_column != null) {
        if (struct.isSetSuper_column()) {
            oprot.writeFieldBegin(SUPER_COLUMN_FIELD_DESC);
            oprot.writeBinary(struct.super_column);
            oprot.writeFieldEnd();
        }
    }
    oprot.writeFieldStop();
    oprot.writeStructEnd();
}
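// Sketch (not from the original sources): exercising the generated write/read pair above
// with an in-memory transport. Assumes only libthrift and the cassandra-thrift classes;
// the class name is illustrative.
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class ColumnParentRoundTrip {
    public static void main(String[] args) throws Exception {
        ColumnParent original = new ColumnParent("Super1");
        original.setSuper_column(ByteBuffer.wrap("sc1".getBytes("UTF-8")));

        // Serialize into a growable in-memory transport, then read it back.
        TMemoryBuffer buffer = new TMemoryBuffer(64);
        TBinaryProtocol protocol = new TBinaryProtocol(buffer);
        original.write(protocol);

        ColumnParent copy = new ColumnParent();
        copy.read(protocol);

        // The optional super_column survives only because isSetSuper_column()
        // guarded the write above; unset optional fields are simply omitted.
        System.out.println(original.equals(copy)); // true
    }
}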
List<String> columns, List<IndexExpression> conditions, int maxResults) throws Exception
{
    SlicePredicate slicePredicate = new SlicePredicate();
    if (columns != null && !columns.isEmpty())
    {
        // Explicit column list: a predicate may carry either column_names
        // or a slice_range, never both.
        List<ByteBuffer> columnNames = new ArrayList<ByteBuffer>(columns.size());
        for (String column : columns)
            columnNames.add(ByteBuffer.wrap(column.getBytes()));
        slicePredicate.setColumn_names(columnNames);
    }
    else
    {
        SliceRange sliceRange = new SliceRange();
        sliceRange.setStart(ByteBufferUtil.EMPTY_BYTE_BUFFER);
        sliceRange.setFinish(ByteBufferUtil.EMPTY_BYTE_BUFFER);
        slicePredicate.setSlice_range(sliceRange);
    }
    KeyRange keyRange = new KeyRange(maxResults);
    keyRange.setStart_key(minVal == null ? "".getBytes() : minVal);
    keyRange.setEnd_key(maxVal == null ? "".getBytes() : maxVal);
    ColumnParent cp = new ColumnParent(m.getTableName());
    List<KeySlice> keys = conn.getClient().get_range_slices(cp, slicePredicate, keyRange,
            getConsistencyLevel());
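// Illustration (not from the original sources): a SlicePredicate must carry exactly one
// of column_names or slice_range; the server rejects a predicate carrying both with an
// InvalidRequestException. The two mutually exclusive shapes:
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

public class PredicateShapes {
    public static void main(String[] args) {
        // Shape 1: an explicit list of column names.
        SlicePredicate byName = new SlicePredicate();
        byName.setColumn_names(Arrays.asList(ByteBuffer.wrap("age".getBytes())));

        // Shape 2: a contiguous range; empty start/finish spans the whole row,
        // bounded only by count.
        SlicePredicate byRange = new SlicePredicate();
        byRange.setSlice_range(new SliceRange(
                ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, 100));

        System.out.println(byName + "\n" + byRange);
    }
}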
private List<KeySlice> getRangeSlices(org.apache.cassandra.thrift.KeyRange keyRange,
        @Nullable SliceQuery sliceQuery) throws BackendException {
    SliceRange sliceRange = new SliceRange();
    if (sliceQuery == null) {
        // No column filter requested: peek at a token range with a small default page.
        sliceRange.setStart(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setFinish(ArrayUtils.EMPTY_BYTE_ARRAY)
                .setCount(5);
    } else {
        sliceRange.setStart(sliceQuery.getSliceStart().asByteBuffer())
                .setFinish(sliceQuery.getSliceEnd().asByteBuffer())
                .setCount(sliceQuery.getLimit());
    }
    try {
        return connection.getClient().get_range_slices(new ColumnParent(columnFamily),
                new SlicePredicate().setSlice_range(sliceRange), keyRange, ConsistencyLevel.QUORUM);
    } catch (Exception e) {
        throw new TemporaryBackendException(e);
    }
}
@Test
public void testGetSuperRangeSlices() throws HectorException {
    for (int i = 0; i < 10; i++) {
        ColumnPath cp = new ColumnPath("Super1");
        cp.setSuper_column(bytes("SuperColumn_1"));
        cp.setColumn(bytes("testGetSuperRangeSlices_" + i));
        keyspace.insert("testGetSuperRangeSlices0", cp, StringSerializer.get().toByteBuffer("testGetSuperRangeSlices_Value_" + i));
        keyspace.insert("testGetSuperRangeSlices1", cp, StringSerializer.get().toByteBuffer("testGetSuperRangeSlices_Value_" + i));
    }

    // get value
    ColumnParent clp = new ColumnParent("Super1");
    SliceRange sr = new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, 150);
    SlicePredicate sp = new SlicePredicate();
    sp.setSlice_range(sr);
    KeyRange range = new KeyRange();
    range.setStart_key("".getBytes());
    range.setEnd_key("".getBytes());
    Map<String, List<SuperColumn>> keySlices = se.fromBytesMap(keyspace.getSuperRangeSlices(clp, sp, range));

    assertNotNull(keySlices);
    assertNotNull("testGetSuperRangeSlices0 is null", keySlices.get("testGetSuperRangeSlices0"));
    assertEquals("testGetSuperRangeSlices_Value_0",
            string(keySlices.get("testGetSuperRangeSlices0").get(0).getColumns().get(0).getValue()));
    assertEquals(1, keySlices.get("testGetSuperRangeSlices1").size());
    assertEquals(10, keySlices.get("testGetSuperRangeSlices1").get(0).getColumns().size());

    ColumnPath cp = new ColumnPath("Super1");
    keyspace.remove("testGetSuperRangeSlices0", cp);
    keyspace.remove("testGetSuperRangeSlices1", cp);
}
SlicePredicate predicate = new SlicePredicate();
SliceRange sliceRange = new SliceRange();
sliceRange.setStart(new byte[0]).setFinish(new byte[0]);
sliceRange.setCount(columnCount);
sliceRange.setReversed(reversed);
predicate.setSlice_range(sliceRange);

KeyRange range = new KeyRange(limitCount);
AbstractType<?> keyComparator = this.cfKeysComparators.get(columnFamily);
ByteBuffer startKey = rawStartKey.isEmpty() ? ByteBufferUtil.EMPTY_BYTE_BUFFER
        : getBytesAccordingToType(rawStartKey, keyComparator);
ByteBuffer endKey = rawEndKey.isEmpty() ? ByteBufferUtil.EMPTY_BYTE_BUFFER
        : getBytesAccordingToType(rawEndKey, keyComparator);
range.setStart_key(startKey).setEnd_key(endKey);

ColumnParent columnParent = new ColumnParent(columnFamily);
List<KeySlice> keySlices = thriftClient.get_range_slices(columnParent, predicate, range, consistencyLevel);
printSliceList(columnFamilyDef, keySlices);
elapsedTime(startTime);
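// Sketch (not from the original sources): the standard idiom for paging get_range_slices
// across the whole ring, advancing start_key to the last key of each page and skipping
// the duplicate first row. Connection setup and error handling are omitted; process(...)
// is a hypothetical per-row callback.
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;

public class RangePager {
    // Pages through all rows of a column family, batchSize keys at a time.
    static void pageAllRows(Cassandra.Client client, String columnFamily, int batchSize) throws Exception {
        SlicePredicate predicate = new SlicePredicate().setSlice_range(
                new SliceRange(ByteBuffer.wrap(new byte[0]), ByteBuffer.wrap(new byte[0]), false, 100));
        ColumnParent parent = new ColumnParent(columnFamily);

        ByteBuffer startKey = ByteBuffer.wrap(new byte[0]); // empty = beginning of the ring
        boolean skipFirst = false; // start key of every page after the first is a repeat
        while (true) {
            KeyRange range = new KeyRange(batchSize).setStart_key(startKey).setEnd_key(new byte[0]);
            List<KeySlice> page = client.get_range_slices(parent, predicate, range, ConsistencyLevel.ONE);
            for (int i = skipFirst ? 1 : 0; i < page.size(); i++) {
                process(page.get(i));
            }
            if (page.size() < batchSize) {
                break; // short page: end of the ring reached
            }
            startKey = page.get(page.size() - 1).key; // resume from the last key seen
            skipFirst = true;
        }
    }

    static void process(KeySlice row) {
        System.out.println(row.getKey().length + " bytes of key, " + row.getColumnsSize() + " columns");
    }
}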
"range", range.toString(), "start_column", ByteBufferUtil.bytesToHex(start_column), "consistency_level", consistency_level.name()); Tracing.instance.begin("get_paged_slice", traceParameters); consistencyLevel.validateForRead(keyspace); SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange(start_column, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, -1)); return thriftifyKeySlices(rows, new ColumnParent(column_family), predicate, now);
SlicePredicate predicate = new SlicePredicate();
List<ByteBuffer> columnNames = new ArrayList<ByteBuffer>();
for (String superColumnName : superColumnNames) {
    // Serialize each requested super column name into the predicate's name list.
    columnNames.add(ByteBuffer.wrap(superColumnName.getBytes()));
}
predicate.setColumn_names(columnNames);

ColumnParent parent = new ColumnParent(columnFamily);
List<ColumnOrSuperColumn> coscList;
Connection conn = null; // acquired from the connection pool before use (acquisition elided here)
coscList = conn.getClient().get_slice(ByteBuffer.wrap(rowKey), parent, predicate, getConsistencyLevel());
/**
 * Construct a Hector ColumnParent based on the information in the query and
 * the type of column family being queried.
 *
 * @param <K> key type of the column family
 * @param columnFamily the column family being queried
 * @param path optional column path; its first element becomes the super column
 *             when the column family is of type SUPER
 * @return a ColumnParent addressing either the column family itself or one of
 *         its super columns
 * @throws BadRequestException
 */
public static <K> ColumnParent getColumnParent(ColumnFamily<?, ?> columnFamily, ColumnPath<?> path)
        throws BadRequestException {
    ColumnParent cp = new ColumnParent();
    cp.setColumn_family(columnFamily.getName());
    if (path != null) {
        Iterator<ByteBuffer> columns = path.iterator();
        if (columnFamily.getType() == ColumnType.SUPER && columns.hasNext()) {
            cp.setSuper_column(columns.next());
        }
    }
    return cp;
}
private static Collection<IColumn> getFieldCacheEntries(IndexReader indexReader, String field) throws IOException {
    String indexName = SolandraCoreContainer.coreInfo.get().indexName + "~" + SolandraCoreContainer.coreInfo.get().shard;
    byte[] indexNameBytes = indexName.getBytes("UTF-8");

    if (logger.isDebugEnabled())
        logger.debug("Loading field cache from " + indexName + " " + field);

    ColumnParent fieldCacheParent = new ColumnParent(CassandraUtils.fieldCacheColumnFamily);
    ByteBuffer fieldCacheKey = CassandraUtils.hashKeyBytes(indexNameBytes, CassandraUtils.delimeterBytes,
            field.getBytes());

    List<Row> rows = CassandraUtils.robustRead(CassandraUtils.consistency, new SliceFromReadCommand(
            CassandraUtils.keySpace, fieldCacheKey, fieldCacheParent, ByteBufferUtil.EMPTY_BYTE_BUFFER,
            ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));

    if (rows.isEmpty())
        return Collections.emptyList();

    Row row = rows.get(0);
    if (row.cf == null)
        return Collections.emptyList();

    return row.cf.getSortedColumns();
}
if (startSessionIfRequested()) {
    Map<String, String> traceParameters = ImmutableMap.of("column_parent", column_parent.toString(),
            "index_clause", index_clause.toString(),
            "slice_predicate", column_predicate.toString(),
            "consistency_level", consistency_level.name());
    Tracing.instance.begin("get_indexed_slices", traceParameters);
}

ColumnFilter columns = makeColumnFilter(metadata, column_parent, column_predicate);
ClusteringIndexFilter filter = toInternalFilter(metadata, column_parent, column_predicate);
DataLimits limits = getLimits(index_clause.count,
        metadata.isSuper() && !column_parent.isSetSuper_column(),
        column_predicate);
public void setFieldValue(_Fields field, Object value) {
    switch (field) {
        case COLUMN_FAMILY:
            if (value == null) {
                unsetColumn_family();
            } else {
                setColumn_family((String)value);
            }
            break;
        case SUPER_COLUMN:
            if (value == null) {
                unsetSuper_column();
            } else {
                setSuper_column((ByteBuffer)value);
            }
            break;
    }
}
struct.column_parent = new ColumnParent();
struct.column_parent.read(iprot);
struct.setColumn_parentIsSet(true);

struct.consistency_level = ConsistencyLevel.findByValue(iprot.readI32());
struct.setConsistency_levelIsSet(true);
/** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
public boolean isSet(_Fields field) {
    if (field == null) {
        throw new IllegalArgumentException();
    }
    switch (field) {
        case COLUMN_FAMILY:
            return isSetColumn_family();
        case SUPER_COLUMN:
            return isSetSuper_column();
    }
    throw new IllegalStateException();
}
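// Illustration (not from the original sources): exercising the generated setFieldValue and
// isSet methods above through the _Fields enum. The class name is illustrative.
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.ColumnParent;

public class FieldsDemo {
    public static void main(String[] args) {
        ColumnParent cp = new ColumnParent();

        // Generic, enum-driven access instead of the typed setters.
        cp.setFieldValue(ColumnParent._Fields.COLUMN_FAMILY, "Standard1");
        cp.setFieldValue(ColumnParent._Fields.SUPER_COLUMN,
                ByteBuffer.wrap(new byte[] { 0x01 }));

        System.out.println(cp.isSet(ColumnParent._Fields.COLUMN_FAMILY)); // true
        System.out.println(cp.isSet(ColumnParent._Fields.SUPER_COLUMN));  // true

        // Passing null routes to the corresponding unset method.
        cp.setFieldValue(ColumnParent._Fields.SUPER_COLUMN, null);
        System.out.println(cp.isSet(ColumnParent._Fields.SUPER_COLUMN));  // false
    }
}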
public void insert(ByteBuffer key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)
        throws InvalidRequestException, UnavailableException, TimedOutException {
    long queryStartNanoTime = System.nanoTime();
    if (startSessionIfRequested()) {
        Map<String, String> traceParameters = ImmutableMap.of("key", ByteBufferUtil.bytesToHex(key),
                "column_parent", column_parent.toString(),
                "column", column.toString(),
                "consistency_level", consistency_level.name());
        Tracing.instance.begin("insert", traceParameters);
    } else {
        logger.trace("insert");
    }

    try {
        internal_insert(key, column_parent, column, consistency_level, queryStartNanoTime);
    } catch (RequestValidationException e) {
        throw ThriftConversion.toThrift(e);
    } finally {
        Tracing.instance.stopSession();
    }
}
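// Sketch (not from the original sources): building the three arguments the insert method
// above expects. The key, names, and values are illustrative; the actual client.insert
// call is commented out so the example runs without a server.
import java.nio.ByteBuffer;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnParent;

public class InsertArgs {
    public static void main(String[] args) throws Exception {
        ByteBuffer key = ByteBuffer.wrap("user-42".getBytes("UTF-8"));

        // For a standard column family the parent is just the CF name;
        // for a super CF you would also call setSuper_column(...).
        ColumnParent parent = new ColumnParent("Standard1");

        Column column = new Column(ByteBuffer.wrap("email".getBytes("UTF-8")));
        column.setValue(ByteBuffer.wrap("a@example.com".getBytes("UTF-8")));
        column.setTimestamp(System.currentTimeMillis() * 1000); // microseconds by convention

        // client.insert(key, parent, column, ConsistencyLevel.QUORUM);
        System.out.println(parent + " <- " + column);
    }
}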