/**
 * Encodes a list of slice queries into a single compact string.
 * Each query becomes "&lt;hexStart&gt;/&lt;hexEnd&gt;/&lt;limit&gt;" (limit is -1 when the
 * query has no limit); the per-query strings are joined with ':'.
 */
private static String encodeQueries(List<SliceQuery> queries) {
    final List<String> encoded = new ArrayList<>(queries.size());
    for (final SliceQuery q : queries) {
        final String startHex = Hex.bytesToHex(q.getSliceStart().as(StaticBuffer.ARRAY_FACTORY));
        final String endHex = Hex.bytesToHex(q.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY));
        // -1 is the sentinel for "no limit set".
        final int effectiveLimit = q.hasLimit() ? q.getLimit() : -1;
        encoded.add(String.format("%s/%s/%d", startHex, endHex, effectiveLimit));
    }
    return Joiner.on(":").join(encoded);
}
/**
 * Copy constructor: duplicates the given query's column range and limit.
 * NOTE(review): the limit is copied unconditionally via getLimit(); presumably
 * getLimit() returns a no-limit sentinel when none was set — confirm against
 * SliceQuery's getLimit()/hasLimit() contract.
 */
public SliceQuery(final SliceQuery query) { this(query.getSliceStart(), query.getSliceEnd()); setLimit(query.getLimit()); }
/**
 * Tests whether the given column lies inside the query's half-open range
 * [sliceStart, sliceEnd): the start bound is inclusive, the end exclusive.
 */
public static boolean matches(SliceQuery query, StaticBuffer column) {
    final boolean atOrAfterStart = query.getSliceStart().compareTo(column) <= 0;
    final boolean strictlyBeforeEnd = query.getSliceEnd().compareTo(column) > 0;
    return atOrAfterStart && strictlyBeforeEnd;
}
// The grounding (first) query must span the whole column range:
// its start must be a single 0 byte and its end must be all 1s.
// Fix: the original message "a single 0s" was garbled; the check is for one zero byte.
Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)),
        "Expected start of first query to be a single 0: %s", start);
StaticBuffer end = ground.getSliceEnd();
Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())),
        "Expected end of first query to be all 1s: %s", end);
/**
 * Restricts this filter expression to the column range of the given slice
 * query (its sliceStart/sliceEnd bounds).
 *
 * @return this builder, for call chaining
 */
public FilterExpressionBuilder range(final SliceQuery slice) {
    startValue = slice.getSliceStart();
    endValue = slice.getSliceEnd();
    return this;
}
/**
 * Converts raw DynamoDB items into an EntryList, trimming each item to the
 * column range of the given slice query. Items whose columns fall outside
 * the slice yield a null entry from the builder and are skipped.
 */
private EntryList createEntryListFromItems(final List<Map<String, AttributeValue>> items, final SliceQuery sliceQuery) {
    final List<Entry> collected = new ArrayList<>(items.size());
    for (final Map<String, AttributeValue> rawItem : items) {
        final Entry sliced = new EntryBuilder(rawItem)
                .slice(sliceQuery.getSliceStart(), sliceQuery.getSliceEnd())
                .build();
        if (sliced != null) {
            collected.add(sliced);
        }
    }
    return StaticArrayEntryList.of(collected);
}
protected void finishSetup(ModifiableHadoopConfiguration scanConf, Configuration graphConf) { jobConf = getJobConfiguration(scanConf); Preconditions.checkNotNull(metrics); // Allowed to be null for jobs that specify no configuration and no configuration root //Preconditions.checkNotNull(jobConf); Preconditions.checkNotNull(job); job.workerIterationStart(jobConf, graphConf, metrics); keyFilter = job.getKeyFilter(); List<SliceQuery> sliceQueries = job.getQueries(); Preconditions.checkArgument(null != sliceQueries, "Job cannot specify null query list"); Preconditions.checkArgument(0 < sliceQueries.size(), "Job must specify at least one query"); // Assign head of getQueries() to "initialQuery" initialQuery = sliceQueries.get(0); // Assign tail of getQueries() to "subsequentQueries" subsequentQueries = new ArrayList<>(sliceQueries.subList(1,sliceQueries.size())); Preconditions.checkState(sliceQueries.size() == subsequentQueries.size() + 1); Preconditions.checkNotNull(initialQuery); if (0 < subsequentQueries.size()) { //It is assumed that the first query is the grounding query if multiple queries exist StaticBuffer start = initialQuery.getSliceStart(); Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be all 0s: %s", start); StaticBuffer end = initialQuery.getSliceEnd(); Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end); } }
// Completes worker setup: applies the job configuration, starts the worker
// iteration, and partitions the job's queries into an initial (grounding)
// query plus any subsequent queries.
protected void finishSetup(ModifiableHadoopConfiguration scanConf, Configuration graphConf) {
    jobConf = getJobConfiguration(scanConf);
    Preconditions.checkNotNull(metrics);
    // Allowed to be null for jobs that specify no configuration and no configuration root
    //Preconditions.checkNotNull(jobConf);
    Preconditions.checkNotNull(job);
    job.workerIterationStart(jobConf, graphConf, metrics);
    keyFilter = job.getKeyFilter();
    List<SliceQuery> sliceQueries = job.getQueries();
    Preconditions.checkArgument(null != sliceQueries, "Job cannot specify null query list");
    Preconditions.checkArgument(0 < sliceQueries.size(), "Job must specify at least one query");
    // Assign head of getQueries() to "initialQuery"
    initialQuery = sliceQueries.get(0);
    // Assign tail of getQueries() to "subsequentQueries"
    subsequentQueries = new ArrayList<>(sliceQueries.subList(1,sliceQueries.size()));
    // Sanity check: head + tail must account for every query.
    Preconditions.checkState(sliceQueries.size() == subsequentQueries.size() + 1);
    Preconditions.checkNotNull(initialQuery);
    if (0 < subsequentQueries.size()) {
        //It is assumed that the first query is the grounding query if multiple queries exist
        StaticBuffer start = initialQuery.getSliceStart();
        // Grounding query must cover the full column range: single 0 byte at the start...
        Preconditions.checkArgument(start.equals(BufferUtil.zeroBuffer(1)), "Expected start of first query to be all 0s: %s", start);
        StaticBuffer end = initialQuery.getSliceEnd();
        // ...and an all-1s buffer at the end.
        Preconditions.checkArgument(end.equals(BufferUtil.oneBuffer(end.length())), "Expected end of first query to be all 1s: %s", end);
    }
}
@Override public Map<StaticBuffer, EntryList> getSlice(final List<StaticBuffer> keys, final SliceQuery query, final StoreTransaction txh) throws BackendException { log.debug("Entering getSliceMultiSliceQuery table:{} keys:{} query:{} txh:{}", getTableName(), encodeForLog(keys), encodeForLog(query), txh); final Map<StaticBuffer, EntryList> entries = //convert keys to get item workers and get the items client.getDelegate().parallelGetItem(keys.stream().map(this::createGetItemWorker).collect(Collectors.toList())) .entrySet() .stream() .collect(Collectors.toMap(Map.Entry::getKey, entry -> extractEntriesFromGetItemResult(entry.getValue(), query.getSliceStart(), query.getSliceEnd(), query.getLimit()))); log.debug("Exiting getSliceMultiSliceQuery table:{} keys:{} query:{} txh:{} returning:{}", getTableName(), encodeForLog(keys), encodeForLog(query), txh, entries.size()); return entries; }
/**
 * Decodes all columns of a DynamoDB item and keeps only those inside this
 * store's slice query range [sliceStart, sliceEnd), truncated to the
 * query's limit.
 */
private List<Entry> decodeSlice(final Map<String, AttributeValue> item) {
    // Sentinel entries carrying only the bound column, used purely for ordering comparisons.
    final Entry lowerBound = StaticArrayEntry.of(sliceQuery.getSliceStart(), BufferUtil.emptyBuffer());
    final Entry upperBound = StaticArrayEntry.of(sliceQuery.getSliceEnd(), BufferUtil.emptyBuffer());

    final List<Entry> filtered = new ArrayList<>();
    for (final Entry candidate : new EntryBuilder(item).buildAll()) {
        // Half-open interval: start inclusive, end exclusive.
        if (candidate.compareTo(lowerBound) >= 0 && candidate.compareTo(upperBound) < 0) {
            filtered.add(candidate);
        }
    }
    final int cap = Math.min(filtered.size(), sliceQuery.getLimit());
    return filtered.subList(0, cap);
}
private StaticRecordIterator buildRecordIteratorFromQueryResult(final QueryResult queryResult) { final List<Entry> entries = Lists.newLinkedList(); for (Map<String, AttributeValue> item : queryResult.getItems()) { // DynamoDB's between includes the end of the range, but Titan's slice queries expect the end key to be exclusive final Entry entry = new EntryBuilder(item).slice(rangeKeySliceQuery.getSliceStart(), rangeKeySliceQuery.getSliceEnd()) .build(); if (entry != null) { entries.add(entry); } } return new StaticRecordIterator(entries); }
/**
 * Renders a slice query as a short diagnostic string of the form
 * "slice[rk:&lt;start&gt; -> &lt;end&gt; limit:&lt;n&gt;]" for log output.
 */
protected String encodeForLog(final SliceQuery query) {
    final StringBuilder sb = new StringBuilder("slice[rk:");
    sb.append(encodeKeyForLog(query.getSliceStart()));
    sb.append(" -> ");
    sb.append(encodeKeyForLog(query.getSliceEnd()));
    sb.append(" limit:").append(query.getLimit());
    sb.append(']');
    return sb.toString();
}
/**
 * Translates a slice query into an HBase column filter: a ColumnRangeFilter
 * over [sliceStart, sliceEnd) (start inclusive, end exclusive), combined via
 * MUST_PASS_ALL with a pagination filter when the query carries a limit.
 * Empty bounds map to null, meaning "unbounded" on that side.
 */
public static Filter getFilter(SliceQuery query) {
    final StaticBuffer start = query.getSliceStart();
    final StaticBuffer end = query.getSliceEnd();
    final byte[] startBytes = (start.length() > 0) ? start.as(StaticBuffer.ARRAY_FACTORY) : null;
    final byte[] endBytes = (end.length() > 0) ? end.as(StaticBuffer.ARRAY_FACTORY) : null;

    Filter filter = new ColumnRangeFilter(startBytes, true, endBytes, false);
    if (query.hasLimit()) {
        filter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
                filter,
                new ColumnPaginationFilter(query.getLimit(), 0));
    }
    logger.debug("Generated HBase Filter {}", filter);
    return filter;
}
// Wraps a CQL row iterator as an entry iterator for one key: maps each row to a
// (column, value, row) tuple, stops when either the row's key no longer matches
// the requested key or the column equals the slice end (end is exclusive), and
// caps the result at the query's limit.
// NOTE(review): stopping on sliceEnd equality assumes the end column, if present,
// appears in the stream — rows past it for the same key would otherwise be
// included; confirm against the CQL query's ordering guarantees.
EntryRecordIterator(final SliceQuery sliceQuery, final CQLColValGetter getter, final Iterator<Row> iterator, final StaticBuffer key) {
    this.getter = getter;
    final StaticBuffer sliceEnd = sliceQuery.getSliceEnd();
    this.iterator = iterator
        .<Tuple3<StaticBuffer, StaticBuffer, Row>> map(row -> Tuple.of(
            // _1 = column, _2 = value, _3 = the raw row (kept for the key check below)
            StaticArrayBuffer.of(row.getBytes(CQLKeyColumnValueStore.COLUMN_COLUMN_NAME)),
            StaticArrayBuffer.of(row.getBytes(CQLKeyColumnValueStore.VALUE_COLUMN_NAME)),
            row))
        .takeWhile(tuple -> key.equals(StaticArrayBuffer.of(tuple._3.getBytes(CQLKeyColumnValueStore.KEY_COLUMN_NAME))) && !sliceEnd.equals(tuple._1))
        .take(sliceQuery.getLimit());
}
/**
 * Builds a Thrift SliceRange spanning the default slice query's full column
 * range, with the count capped at the smaller of the caller's limit and the
 * default query's limit.
 */
private SliceRange getSliceRange(final int limit) {
    final int count = Math.min(limit, JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getLimit());
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceStart().asByteBuffer());
    sliceRange.setFinish(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceEnd().asByteBuffer());
    sliceRange.setCount(count);
    return sliceRange;
}
}
// Builds a Thrift SliceRange covering the default slice query's full column
// range; the count is the smaller of the caller's limit and the default
// query's limit.
private SliceRange getSliceRange(final int limit) {
    final SliceRange sliceRange = new SliceRange();
    sliceRange.setStart(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceStart().asByteBuffer());
    sliceRange.setFinish(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceEnd().asByteBuffer());
    // Cap the requested column count at the default query's configured limit.
    sliceRange.setCount(Math.min(limit, JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getLimit()));
    return sliceRange;
}
}
/**
 * Creates a Thrift SliceRange over the default slice query's entire column
 * range; the returned count never exceeds the default query's limit.
 */
private SliceRange getSliceRange(final int limit) {
    final SliceRange range = new SliceRange();
    // Independent setters: count first, then the start/finish bounds.
    range.setCount(Math.min(limit, JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getLimit()));
    range.setStart(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceStart().asByteBuffer());
    range.setFinish(JanusGraphHadoopSetupCommon.DEFAULT_SLICE_QUERY.getSliceEnd().asByteBuffer());
    return range;
}
}
sliceQuery.getSliceEnd()) .build();
/**
 * Builds a DynamoDB QueryRequest whose key condition matches the exact hash
 * key and the range-key interval taken from the slice query's bounds.
 */
private QueryRequest createQueryRequest(final StaticBuffer hashKey, final SliceQuery rangeQuery) {
    final Expression condition = new ConditionExpressionBuilder()
            .hashKey(hashKey)
            .rangeKey(rangeQuery.getSliceStart(), rangeQuery.getSliceEnd())
            .build();
    final QueryRequest request = super.createQueryRequest();
    return request
            .withKeyConditionExpression(condition.getConditionExpression())
            .withExpressionAttributeValues(condition.getAttributeValues());
}
// Full-store key scan: executes the prepared "get all keys" statement with the
// query's slice bounds bound in, paging by the store's configured page size,
// and wraps the result set in a CQLResultSetKeyIterator. Any driver exception
// is translated to a BackendException via EXCEPTION_MAPPER.
@Override
public KeyIterator getKeys(final SliceQuery query, final StoreTransaction txh) throws BackendException {
    // Unordered (random-partitioner) scans only; ordered scans are unsupported here.
    if (this.storeManager.getFeatures().hasOrderedScan()) {
        throw new PermanentBackendException("This operation is only allowed when a random partitioner (md5 or murmur3) is used.");
    }
    return Try.of(() -> new CQLResultSetKeyIterator(
            query,
            this.getter,
            this.session.execute(this.getKeysAll.bind()
                .setBytes(SLICE_START_BINDING, query.getSliceStart().asByteBuffer())
                .setBytes(SLICE_END_BINDING, query.getSliceEnd().asByteBuffer())
                .setFetchSize(this.storeManager.getPageSize())
                .setConsistencyLevel(getTransaction(txh).getReadConsistencyLevel()))))
        .getOrElseThrow(EXCEPTION_MAPPER);
}
}