/**
 * Clears the subspace used by this RangeSet instance. This will delete the records of any
 * data used by this set.
 * @param tc transaction or database in which to run operation
 * @return a future that is completed when the range has been cleared
 */
@Nonnull
public CompletableFuture<Void> clear(@Nonnull TransactionContext tc) {
    return tc.runAsync(tr -> {
        // Remove every key under this set's subspace using the explicit begin/end overload.
        tr.clear(subspace.range().begin, subspace.range().end);
        return CompletableFuture.<Void>completedFuture(null);
    });
}
// Snapshot-read the highest key in the counter subspace (reverse scan, limit 1),
// returning it if present. Snapshot isolation avoids adding a read conflict.
private CompletableFuture<Optional<KeyValue>> currentCounter() {
    return transaction.snapshot()
            .getRange(counterSubspace.range(), 1, true)
            .asList()
            .thenApply(results -> results.stream().findFirst());
}
/**
 * Read the deeper, likely empty, levels to get them into the RYW cache, since individual lookups may only
 * add pieces, requiring additional requests as keys increase.
 * @param tr the transaction to use to access the database
 * @return a future that is complete when the deeper levels have been loaded
 */
public CompletableFuture<Void> preloadForLookup(ReadTransaction tr) {
    // The fetched values are not needed; the read itself warms the cache.
    return tr.getRange(subspace.range(), nlevels, true)
            .asList()
            .thenApply(ignored -> null);
}
/**
 * Clears the entire set.
 * @param tc the transaction to use to access the database
 * @return a future that completes when the ranked set has been cleared
 */
public CompletableFuture<Void> clear(TransactionContext tc) {
    return tc.runAsync(tr -> {
        // Wipe all stored data, then re-create the empty level structure.
        tr.clear(subspace.range());
        return initLevels(tr);
    });
}
/**
 * Clear all index-related subspaces and rebuild every index defined in the meta-data,
 * together with the record-count job.
 * @return a future that completes when all rebuilds have finished
 */
@Nonnull
public CompletableFuture<Void> rebuildAllIndexes() {
    Transaction tr = ensureContextActive();
    // Wipe every index-related subspace before rebuilding from scratch.
    for (Object indexSpaceKey : new Object[]{INDEX_KEY, INDEX_SECONDARY_SPACE_KEY, INDEX_RANGE_SPACE_KEY, INDEX_STATE_SPACE_KEY, INDEX_UNIQUENESS_VIOLATIONS_KEY}) {
        tr.clear(getSubspace().range(Tuple.from(indexSpaceKey)));
    }
    List<CompletableFuture<Void>> work = new LinkedList<>();
    addRebuildRecordCountsJob(work);
    // Since(-1) yields every index ever added; rebuild them all.
    return rebuildIndexes(getRecordMetaData().getIndexesSince(-1), Collections.emptyMap(), work, RebuildIndexReason.REBUILD_ALL, null);
}
// Deletes all stored data for the given index: the primary index entries plus the
// secondary, range (build progress), and uniqueness-violation subspaces.
// NOTE: does not reset index state; callers are responsible for any state bookkeeping.
void clearIndexData(@Nonnull Index index) {
    Transaction tr = ensureContextActive();
    // Primary entries: clear by prefix rather than subspace.range() so that an ungrouped
    // aggregate index, whose value lives directly at the packed prefix itself, is removed too.
    tr.clear(Range.startsWith(indexSubspace(index).pack())); // startsWith to handle ungrouped aggregate indexes
    tr.clear(indexSecondarySubspace(index).range());
    tr.clear(indexRangeSubspace(index).range());
    tr.clear(indexUniquenessViolationsSubspace(index).range());
}
/**
 * Count the items in the set.
 * @param tc the transaction to use to access the database
 * @return a future that completes to the number of items in the set
 */
public CompletableFuture<Long> size(ReadTransactionContext tc) {
    // The deepest level stores per-node counts; summing them gives the total size.
    Range deepestLevel = subspace.get(nlevels - 1).range();
    return tc.readAsync(tr ->
            AsyncUtil.mapIterable(tr.getRange(deepestLevel), keyValue -> decodeLong(keyValue.getValue()))
                    .asList()
                    .thenApply(counts -> counts.stream().mapToLong(Long::longValue).sum()));
}
/**
 * Deletes every record in the store and invalidates the preload cache.
 * Clears from the start of the records subspace through the end of the store's
 * entire subspace (begin inclusive, end exclusive).
 */
@Override
public void deleteAllRecords() {
    preloadCache.invalidateAll();
    final Transaction tr = ensureContextActive();
    final byte[] begin = recordsSubspace().getKey();
    final byte[] end = getSubspace().range().end;
    tr.clear(begin, end);
}
/**
 * Sum the stored counts on one level of the skip-list between two keys.
 * A {@code null} beginKey/endKey means the start/end of the whole level.
 * @param tc context to read from
 * @param level which level's counts to sum
 * @param beginKey inclusive start key, or null for the level's beginning
 * @param endKey exclusive end key, or null for the level's end
 * @return a future completing to the total count over the range
 */
private CompletableFuture<Long> countRange(ReadTransactionContext tc, int level, byte[] beginKey, byte[] endKey) {
    final byte[] begin = beginKey == null
            ? subspace.range(Tuple.from(level)).begin
            : subspace.pack(Tuple.from(level, beginKey));
    final byte[] end = endKey == null
            ? subspace.range(Tuple.from(level)).end
            : subspace.pack(Tuple.from(level, endKey));
    return tc.readAsync(tr ->
            AsyncUtil.mapIterable(tr.getRange(begin, end), keyValue -> decodeLong(keyValue.getValue()))
                    .asList()
                    .thenApply(counts -> counts.stream().reduce(0L, Long::sum)));
}
// Reset the map subspace and repopulate it, distributing the keys
// round-robin across the sub-subspaces.
private void clearAndPopulateMulti() {
    db.run(tr -> {
        tr.clear(bmSubspace.range());
        final int subspaceCount = subSubspaces.size();
        for (int index = 0; index < keys.size(); index++) {
            map.put(tr, subSubspaces.get(index % subspaceCount), keys.get(index), value).join();
        }
        return null;
    });
}
/**
 * Adds a conflict range covering this object's whole subspace to the current transaction.
 * @param write true to add a write conflict range, false to add a read conflict range
 */
public void addConflictForSubspace(boolean write) {
    final Range conflictRange = getSubspace().range();
    final byte[] begin = conflictRange.begin;
    final byte[] end = conflictRange.end;
    final Transaction tr = context.ensureActive();
    if (write) {
        tr.addWriteConflictRange(begin, end);
    } else {
        tr.addReadConflictRange(begin, end);
    }
}
// Reads back everything stored in the allocator's subspace and asserts that it
// matches exactly the expected key/value allocations.
private void validateAllocation(FDBRecordContext context, HighContentionAllocator hca, Map<Long, String> allocations) {
    final Subspace allocationSubspace = hca.getAllocationSubspace();
    final Transaction transaction = context.ensureActive();
    final Map<Long, String> storedAllocations = transaction.getRange(allocationSubspace.range())
            .asList()
            .join()
            .stream()
            .collect(Collectors.toMap(kv -> extractKey(allocationSubspace, kv), kv -> extractValue(kv)));
    assertThat("we see the allocated keys in the subspace",
            allocations.entrySet(),
            containsInAnyOrder(storedAllocations.entrySet().toArray()));
}
private void clearAndPopulate() { // Populate data db.run(tr -> { tr.clear(bmSubspace.range()); keys.forEach(k -> map.put(tr, bmSubspace, k, value).join()); return null; }); }
/**
 * Scan the tokenizer-version subspace of the given text index, decoding each entry
 * into a (grouping key, tokenizer version) pair.
 *
 * @param store the record store whose index data should be scanned
 * @param index the text index whose tokenizer versions are requested
 * @return pairs of unpacked grouping key and tokenizer version, in key order
 * @throws ExecutionException if reading the range fails
 * @throws InterruptedException if the read is interrupted
 */
@Nonnull
private List<Pair<Tuple, Integer>> scanTokenizerVersions(@Nonnull FDBRecordStore store, @Nonnull Index index) throws ExecutionException, InterruptedException {
    final Subspace tokenizerVersionSubspace = store.indexSecondarySubspace(index).subspace(TextIndexMaintainer.TOKENIZER_VERSION_SUBSPACE_TUPLE);
    // Fix: read through the SAME store that produced the subspace. The original used the
    // recordStore field here, which silently assumed it was identical to the parameter.
    return store.ensureContextActive().getRange(tokenizerVersionSubspace.range()).asList().get().stream()
            .map(kv -> Pair.of(tokenizerVersionSubspace.unpack(kv.getKey()), (int)Tuple.fromBytes(kv.getValue()).getLong(0)))
            .collect(Collectors.toList());
}
// Start every test from an empty map subspace.
@BeforeEach
public void clear() {
    db.run(tr -> {
        tr.clear(bmSubspace.range().begin, bmSubspace.range().end);
        return null;
    });
}
// Verify that RangeSet.clear removes previously written data from its subspace.
@Test
public void clear() {
    // Seed the subspace with a single arbitrary key/value pair.
    db.run(tr -> {
        tr.set(rsSubspace.pack(new byte[]{(byte)0xde, (byte)0xad}), new byte[]{(byte)0xc0, (byte)0xde});
        return null;
    });
    List<KeyValue> contents = db.readAsync(tr -> tr.getRange(rsSubspace.range()).asList()).join();
    assertTrue(contents.size() == 1, "Key does not appear to be added");
    rs.clear(db).join();
    contents = db.readAsync(tr -> tr.getRange(rsSubspace.range()).asList()).join();
    assertTrue(contents.isEmpty(), "Clear did not remove key");
}
// Acquire the database and wipe the test subspace so each test starts clean.
@BeforeEach
void setup() {
    database = FDBDatabaseFactory.instance().getDatabase();
    try (FDBRecordContext context = database.openContext()) {
        context.ensureActive().clear(testSubspace.range().begin, testSubspace.range().end);
        context.commit();
    }
}
// Open the meta-data store and clear out its subspace before each test.
@BeforeEach
public void setup() {
    fdb = FDBDatabaseFactory.instance().getDatabase();
    fdb.run(ctx -> {
        openMetaDataStore(ctx);
        ctx.ensureActive().clear(metaDataStore.getSubspace().range());
        return null;
    });
}
// Asserts the RangeSet invariant on the raw stored data: each entry's key (range begin)
// is strictly less than its value (range end), and consecutive ranges never overlap
// (the previous range's end is <= the next range's begin).
private void checkIncreasing() {
    final List<KeyValue> entries = db.readAsync(tr -> tr.getRange(rsSubspace.range()).asList()).join();
    byte[] previousEnd = null;
    for (KeyValue entry : entries) {
        final byte[] begin = rsSubspace.unpack(entry.getKey()).getBytes(0);
        final byte[] end = entry.getValue();
        assertTrue(compareUnsigned(begin, end) < 0,
                "Key " + printable(begin) + " is not less than value " + printable(end));
        if (previousEnd != null) {
            assertTrue(compareUnsigned(previousEnd, begin) <= 0,
                    "Last value " + printable(previousEnd) + " is after key " + printable(begin));
        }
        previousEnd = end;
    }
}