@Override
public void removeInode(Long key) {
  try {
    mBatch.delete(mInodesColumn, Longs.toByteArray(key));
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Delete blind should be called as the last op in the delete operations,
 * since we need to modify endKey to make {@link WriteBatch#deleteRange(byte[], byte[])}
 * delete the end key.
 */
void deleteBlind(WriteBatch batch, byte[] key, @Nullable byte[] endKey) {
  try {
    if (null == endKey) {
      batch.delete(key);
    } else {
      Pair<byte[], byte[]> realRange = getRealRange(key, endKey);
      endKey = realRange.getRight();
      // deleteRange is end-exclusive, so bump the last byte to include endKey itself
      ++endKey[endKey.length - 1];
      batch.deleteRange(realRange.getLeft(), endKey);
    }
  } catch (RocksDBException e) {
    throw new StateStoreRuntimeException(e);
  }
}
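For context, here is a minimal, self-contained sketch (the path and keys are invented, not code from the project above) of why deleteBlind bumps the last byte: RocksDB's WriteBatch#deleteRange(begin, end) deletes the half-open range [begin, end), so the end key would otherwise survive.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class DeleteRangeEndKeyDemo {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/deleterange-demo")) {
      db.put(new byte[] {1}, "a".getBytes());
      db.put(new byte[] {2}, "b".getBytes());
      byte[] endKey = new byte[] {2};
      // [begin, end) would leave key {2} in place; widen the range past it.
      // (A production version must also handle the 0xFF overflow case.)
      ++endKey[endKey.length - 1];
      try (WriteBatch batch = new WriteBatch();
           WriteOptions writeOpts = new WriteOptions()) {
        batch.deleteRange(new byte[] {1}, endKey);
        db.write(writeOpts, batch);
      }
      System.out.println(db.get(new byte[] {2}) == null); // prints true: the end key was deleted
    }
  }
}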
@Override
public void removeChild(Long parentId, String childName) {
  try {
    mBatch.delete(mEdgesColumn, RocksUtils.toByteArray(parentId, childName));
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
@Override
public void remove(byte[] key) throws IOException {
  try {
    writeBatch.delete(key);
  } catch (RocksDBException e) {
    throw new IOException("Failed to delete key from RocksDB batch", e);
  }
}
@Override
public void delete(H hashKey, R rangeKey) {
  try {
    writeBatch.delete(Util.combine(hashKeySerde, rangeKeySerde, hashKey, rangeKey));
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
private void restoreAllInternal(final Collection<KeyValue<byte[], byte[]>> records) {
  try (final WriteBatch batch = new WriteBatch()) {
    for (final KeyValue<byte[], byte[]> record : records) {
      if (record.value == null) {
        // a null value is a tombstone: remove the key instead of writing it
        batch.delete(record.key);
      } else {
        batch.put(record.key, record.value);
      }
    }
    write(batch);
  } catch (final RocksDBException e) {
    throw new ProcessorStateException("Error restoring batch to store " + name, e);
  }
}
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
  try (final WriteBatch batch = new WriteBatch()) {
    for (final KeyValue<Bytes, byte[]> entry : entries) {
      Objects.requireNonNull(entry.key, "key cannot be null");
      if (entry.value == null) {
        batch.delete(entry.key.get());
      } else {
        batch.put(entry.key.get(), entry.value);
      }
    }
    write(batch);
  } catch (final RocksDBException e) {
    throw new ProcessorStateException("Error while batch writing to store " + name, e);
  }
}
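Hypothetical usage of the putAll above (store and the key/value contents are made-up names): a null value acts as a tombstone and turns into a batch delete.

// Bytes is org.apache.kafka.common.utils.Bytes; KeyValue is org.apache.kafka.streams.KeyValue.
// "store" stands for the RocksDB-backed store that defines putAll above.
final List<KeyValue<Bytes, byte[]>> entries = new ArrayList<>();
entries.add(KeyValue.pair(Bytes.wrap("keep".getBytes()), "v1".getBytes()));
entries.add(KeyValue.pair(Bytes.wrap("drop".getBytes()), null)); // tombstone: becomes batch.delete
store.putAll(entries);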
@Override
public void clear() {
  try (WriteBatch batch = new WriteBatch()) {
    try (RocksIterator it = db.newIterator()) {
      it.seekToFirst();
      while (it.isValid()) {
        batch.delete(it.key());
        it.next();
      }
    }
    try (WriteOptions opts = new WriteOptions()) {
      db.write(opts, batch);
    }
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  } finally {
    size.set(0);
  }
}
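A natural question is why clear() iterates every key instead of issuing one range tombstone. A sketch of the deleteRange alternative, assuming the default bytewise comparator and the same db handle as above, shows the catch:

// Sketch only. deleteRange covers [begin, end), and there is no universal
// "maximum key", so some fixed upper bound must be chosen.
try (WriteBatch batch = new WriteBatch();
     WriteOptions opts = new WriteOptions()) {
  byte[] begin = new byte[0]; // the empty key sorts before every other key
  byte[] end = new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF};
  batch.deleteRange(begin, end);
  db.write(opts, batch);
}
// Any key sorting at or above "end" survives, which is why the iterator-based
// clear() above remains the fully general approach.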
final WriteBatch batch = writeBatchMap.computeIfAbsent(segment, s -> new WriteBatch());
if (record.value == null) {
  batch.delete(record.key);
} else {
  batch.put(record.key, record.value);
}
try {
  batch.delete(rawKey);
} catch (RocksDBException e) {
  throw new StateStoreRuntimeException(e);
}
@Override
public void deleteAll(Iterator<ObjectId> ids, BulkOpListener listener) {
  checkNotNull(ids, "argument objectId is null");
  checkNotNull(listener, "argument listener is null");
  checkWritable();
  final boolean checkExists = !BulkOpListener.NOOP_LISTENER.equals(listener);
  byte[] keybuff = new byte[ObjectId.NUM_BYTES];
  try (RocksDBReference dbRef = dbhandle.getReference();
       ReadOptions ro = new ReadOptions()) {
    ro.setFillCache(false);
    ro.setVerifyChecksums(false);
    try (WriteOptions writeOps = new WriteOptions();
         WriteBatch batch = new WriteBatch()) {
      writeOps.setSync(true);
      while (ids.hasNext()) {
        ObjectId id = ids.next();
        id.getRawValue(keybuff);
        if (!checkExists || exists(dbRef, ro, keybuff)) {
          batch.delete(keybuff);
          listener.deleted(id);
        } else {
          listener.notFound(id);
        }
      }
      dbRef.db().write(writeOps, batch);
    } catch (RocksDBException e) {
      throw new RuntimeException(e);
    }
  }
}
private void putRaw(K key, byte[] keyBytes, V value, long revision) {
  try (WriteBatch batch = new WriteBatch()) {
    if (revision > 0) {
      // the latest revision has already been encoded into lastRevisionBytes
      batch.put(metaCfHandle, LAST_REVISION, lastRevisionBytes);
    }
    if (null == value) {
      // a null value deletes the key
      batch.delete(dataCfHandle, keyBytes);
    } else {
      byte[] valBytes = valCoder.encode(value);
      batch.put(dataCfHandle, keyBytes, valBytes);
    }
    db.write(writeOpts, batch);
  } catch (RocksDBException e) {
    throw new StateStoreRuntimeException("Error while updating key " + key
        + " to value " + value + " from store " + name, e);
  }
}
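The snippet above leans on the fact that a single WriteBatch can span column families and still commit atomically. A self-contained sketch of that idea (the path and column-family names are invented):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class MultiColumnFamilyBatchDemo {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("meta".getBytes(StandardCharsets.UTF_8)),
        new ColumnFamilyDescriptor("data".getBytes(StandardCharsets.UTF_8)));
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    DBOptions options = new DBOptions()
        .setCreateIfMissing(true)
        .setCreateMissingColumnFamilies(true);
    RocksDB db = RocksDB.open(options, "/tmp/multicf-demo", descriptors, handles);
    try (WriteBatch batch = new WriteBatch();
         WriteOptions writeOpts = new WriteOptions()) {
      ColumnFamilyHandle meta = handles.get(1);
      ColumnFamilyHandle data = handles.get(2);
      batch.put(meta, "last_revision".getBytes(), "42".getBytes());
      batch.delete(data, "stale_key".getBytes());
      db.write(writeOpts, batch); // both column families updated atomically, or neither
    } finally {
      for (ColumnFamilyHandle handle : handles) {
        handle.close(); // close handles before the db
      }
      db.close();
      options.close();
    }
  }
}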