/**
 * Removes the inode entry with the given id from the inodes column.
 * WAL is disabled for this write via {@code mDisableWAL}.
 *
 * @param inodeId id of the inode to remove
 * @throws RuntimeException wrapping any {@link RocksDBException} from the delete
 */
@Override
public void remove(Long inodeId) {
  final byte[] inodeKey = Longs.toByteArray(inodeId);
  try {
    mDb.delete(mInodesColumn, mDisableWAL, inodeKey);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Deletes the block-metadata entry keyed by the given block id.
 *
 * @param id block id whose metadata entry is removed
 * @throws RuntimeException wrapping any {@link RocksDBException} from the delete
 */
@Override
public void removeBlock(long id) {
  final byte[] blockKey = Longs.toByteArray(id);
  try {
    mDb.delete(mBlockMetaColumn, blockKey);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Removes the block-location entry for the given (blockId, workerId) pair.
 *
 * @param blockId id of the block
 * @param workerId id of the worker holding the block
 * @throws RuntimeException wrapping any {@link RocksDBException} from the delete
 */
@Override
public void removeLocation(long blockId, long workerId) {
  try {
    mDb.delete(mBlockLocationsColumn, RocksUtils.toByteArray(blockId, workerId));
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Removes the edge (parentId, name) from the edges column.
 * WAL is disabled for this write via {@code mDisableWAL}.
 *
 * @param parentId id of the parent inode
 * @param name child name under the parent
 * @throws RuntimeException wrapping any {@link RocksDBException} from the delete
 */
@Override
public void removeChild(long parentId, String name) {
  final byte[] edgeKey = RocksUtils.toByteArray(parentId, name);
  try {
    mDb.delete(mEdgesColumn, mDisableWAL, edgeKey);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Deletes the entry for {@code key} from the underlying RocksDB instance,
 * holding the reset read-lock for the duration of the operation.
 *
 * <p>Fix: braced all {@code if} statements and switched the trace statements
 * to SLF4J parameterized logging (the {@code error} call already used
 * {@code {}} placeholders), removing the duplicated string concatenation.
 *
 * @param key raw key to delete
 * @throws RuntimeException wrapping any {@link RocksDBException} from the delete
 */
@Override
public void delete(byte[] key) {
  resetDbLock.readLock().lock();
  try {
    if (logger.isTraceEnabled()) {
      // toHexString is only paid when trace is actually enabled.
      logger.trace("~> RocksDbDataSource.delete(): {}, key: {}", name, toHexString(key));
    }
    db.delete(key);
    if (logger.isTraceEnabled()) {
      logger.trace("<~ RocksDbDataSource.delete(): {}, key: {}", name, toHexString(key));
    }
  } catch (RocksDBException e) {
    logger.error("Failed to delete from db '{}'", name, e);
    throw new RuntimeException(e);
  } finally {
    resetDbLock.readLock().unlock();
  }
}
/**
 * Removes the state entry for the current key/group/namespace from RocksDB.
 *
 * @throws FlinkRuntimeException if the underlying RocksDB delete fails
 */
@Override
public void clear() {
  final byte[] currentKey = serializeCurrentKeyWithGroupAndNamespace();
  try {
    backend.db.delete(columnFamily, writeOptions, currentKey);
  } catch (RocksDBException e) {
    throw new FlinkRuntimeException("Error while removing entry from RocksDB", e);
  }
}
/**
 * Removes the entry returned by the most recent {@code next()} call.
 *
 * @throws IllegalStateException if there is no current entry or it was
 *     already deleted
 */
@Override
public void remove() {
  final boolean hasRemovableEntry = currentEntry != null && !currentEntry.deleted;
  if (!hasRemovableEntry) {
    throw new IllegalStateException("The remove operation must be called after a valid next operation.");
  }
  currentEntry.remove();
}
/**
 * Deletes {@code key} from the column family backing {@code table},
 * lazily creating the column family on first use.
 *
 * <p>Fix: the previous error log passed {@code e.getMessage()} as the SLF4J
 * format string, which misrenders messages containing {@code {}} and logs
 * nothing useful when the message is null; it now logs a parameterized,
 * contextual message with the exception as cause.
 *
 * @param table logical table name (maps to a column family)
 * @param key record key, encoded as UTF-8 bytes
 * @return {@link Status#OK} on success, {@link Status#ERROR} on failure
 */
@Override
public Status delete(final String table, final String key) {
  try {
    if (!COLUMN_FAMILIES.containsKey(table)) {
      // NOTE(review): check-then-create is assumed single-threaded or
      // internally synchronized by createColumnFamily — confirm.
      createColumnFamily(table);
    }
    final ColumnFamilyHandle cf = COLUMN_FAMILIES.get(table).getHandle();
    rocksDb.delete(cf, key.getBytes(UTF_8));
    return Status.OK;
  } catch (final RocksDBException e) {
    LOGGER.error("Failed to delete key '{}' from table '{}'", key, table, e);
    return Status.ERROR;
  }
}
/**
 * Stores {@code val} under {@code key}; a null value is treated as a delete.
 * Holds the reset read-lock for the duration of the operation.
 *
 * <p>Fix: braced all {@code if} statements, switched the trace statements to
 * SLF4J parameterized logging, and hoisted the duplicated null-safe value
 * description into a helper expression.
 *
 * @param key raw key
 * @param val raw value, or null to delete the key
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
@Override
public void put(byte[] key, byte[] val) {
  resetDbLock.readLock().lock();
  try {
    if (logger.isTraceEnabled()) {
      logger.trace("~> RocksDbDataSource.put(): {}, key: {}, {}",
          name, toHexString(key), val == null ? "null" : val.length);
    }
    if (val != null) {
      db.put(key, val);
    } else {
      // Null value means "remove the key", matching Map-like semantics.
      db.delete(key);
    }
    if (logger.isTraceEnabled()) {
      logger.trace("<~ RocksDbDataSource.put(): {}, key: {}, {}",
          name, toHexString(key), val == null ? "null" : val.length);
    }
  } catch (RocksDBException e) {
    logger.error("Failed to put into db '{}'", name, e);
    hintOnTooManyOpenFiles(e);
    throw new RuntimeException(e);
  } finally {
    resetDbLock.readLock().unlock();
  }
}
@Override public void mergeNamespaces(N target, Collection<N> sources) { if (sources == null || sources.isEmpty()) { return; } try { // create the target full-binary-key setCurrentNamespace(target); final byte[] targetKey = serializeCurrentKeyWithGroupAndNamespace(); // merge the sources to the target for (N source : sources) { if (source != null) { setCurrentNamespace(source); final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace(); byte[] valueBytes = backend.db.get(columnFamily, sourceKey); backend.db.delete(columnFamily, writeOptions, sourceKey); if (valueBytes != null) { backend.db.merge(columnFamily, writeOptions, targetKey, valueBytes); } } } } catch (Exception e) { throw new FlinkRuntimeException("Error while merging state in RocksDB", e); } }
// Fragment of a merge loop body (enclosing method not visible here):
// read the value stored under the current source key, then delete that
// entry — presumably so its bytes can be merged into a target key by the
// surrounding code. TODO confirm against the enclosing method.
final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace(); final byte[] valueBytes = backend.db.get(columnFamily, sourceKey); backend.db.delete(columnFamily, writeOptions, sourceKey);
// Duplicate of the read-then-delete fragment above this line in SOURCE;
// the enclosing method is not visible, so the code is left byte-identical.
final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace(); final byte[] valueBytes = backend.db.get(columnFamily, sourceKey); backend.db.delete(columnFamily, writeOptions, sourceKey);
/**
 * Map-style remove: returns the previous value for {@code key} (or null)
 * and deletes the key from RocksDB when a value was present.
 *
 * <p>NOTE(review): {@code key} is cast to {@link String}, so a non-String
 * key with a non-null mapping throws {@link ClassCastException} — behavior
 * preserved from the original.
 *
 * @param key key to remove; expected to be a {@link String}
 * @return the value previously associated with the key, or null
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
public @Override String remove(Object key) {
  final String previousValue = get(key);
  if (previousValue == null) {
    return null;
  }
  try {
    final byte[] rawKey = ((String) key).getBytes(Charsets.UTF_8);
    db.delete(writeOptions, rawKey);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
  return previousValue;
}
/**
 * Deletes the conflict entry for {@code path} within the database scoped to
 * the given transaction; a missing database is a silent no-op.
 *
 * @param txId transaction id, or null for the non-transactional database
 * @param path path whose conflict entry is removed
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
@Override
public void removeConflict(@Nullable String txId, String path) {
  final Optional<RocksDBReference> maybeDb = getDb(txId);
  if (!maybeDb.isPresent()) {
    // No database for this transaction — nothing to remove.
    return;
  }
  try (RocksDBReference dbRef = maybeDb.get()) {
    dbRef.db().delete(key(path));
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Deletes the entry addressed by the composite (hashKey, rangeKey) key.
 *
 * @param hashKey hash component of the composite key
 * @param rangeKey range component of the composite key
 * @throws DbException wrapping any {@link RocksDBException}
 */
@Override
public void delete(H hashKey, R rangeKey) {
  final byte[] compositeKey = Util.combine(hashKeySerde, rangeKeySerde, hashKey, rangeKey);
  try {
    this.db.delete(compositeKey);
  } catch (RocksDBException e) {
    throw new DbException(e);
  }
}
/**
 * Deletes the blob stored under (namespace, path).
 *
 * @param namespace blob namespace
 * @param path blob path within the namespace
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
@Override
public void removeBlob(String namespace, String path) {
  final byte[] blobKey = key(namespace, path);
  try (RocksDBReference dbRef = db()) {
    dbRef.db().delete(blobKey);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Deletes the serialized form of {@code key} from the database using the
 * configured write options.
 *
 * @param key key to remove
 * @throws RuntimeException (via {@code convert}) if the store is not
 *     initialized, is closed, or the delete fails
 */
@Override
public void remove(Key key) {
  ensureInitializedAndNotClosed();
  final byte[] serializedKey = key.serialize();
  try {
    this.database.get().delete(this.writeOptions, serializedKey);
  } catch (RocksDBException ex) {
    throw convert(ex, "remove key '%s'", key);
  }
}
/**
 * Deletes the object addressed by {@code objectId} from the backing store.
 *
 * @param objectId id of the object to delete; must not be null
 * @throws NullPointerException if {@code objectId} is null
 * @throws RuntimeException wrapping any {@link RocksDBException}
 */
@Override
public void delete(ObjectId objectId) {
  checkNotNull(objectId, "argument objectId is null");
  checkWritable();
  final byte[] rawId = objectId.getRawValue();
  try (RocksDBReference dbRef = dbhandle.getReference()) {
    dbRef.db().delete(rawId);
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Removes the state entry for the current key/group/namespace, serializing
 * the key through the shared data-output view.
 *
 * @throws FlinkRuntimeException if key serialization or the RocksDB delete
 *     fails
 */
@Override
public void clear() {
  try {
    writeCurrentKeyWithGroupAndNamespace();
    final byte[] serializedKey = dataOutputView.getCopyOfBuffer();
    backend.db.delete(columnFamily, writeOptions, serializedKey);
  } catch (IOException | RocksDBException e) {
    throw new FlinkRuntimeException("Error while removing entry from RocksDB", e);
  }
}
/**
 * Deletes {@code key} from the column family {@code handle}, taking the
 * per-key shared lock and rejecting the call if the store is closed.
 *
 * @param key raw key to delete
 * @throws RuntimeException (via {@code wrap}) on any {@link RocksDBException}
 */
@Override
public void delete(byte[] key) {
  try (AutoCloseableLock ignoredLock = sharedLock(key)) {
    throwIfClosed();
    db.delete(handle, key);
  } catch (RocksDBException e) {
    throw wrap(e);
  }
}