public RocksDBWriteBatchWrapper(@Nonnull RocksDB rocksDB, @Nullable WriteOptions options, int capacity) {
    Preconditions.checkArgument(capacity >= MIN_CAPACITY && capacity <= MAX_CAPACITY,
        "capacity should be between " + MIN_CAPACITY + " and " + MAX_CAPACITY);

    this.db = rocksDB;
    this.options = options;
    this.capacity = capacity;
    // Pre-size the batch's internal buffer so it is not re-allocated while filling.
    this.batch = new WriteBatch(this.capacity * PER_RECORD_BYTES);
}
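Most of the examples below reduce to the same underlying RocksJava pattern: stage writes in a WriteBatch, then apply them atomically with a single db.write() call. A minimal self-contained sketch of that base pattern (the path and keys are illustrative, not taken from any of the projects):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class WriteBatchDemo {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/writebatch-demo");
             WriteBatch batch = new WriteBatch();
             WriteOptions writeOpts = new WriteOptions()) {
            batch.put("k1".getBytes(), "v1".getBytes());
            batch.put("k2".getBytes(), "v2".getBytes());
            db.write(writeOpts, batch);  // both puts applied atomically, or neither
        }
    }
}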
@Override
public void putBatch(Map<K, V> batch) throws IOException {
    // WriteBatch and WriteOptions hold native memory; close them deterministically
    // instead of leaking them to the garbage collector.
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(
                columnFamily,
                serializer.serialize(entry.getKey()),
                serializer.serialize(entry.getValue()));
        }
        rocksDb.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        throw new IOException(String.format("Failed to put batch=%s", batch), e);
    }
}
private void processBatchInsert(TreeMap<RocksDbKey, RocksDbValue> batchMap) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch()) {
        // take the batched metric data and write to the database
        for (Map.Entry<RocksDbKey, RocksDbValue> entry : batchMap.entrySet()) {
            writeBatch.put(entry.getKey().getRaw(), entry.getValue().getRaw());
        }
        store.db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        String message = "Failed to store data to RocksDB";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }
}
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("~> RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        }
        try {
            try (WriteBatch batch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
                    if (entry.getValue() == null) {
                        batch.remove(entry.getKey());  // a null value means "delete this key"
                    } else {
                        batch.put(entry.getKey(), entry.getValue());
                    }
                }
                db.write(writeOptions, batch);
            }
            if (logger.isTraceEnabled()) {
                logger.trace("<~ RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
            }
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
@Override
public void putBatch(Map<String, Object> map) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = serialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;  // skip blank keys and empty values
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(keyByte, data);
        }
        db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        // dispose() is the native-resource release method in older RocksJava
        // versions; newer versions use close() / try-with-resources instead.
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
}
void deleteMetrics(FilterOptions filter) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {

        // collect every key matching the filter into the batch
        scanRaw(filter, (RocksDbKey key, RocksDbValue value) -> {
            writeBatch.remove(key.getRaw());
            return true;
        });

        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metrics", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed to delete metrics";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
@Override
public void clear() {
    try {
        try (RocksIteratorWrapper iterator =
                 RocksDBKeyedStateBackend.getRocksIterator(backend.db, columnFamily);
             WriteBatch writeBatch = new WriteBatch(128)) {

            final byte[] keyPrefixBytes = serializeCurrentKeyWithGroupAndNamespace();
            iterator.seek(keyPrefixBytes);

            // stage a delete for every key that still carries the current prefix
            while (iterator.isValid()) {
                byte[] keyBytes = iterator.key();
                if (startWithKeyPrefix(keyPrefixBytes, keyBytes)) {
                    writeBatch.remove(columnFamily, keyBytes);
                } else {
                    break;  // keys are sorted, so the prefix range has ended
                }
                iterator.next();
            }

            backend.db.write(writeOptions, writeBatch);
        }
    } catch (Exception e) {
        LOG.warn("Error while cleaning the state.", e);
    }
}
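Where the keys to drop form one contiguous sorted range, newer RocksJava versions also expose a range delete that avoids the iterate-and-remove loop above. A sketch only; prefixUpperBound is a hypothetical helper, not part of the original code:

// Sketch: delete every key in [keyPrefixBytes, upperBound) in one call.
// prefixUpperBound() is assumed to produce the first key sorting after the prefix;
// deleteRange() throws RocksDBException, so the caller must handle it.
byte[] upperBound = prefixUpperBound(keyPrefixBytes);
backend.db.deleteRange(columnFamily, keyPrefixBytes, upperBound);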
public StdSession(HugeConfig conf) {
    this.closed = false;
    boolean bulkload = conf.get(RocksDBOptions.BULKLOAD_MODE);
    this.batch = new WriteBatch();
    this.writeOptions = new WriteOptions();
    // Skip the write-ahead log while bulk loading, trading durability for ingest speed.
    this.writeOptions.setDisableWAL(bulkload);
    //this.writeOptions.setSync(false);
}
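The disableWAL flag above is the usual bulk-load trade-off: writes that exist only in the memtable when the process crashes are lost, which is acceptable when the load can simply be re-run. The flag in isolation:

// Assumption: only suitable for ingest jobs that are safe to restart from scratch.
WriteOptions bulkLoadOpts = new WriteOptions().setDisableWAL(true);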
/**
 * <p>Constructor of BatchResult class.</p>
 *
 * @param sequenceNumber related to this BatchResult instance.
 * @param nativeHandle to {@link org.rocksdb.WriteBatch}
 *     native instance.
 */
public BatchResult(final long sequenceNumber, final long nativeHandle) {
    sequenceNumber_ = sequenceNumber;
    // Wrap the existing native WriteBatch; 'true' makes this wrapper own the
    // handle, so closing the wrapper frees the native memory.
    writeBatch_ = new WriteBatch(nativeHandle, true);
}
public EzRocksDbBatch(RocksDB db, Serde<H> hashKeySerde, Serde<R> rangeKeySerde, Serde<V> valueSerde) {
    this.writeOptions = new WriteOptions();
    this.db = db;
    this.writeBatch = new WriteBatch();
    this.hashKeySerde = hashKeySerde;
    this.rangeKeySerde = rangeKeySerde;
    this.valueSerde = valueSerde;
}
/**
 * Get the Commit time Write Batch.
 *
 * @return the commit time write batch.
 */
public WriteBatch getCommitTimeWriteBatch() {
    assert (isOwningHandle());
    final WriteBatch writeBatch = new WriteBatch(getCommitTimeWriteBatch(nativeHandle_));
    return writeBatch;
}
private void restoreAllInternal(final Collection<KeyValue<byte[], byte[]>> records) {
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KeyValue<byte[], byte[]> record : records) {
            if (record.value == null) {
                batch.delete(record.key);
            } else {
                batch.put(record.key, record.value);
            }
        }
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + name, e);
    }
}
@Override
public void putMetadataForTables(Map<TableReference, byte[]> tableRefToMetadata) {
    try (Disposer d = new Disposer();
         ColumnFamily metadataTable = columnFamilies.get(METADATA_TABLE_NAME.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(true));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<TableReference, byte[]> entry : tableRefToMetadata.entrySet()) {
            batch.put(metadataTable.getHandle(),
                    entry.getKey().getQualifiedName().getBytes(Charsets.UTF_8),
                    entry.getValue());
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KeyValue<Bytes, byte[]> entry : entries) {
            Objects.requireNonNull(entry.key, "key cannot be null");
            if (entry.value == null) {
                batch.delete(entry.key.get());
            } else {
                batch.put(entry.key.get(), entry.value);
            }
        }
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error while batch writing to store " + name, e);
    }
}
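In this putAll (and in the restoreAllInternal snippet earlier), a null value acts as a tombstone that deletes the key. A hedged usage sketch; "store" stands in for the RocksDB-backed state store exposing the method above, and the keys are made up:

// Classes used: org.apache.kafka.streams.KeyValue, org.apache.kafka.common.utils.Bytes.
List<KeyValue<Bytes, byte[]>> entries = Arrays.asList(
    KeyValue.pair(Bytes.wrap("a".getBytes(StandardCharsets.UTF_8)),
                  "1".getBytes(StandardCharsets.UTF_8)),      // upsert
    KeyValue.pair(Bytes.wrap("b".getBytes(StandardCharsets.UTF_8)),
                  (byte[]) null));                            // tombstone: deletes "b"
store.putAll(entries);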
@Override
public void put(TableReference tableRef, Map<Cell, byte[]> values, long timestamp) {
    try (Disposer d = new Disposer();
         ColumnFamily table = columnFamilies.get(tableRef.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(writeOptions.fsyncPut()));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<Cell, byte[]> entry : values.entrySet()) {
            byte[] key = RocksDbKeyValueServices.getKey(entry.getKey(), timestamp);
            batch.put(table.getHandle(), key, entry.getValue());
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
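The column-family form of the batch API used in the two AtlasDB snippets reduces to the following minimal pattern. A sketch only: cfHandle is assumed to be a valid ColumnFamilyHandle obtained when the database was opened, and the enclosing method is assumed to declare "throws RocksDBException":

try (WriteBatch batch = new WriteBatch();
     WriteOptions opts = new WriteOptions().setSync(true)) {
    batch.put(cfHandle, "key".getBytes(StandardCharsets.UTF_8),
              "value".getBytes(StandardCharsets.UTF_8));
    db.write(opts, batch);  // atomic; with sync=true, durable once write() returns
}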