public void flush() throws RocksDBException { if (options != null) { db.write(options, batch); } else { // use the default WriteOptions, if wasn't provided. try (WriteOptions writeOptions = new WriteOptions()) { db.write(writeOptions, batch); } } batch.clear(); }
/**
 * Serializes all entries of {@code batch} and writes them atomically via a
 * RocksDB {@link WriteBatch} into {@code columnFamily}.
 *
 * <p>Fix: the original leaked both the {@code WriteBatch} and the
 * {@code WriteOptions} — they wrap native RocksDB memory and must be closed.
 * Both are now managed with try-with-resources.
 *
 * @param batch entries to persist
 * @throws IOException if the underlying RocksDB write fails
 */
@Override
public void putBatch(Map<K, V> batch) throws IOException {
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(
                columnFamily,
                serializer.serialize(entry.getKey()),
                serializer.serialize(entry.getValue()));
        }
        rocksDb.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        throw new IOException(String.format("Failed to put batch=%s", batch), e);
    }
}
/**
 * Applies {@code rows} to the database as one atomic batch: a null value
 * means "delete this key", anything else is an upsert.
 *
 * <p>Runs under the read side of {@code resetDbLock} so a concurrent reset
 * cannot swap the database out from under the write.
 *
 * @param rows key/value pairs to apply; null values mark deletions
 */
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled())
            logger.trace("~> RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        try {
            try (WriteBatch updates = new WriteBatch();
                 WriteOptions options = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> row : rows.entrySet()) {
                    if (row.getValue() != null) {
                        updates.put(row.getKey(), row.getValue());
                    } else {
                        // Null value is the deletion marker.
                        updates.remove(row.getKey());
                    }
                }
                db.write(options, updates);
            }
            if (logger.isTraceEnabled())
                logger.trace("<~ RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
writeOpts = new WriteOptions(); writeBatch = new WriteBatch();
/**
 * Serializes each entry of {@code map} and writes the whole set atomically
 * through a RocksDB {@link WriteBatch}. Entries with a blank key or a
 * null/empty serialized value are silently skipped.
 *
 * <p>Failures are logged rather than rethrown — the original best-effort
 * contract is preserved.
 *
 * @param map entries to persist; keys are stored as UTF-8 bytes
 */
@Override
public void putBatch(Map<String, Object> map) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            byte[] data = serialize(entry.getValue());
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            // Fix: encode keys as UTF-8 explicitly. The bare getBytes() used
            // the platform-default charset, making stored keys depend on the
            // machine's locale settings. The checked UnsupportedEncodingException
            // is covered by the existing catch (Exception) below.
            writeBatch.put(key.getBytes("UTF-8"), data);
        }
        db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        // WriteOptions/WriteBatch wrap native memory; always release them.
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
}
/**
 * Stores {@code value} as a JSON string under {@code key}, synchronously
 * flushed to disk before returning.
 *
 * <p>Fix: the original set both {@code sync(true)} and
 * {@code disableWAL(true)}, which is contradictory — sync forces an fsync of
 * the write-ahead log that disableWAL removes, and recent RocksDB versions
 * reject such writes with InvalidArgument. The WAL is kept enabled so the
 * synchronous, durable write actually holds.
 *
 * @param key   storage key, encoded as UTF-8
 * @param value object serialized to JSON before storing
 * @throws FailStoreException if serialization or the RocksDB write fails
 */
@Override
public void put(String key, Object value) throws FailStoreException {
    String valueString = JSON.toJSONString(value);
    WriteOptions writeOpts = new WriteOptions();
    try {
        writeOpts.setSync(true);
        db.put(writeOpts, key.getBytes("UTF-8"), valueString.getBytes("UTF-8"));
    } catch (Exception e) {
        throw new FailStoreException(e);
    } finally {
        // Native resource; always release.
        writeOpts.dispose();
    }
}
/**
 * Stores {@code value} as a JSON string under {@code key}, synchronously
 * flushed to disk before returning.
 *
 * <p>Fix: the original set both {@code sync(true)} and
 * {@code disableWAL(true)}, which is contradictory — sync forces an fsync of
 * the write-ahead log that disableWAL removes, and recent RocksDB versions
 * reject such writes with InvalidArgument. The WAL is kept enabled so the
 * synchronous, durable write actually holds.
 *
 * @param key   storage key, encoded as UTF-8
 * @param value object serialized to JSON before storing
 * @throws FailStoreException if serialization or the RocksDB write fails
 */
@Override
public void put(String key, Object value) throws FailStoreException {
    String valueString = JSON.toJSONString(value);
    WriteOptions writeOpts = new WriteOptions();
    try {
        writeOpts.setSync(true);
        db.put(writeOpts, key.getBytes("UTF-8"), valueString.getBytes("UTF-8"));
    } catch (Exception e) {
        throw new FailStoreException(e);
    } finally {
        // Native resource; always release.
        writeOpts.dispose();
    }
}
/**
 * Deletes every metric matched by {@code filter} in a single batched write.
 *
 * @param filter selection of metrics to remove
 * @throws MetricException if the batched delete cannot be applied
 */
void deleteMetrics(FilterOptions filter) throws MetricException {
    try (WriteBatch deletions = new WriteBatch();
         WriteOptions options = new WriteOptions()) {
        // Collect the raw keys to drop; returning true keeps the scan going.
        scanRaw(filter, (RocksDbKey key, RocksDbValue value) -> {
            deletions.remove(key.getRaw());
            return true;
        });
        if (deletions.count() > 0) {
            LOG.info("Deleting {} metrics", deletions.count());
            try {
                db.write(options, deletions);
            } catch (Exception e) {
                String message = "Failed delete metrics";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
/**
 * Creates and initializes a rocks block store.
 *
 * @param args inode store arguments
 */
public RocksInodeStore(InodeStoreArgs args) {
    // Resolve configuration first; the base directory comes from it.
    mConf = args.getConf();
    mBaseDir = mConf.get(PropertyKey.MASTER_METASTORE_DIR);
    // NOTE(review): loadLibrary() presumably must run before any native
    // RocksDB objects (WriteOptions/ReadOptions below) are constructed —
    // confirm before reordering these statements.
    RocksDB.loadLibrary();
    // Pre-built option objects reused for all WAL-less writes and
    // prefix-bounded reads.
    mDisableWAL = new WriteOptions().setDisableWAL(true);
    mReadPrefixSameAsStart = new ReadOptions().setPrefixSameAsStart(true);
    try {
        initDb();
    } catch (RocksDBException e) {
        // Surface initialization failures as unchecked; callers of this
        // constructor do not handle RocksDBException.
        throw new RuntimeException(e);
    }
}
WriteOptions writeOps = new WriteOptions()) {
this.kvStateInformation = new LinkedHashMap<>(); this.writeOptions = new WriteOptions().setDisableWAL(true);
/**
 * Opens a session backed by a fresh {@link WriteBatch}. The write-ahead log
 * is disabled only when bulk-load mode is configured.
 *
 * @param conf configuration supplying the bulk-load flag
 */
public StdSession(HugeConfig conf) {
    this.closed = false;
    this.batch = new WriteBatch();
    this.writeOptions = new WriteOptions();
    // Bulk-load mode trades WAL durability for import throughput.
    boolean bulkloadMode = conf.get(RocksDBOptions.BULKLOAD_MODE);
    this.writeOptions.setDisableWAL(bulkloadMode);
}
/**
 * Returns the lazily-created options for data writes; the write-ahead log
 * is explicitly kept enabled for them.
 */
private WriteOptions dataWriteOptions() {
    if (dataWriteOptions == null) {
        dataWriteOptions = new WriteOptions().setDisableWAL(false);
    }
    return dataWriteOptions;
}
/**
 * Opens a throwaway RocksDB instance in a fresh temp directory, tuned as a
 * pure scratch map: write-ahead log off, no fsync on writes.
 */
public RocksdbMap() {
    dbDir = Files.createTempDir();
    try {
        this.db = RocksDB.open(dbDir.getAbsolutePath());
        writeOptions = new WriteOptions().setDisableWAL(true).setSync(false);
    } catch (RocksDBException e) {
        // No recovery possible if the scratch store cannot open.
        throw new RuntimeException(e);
    }
}
/**
 * Builds the RocksDB WriteOptions used for all writes. RocksDB serves here
 * as an in-process cache with disk spillover, so recovery durability is
 * irrelevant:
 * * the Write-Ahead-Log is disabled (2.8x performance improvement)
 * * sync is disabled — writes return without waiting for a disk flush
 *   (50x or more improvement)
 */
private WriteOptions createWriteOptions() {
    final WriteOptions options = new WriteOptions();
    options.setDisableWAL(true);
    options.setSync(false);
    return options;
}
/**
 * Return the WriteOptions that will be used during {@link #commit()}.
 *
 * @return the WriteOptions that will be used
 */
public WriteOptions getWriteOptions() {
    assert isOwningHandle();
    // Wrap the native handle's options in a fresh Java-side object.
    return new WriteOptions(getWriteOptions(nativeHandle_));
}
public void flush() throws RocksDBException { if (options != null) { db.write(options, batch); } else { // use the default WriteOptions, if wasn't provided. try (WriteOptions writeOptions = new WriteOptions()) { db.write(writeOptions, batch); } } batch.clear(); }
/**
 * Creates an open session with an empty write batch; WAL usage is driven by
 * the configured bulk-load mode.
 *
 * @param conf configuration to read the bulk-load flag from
 */
public StdSession(HugeConfig conf) {
    this.closed = false;
    // WAL off during bulk loads: durability is sacrificed for speed.
    boolean walDisabled = conf.get(RocksDBOptions.BULKLOAD_MODE);
    this.batch = new WriteBatch();
    this.writeOptions = new WriteOptions();
    this.writeOptions.setDisableWAL(walDisabled);
}
/**
 * Lazily opens the database on first use, initializing the shared write and
 * read option objects alongside it (WAL/sync off; cached, unchecked reads).
 */
private RocksDB db() {
    if (_db != null) {
        return _db;
    }
    writeOptions = new WriteOptions();
    writeOptions.setDisableWAL(true);
    writeOptions.setSync(false);
    readOptions = new ReadOptions();
    readOptions.setFillCache(true).setVerifyChecksums(false);
    _db = _dbSupplier.get();
    return _db;
}
/**
 * Returns the database, opening it on first call. The shared option objects
 * are set up at the same time: WAL and sync disabled for writes; reads skip
 * both the block cache and checksum verification.
 */
private RocksDB db() {
    if (_db == null) {
        final WriteOptions wo = new WriteOptions();
        wo.setDisableWAL(true);
        wo.setSync(false);
        writeOptions = wo;
        final ReadOptions ro = new ReadOptions();
        ro.setFillCache(false).setVerifyChecksums(false);
        readOptions = ro;
        _db = _dbSupplier.get();
    }
    return _db;
}