/**
 * Applies every operation buffered in {@code mBatch} to the database in a
 * single atomic write.
 *
 * <p>NOTE(review): {@code mDisableWAL} is presumably a {@code WriteOptions}
 * instance configured to skip the write-ahead log — confirm at the field's
 * initialization site.
 */
@Override public void commit() {
    try {
        mDb.write(mDisableWAL, mBatch);
    } catch (RocksDBException e) {
        // Wrapped unchecked so callers of commit() need not declare it.
        throw new RuntimeException(e);
    }
}
} // closes the enclosing class (declaration outside this view)
private void processBatchInsert(TreeMap<RocksDbKey, RocksDbValue> batchMap) throws MetricException { try (WriteBatch writeBatch = new WriteBatch()) { // take the batched metric data and write to the database for (RocksDbKey k : batchMap.keySet()) { RocksDbValue v = batchMap.get(k); writeBatch.put(k.getRaw(), v.getRaw()); } store.db.write(writeOpts, writeBatch); } catch (Exception e) { String message = "Failed to store data to RocksDB"; LOG.error(message, e); throw new MetricException(message, e); } }
/**
 * Serializes and writes all entries of {@code batch} atomically.
 *
 * @param batch the key/value pairs to store
 * @throws IOException if serialization or the RocksDB write fails
 */
@Override
public void putBatch(Map<K, V> batch) throws IOException {
    // WriteBatch and WriteOptions wrap native (off-heap) resources; the
    // original leaked both by never closing them. try-with-resources
    // releases them deterministically even when the write throws.
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOptions = new WriteOptions()) {
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(
                columnFamily,
                serializer.serialize(entry.getKey()),
                serializer.serialize(entry.getValue()));
        }
        rocksDb.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        throw new IOException(String.format("Failed to put batch=%s", batch), e);
    }
}
/**
 * Applies a batch of updates: a {@code null} value deletes the key, any
 * other value upserts it. The whole batch is written atomically.
 *
 * @param rows key/value pairs to apply; values may be {@code null}
 */
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled()) {
            logger.trace("~> RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        }
        try {
            try (WriteBatch writeBatch = new WriteBatch();
                 WriteOptions options = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> row : rows.entrySet()) {
                    byte[] value = row.getValue();
                    if (value == null) {
                        writeBatch.remove(row.getKey());
                    } else {
                        writeBatch.put(row.getKey(), value);
                    }
                }
                db.write(options, writeBatch);
            }
            if (logger.isTraceEnabled()) {
                logger.trace("<~ RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
            }
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
/**
 * Serializes and writes all map entries in one batch. Entries with a blank
 * key or an empty serialized payload are skipped silently.
 *
 * <p>NOTE(review): failures are logged but NOT propagated — callers cannot
 * tell a failed batch from a successful one. Confirm this best-effort
 * behavior is intentional before changing it.
 */
@Override public void putBatch(Map<String, Object> map) { WriteOptions writeOpts = null; WriteBatch writeBatch = null; try { writeOpts = new WriteOptions(); writeBatch = new WriteBatch(); for (Entry<String, Object> entry : map.entrySet()) { String key = entry.getKey(); Object value = entry.getValue(); byte[] data = serialize(value); if (StringUtils.isBlank(key) || data == null || data.length == 0) { continue; } // NOTE(review): getBytes() uses the platform default charset — keys
// written on one machine may not match on another. Consider UTF_8, but
// only after confirming existing stored keys would remain readable.
byte[] keyByte = key.getBytes(); writeBatch.put(keyByte, data); } db.write(writeOpts, writeBatch); } catch (Exception e) { LOG.error("Failed to putBatch into DB, " + map.keySet(), e); } finally { // dispose() is the pre-close() RocksDB API; kept as-is since the
// RocksDB version in use here is unknown (close() may not exist).
if (writeOpts != null) { writeOpts.dispose(); } if (writeBatch != null) { writeBatch.dispose(); } } }
/**
 * Deletes every metric matching {@code filter} in one atomic batch.
 *
 * @param filter selects the metrics to remove
 * @throws MetricException if the batched delete cannot be written
 */
void deleteMetrics(FilterOptions filter) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch();
         WriteOptions writeOps = new WriteOptions()) {
        // Accumulate a delete for every matching key; the scan callback
        // returns true to keep iterating.
        scanRaw(filter, (RocksDbKey key, RocksDbValue value) -> {
            writeBatch.remove(key.getRaw());
            return true;
        });
        if (writeBatch.count() > 0) {
            LOG.info("Deleting {} metrics", writeBatch.count());
            try {
                db.write(writeOps, writeBatch);
            } catch (Exception e) {
                String message = "Failed delete metrics";
                LOG.error(message, e);
                if (this.failureMeter != null) {
                    this.failureMeter.mark();
                }
                throw new MetricException(message, e);
            }
        }
    }
}
/**
 * Removes every entry whose key starts with the current key-group/namespace
 * prefix, batching the deletes into a single write. Failures are logged and
 * swallowed, matching the original best-effort contract.
 */
@Override
public void clear() {
    try {
        try (RocksIteratorWrapper iterator =
                 RocksDBKeyedStateBackend.getRocksIterator(backend.db, columnFamily);
             WriteBatch writeBatch = new WriteBatch(128)) {
            final byte[] keyPrefixBytes = serializeCurrentKeyWithGroupAndNamespace();
            // Scan forward from the prefix; stop at the first key outside it.
            for (iterator.seek(keyPrefixBytes); iterator.isValid(); iterator.next()) {
                final byte[] keyBytes = iterator.key();
                if (!startWithKeyPrefix(keyPrefixBytes, keyBytes)) {
                    break;
                }
                writeBatch.remove(columnFamily, keyBytes);
            }
            backend.db.write(writeOptions, writeBatch);
        }
    } catch (Exception e) {
        LOG.warn("Error while cleaning the state.", e);
    }
}
// Fragment: the enclosing method's signature and the tail of this catch
// block lie outside this view, so only comments are added here.
// Flushes the batched metadata-string deletions accumulated above.
LOG.info("Deleting {} metadata strings", writeBatch.count());
try {
    db.write(writeOps, writeBatch);
} catch (Exception e) {
    String message = "Failed delete metadata strings";
/** * Commit all updates(put/delete) to DB */ @Override public Integer commit() { int count = this.batch.count(); if (count <= 0) { return 0; } try { rocksdb().write(this.writeOptions, this.batch); } catch (RocksDBException e) { //this.batch.rollbackToSavePoint(); throw new BackendException(e); } // Clear batch if write() successfully (retained if failed) this.batch.clear(); return count; }
/**
 * Applies {@code batch} to the database using the shared write options.
 *
 * @param batch the batch of updates to apply
 * @throws RocksDBException if the underlying write fails
 */
void write(final WriteBatch batch) throws RocksDBException {
    db.write(wOptions, batch);
}
/**
 * Flushes the accumulated write batch to the database.
 *
 * @throws IOException wrapping any RocksDB failure
 */
@Override public void flush() throws IOException {
    try {
        // NOTE(review): optionSync presumably carries sync=true so the write
        // is durable — confirm at the field's initialization site.
        db.write(optionSync, writeBatch);
    } catch (RocksDBException e) {
        throw new IOException("Failed to flush RocksDB batch", e);
    }
}
} // closes the enclosing class (declaration outside this view)
/**
 * Forces pending writes to durable storage by writing an empty batch with
 * the sync write options.
 *
 * @throws IOException wrapping any RocksDB failure
 */
@Override
public void sync() throws IOException {
    try {
        // NOTE(review): optionSync presumably carries sync=true so the empty
        // batch forces a WAL/fs sync — confirm at the field's init site.
        db.write(optionSync, emptyBatch);
    } catch (RocksDBException e) {
        // Was `new IOException(e)`, which surfaces only e.toString(); add a
        // descriptive message, consistent with the sibling flush() wrapper.
        throw new IOException("Failed to sync RocksDB", e);
    }
}
/**
 * Applies the given batch to the store's database.
 *
 * @param batch the batch of updates to execute
 * @throws StateStoreRuntimeException if the underlying write fails
 */
private void executeBatch(WriteBatch batch) {
    try {
        db.write(writeOpts, batch);
    } catch (RocksDBException e) {
        final String message =
            "Error while executing a multi operation from state store " + name;
        throw new StateStoreRuntimeException(message, e);
    }
}
/**
 * Flushes the accumulated write batch to the database.
 *
 * @throws DbException wrapping any RocksDB failure
 */
@Override public void flush() {
    try {
        db.write(writeOptions, writeBatch);
    } catch (RocksDBException e) {
        throw new DbException(e);
    }
}
/**
 * See {@link RocksDB#write(WriteOptions, WriteBatch)}.
 *
 * @param writeOptions options for this write; the instance default is used when {@code null}
 * @param batch the batch of updates to apply
 * @throws RocksDbException if the underlying write fails
 */
public void write(WriteOptions writeOptions, WriteBatch batch) throws RocksDbException {
    final WriteOptions effectiveOptions =
        (writeOptions == null) ? this.writeOptions : writeOptions;
    try {
        rocksDb.write(effectiveOptions, batch);
    } catch (Exception e) {
        // Re-throw our own exception type untouched; wrap anything else.
        if (e instanceof RocksDbException) {
            throw (RocksDbException) e;
        }
        throw new RocksDbException(e);
    }
}
/**
 * Flushes the batch to the database, but only once it has grown to at least
 * {@code WRITE_THRESHOLD} operations; smaller batches are left to accumulate.
 * The write lock serializes flushes against concurrent batch mutation.
 */
private void flush() {
    lock.writeLock().lock();
    try {
        // Guard clause: nothing to do until the threshold is reached.
        if (batch.count() < WRITE_THRESHOLD) {
            return;
        }
        db().write(writeOptions, batch);
        batch.clear();
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    } finally {
        lock.writeLock().unlock();
    }
}
@Override public boolean put(ObjectId commitId, ImmutableList<ObjectId> parentIds) { try (WriteBatchWithIndex batch = new WriteBatchWithIndex(); // RocksDBReference dbRef = dbhandle.getReference(); WriteOptions wo = new WriteOptions()) { wo.setSync(true); boolean updated = put(dbRef, commitId, parentIds, batch); dbRef.db().write(wo, batch); return updated; } catch (RocksDBException e) { throw new RuntimeException(e); } }