public void put(
        @Nonnull ColumnFamilyHandle handle, @Nonnull byte[] key, @Nonnull byte[] value)
        throws RocksDBException {
    batch.put(handle, key, value);
    if (batch.count() == capacity) {
        flush();
    }
}
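A minimal sketch of the flush() companion implied by the capacity-triggered put() above; the field names (db, writeOptions, batch) are assumptions for illustration, not taken from the original class:

public void flush() throws RocksDBException {
    if (batch.count() > 0) {
        db.write(writeOptions, batch); // apply all buffered puts atomically
        batch.clear();                 // reuse the same WriteBatch for the next chunk
    }
}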
@Override
public void addChild(Long parentId, String childName, Long childId) {
    try {
        mBatch.put(mEdgesColumn, RocksUtils.toByteArray(parentId, childName),
                Longs.toByteArray(childId));
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
@Override
public void putBatch(Map<K, V> batch) throws IOException {
    try {
        WriteBatch writeBatch = new WriteBatch();
        for (Map.Entry<K, V> entry : batch.entrySet()) {
            writeBatch.put(
                    columnFamily,
                    serializer.serialize(entry.getKey()),
                    serializer.serialize(entry.getValue()));
        }
        rocksDb.write(new WriteOptions(), writeBatch);
    } catch (RocksDBException e) {
        throw new IOException(String.format("Failed to put batch=%s", batch), e);
    }
}
private void processBatchInsert(TreeMap<RocksDbKey, RocksDbValue> batchMap) throws MetricException {
    try (WriteBatch writeBatch = new WriteBatch()) {
        // take the batched metric data and write to the database
        for (RocksDbKey k : batchMap.keySet()) {
            RocksDbValue v = batchMap.get(k);
            writeBatch.put(k.getRaw(), v.getRaw());
        }
        store.db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        String message = "Failed to store data to RocksDB";
        LOG.error(message, e);
        throw new MetricException(message, e);
    }
}
@Override
public void updateBatch(Map<byte[], byte[]> rows) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled())
            logger.trace("~> RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        try {
            try (WriteBatch batch = new WriteBatch();
                 WriteOptions writeOptions = new WriteOptions()) {
                for (Map.Entry<byte[], byte[]> entry : rows.entrySet()) {
                    if (entry.getValue() == null) {
                        batch.remove(entry.getKey());
                    } else {
                        batch.put(entry.getKey(), entry.getValue());
                    }
                }
                db.write(writeOptions, batch);
            }
            if (logger.isTraceEnabled())
                logger.trace("<~ RocksDbDataSource.updateBatch(): " + name + ", " + rows.size());
        } catch (RocksDBException e) {
            logger.error("Error in batch update on db '{}'", name, e);
            hintOnTooManyOpenFiles(e);
            throw new RuntimeException(e);
        }
    } finally {
        resetDbLock.readLock().unlock();
    }
}
@Override
public void writeInode(MutableInode<?> inode) {
    try {
        mBatch.put(mInodesColumn, Longs.toByteArray(inode.getId()), inode.toProto().toByteArray());
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
// Column-family overload: the first argument (putEntry.getValue()) is a ColumnFamilyHandle.
writeBatch.put(putEntry.getValue(), keyByte, data);
@Override
public void putBatch(Map<String, Object> map) {
    WriteOptions writeOpts = null;
    WriteBatch writeBatch = null;
    try {
        writeOpts = new WriteOptions();
        writeBatch = new WriteBatch();
        for (Entry<String, Object> entry : map.entrySet()) {
            String key = entry.getKey();
            Object value = entry.getValue();
            byte[] data = serialize(value);
            if (StringUtils.isBlank(key) || data == null || data.length == 0) {
                continue;
            }
            byte[] keyByte = key.getBytes();
            writeBatch.put(keyByte, data);
        }
        db.write(writeOpts, writeBatch);
    } catch (Exception e) {
        LOG.error("Failed to putBatch into DB, " + map.keySet(), e);
    } finally {
        if (writeOpts != null) {
            writeOpts.dispose();
        }
        if (writeBatch != null) {
            writeBatch.dispose();
        }
    }
}
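The explicit dispose() calls in the previous example come from older RocksDB Java bindings; in current versions WriteOptions and WriteBatch are AutoCloseable, so the same cleanup is usually written with try-with-resources. A minimal sketch, assuming the same db field and hypothetical keyBytes/valueBytes arrays:

try (WriteOptions writeOpts = new WriteOptions();
     WriteBatch writeBatch = new WriteBatch()) {
    writeBatch.put(keyBytes, valueBytes); // buffer one or more puts
    db.write(writeOpts, writeBatch);      // apply them in a single atomic write
} catch (RocksDBException e) {
    // handle or rethrow
}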
/**
 * Add a KV record to a table.
 */
@Override
public void put(String table, byte[] key, byte[] value) {
    this.batch.put(cf(table), key, value);
}
@Override
public void put(byte[] key, byte[] value) throws IOException {
    try {
        writeBatch.put(key, value);
    } catch (RocksDBException e) {
        throw new IOException("Failed to flush RocksDB batch", e);
    }
}
@Override
public void put(H hashKey, R rangeKey, V value) {
    try {
        writeBatch.put(
                Util.combine(hashKeySerde, rangeKeySerde, hashKey, rangeKey),
                valueSerde.toBytes(value));
    } catch (RocksDBException e) {
        throw new RuntimeException(e);
    }
}
@Override
public void put(byte[] key, byte[] value) {
    key.getClass();   // implicit null check: throws NullPointerException if key is null
    value.getClass(); // implicit null check: throws NullPointerException if value is null
    Preconditions.checkState(!this.closed, "closed");
    this.cursorTracker.poll();
    if (this.writeBatch != null) {
        assert RocksDBUtil.isInitialized(this.writeBatch);
        synchronized (this.writeBatch) {
            this.writeBatch.put(key, value);
        }
    } else {
        assert RocksDBUtil.isInitialized(this.db);
        try {
            this.db.put(key, value);
        } catch (RocksDBException e) {
            throw new RuntimeException("RocksDB error", e);
        }
    }
}
private void restoreAllInternal(final Collection<KeyValue<byte[], byte[]>> records) {
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KeyValue<byte[], byte[]> record : records) {
            if (record.value == null) {
                batch.delete(record.key);
            } else {
                batch.put(record.key, record.value);
            }
        }
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error restoring batch to store " + name, e);
    }
}
@Override
public void putMetadataForTables(Map<TableReference, byte[]> tableRefToMetadata) {
    try (Disposer d = new Disposer();
         ColumnFamily metadataTable = columnFamilies.get(METADATA_TABLE_NAME.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(true));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<TableReference, byte[]> entry : tableRefToMetadata.entrySet()) {
            batch.put(metadataTable.getHandle(),
                    entry.getKey().getQualifiedName().getBytes(Charsets.UTF_8),
                    entry.getValue());
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
@Override
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    try (final WriteBatch batch = new WriteBatch()) {
        for (final KeyValue<Bytes, byte[]> entry : entries) {
            Objects.requireNonNull(entry.key, "key cannot be null");
            if (entry.value == null) {
                batch.delete(entry.key.get());
            } else {
                batch.put(entry.key.get(), entry.value);
            }
        }
        write(batch);
    } catch (final RocksDBException e) {
        throw new ProcessorStateException("Error while batch writing to store " + name, e);
    }
}
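Both restoreAllInternal and putAll above delegate to a write(batch) helper that is not shown; a minimal sketch of what such a helper presumably does, assuming db and wOptions fields on the surrounding store class:

void write(final WriteBatch batch) throws RocksDBException {
    db.write(wOptions, batch); // single atomic write of all buffered puts and deletes
}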
@Override
public void put(TableReference tableRef, Map<Cell, byte[]> values, long timestamp) {
    try (Disposer d = new Disposer();
         ColumnFamily table = columnFamilies.get(tableRef.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(writeOptions.fsyncPut()));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<Cell, byte[]> entry : values.entrySet()) {
            byte[] key = RocksDbKeyValueServices.getKey(entry.getKey(), timestamp);
            batch.put(table.getHandle(), key, entry.getValue());
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
@Override
public void putWithTimestamps(TableReference tableRef, Multimap<Cell, Value> cellValues) {
    try (Disposer d = new Disposer();
         ColumnFamily table = columnFamilies.get(tableRef.getQualifiedName())) {
        WriteOptions options = d.register(new WriteOptions().setSync(writeOptions.fsyncPut()));
        WriteBatch batch = d.register(new WriteBatch());
        for (Entry<Cell, Value> entry : cellValues.entries()) {
            Value value = entry.getValue();
            byte[] key = RocksDbKeyValueServices.getKey(entry.getKey(), value.getTimestamp());
            batch.put(table.getHandle(), key, value.getContents());
        }
        getDb().write(options, batch);
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
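All of the examples above follow the same shape: buffer puts (and deletes) in a WriteBatch, then apply them with a single RocksDB.write() call, which is atomic. A self-contained sketch of that pattern; the class name and the database path are hypothetical, chosen only for illustration:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public final class WriteBatchExample {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        // "/tmp/rocksdb-batch-demo" is a hypothetical path for this example only.
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/rocksdb-batch-demo");
             WriteBatch batch = new WriteBatch();
             WriteOptions writeOpts = new WriteOptions()) {
            batch.put("k1".getBytes(), "v1".getBytes());
            batch.put("k2".getBytes(), "v2".getBytes());
            batch.delete("k3".getBytes());
            db.write(writeOpts, batch); // all three operations are applied atomically
        }
    }
}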