Refine search
@SuppressWarnings("unused") public void initDb(List<Integer> list, Options dbOptions) throws Exception { LOG.info("Begin to init rocksDB of {}", rootDir); try { //List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<ColumnFamilyHandle>(); db = RocksDB.open(dbOptions, rootDir); LOG.info("Successfully init rocksDB of {}", rootDir); } finally { if (dbOptions != null) { dbOptions.dispose(); } } }
/**
 * Closes the underlying RocksDB handle, releasing its native resources.
 * Safe to call when the database was never opened ({@code db == null}).
 *
 * <p>Fix: the log messages previously said "rocketDb" — a typo for RocksDB.
 */
@Override
public void cleanup() {
    LOG.info("Begin to close rocksDB of {}", rootDir);
    if (db != null) {
        db.close();
    }
    LOG.info("Successfully closed rocksDB of {}", rootDir);
}
/**
 * Checks whether the given user key is present in this state.
 *
 * @param userKey the key to look up
 * @return {@code true} if a value is stored under the key
 * @throws IOException      on serialization failure
 * @throws RocksDBException on a database read failure
 */
@Override
public boolean contains(UK userKey) throws IOException, RocksDBException {
    final byte[] serializedKey =
            serializeCurrentKeyWithGroupAndNamespacePlusUserKey(userKey, userKeySerializer);
    return backend.db.get(columnFamily, serializedKey) != null;
}
public static RocksDB create(Map conf, String rocksDbDir, int ttlTimeSec) throws IOException { Options options = getOptions(conf); try { RocksDB rocksDb = ttlTimeSec > 0 ? TtlDB.open(options, rocksDbDir, ttlTimeSec, false) : RocksDB.open(options, rocksDbDir); LOG.info("Finished loading RocksDB"); // enable compaction rocksDb.compactRange(); return rocksDb; } catch (RocksDBException e) { throw new IOException("Failed to initialize RocksDb.", e); } }
/**
 * Stores {@code val} under {@code key}; a {@code null} value is treated as a delete.
 * Holds the read lock of {@code resetDbLock} for the whole operation so a concurrent
 * reset cannot swap the underlying db handle mid-write.
 */
@Override
public void put(byte[] key, byte[] val) {
    resetDbLock.readLock().lock();
    try {
        if (logger.isTraceEnabled())
            logger.trace("~> RocksDbDataSource.put(): " + name + ", key: " + toHexString(key) + ", " + (val == null ? "null" : val.length));
        // null value acts as a deletion marker, mirroring Map semantics
        if (val != null) {
            db.put(key, val);
        } else {
            db.delete(key);
        }
        if (logger.isTraceEnabled())
            logger.trace("<~ RocksDbDataSource.put(): " + name + ", key: " + toHexString(key) + ", " + (val == null ? "null" : val.length));
    } catch (RocksDBException e) {
        logger.error("Failed to put into db '{}'", name, e);
        hintOnTooManyOpenFiles(e);
        throw new RuntimeException(e);
    } finally {
        resetDbLock.readLock().unlock();
    }
}
/**
 * Opens a RocksDB at {@code rocksDbDir} together with all column families already present
 * on disk, filling {@code columnFamilyHandleMap} with name -> handle entries. When
 * {@code ttlTimeSec} is positive, a TTL-enabled database is opened and the same TTL is
 * applied to every column family.
 *
 * NOTE(review): {@code new String(descriptor.columnFamilyName())} decodes with the platform
 * default charset; names written on a machine with a different default encoding may not
 * round-trip — consider an explicit UTF-8 charset.
 * NOTE(review): {@code dbOptions} is not closed when open fails — presumably owned by the
 * caller; confirm.
 */
public static RocksDB createWithColumnFamily(Map conf, String rocksDbDir, final Map<String, ColumnFamilyHandle> columnFamilyHandleMap, int ttlTimeSec) throws IOException {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = getExistingColumnFamilyDesc(conf, rocksDbDir);
    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    DBOptions dbOptions = getDBOptions(conf);
    try {
        RocksDB rocksDb = ttlTimeSec > 0
            ? TtlDB.open(
                dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles,
                getTtlValues(ttlTimeSec, columnFamilyDescriptors), false)
            : RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles);
        int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size());
        // skip default column: index 0 of the descriptor list is the default family,
        // which is registered via getDefaultColumnFamily() below
        columnFamilyHandleMap.put(DEFAULT_COLUMN_FAMILY, rocksDb.getDefaultColumnFamily());
        for (int i = 1; i < n; i++) {
            ColumnFamilyDescriptor descriptor = columnFamilyDescriptors.get(i);
            columnFamilyHandleMap.put(new String(descriptor.columnFamilyName()), columnFamilyHandles.get(i));
        }
        LOG.info("Finished loading RocksDB with existing column family={}, dbPath={}, ttlSec={}", columnFamilyHandleMap.keySet(), rocksDbDir, ttlTimeSec);
        // enable compaction
        rocksDb.compactRange();
        return rocksDb;
    } catch (RocksDBException e) {
        throw new IOException("Failed to initialize RocksDb.", e);
    }
}
/**
 * Returns all keys stored in this column family.
 *
 * <p>Fix: the {@code RocksIterator} is a native resource and was previously leaked; it is
 * now released via try-with-resources.
 *
 * @return a mutable collection holding every deserialized key
 */
@Override
public Collection<K> listKeys() {
    Collection<K> keys = new ArrayList<>();
    try (RocksIterator itr = rocksDb.newIterator(columnFamily)) {
        for (itr.seekToFirst(); itr.isValid(); itr.next()) {
            keys.add((K) serializer.deserialize(itr.key()));
        }
    }
    return keys;
}
/**
 * Returns the locations recorded for the block with the given id.
 *
 * <p>Fix: the {@code ReadOptions} and {@code RocksIterator} are native resources and were
 * previously leaked; both are now released via try-with-resources. Parse failures are still
 * surfaced as {@link RuntimeException} to keep the interface unchanged.
 *
 * @param id the block id, used as the iteration prefix
 * @return the parsed block locations; empty if none are stored
 */
@Override
public List<BlockLocation> getLocations(long id) {
    List<BlockLocation> locations = new ArrayList<>();
    try (ReadOptions readOptions = new ReadOptions().setPrefixSameAsStart(true);
         RocksIterator iter = mDb.newIterator(mBlockLocationsColumn, readOptions)) {
        for (iter.seek(Longs.toByteArray(id)); iter.isValid(); iter.next()) {
            try {
                locations.add(BlockLocation.parseFrom(iter.value()));
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
    return locations;
}
RocksDB.loadLibrary(); boolean createIfMissing = ObjectReader.getBoolean(config.get(DaemonConfig.STORM_ROCKSDB_CREATE_IF_MISSING), false); db = RocksDB.open(options, path); } catch (RocksDBException e) { String message = "Error opening RockDB database";
/**
 * Serializes {@code value} and stores it under {@code key}.
 * Failures are logged and swallowed — this cache write is best-effort by design.
 *
 * NOTE(review): {@code key.getBytes()} uses the platform default charset; keys written
 * under a different default encoding will not match on lookup — consider an explicit
 * UTF-8 charset.
 */
@Override
public void put(String key, Object value) {
    byte[] data = serialize(value);
    try {
        db.put(key.getBytes(), data);
    } catch (Exception e) {
        LOG.error("Failed to put key into cache, " + key, e);
    }
}
/**
 * Returns whether the given directory has at least one child edge.
 *
 * <p>Fix: the {@code RocksIterator} is a native resource and was previously leaked; it is
 * now released via try-with-resources.
 *
 * @param inode the directory to check
 * @return {@code true} if an edge keyed by the directory id exists
 */
@Override
public boolean hasChildren(InodeDirectoryView inode) {
    try (RocksIterator iter = mDb.newIterator(mEdgesColumn, mReadPrefixSameAsStart)) {
        iter.seek(Longs.toByteArray(inode.getId()));
        return iter.isValid();
    }
}
setCurrentNamespace(source); final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace(); final byte[] valueBytes = backend.db.get(columnFamily, sourceKey); backend.db.delete(columnFamily, writeOptions, sourceKey); final byte[] targetValueBytes = backend.db.get(columnFamily, targetKey); backend.db.put(columnFamily, writeOptions, targetKey, dataOutputView.getCopyOfBuffer());
familyOptions.setNumLevels(levelNum); familyOptions.setLevelZeroFileNumCompactionTrigger(compactionTriggerNum); List<byte[]> families = RocksDB.listColumnFamilies(options, dbPath); List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>(); if (families != null) { db = RocksDB.open(dbOptions, dbPath, columnFamilyDescriptors, columnFamilyHandles); } else { db = RocksDB.open(options, dbPath); db.close();
public void build(ILookupTable srcLookupTable) { File dbFolder = new File(dbPath); if (dbFolder.exists()) { logger.info("remove rocksdb folder:{} to rebuild table cache:{}", dbPath, tableDesc.getIdentity()); FileUtils.deleteQuietly(dbFolder); } else { logger.info("create new rocksdb folder:{} for table cache:{}", dbPath, tableDesc.getIdentity()); dbFolder.mkdirs(); } logger.info("start to build lookup table:{} to rocks db:{}", tableDesc.getIdentity(), dbPath); try (RocksDB rocksDB = RocksDB.open(options, dbPath)) { // todo use batch may improve write performance for (String[] row : srcLookupTable) { KV kv = encoder.encode(row); rocksDB.put(kv.getKey(), kv.getValue()); } } catch (RocksDBException e) { logger.error("error when put data to rocksDB", e); throw new RuntimeException("error when write data to rocks db", e); } logger.info("source table:{} has been written to rocks db:{}", tableDesc.getIdentity(), dbPath); } }
handler2 = handlers.get(2); } else { handler1 = db.createColumnFamily(new ColumnFamilyDescriptor("test1".getBytes())); handler2 = db.createColumnFamily(new ColumnFamilyDescriptor("test2".getBytes())); db.compactRange(); LOG.info("Compaction!"); db.put(handler1, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue1 + i).getBytes()); db.put(handler2, String.valueOf(i % 1000).getBytes(), String.valueOf(startValue2 + i).getBytes()); if (isFlush && flushWaitTime <= System.currentTimeMillis()) { db.flush(new FlushOptions()); if (isCheckpoint) { cp.createCheckpoint(cpPath + "/" + i);
/**
 * Merges all values stored under {@code sources} into {@code target} using RocksDB's merge
 * operator, then removes the source entries.
 *
 * Each source is read and deleted before its value is merged into the target; the
 * read/delete/merge sequence is not atomic, so concurrent access during a merge could
 * observe a partially moved namespace.
 */
@Override
public void mergeNamespaces(N target, Collection<N> sources) {
    if (sources == null || sources.isEmpty()) {
        return;
    }
    try {
        // create the target full-binary-key
        setCurrentNamespace(target);
        final byte[] targetKey = serializeCurrentKeyWithGroupAndNamespace();
        // merge the sources to the target
        for (N source : sources) {
            if (source != null) {
                setCurrentNamespace(source);
                final byte[] sourceKey = serializeCurrentKeyWithGroupAndNamespace();
                byte[] valueBytes = backend.db.get(columnFamily, sourceKey);
                // delete first so the source entry is gone even if it held no value
                backend.db.delete(columnFamily, writeOptions, sourceKey);
                if (valueBytes != null) {
                    backend.db.merge(columnFamily, writeOptions, targetKey, valueBytes);
                }
            }
        }
    } catch (Exception e) {
        throw new FlinkRuntimeException("Error while merging state in RocksDB", e);
    }
}
/**
 * Returns an iterator over every block stored in the block-meta column.
 *
 * NOTE(review): the {@code RocksIterator} and the {@code ReadOptions} created here are
 * native resources that are never closed, and the returned Iterator gives callers no way
 * to release them — each call leaks until finalization; consider a closeable iterator.
 */
@Override
public Iterator<Block> iterator() {
    RocksIterator iter = mDb.newIterator(mBlockMetaColumn, new ReadOptions().setPrefixSameAsStart(true));
    iter.seekToFirst();
    return new Iterator<Block>() {
        @Override
        public boolean hasNext() {
            return iter.isValid();
        }

        @Override
        public Block next() {
            try {
                // key holds the block id, value the serialized BlockMeta
                return new Block(Longs.fromByteArray(iter.key()), BlockMeta.parseFrom(iter.value()));
            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                // always advance, even when parsing fails
                iter.next();
            }
        }
    };
}
/**
 * Wraps a fresh native iterator over the given column family in a RocksIteratorWrapper.
 * The caller owns the returned wrapper and is responsible for closing it.
 */
static RocksIteratorWrapper getRocksIterator(
    RocksDB db, ColumnFamilyHandle columnFamilyHandle) {
    return new RocksIteratorWrapper(db.newIterator(columnFamilyHandle));
}
/**
 * Returns a lazily-evaluated iterable over the ids of the given inode's children.
 *
 * NOTE(review): the {@code RocksIterator} created here is a native resource that is never
 * closed and callers cannot release it — each call leaks until finalization; consider a
 * closeable iterator. Also note the returned Iterable captures ONE shared iterator, so it
 * can only be traversed once.
 */
@Override
public Iterable<Long> getChildIds(Long inodeId) {
    RocksIterator iter = mDb.newIterator(mEdgesColumn, mReadPrefixSameAsStart);
    iter.seek(Longs.toByteArray(inodeId));
    return () -> new Iterator<Long>() {
        @Override
        public boolean hasNext() {
            return iter.isValid();
        }

        @Override
        public Long next() {
            try {
                // the edge value stores the child inode id
                return Longs.fromByteArray(iter.value());
            } catch (Exception e) {
                throw new RuntimeException(e);
            } finally {
                // always advance, even on failure
                iter.next();
            }
        }
    };
}