/**
 * Opens an existing RocksDB lookup table in read-only mode.
 *
 * @param tableDesc  table schema used to build the row encoder
 * @param keyColumns columns forming the lookup key
 * @param dbPath     filesystem path of the RocksDB directory
 * @throws IllegalStateException if the database cannot be opened (cause preserved)
 */
public RocksDBLookupTable(TableDesc tableDesc, String[] keyColumns, String dbPath) {
    this.options = new Options();
    this.rowEncoder = new RocksDBLookupRowEncoder(tableDesc, keyColumns);
    try {
        this.rocksDB = RocksDB.openReadOnly(options, dbPath);
    } catch (RocksDBException e) {
        // The constructor is failing, so no caller will ever close this instance;
        // release the native Options handle here to avoid a native-memory leak.
        options.close();
        throw new IllegalStateException("cannot open rocks db in path:" + dbPath, e);
    }
}
/**
 * Initializes the database with default options: missing column families
 * and a missing database are both created on demand.
 *
 * @param list column-family ids (or similar) forwarded to the two-arg overload
 * @throws Exception propagated from the underlying initialization
 */
public void initDb(List<Integer> list) throws Exception {
    Options dbOptions = new Options()
            .setCreateMissingColumnFamilies(true)
            .setCreateIfMissing(true);
    initDb(list, dbOptions);
}
boolean createIfMissing = ObjectReader.getBoolean(config.get(DaemonConfig.STORM_ROCKSDB_CREATE_IF_MISSING), false); try (Options options = new Options().setCreateIfMissing(createIfMissing)) {
/**
 * Creates a RocksDB-backed transaction cache for a topology.
 *
 * <p>Write-buffer size and max buffer count come from {@code ConfigExtension},
 * falling back to 1 GB / 3 buffers when unset. The cache directory is reset on
 * every start (ROCKSDB_RESET = true), so no stale state survives a restart.
 *
 * @param context  topology context providing the storm configuration
 * @param cacheDir root directory for the RocksDB cache files
 * @throws RuntimeException wrapping any failure during directory/db init
 */
public RocksDbCacheOperator(TopologyContext context, String cacheDir) {
    this.stormConf = context.getStormConf();
    this.maxFlushSize = ConfigExtension.getTransactionCacheBatchFlushSize(stormConf);

    Options rocksDbOpt = new Options();
    rocksDbOpt.setCreateMissingColumnFamilies(true).setCreateIfMissing(true);
    // Default to a 1 GB write buffer when no cache block size is configured.
    long bufferSize = ConfigExtension.getTransactionCacheBlockSize(stormConf) != null
            ? ConfigExtension.getTransactionCacheBlockSize(stormConf) : SizeUnit.GB;
    rocksDbOpt.setWriteBufferSize(bufferSize);
    int maxBufferNum = ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) != null
            ? ConfigExtension.getTransactionMaxCacheBlockNum(stormConf) : 3;
    rocksDbOpt.setMaxWriteBufferNumber(maxBufferNum);

    // Config for log of RocksDb: keep a single info-log file capped at 1 GB,
    // and only log warnings and above.
    rocksDbOpt.setMaxLogFileSize(SizeUnit.GB);
    rocksDbOpt.setKeepLogFileNum(1);
    rocksDbOpt.setInfoLogLevel(InfoLogLevel.WARN_LEVEL);

    try {
        Map<Object, Object> conf = new HashMap<>();
        conf.put(ROCKSDB_ROOT_DIR, cacheDir);
        // Always start from an empty cache directory.
        conf.put(ROCKSDB_RESET, true);
        initDir(conf);
        initDb(null, rocksDbOpt);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    kryo = new Kryo();
    output = new Output(200, 2000000000);
    input = new Input(1);
    LOG.info("Finished rocksDb cache init: maxFlushSize={}, bufferSize={}, maxBufferNum={}",
            maxFlushSize, bufferSize, maxBufferNum);
}
final Options options = new Options() .optimizeLevelStyleCompaction() .setCreateIfMissing(true)
Options options = new Options(); options.setCreateMissingColumnFamilies(true); options.setCreateIfMissing(true);
public RocksDBLookupBuilder(TableDesc tableDesc, String[] keyColumns, String dbPath) { this.tableDesc = tableDesc; this.encoder = new RocksDBLookupRowEncoder(tableDesc, keyColumns); this.dbPath = dbPath; this.writeBatchSize = 500; this.options = new Options(); options.setCreateIfMissing(true).setWriteBufferSize(8 * SizeUnit.KB).setMaxWriteBufferNumber(3) .setMaxBackgroundCompactions(5).setCompressionType(CompressionType.SNAPPY_COMPRESSION) .setCompactionStyle(CompactionStyle.UNIVERSAL); }
try (Options options = new Options()) {
/**
 * Lists the column-family names of the RocksDB at {@code path}.
 *
 * <p>Returns a set containing only {@code "default"} when the database
 * reports no column families (e.g. it does not exist yet).
 *
 * @param path filesystem path of the RocksDB directory
 * @return the set of column-family names, never empty
 * @throws RocksDBException if the listing fails
 */
public static Set<String> listCFs(String path) throws RocksDBException {
    Set<String> cfs = new HashSet<>();
    // Options is a native resource: close it instead of leaking the handle
    // created inline in the listColumnFamilies() call.
    try (Options options = new Options()) {
        List<byte[]> oldCFs = RocksDB.listColumnFamilies(options, path);
        if (oldCFs.isEmpty()) {
            cfs.add("default");
        } else {
            for (byte[] oldCF : oldCFs) {
                cfs.add(decode(oldCF));
            }
        }
    }
    return cfs;
}
/**
 * Builds the RocksDB options for this fail-store: small (8 KB) write buffers,
 * snappy compression, universal compaction, and a block-based table with a
 * 10-bit bloom filter and a 64 KB block cache.
 *
 * @throws FailStoreException wrapping any failure during option construction
 */
@Override protected void init() throws FailStoreException {
    try {
        options = new Options();
        options.setCreateIfMissing(true)
            .setWriteBufferSize(8 * SizeUnit.KB)
            .setMaxWriteBufferNumber(3)
            .setMaxBackgroundCompactions(10)
            .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
            .setCompactionStyle(CompactionStyle.UNIVERSAL);
        // NOTE(review): the Filter is only reachable through the table config;
        // older RocksJava releases could finalize a GC'd Filter while the DB
        // still used it — confirm the options field keeps it alive.
        Filter bloomFilter = new BloomFilter(10);
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        tableConfig.setBlockCacheSize(64 * SizeUnit.KB)
            .setFilter(bloomFilter)
            .setCacheNumShardBits(6)
            .setBlockSizeDeviation(5)
            .setBlockRestartInterval(10)
            .setCacheIndexAndFilterBlocks(true)
            .setHashIndexAllowCollision(false)
            .setBlockCacheCompressedSize(64 * SizeUnit.KB)
            .setBlockCacheCompressedNumShardBits(10);
        options.setTableFormatConfig(tableConfig);
    } catch (Exception e) {
        // Broad catch is deliberate: any option-building failure is surfaced
        // as a FailStoreException with the original cause preserved.
        throw new FailStoreException(e);
    }
}
/**
 * Builds the RocksDB options for this fail-store: small (8 KB) write buffers,
 * snappy compression, universal compaction, and a block-based table with a
 * 10-bit bloom filter and a 64 KB block cache.
 *
 * @throws FailStoreException wrapping any failure during option construction
 */
@Override protected void init() throws FailStoreException {
    try {
        options = new Options();
        options.setCreateIfMissing(true)
            .setWriteBufferSize(8 * SizeUnit.KB)
            .setMaxWriteBufferNumber(3)
            .setMaxBackgroundCompactions(10)
            .setCompressionType(CompressionType.SNAPPY_COMPRESSION)
            .setCompactionStyle(CompactionStyle.UNIVERSAL);
        // NOTE(review): the Filter is only reachable through the table config;
        // older RocksJava releases could finalize a GC'd Filter while the DB
        // still used it — confirm the options field keeps it alive.
        Filter bloomFilter = new BloomFilter(10);
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        tableConfig.setBlockCacheSize(64 * SizeUnit.KB)
            .setFilter(bloomFilter)
            .setCacheNumShardBits(6)
            .setBlockSizeDeviation(5)
            .setBlockRestartInterval(10)
            .setCacheIndexAndFilterBlocks(true)
            .setHashIndexAllowCollision(false)
            .setBlockCacheCompressedSize(64 * SizeUnit.KB)
            .setBlockCacheCompressedNumShardBits(10);
        options.setTableFormatConfig(tableConfig);
    } catch (Exception e) {
        // Broad catch is deliberate: any option-building failure is surfaced
        // as a FailStoreException with the original cause preserved.
        throw new FailStoreException(e);
    }
}
@Override public Options createOptions(Options currentOptions) { if (currentOptions == null) currentOptions = new Options(); currentOptions.setCreateIfMissing(true); currentOptions.setCreateMissingColumnFamilies(true);
/**
 * Creates an SST-file writer for {@code table} and registers it in the
 * {@code tables} map; the file is written at {@code dataPath/table.sst}.
 *
 * @param table name of the table / SST file to create
 * @throws RocksDBException if opening the SST writer fails
 */
@Override public void createTable(String table) throws RocksDBException {
    // NOTE(review): env and options are not closed here — presumably they must
    // outlive the SstFileWriter; confirm they are released when the writer is
    // finished, otherwise the native handles leak.
    EnvOptions env = new EnvOptions();
    Options options = new Options();
    RocksDBStdSessions.initOptions(this.conf, options, options, options);
    // NOTE: unset merge op due to SIGSEGV when cf.setMergeOperatorName()
    options.setMergeOperatorName("not-exist-merge-op");
    SstFileWriter sst = new SstFileWriter(env, options);
    Path path = Paths.get(this.dataPath, table);
    sst.open(path.toString() + ".sst");
    this.tables.put(table, sst);
}
/**
 * Opens a standalone RocksDB session with a separate WAL directory.
 *
 * @param config   source of all RocksDB tuning options
 * @param dataPath directory holding the SST data files
 * @param walPath  directory holding the write-ahead log
 * @param database logical database name (passed to the superclass)
 * @param store    logical store name (passed to the superclass)
 * @throws RocksDBException if the database cannot be opened
 */
public RocksDBStdSessions(HugeConfig config, String dataPath, String walPath, String database, String store) throws RocksDBException {
    super(database, store);
    this.conf = config;
    // Init options
    // NOTE(review): the same Options instance is passed for all three option
    // slots of initOptions, and is never closed if open() throws — presumably
    // its lifetime is tied to this session; confirm it is released on close.
    Options options = new Options();
    RocksDBStdSessions.initOptions(this.conf, options, options, options);
    options.setWalDir(walPath);
    /*
     * Open RocksDB at the first time
     * Don't merge old CFs, we expect a clear DB when using this one
     */
    this.rocksdb = RocksDB.open(options, dataPath);
}
/**
 * Lists the column-family names of the RocksDB at {@code path}.
 *
 * <p>Returns a set containing only {@code "default"} when the database
 * reports no column families (e.g. it does not exist yet).
 *
 * @param path filesystem path of the RocksDB directory
 * @return the set of column-family names, never empty
 * @throws RocksDBException if the listing fails
 */
public static Set<String> listCFs(String path) throws RocksDBException {
    Set<String> cfs = new HashSet<>();
    // Options is a native resource: close it instead of leaking the handle
    // created inline in the listColumnFamilies() call.
    try (Options options = new Options()) {
        List<byte[]> oldCFs = RocksDB.listColumnFamilies(options, path);
        if (oldCFs.isEmpty()) {
            cfs.add("default");
        } else {
            for (byte[] oldCF : oldCFs) {
                cfs.add(decode(oldCF));
            }
        }
    }
    return cfs;
}
RocksDBNormalizerModel(Path dbPath) { RocksDB.loadLibrary(); try (Options options = new Options().setInfoLogLevel(InfoLogLevel.ERROR_LEVEL)) { db = RocksDB.openReadOnly(options, dbPath.toString()); } catch (RocksDBException e) { throw new RuntimeException(e); } }
/**
 * Opens (creating if needed) the RocksDB for suffix data of the given id,
 * tracks it for later cleanup, and wraps it in a SuffixDataStore.
 *
 * @param id identifier used to derive the database directory name
 * @return a store backed by the opened RocksDB
 * @throws RuntimeException wrapping any RocksDB or filesystem failure
 */
@Override public SuffixDataStore createSuffixDataStore(int id) {
    RocksDB.loadLibrary();
    // Bulk-load tuning: this store is written once, then read.
    try (Options options = new Options().setCreateIfMissing(true).prepareForBulkLoad()) {
        Files.createDirectories(dbPath);
        String target = dbPath.resolve(getSuffixesName(id)).toString();
        RocksDB store = RocksDB.open(options, target);
        // Track the handle so it can be closed with the others.
        rocksDBS.add(store);
        return new RocksDbSuffixDataStore(store);
    } catch (RocksDBException | IOException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Test fixture: recreates an empty RocksDB under {@code dirPath} and seeds it
 * with one string-keyed entry and one int-keyed entry.
 *
 * @throws IOException      if the directory cannot be (re)created
 * @throws RocksDBException if the database cannot be opened or written
 */
@BeforeClass static public void createRocksDb() throws IOException, RocksDBException {
    if (Files.exists(dirPath)) {
        removeRecursiveDirectory(dirPath);
    }
    Files.createDirectories(dirPath);
    // Close the Options once the DB is open; the DB keeps its own native copy,
    // and leaving the handle unclosed leaks native memory.
    try (Options options = new Options().setCreateIfMissing(true)) {
        db = RocksDB.open(options, dirPath.toString());
    }
    // Use an explicit charset so the key/value bytes are platform-independent.
    db.put("testString".getBytes(java.nio.charset.StandardCharsets.UTF_8),
            "this is string".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    db.put(ByteBuffer.allocate(4).putInt(123).array(), ByteBuffer.allocate(4).putInt(456).array());
}
/**
 * Opens a RocksDB shard at {@code dbpath/timestamp} with two background
 * threads, a string-append merge operator, and logs under {@code dbpath/logs}.
 *
 * @param dbpath    root directory for the shard and its logs
 * @param timestamp shard identifier; becomes the database subdirectory name
 * @throws RocksDBException if the database cannot be opened
 */
public DBSharder(String dbpath, Long timestamp) throws RocksDBException {
    log = LoggerFactory.getLogger(DBSharder.class);
    // Native library must be loaded before any RocksDB call.
    RocksDB.loadLibrary();
    options = new Options();
    env = options.getEnv();
    env.setBackgroundThreads(2);
    options.setEnv(env);
    options.setCreateIfMissing(true);
    options.setDbLogDir(dbpath + "/logs/");
    // NOTE(review): the StringAppendOperator is only referenced by options —
    // presumably the options field keeps it alive for the DB's lifetime;
    // confirm, since a collected merge operator would be a native-use bug.
    options.setMergeOperator(new StringAppendOperator());
    // NOTE(review): options is not closed if open() throws — verify the
    // owning class releases it, otherwise the native handle leaks.
    db = RocksDB.open(options, dbpath + "/" + timestamp);
}
/**
 * Returns the cached metadata table for {@code type}, lazily creating the
 * shared small-DB-optimized Options on first use.
 *
 * <p>Synchronized so the lazy init of {@code metadataOptions} is safe under
 * concurrent callers.
 *
 * @param type the metadata category; STATS tables get a distinct cache key
 * @return the (possibly cached) metadata table for the type
 */
public synchronized RocksDBMetadataTable getMetadataTable(final MetadataType type) {
    if (metadataOptions == null) {
        RocksDB.loadLibrary();
        metadataOptions = new Options().setCreateIfMissing(true).optimizeForSmallDb();
    }
    final String directory = subDirectory + "/" + type.name();
    // Enums are singletons: compare with == (null-safe, idiomatic) rather
    // than equals().
    return metadataTableCache.get(
            keyCache.get(directory, d -> new CacheKey(d, type == MetadataType.STATS)));
}