public static RocksDB createWithColumnFamily(Map conf, String rocksDbDir, final Map<String, ColumnFamilyHandle> columnFamilyHandleMap, int ttlTimeSec) throws IOException { List<ColumnFamilyDescriptor> columnFamilyDescriptors = getExistingColumnFamilyDesc(conf, rocksDbDir); List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); DBOptions dbOptions = getDBOptions(conf); try { RocksDB rocksDb = ttlTimeSec > 0 ? TtlDB.open( dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles, getTtlValues(ttlTimeSec, columnFamilyDescriptors), false) : RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles); int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size()); // skip default column columnFamilyHandleMap.put(DEFAULT_COLUMN_FAMILY, rocksDb.getDefaultColumnFamily()); for (int i = 1; i < n; i++) { ColumnFamilyDescriptor descriptor = columnFamilyDescriptors.get(i); columnFamilyHandleMap.put(new String(descriptor.columnFamilyName()), columnFamilyHandles.get(i)); } LOG.info("Finished loading RocksDB with existing column family={}, dbPath={}, ttlSec={}", columnFamilyHandleMap.keySet(), rocksDbDir, ttlTimeSec); // enable compaction rocksDb.compactRange(); return rocksDb; } catch (RocksDBException e) { throw new IOException("Failed to initialize RocksDb.", e); } }
/**
 * Creates the metadata manager backed by a {@code RocksDBStore} over the
 * database's default column family.
 *
 * <p>The descriptor argument is deliberately {@code null}: the default column
 * family cannot be dropped and re-created, so no descriptor is supplied.
 * Reads {@code db} and {@code stripeCount} from the enclosing scope —
 * presumably initialized before this constructor runs; TODO confirm.
 */
private StoreMetadataManagerImpl() {
  metadataStore = new RocksDBStore(DEFAULT,
      null /* dropping and creating default is not supported */,
      db.getDefaultColumnFamily(), db, stripeCount);
}
/**
 * Provides Read-Your-Own-Writes like functionality by
 * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator}
 * as a delta and baseIterator as a base. Operates on the default column
 * family.
 *
 * @param baseIterator The base iterator,
 *     e.g. {@link org.rocksdb.RocksDB#newIterator()}
 * @return An iterator which shows a view comprised of both the database
 *     point-in-time from baseIterator and modifications made in this write
 *     batch.
 */
public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) {
  // Delegates to the column-family overload using the default column family
  // of the database that produced the base iterator.
  return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), baseIterator);
}
/**
 * Loads a list of external SST files (1) into the DB, placing each file at
 * the lowest level it can fit into (2). A file whose key range overlaps the
 * memtable's key range forces a flush of the memtable before ingestion.
 *
 * (1) External SST files can be created using {@link SstFileWriter}
 * (2) Files are ingested at the lowest possible level even when the file
 *     compression does not match that level's compression
 *
 * @param filePathList The list of files to ingest
 * @param ingestExternalFileOptions the options for the ingestion
 *
 * @throws RocksDBException thrown if error happens in underlying
 *     native library.
 */
public void ingestExternalFile(final List<String> filePathList,
    final IngestExternalFileOptions ingestExternalFileOptions)
    throws RocksDBException {
  final int fileCount = filePathList.size();
  final String[] files = filePathList.toArray(new String[fileCount]);
  // Ingestion targets the default column family of this database.
  ingestExternalFile(nativeHandle_, getDefaultColumnFamily().nativeHandle_,
      files, fileCount, ingestExternalFileOptions.nativeHandle_);
}
/**
 * Gets the handle for the default column family.
 *
 * @return The handle of the default column family
 */
public ColumnFamilyHandle getDefaultColumnFamily() {
  final ColumnFamilyHandle handle =
      new ColumnFamilyHandle(this, getDefaultColumnFamily(nativeHandle_));
  // The wrapper must not free the underlying native handle on close —
  // presumably the database itself owns the default column family; relinquish
  // ownership here.
  handle.disOwnNativeHandle();
  return handle;
}
/** * Test that {@code ByteStoreManager#start()} waits until RocksDB lock * is released */ @Test public void testConcurrentOpenSleep() throws Exception { String dbPath = temporaryFolder.newFolder().getAbsolutePath(); try(ByteStoreManager bsm = new ByteStoreManager(dbPath, false)) { TestThread tt = new TestThread(bsm); try(RocksDB db = RocksDB.open(new File(dbPath, ByteStoreManager.CATALOG_STORE_NAME).getAbsolutePath()); ColumnFamilyHandle handle = db.getDefaultColumnFamily()) { tt.start(); tt.ready.countDown(); // Wait for multiple attempts TimeUnit.MILLISECONDS.sleep(300); // Lock should still be in place assertEquals(1, tt.started.getCount()); assertFalse("RocksDB lock didn't work", tt.result.get()); } // RocksDB is now closed, lock should be freed tt.started.await(); assertTrue("RocksDB lock not released properly", tt.result.get()); } }
} finally { ColumnFamilyHandle handle = rocksDBResource.get().getDefaultColumnFamily(); store = new RocksDBStore("test", new ColumnFamilyDescriptor("test".getBytes(UTF_8)), handle, rocksDBResource.get(), 4);
/**
 * Replays every write-ahead-log entry recorded at or after
 * {@code transactionNumber} through the supplied {@code replayHandler}, then
 * records the latest transaction number for each store that was updated.
 *
 * <p>NOTE(review): the iterator protocol below is order-sensitive —
 * {@code status()} must be called after {@code isValid()} and before
 * {@code getBatch()}, and {@code next()} only while the iterator is valid.
 * Do not reorder these calls.
 */
void replaySince(final long transactionNumber, ReplayHandler replayHandler) {
  try (ReplayHandlerAdapter handler =
           new ReplayHandlerAdapter(db.getDefaultColumnFamily().getID(), replayHandler, handleIdToNameMap);
       TransactionLogIterator iterator = db.getUpdatesSince(transactionNumber)) {
    while (iterator.isValid()) {
      iterator.status();  // surfaces any pending iterator error before reading the batch

      final TransactionLogIterator.BatchResult result = iterator.getBatch();  // requires isValid and status check
      LOGGER.debug("Requested sequence number: {}, iterator sequence number: {}",
          transactionNumber, result.sequenceNumber());
      result.writeBatch()
          .iterate(handler);

      // Re-check validity: the iterator may have been exhausted by the read above.
      if (!iterator.isValid()) {
        break;
      }
      iterator.next();  // requires isValid
    }

    for (String updatedStore : handler.getUpdatedStores()) {
      final long latestTransactionNumber = metadataManager.getLatestTransactionNumber();
      // NOTE(review): the same value is passed for both trailing arguments —
      // presumably (lower bound, value) or similar; confirm against
      // setLatestTransactionNumber's signature.
      metadataManager.setLatestTransactionNumber(updatedStore, latestTransactionNumber,
          latestTransactionNumber);
    }
  } catch (RocksDBException e) {
    throw new DatastoreException(e);
  }
}
@Before public void setUpStore() { ColumnFamilyHandle handle = rocksDBResource.get().getDefaultColumnFamily(); store = new RocksDBStore("test", new ColumnFamilyDescriptor("test".getBytes(UTF_8)), handle, rocksDBResource.get(), 4); // Making sure test is repeatable Random random = new Random(42); for(int i = 0; i < 1 << 16; i++ ) { store.put(newRandomValue(random), newRandomValue(random)); } store.put(specialKey, newRandomValue(random)); }