public static RocksDB create(Map conf, String rocksDbDir, int ttlTimeSec) throws IOException {
    Options options = getOptions(conf);
    try {
        RocksDB rocksDb = ttlTimeSec > 0
                ? TtlDB.open(options, rocksDbDir, ttlTimeSec, false)
                : RocksDB.open(options, rocksDbDir);
        LOG.info("Finished loading RocksDB");
        // run a full manual compaction after load
        rocksDb.compactRange();
        return rocksDb;
    } catch (RocksDBException e) {
        throw new IOException("Failed to initialize RocksDb.", e);
    }
}
public static RocksDB createWithColumnFamily(Map conf, String rocksDbDir,
        final Map<String, ColumnFamilyHandle> columnFamilyHandleMap, int ttlTimeSec) throws IOException {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = getExistingColumnFamilyDesc(conf, rocksDbDir);
    List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>();
    DBOptions dbOptions = getDBOptions(conf);
    try {
        RocksDB rocksDb = ttlTimeSec > 0
                ? TtlDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles,
                        getTtlValues(ttlTimeSec, columnFamilyDescriptors), false)
                : RocksDB.open(dbOptions, rocksDbDir, columnFamilyDescriptors, columnFamilyHandles);
        int n = Math.min(columnFamilyDescriptors.size(), columnFamilyHandles.size());
        // index 0 is the default column family; register its handle separately,
        // then the loop below starts at 1 to skip that descriptor
        columnFamilyHandleMap.put(DEFAULT_COLUMN_FAMILY, rocksDb.getDefaultColumnFamily());
        for (int i = 1; i < n; i++) {
            ColumnFamilyDescriptor descriptor = columnFamilyDescriptors.get(i);
            columnFamilyHandleMap.put(new String(descriptor.columnFamilyName()), columnFamilyHandles.get(i));
        }
        LOG.info("Finished loading RocksDB with existing column family={}, dbPath={}, ttlSec={}",
                columnFamilyHandleMap.keySet(), rocksDbDir, ttlTimeSec);
        // run a full manual compaction after load
        rocksDb.compactRange();
        return rocksDb;
    } catch (RocksDBException e) {
        throw new IOException("Failed to initialize RocksDb.", e);
    }
}
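A minimal usage sketch for the factory above. The conf map, path, TTL, the "metadata" column family name, and the key/value byte arrays are all illustrative assumptions, not values from the snippet itself:

    // Hedged usage sketch: the caller-supplied map is filled with one handle
    // per column family that already exists on disk, keyed by CF name.
    Map<String, ColumnFamilyHandle> handles = new HashMap<>();
    RocksDB db = createWithColumnFamily(conf, "/tmp/rocksdb-state", handles, 3600);
    ColumnFamilyHandle cf = handles.get("metadata"); // null if this CF does not exist yet
    if (cf != null) {
        db.put(cf, key, value); // key/value are byte[] placeholders
    }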
db.compactRange();
LOG.info("Compaction!");
private void compact() throws RocksDBException {
    db.compactRange(handle);
}
/**
 * <p>Range compaction of a column family.</p>
 * <p><strong>Note</strong>: After the database has been compacted,
 * all data will have been pushed down to the last level containing
 * any data.</p>
 *
 * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance.
 * @param begin start of key range (included in range)
 * @param end end of key range (excluded from range)
 * @param compactRangeOptions options for the compaction
 *
 * @throws RocksDBException thrown if an error occurs within the native
 *     part of the library.
 */
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
        final byte[] begin, final byte[] end,
        final CompactRangeOptions compactRangeOptions) throws RocksDBException {
    compactRange(nativeHandle_, begin, begin.length, end, end.length,
            compactRangeOptions.nativeHandle_, columnFamilyHandle.nativeHandle_);
}
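A hedged usage sketch for this overload (imports from java.nio.charset and org.rocksdb assumed); the key bounds and option values below are arbitrary illustrations, not library defaults:

    // Compact only the ["user:a", "user:m") slice of one column family.
    // Bounds and option choices here are illustrative assumptions.
    final byte[] begin = "user:a".getBytes(StandardCharsets.UTF_8);
    final byte[] end = "user:m".getBytes(StandardCharsets.UTF_8);
    try (final CompactRangeOptions opts = new CompactRangeOptions()
            .setExclusiveManualCompaction(true)
            .setBottommostLevelCompaction(
                    CompactRangeOptions.BottommostLevelCompaction.kForce)) {
        db.compactRange(columnFamilyHandle, begin, end, opts);
    }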
public void flush() {
    try {
        db.compactRange();
    } catch (final RocksDBException e) {
        LOGGER.warn("Unable to compact metadata range", e);
    }
}
@Override
public void commitCompact(boolean force) throws IOException {
    try {
        for (RocksDB db : dbs) {
            db.compactRange();
        }
    } catch (RocksDBException e) {
        throw new IOException(e);
    }
}
compactRange(nativeHandle_, begin, begin.length, end, end.length,
        false, -1, 0, columnFamilyHandle.nativeHandle_);
compactRange(nativeHandle_, false, -1, 0, columnFamilyHandle.nativeHandle_);
/**
 * See {@link RocksDB#compactRange()}.
 *
 * @throws RocksDbException if the underlying compaction fails
 */
public void compactRange() throws RocksDbException {
    try {
        rocksDb.compactRange();
    } catch (Exception e) {
        throw e instanceof RocksDbException ? (RocksDbException) e : new RocksDbException(e);
    }
}
// deprecated overload: whole-CF compaction that can relocate output to a target level
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
        final boolean reduce_level, final int target_level,
        final int target_path_id) throws RocksDBException {
    compactRange(nativeHandle_, reduce_level, target_level, target_path_id,
            columnFamilyHandle.nativeHandle_);
}
// deprecated overload: ranged compaction that can relocate output to a target level
public void compactRange(final ColumnFamilyHandle columnFamilyHandle,
        final byte[] begin, final byte[] end, final boolean reduce_level,
        final int target_level, final int target_path_id) throws RocksDBException {
    compactRange(nativeHandle_, begin, begin.length, end, end.length,
            reduce_level, target_level, target_path_id,
            columnFamilyHandle.nativeHandle_);
}
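The native calls above back the older, deprecated public overloads, where reduce_level (later renamed changeLevel) moves the compacted output down to target_level on path target_path_id. A hedged sketch of the public form, with illustrative values:

    // Deprecated-style whole-CF compaction that also relocates the result to
    // level 1 on path id 0; this mirrors the compactRange(true, 1, 0) call in
    // the Kafka Streams snippet further below.
    db.compactRange(columnFamilyHandle,
            true /* reduce_level */, 1 /* target_level */, 0 /* target_path_id */);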
@Override
public void run() {
    try {
        this.dbs.compactRange();
        SDFSLogger.getLog().info("compaction done");
    } catch (RocksDBException e) {
        SDFSLogger.getLog().warn("unable to compact range", e);
    }
}
@Override
public void forceCompaction(String tableName) {
    try (ColumnFamily cf = cfs.get(tableName)) {
        db.compactRange(cf.getHandle());
    } catch (RocksDBException e) {
        throw Throwables.propagate(e);
    }
}
@SuppressFBWarnings(
        justification = "The null check outside of the synchronized block is intentional to minimize the need for synchronization.")
public void flush() {
    // TODO flush batch writes
    final RocksDB db = getWriteDb();
    try {
        db.compactRange();
    } catch (final RocksDBException e) {
        LOGGER.warn("Unable to compact range", e);
    }
    // force re-opening a reader to catch the updates from this write
    if (readerDirty && (readDb != null)) {
        synchronized (this) {
            if (readDb != null) {
                readDb.close();
                readDb = null;
            }
        }
    }
}
void toggleDbForBulkLoading(final boolean prepareForBulkload) {
    if (prepareForBulkload) {
        // if the store is not empty, we need to compact to get around the num.levels check
        // for bulk loading
        final String[] sstFileNames = dbDir.list(
                (dir, name) -> SST_FILE_EXTENSION.matcher(name).matches());
        if (sstFileNames != null && sstFileNames.length > 0) {
            try {
                db.compactRange(true, 1, 0);
            } catch (final RocksDBException e) {
                throw new ProcessorStateException(
                        "Error while range compacting during restoring store " + name, e);
            }
        }
    }
    close();
    this.prepareForBulkload = prepareForBulkload;
    openDB(internalProcessorContext);
}
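For context, a minimal sketch of where the num.levels check comes from: opening with bulk-load options (Options.prepareForBulkLoad() in the standard org.rocksdb API) disables automatic compactions and relaxes level triggers, which is why a non-empty store is manually compacted down first, as above. The dbPath name is a placeholder:

    // Sketch: open for bulk loading; prepareForBulkLoad() turns off automatic
    // compactions, so any pre-existing data must already be compacted down.
    try (final Options options = new Options().prepareForBulkLoad();
         final RocksDB bulkDb = RocksDB.open(options, dbPath)) { // dbPath is a placeholder
        // ... bulk writes / SST ingestion ...
    }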
int i = 0; // counter declared here to make the excerpt self-contained
for (RocksDB db : dbs) {
    SDFSLogger.getLog().info("compacting rocksdb " + i);
    db.compactRange();
    i++;
}