/**
 * Opens the file-backed MapDB store under {@code dbPath}, closing it automatically
 * on JVM shutdown. Any failure is wrapped in a {@link FailStoreException}.
 *
 * @throws FailStoreException if the store cannot be created or opened
 */
@Override
protected void init() throws FailStoreException {
    try {
        final File storeFile = new File(dbPath.getPath() + "/lts.db");
        db = DBMaker.fileDB(storeFile)
                .closeOnJvmShutdown()
                .encryptionEnable("lts")
                .make();
    } catch (Exception e) {
        // Preserve the original cause for the caller.
        throw new FailStoreException(e);
    }
}
// NOTE(review): fragment — the opening `try {` lies before this view.
db = DBMaker
        .fileDB(cacheFile)
        .transactionEnable()
        .fileLockWait(FILE_LOCK_WAIT)
        .make(); } catch (Exception e) {
    LOG.error("Could not create cache file (" + cacheFile + "): " + e.getMessage());
    // Fall back to a purely in-memory store when the file-backed DB cannot be opened.
    db = DBMaker.memoryDB().make();
    // NOTE(review): this second make() discards the DB created on the previous line and
    // allocates a fresh one — looks like an accidental duplicate; confirm and remove one.
    db = DBMaker.memoryDB().make();
// NOTE(review): fragment of a test method — the enclosing signature is outside this view.
Assert.assertNotNull(user);
// Build a throwaway SharedContext backed by an in-memory MapDB store; /tmp is only the base dir.
final SharedContext sharedContext = SharedContext.create(new File("/tmp"), DBMaker.memoryDB().make(), r -> new Thread(r), Collections.emptyList());
sharedContext.add(WebServer.class, new WebServer(sharedContext, jetty, new WebServerConfig(), new EncryptionFactoryAes("secret")));
// Ports 0/0: presumably "pick any free port" — TODO confirm against LfsServer's contract.
sharedContext.add(LfsServer.class, new LfsServer("t0ken", 0, 0));
// NOTE(review): fragment — the DBMaker chain starts before this view; the try-with-resources
// shown here ensures the cache DB is closed when the block exits.
        .fileMmapEnableIfSupported()
        // checksumHeaderBypass: skips the header checksum check on open — presumably to
        // tolerate a cache left unclean by a previous crash; confirm this is intentional.
        .checksumHeaderBypass()
        .make()) {
    final GitConverter converter = new GitConverter(cache, dstPath, globs);
    dstRepo.create(true);
// NOTE(review): fragment — the enclosing method/try and at least one `case` label appear
// to be cut; the file-backed branch below has no visible label.
switch (storageType) {
case ONHEAP:
    db = DBMaker.heapDB().closeOnJvmShutdown().make();
    LOG.info("Create ONHEAP mapdb");
    break;
case MEMORY:
    db = DBMaker.memoryDB().closeOnJvmShutdown().make();
    LOG.info("Create MEMORY mapdb");
    break;
case DIRECT_MEMORY:
    db = DBMaker.memoryDirectDB().closeOnJvmShutdown().make();
    LOG.info("Create DIRECT_MEMORY mapdb");
    break;
    // NOTE(review): `file` is dereferenced by deleteOnExit() BEFORE the null check below —
    // the Preconditions.checkNotNull can never fire usefully; it should precede the call.
    file.deleteOnExit();
    Preconditions.checkNotNull(file, "file is null");
    db = DBMaker.fileDB(file).deleteFilesAfterClose().make();
    LOG.info("Created FILE_RAF map file at {}", file.getAbsolutePath());
} catch (IOException e) {
// Initializes the in-memory embedding table (String -> double[]) and records its dimension.
// NOTE(review): `mapPath` is unused and FileNotFoundException is never thrown in the visible
// code — presumably the file-loading part follows beyond this fragment; confirm.
private void loadEmbedding(File mapPath, int dimension) throws FileNotFoundException {
    DB db = DBMaker.memoryDB().make();
    vectors = db.hashMap("some_other_map", Serializer.STRING, Serializer.DOUBLE_ARRAY).create();
    this.dimension = dimension;
// NOTE(review): fragment — `DBMaker` and the enclosing if begin before this view.
        .fileDB(tempFile)
        .fileMmapEnableIfSupported()
        .fileMmapPreclearDisable()
        // cleanerHackEnable: works around the JVM's unmapping limitation for mmap files.
        .cleanerHackEnable()
        .closeOnJvmShutdown()
        .closeOnJvmShutdownWeakReference()
        .fileChannelEnable()
        .make(); } else {
    // Heap-backed fallback sized from luzzu.properties.
    // NOTE(review): allocateStartSize takes BYTES, but the property is named
    // CACHE_SIZE_IN_GB and is passed through unscaled — verify the intended unit.
    this.db = DBMaker
            .heapDB()
            .allocateStartSize(Integer.parseInt(PropertyManager.getInstance().getProperties("luzzu.properties").getProperty("CACHE_SIZE_IN_GB")))
            .allocateIncrement(512)
            .make();
// NOTE(review): fragment — the governing if-condition is outside this view.
db = DBMaker.memoryDB().make(); } else {
    db = DBMaker.fileDB(f).closeOnJvmShutdown().transactionEnable() //Default to Write Ahead Log - lower performance, but has crash protection
            .make();
/**
 * Opens the MapDB store backing this data source. Persistent mode creates a
 * memory-mapped file under {@code basedir/name/prefix/postfix}; otherwise a pure
 * in-memory store is used and nothing touches disk.
 *
 * @throws IOException if the store directory cannot be created
 */
public HistoDataSource(HistoDbConfiguration config, String name, String prefix, String postfix) throws IOException {
    this.config = Objects.requireNonNull(config);
    this.name = Objects.requireNonNull(name);
    this.prefix = Objects.requireNonNull(prefix);
    this.postfix = Objects.requireNonNull(postfix);
    if (!config.getMapDb().isPersistent()) {
        this.storeDir = null;
        this.db = DBMaker.memoryDB().make();
        return;
    }
    this.storeDir = Files.createDirectories(Paths.get(config.getMapDb().getBasedir(), name, prefix, postfix));
    File storeFile = new File(storeDir.toFile(), "mapdb");
    this.db = DBMaker.fileDB(storeFile)
            .fileMmapEnableIfSupported()
            .fileMmapPreclearDisable()
            .fileChannelEnable()
            .closeOnJvmShutdown()
            .make();
    // "referenceNetwotk" (sic): key name kept as-is for compatibility with existing stores.
    String net = (String) db.atomicString("referenceNetwotk").createOrOpen().get();
    if (net != null) {
        this.referenceNetwork = Importers.loadNetwork(Paths.get(storeDir.toString(), net),
                LocalComputationManager.getDefault(), new ImportConfig(), (Properties) null);
    }
}
/**
 * Builds a MapDB-backed ChronoDB instance: opens the store, wires up the manager
 * components, and performs the initial commit.
 *
 * NOTE(review): the managers are constructed in a fixed order and several receive
 * {@code this} while construction is still in progress — do not reorder.
 */
public MapDBChronoDB(final ChronoDBConfiguration configuration) {
    super(configuration);
    this.db = DBMaker.fileDB(configuration.getWorkingFile()).make();
    // remember the directory we are working in
    this.branchManager = new MapDBBranchManager(this);
    // we use the in-memory solution here; when we allow serialization
    // customization and configuration, we must use a solution that persists
    // the configured version in the database.
    this.serializationManager = new InMemorySerializationManager();
    this.indexManager = new DocumentBasedIndexManager(this, new MapDBIndexManagerBackend(this));
    this.queryManager = new StandardQueryManager(this);
    this.maintenanceManager = new MapDBMaintenanceManager(this);
    this.cache = ChronoDBCache.createCacheForConfiguration(configuration);
    this.initializeShutdownHook();
    // perform the initial commit (primarily contains setup of empty B-Trees)
    this.db.commit();
}
/** * Create the storage map * * @return */ @Override public Map<Integer, Long> createUpdatedMap() { if (storageFile == null) { //In-Memory Stats Storage db = DBMaker.memoryDB().make(); } else { db = DBMaker.fileDB(storageFile).closeOnJvmShutdown().transactionEnable() //Default to Write Ahead Log - lower performance, but has crash protection .make(); } updated = db.hashMap("updated").keySerializer(Serializer.INTEGER).valueSerializer(Serializer.LONG) .createOrOpen(); return updated; }
/** * Create the storage map * * @return */ @Override public Map<Integer, Long> createUpdatedMap() { if (storageFile == null) { //In-Memory Stats Storage db = DBMaker.memoryDB().make(); } else { db = DBMaker.fileDB(storageFile).closeOnJvmShutdown().transactionEnable() //Default to Write Ahead Log - lower performance, but has crash protection .make(); } updated = db.hashMap("updated").keySerializer(Serializer.INTEGER).valueSerializer(Serializer.LONG) .createOrOpen(); return updated; }
/**
 * Creates a new off-heap map for HZ; called from instrumented code.
 * Logs a one-time banner on first instantiation.
 */
public static ConcurrentMap defaultRecordStoreRecords() {
    if (!logged.getAndSet(true)) {
        LOG.info("mapdb-hz-offheap: MapDB HashMap instantiated. It works!");
    }
    DB offHeap = DBMaker.memoryDirectDB().transactionDisable().make();
    // "recods" (sic): store name kept as-is so existing callers keep resolving it.
    return offHeap.hashMapCreate("recods")
            .keySerializer(new MapDBDataSerializer())
            .valueSerializer(new MapDBDataRecordSerializer())
            .counterEnable()
            .make();
}
/** * Create the storage map * * @return */ @Override public Map<Integer, Long> createUpdatedMap() { if (storageFile == null) { //In-Memory Stats Storage db = DBMaker.memoryDB().make(); } else { db = DBMaker.fileDB(storageFile).closeOnJvmShutdown().transactionEnable() //Default to Write Ahead Log - lower performance, but has crash protection .make(); } updated = db.hashMap("updated").keySerializer(Serializer.INTEGER).valueSerializer(Serializer.LONG) .createOrOpen(); return updated; }
/**
 * Creates a file-backed MapDB persistence backend under {@code directory},
 * creating parent directories as needed.
 *
 * @param directory target directory for the "neoemf.mapdb" store
 * @param options   backend options (currently unused here)
 * @return the initialized backend
 * @throws InvalidDataStoreException declared for interface compatibility
 */
@Override
public PersistenceBackend createPersistentBackend(File directory, Map<?, ?> options) throws InvalidDataStoreException {
    String storePath = MapDbURI.createURI(URI.createFileURI(directory.getAbsolutePath()).appendSegment("neoemf.mapdb")).toFileString();
    File dbFile = FileUtils.getFile(storePath);
    File parentDir = dbFile.getParentFile();
    if (!parentDir.exists()) {
        try {
            Files.createDirectories(parentDir.toPath());
        } catch (IOException e) {
            // Best effort: log and let the subsequent fileDB() surface any real failure.
            NeoLogger.error(e);
        }
    }
    DB db = DBMaker.fileDB(dbFile).fileMmapEnableIfSupported().make();
    MapDbPersistenceBackend backend = new MapDbPersistenceBackend(db);
    processGlobalConfiguration(directory);
    return backend;
}
/** Exercises putIfAbsentBoolean / replace semantics on an on-heap BTreeMap. */
@Test
public void testOnHeapDB() {
    final DB heapDb = DBMaker.heapDB().make();
    final BTreeMap<Long, String> btree = heapDb.treeMap("btree")
            .keySerializer(Serializer.LONG)
            .valueSerializer(Serializer.STRING)
            .create();
    // First insert of a key returns false; repeats return true and keep the original value.
    Assert.assertFalse(btree.putIfAbsentBoolean(1L, "val_1"));
    Assert.assertTrue(btree.putIfAbsentBoolean(1L, "val_2"));
    Assert.assertTrue(btree.putIfAbsentBoolean(1L, "val_3"));
    Assert.assertFalse(btree.putIfAbsentBoolean(2L, "val_4"));
    Assert.assertEquals("val_1", btree.get(1L));
    Assert.assertEquals("val_4", btree.get(2L));
    // Conditional replace succeeds only when the expected value matches.
    Assert.assertTrue(btree.replace(2L, "val_4", "val_5"));
    Assert.assertEquals("val_5", btree.get(2L));
    btree.close();
    heapDb.close();
}
}
@NotNull @Override public DB createCache(@NotNull File basePath) { final File cacheBase = ConfigHelper.joinPath(basePath, path); //noinspection ResultOfMethodCallIgnored cacheBase.getParentFile().mkdirs(); try { return DBMaker.fileDB(cacheBase) .closeOnJvmShutdown() .fileMmapEnableIfSupported() .make(); } catch (DBException e) { throw new DBException(String.format("Failed to open %s: %s", cacheBase, e.getMessage()), e); } } }
/**
 * Opens the local MapDB backend at {@code directory}/data, creating the directory
 * if needed and honoring the read-only flag from the configuration.
 *
 * @param directory backend root directory
 * @param config    MapDB configuration (read-only flag, mapping)
 * @return the mapper wrapping the opened DB
 * @throws Exception if the directory cannot be created or the store cannot open
 */
@Nonnull
@Override
protected Backend createLocalBackend(Path directory, MapDbConfig config) throws Exception {
    if (!directory.toFile().exists()) {
        Files.createDirectories(directory);
    }
    File dataFile = directory.resolve("data").toFile();
    DBMaker.Maker maker = DBMaker.fileDB(dataFile).fileMmapEnableIfSupported();
    if (config.isReadOnly()) {
        maker.readOnly();
    }
    return createMapper(config.getMapping(), maker.make());
}
}
public static void main(String[] args) { //import org.mapdb.* DB db = DBMaker.memoryDB().make(); BTreeMap<byte[], Integer> map = db .treeMap("towns", Serializer.BYTE_ARRAY, Serializer.INTEGER) .createOrOpen(); map.put("New York".getBytes(), 1); map.put("New Jersey".getBytes(), 2); map.put("Boston".getBytes(), 3); //get all New* cities Map<byte[], Integer> newCities = map.prefixSubMap("New".getBytes()); } }
/**
 * Opens (or creates) the transactional MapDB store at {@code dbFile}.
 * On failure the error is reported and the DBException is rethrown unchanged.
 *
 * NOTE(review): prefer the (commented-out) logger over printStackTrace/System.err.
 */
public TextAnnotationMapDBHandler(String dbFile) {
    try {
        // enabling transactions avoids cache corruption if service fails.
        this.db = DBMaker.fileDB(dbFile).closeOnJvmShutdown().transactionEnable().make();
    } catch (DBException e) {
        // logger.warn("mapdb couldn't instantiate db using file '{}': check error and either remove lock, " +
        //     "repair file, or delete file.", dbFile);
        e.printStackTrace();
        System.err.println("mapdb couldn't instantiate db using file '" + dbFile + "': check error and either remove lock, repair file, or delete file.");
        throw e;
    }
}
/**