/**
 * Creates a {@link RDBBlobStore} instance using the provided
 * {@link DataSource} and default {@link RDBOptions}.
 *
 * @param ds the data source backing the blob store
 */
public RDBBlobStore(DataSource ds) {
    this(ds, new RDBOptions());
}
/**
 * Creates a {@link RDBDocumentStore} instance using the provided
 * {@link DataSource} and {@link DocumentNodeStoreBuilder}, with default
 * {@link RDBOptions}.
 *
 * @param ds the data source to connect to
 * @param builder the node store builder supplying store configuration
 */
public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder<?> builder) {
    this(ds, builder, new RDBOptions());
}
/**
 * Creates a {@link RDBDocumentStore} instance using the provided
 * {@link DataSource} and {@link DocumentNodeStoreBuilder}, with default
 * {@link RDBOptions}.
 *
 * @param ds the data source to connect to
 * @param builder the node store builder supplying store configuration
 */
public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder<?> builder) {
    this(ds, builder, new RDBOptions());
}
/**
 * Creates a {@link RDBBlobStore} instance using the provided
 * {@link DataSource} and default {@link RDBOptions}.
 *
 * @param ds the data source backing the blob store
 */
public RDBBlobStore(DataSource ds) {
    this(ds, new RDBOptions());
}
/**
 * Sets a {@link DataSource} to use for the RDB document and blob
 * stores, using default {@link RDBOptions}.
 *
 * @param ds the data source
 * @return this
 */
public RDBDocumentNodeStoreBuilder setRDBConnection(DataSource ds) {
    setRDBConnection(ds, new RDBOptions());
    return thisBuilder();
}
/**
 * Sets a {@link DataSource} to use for the RDB document and blob
 * stores, using default {@link RDBOptions}.
 *
 * @param ds the data source
 * @return this
 */
public RDBDocumentNodeStoreBuilder setRDBConnection(DataSource ds) {
    setRDBConnection(ds, new RDBOptions());
    return thisBuilder();
}
/**
 * Builds {@link RDBOptions} for a test run.
 *
 * @param dropDBAfterTest whether the tables should be dropped when the store is closed
 * @param tablePrefix prefix to use for the database table names
 * @return the configured options
 */
private RDBOptions getOptions(boolean dropDBAfterTest, String tablePrefix) {
    // fix: parameter was misspelled "dropDBAFterTest" while the body
    // referenced "dropDBAfterTest", so the argument was never used
    return new RDBOptions().dropTablesOnClose(dropDBAfterTest).tablePrefix(tablePrefix);
}
/**
 * Builds {@link RDBOptions} for a test run.
 *
 * @param dropDBAfterTest whether the tables should be dropped when the store is closed
 * @param tablePrefix prefix to use for the database table names
 * @return the configured options
 */
private RDBOptions getOptions(boolean dropDBAfterTest, String tablePrefix) {
    // fix: parameter was misspelled "dropDBAFterTest" while the body
    // referenced "dropDBAfterTest", so the argument was never used
    return new RDBOptions().dropTablesOnClose(dropDBAfterTest).tablePrefix(tablePrefix);
}
/**
 * Sets a {@link DataSource} to use for the RDB document and blob
 * stores, using default {@link RDBOptions}.
 *
 * @param ds the data source
 * @return this
 */
public Builder setRDBConnection(DataSource ds) {
    setRDBConnection(ds, new RDBOptions());
    return this;
}
/**
 * Creates a {@link NodeStore} backed by freshly created RDB tables (unique,
 * time-based prefix, dropped on close) and remembers the backing
 * {@link DataSource} so it can be disposed together with the store.
 */
@Override
public NodeStore createNodeStore() {
    this.jdbcUrl = pUrl.replace("{fname}", fname);
    DataSource dataSource = RDBDataSourceFactory.forJdbcUrl(jdbcUrl, pUser, pPasswd);
    // unique per-run table prefix; dropTablesOnClose keeps the DB clean afterwards
    String tablePrefix = "T" + Long.toHexString(System.currentTimeMillis());
    RDBOptions rdbOptions = new RDBOptions().tablePrefix(tablePrefix).dropTablesOnClose(true);
    NodeStore nodeStore = new DocumentMK.Builder()
            .setPersistentCache("target/persistentCache,time")
            .setRDBConnection(dataSource, rdbOptions)
            .getNodeStore();
    this.dataSources.put(nodeStore, dataSource);
    return nodeStore;
}
/**
 * Creates a {@link DocumentMK.Builder} connected to the given data source,
 * using a unique, time-based table prefix whose tables are dropped on close.
 *
 * @param db the data source the builder should connect to
 * @return a builder wired to {@code db} and the test clock
 * @throws Exception if the test clock cannot be obtained
 */
protected DocumentMK.Builder newBuilder(DataSource db) throws Exception {
    String prefix = "T" + Long.toHexString(System.currentTimeMillis());
    RDBOptions opt = new RDBOptions().tablePrefix(prefix).dropTablesOnClose(true);
    // fix: previously connected to the shared "dataSource" field, silently
    // ignoring the "db" parameter passed by the caller
    return new DocumentMK.Builder().clock(getTestClock()).setRDBConnection(db, opt);
}
/**
 * Opens a fresh RDB-backed connection for each test: a data source for the
 * configured URL, a {@code TestStore} with a unique (time-based) table prefix
 * whose tables are dropped on close, and a DocumentMK with async delay 0 and
 * lease checking disabled.
 */
@Before
@Override
public void setUpConnection() throws Exception {
    dataSource = RDBDataSourceFactory.forJdbcUrl(URL, USERNAME, PASSWD);
    String tablePrefix = "T" + Long.toHexString(System.currentTimeMillis());
    RDBOptions rdbOptions = new RDBOptions().tablePrefix(tablePrefix).dropTablesOnClose(true);
    DocumentMK.Builder mkBuilder = new DocumentMK.Builder().clock(getTestClock()).setAsyncDelay(0);
    store = new TestStore(dataSource, mkBuilder, rdbOptions);
    mk = mkBuilder.setDocumentStore(store).setLeaseCheckMode(LeaseCheckMode.DISABLED).open();
}
/**
 * Creates a store at schema version 0, then reopens the same tables with an
 * upgrade to schema version 1, and verifies that the upgraded store reports
 * the VERSION column and still accepts writes.
 */
@Test
public void init0then1() {
    RDBOptions op = new RDBOptions().tablePrefix("T0T1").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore rdb0 = null;
    RDBDocumentStore rdb1 = null;
    try {
        // first store: created at schema 0 with no upgrade -> no VERSION column
        rdb0 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta0 = rdb0.getTable(Collection.NODES);
        assertFalse(meta0.hasVersion());
        // second store on the same tables (same prefix): upgrading to schema 1
        // must add the VERSION column
        rdb1 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(),
                new RDBOptions().tablePrefix("T0T1").initialSchema(0).upgradeToSchema(1));
        RDBTableMetaData meta1 = rdb1.getTable(Collection.NODES);
        assertTrue(meta1.hasVersion());
        // the upgraded store must remain writable
        UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(rdb1.create(Collection.NODES, Collections.singletonList(testInsert)));
    } finally {
        if (rdb1 != null) {
            rdb1.dispose();
        }
        if (rdb0 != null) {
            // rdb0 was opened with dropTablesOnClose, so disposing it last
            // removes the shared tables
            rdb0.dispose();
        }
    }
}
/**
 * Creates tables at schema version 0 with an immediate upgrade to version 1
 * and verifies that the VERSION column is present and that exactly one
 * "to DB level 1" upgrade message per table was logged.
 */
@Test
public void init01() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions op = new RDBOptions().tablePrefix("T01").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        // one upgrade log entry expected per table
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), RDBDocumentStore.getTableNames().size(),
                logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
/**
 * Creates tables at schema version 1 with an upgrade to version 2 and
 * verifies that split-document support is reported and that the expected
 * number of upgrade statements (five per table) was logged.
 */
@Test
public void init12() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 2").create();
    logCustomizer.starting();
    RDBOptions op = new RDBOptions().tablePrefix("T12").initialSchema(1).upgradeToSchema(2).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasSplitDocs());
        // the 1 -> 2 upgrade issues five DDL statements per table, each logged
        int statementsPerTable = 5;
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                statementsPerTable * RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
/**
 * Creates tables directly at schema version 1 with no upgrade (1 -> 1) and
 * verifies that the VERSION column is present and that no upgrade statements
 * were logged.
 */
@Test
public void init11() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions op = new RDBOptions().tablePrefix("T11").initialSchema(1).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        // schema already at the requested level: no upgrade log entries expected
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), 0, logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
/**
 * Verifies graceful degradation when the schema upgrade (ALTER TABLE ... ADD
 * COLUMN) fails: the store must come up at the initial schema (no VERSION
 * column), log one "Attempted to upgrade" message per table, and still
 * accept writes.
 */
@Test
public void init01fail() {
    // fix: check the precondition (and obtain the wrapper) before starting the
    // log customizer — previously a skipped assumption left the customizer
    // running because "finished()" in the finally block was never reached
    Assume.assumeTrue(ds instanceof RDBDataSourceWrapper);
    RDBDataSourceWrapper wds = (RDBDataSourceWrapper) ds;
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("Attempted to upgrade").create();
    logCustomizer.starting();
    wds.setFailAlterTableAddColumnStatements(true);
    RDBOptions op = new RDBOptions().tablePrefix("T01F").initialSchema(0).upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        // upgrade failed, so the store must report the old schema
        assertFalse(meta.hasVersion());
        // one failure message expected per table
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), RDBDocumentStore.getTableNames().size(),
                logCustomizer.getLogs().size());
        // the store must remain usable despite the failed upgrade
        UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert)));
    } finally {
        wds.setFailAlterTableAddColumnStatements(false);
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
/**
 * Creates a store pinned to schema version 0 (no upgrade) and verifies the
 * NODES table name and the absence of the VERSION column.
 */
@Test
public void initDefault() {
    RDBOptions options = new RDBOptions().tablePrefix("T00").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // schema 0 predates the VERSION column
        assertFalse(nodesMeta.hasVersion());
    } finally {
        if (store != null) {
            store.dispose();
        }
    }
}
/**
 * Creates tables directly at schema version 2 with no upgrade (2 -> 2) and
 * verifies that both the VERSION column and split-document support are
 * reported and that no upgrade statements were logged.
 */
@Test
public void init22() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName()).enable(Level.INFO)
            .contains("to DB level").create();
    logCustomizer.starting();
    RDBOptions op = new RDBOptions().tablePrefix("T" + "22").initialSchema(2).upgradeToSchema(2).dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        assertTrue(meta.hasSplitDocs());
        // tables were created at the target level: no upgrade log entries expected
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), 0, logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (rdb != null) {
            rdb.dispose();
        }
    }
}
/**
 * Regression test for OAK-7855: a persisted split-document type of 0 is
 * invalid and must be transparently mapped back to {@code SplitDocType.NONE}
 * when the document is read from the database.
 */
@Test
public void autoFixOAK7855() {
    RDBOptions op = new RDBOptions().tablePrefix("OAK7855").dropTablesOnClose(true);
    RDBDocumentStore rdb = null;
    try {
        rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op);
        RDBTableMetaData meta = rdb.getTable(Collection.NODES);
        assertEquals(op.getTablePrefix() + "_NODES", meta.getName());
        assertTrue(meta.hasVersion());
        String id = Utils.getIdFromPath("/foo");
        UpdateOp testInsert = new UpdateOp(id, true);
        assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert)));
        UpdateOp testUpdate = new UpdateOp(id, false);
        // set the invalid split doc type introduced by OAK-7855
        testUpdate.set(NodeDocument.SD_TYPE, 0);
        assertNotNull(rdb.findAndUpdate(Collection.NODES, testUpdate));
        // drop the cached copy so the next find() must re-read from the DB
        rdb.getNodeDocumentCache().invalidate(id);
        NodeDocument doc = rdb.find(Collection.NODES, id);
        assertNotNull(doc);
        // the invalid value must have been normalized on read
        assertEquals(SplitDocType.NONE, doc.getSplitDocType());
    } finally {
        if (rdb != null) {
            rdb.dispose();
        }
    }
}