/**
 * Builds {@link RDBOptions} for a test run.
 *
 * @param dropDBAfterTest whether the created tables should be dropped when the store is closed
 * @param tablePrefix table-name prefix isolating this test's tables
 * @return the configured options
 */
private RDBOptions getOptions(boolean dropDBAfterTest, String tablePrefix) {
    // fixed: parameter was misspelled "dropDBAFterTest" while the body referenced
    // "dropDBAfterTest", so the method did not compile
    return new RDBOptions().dropTablesOnClose(dropDBAfterTest).tablePrefix(tablePrefix);
}
/**
 * Verifies that a store created with initial and upgrade schema 0 uses the
 * expected prefixed NODES table name and that the table has no VERSION column.
 */
@Test
public void initDefault() {
    RDBOptions options = new RDBOptions().tablePrefix("T00").initialSchema(0)
            .upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        // schema 0 predates the VERSION column
        assertFalse(nodesMeta.hasVersion());
    } finally {
        if (store != null) {
            store.dispose();
        }
    }
}
@Test public void autoFixOAK7855() { RDBOptions op = new RDBOptions().tablePrefix("OAK7855").dropTablesOnClose(true); RDBDocumentStore rdb = null; try { rdb = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op); RDBTableMetaData meta = rdb.getTable(Collection.NODES); assertEquals(op.getTablePrefix() + "_NODES", meta.getName()); assertTrue(meta.hasVersion()); String id = Utils.getIdFromPath("/foo"); UpdateOp testInsert = new UpdateOp(id, true); assertTrue(rdb.create(Collection.NODES, Collections.singletonList(testInsert))); UpdateOp testUpdate = new UpdateOp(id, false); // set the invalid split doc type introduced by OAK-7855 testUpdate.set(NodeDocument.SD_TYPE, 0); assertNotNull(rdb.findAndUpdate(Collection.NODES, testUpdate)); rdb.getNodeDocumentCache().invalidate(id); NodeDocument doc = rdb.find(Collection.NODES, id); assertNotNull(doc); assertEquals(SplitDocType.NONE, doc.getSplitDocType()); } finally { if (rdb != null) { rdb.dispose(); } } }
/**
 * Convenience constructor: builds an {@link RDBDocumentStore} on top of the
 * given {@link DataSource} and {@link DocumentNodeStoreBuilder}, falling back
 * to the default {@link RDBOptions}.
 *
 * @param ds the data source providing JDBC connections
 * @param builder the builder carrying document store configuration
 */
public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder<?> builder) {
    this(ds, builder, new RDBOptions());
}
public static void main(String[] args) { RDBOptions defaultOpts = new RDBOptions(); int initial = defaultOpts.getInitialSchema(); int upgradeTo = defaultOpts.getUpgradeToSchema(); System.out.println("Table Creation Statements for RDBBlobStore and RDBDocumentStore"); System.out.println("RDBDocumentStore initial version: " + initial + ", with modifications up to version: " + upgradeTo); System.out.println(" " + ddb.getTableCreationStatement(table, defaultOpts.getInitialSchema())); for (String s : ddb.getIndexCreationStatements(table, defaultOpts.getInitialSchema())) { System.out.println(" " + s);
new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.NODES)))); this.tableMeta.put(Collection.CLUSTER_NODES, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.CLUSTER_NODES)))); this.tableMeta.put(Collection.JOURNAL, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.JOURNAL)))); this.tableMeta.put(Collection.SETTINGS, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.SETTINGS)))); try { createTableFor(con, Collection.CLUSTER_NODES, this.tableMeta.get(Collection.CLUSTER_NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.NODES, this.tableMeta.get(Collection.NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.SETTINGS, this.tableMeta.get(Collection.SETTINGS), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.JOURNAL, this.tableMeta.get(Collection.JOURNAL), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); } finally { con.commit(); if (options.isDropTablesOnClose()) { tablesToBeDropped.addAll(tablesCreated); + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
private void initialize(DataSource ds, RDBOptions options) throws Exception { this.tnData = RDBJDBCTools.createTableName(options.getTablePrefix(), "DATASTORE_DATA"); this.tnMeta = RDBJDBCTools.createTableName(options.getTablePrefix(), "DATASTORE_META"); if (options.isDropTablesOnClose()) { tablesToBeDropped.addAll(tablesCreated); + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
/**
 * Verifies schema upgrade from 0 to 1: a first store created at schema 0 has
 * no VERSION column, while a second store on the same tables upgrading to
 * schema 1 reports the column and can still create documents.
 */
@Test
public void init0then1() {
    RDBOptions options = new RDBOptions().tablePrefix("T0T1").initialSchema(0)
            .upgradeToSchema(0).dropTablesOnClose(true);
    RDBDocumentStore oldStore = null;
    RDBDocumentStore newStore = null;
    try {
        oldStore = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData oldMeta = oldStore.getTable(Collection.NODES);
        assertFalse(oldMeta.hasVersion());
        newStore = new RDBDocumentStore(this.ds, new DocumentMK.Builder(),
                new RDBOptions().tablePrefix("T0T1").initialSchema(0).upgradeToSchema(1));
        RDBTableMetaData newMeta = newStore.getTable(Collection.NODES);
        assertTrue(newMeta.hasVersion());
        UpdateOp insertOp = new UpdateOp(Utils.getIdFromPath("/foo"), true);
        assertTrue(newStore.create(Collection.NODES, Collections.singletonList(insertOp)));
    } finally {
        if (newStore != null) {
            newStore.dispose();
        }
        if (oldStore != null) {
            oldStore.dispose();
        }
    }
}
/**
 * Convenience constructor: builds an {@link RDBDocumentStore} on top of the
 * given {@link DataSource} and {@link DocumentNodeStoreBuilder}, falling back
 * to the default {@link RDBOptions}.
 *
 * @param ds the data source providing JDBC connections
 * @param builder the builder carrying document store configuration
 */
public RDBDocumentStore(DataSource ds, DocumentNodeStoreBuilder<?> builder) {
    this(ds, builder, new RDBOptions());
}
public static void main(String[] args) { RDBOptions defaultOpts = new RDBOptions(); int initial = defaultOpts.getInitialSchema(); int upgradeTo = defaultOpts.getUpgradeToSchema(); System.out.println("Table Creation Statements for RDBBlobStore and RDBDocumentStore"); System.out.println("RDBDocumentStore initial version: " + initial + ", with modifications up to version: " + upgradeTo); System.out.println(" " + ddb.getTableCreationStatement(table, defaultOpts.getInitialSchema())); for (String s : ddb.getIndexCreationStatements(table, defaultOpts.getInitialSchema())) { System.out.println(" " + s);
new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.NODES)))); this.tableMeta.put(Collection.CLUSTER_NODES, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.CLUSTER_NODES)))); this.tableMeta.put(Collection.JOURNAL, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.JOURNAL)))); this.tableMeta.put(Collection.SETTINGS, new RDBTableMetaData(catalog, createTableName(options.getTablePrefix(), TABLEMAP.get(Collection.SETTINGS)))); try { createTableFor(con, Collection.CLUSTER_NODES, this.tableMeta.get(Collection.CLUSTER_NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.NODES, this.tableMeta.get(Collection.NODES), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.SETTINGS, this.tableMeta.get(Collection.SETTINGS), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); createTableFor(con, Collection.JOURNAL, this.tableMeta.get(Collection.JOURNAL), tablesCreated, tablesPresent, options.getInitialSchema(), options.getUpgradeToSchema()); } finally { con.commit(); if (options.isDropTablesOnClose()) { tablesToBeDropped.addAll(tablesCreated); + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
private void initialize(DataSource ds, RDBOptions options) throws Exception { this.tnData = RDBJDBCTools.createTableName(options.getTablePrefix(), "DATASTORE_DATA"); this.tnMeta = RDBJDBCTools.createTableName(options.getTablePrefix(), "DATASTORE_META"); if (options.isDropTablesOnClose()) { tablesToBeDropped.addAll(tablesCreated); + (options.isDropTablesOnClose() ? " (will be dropped on exit)" : ""));
/**
 * Verifies that creating a store directly at schema 1 (initial == upgrade == 1)
 * yields a versioned NODES table without logging any "to DB level 1" upgrade
 * messages, since no upgrade takes place.
 */
@Test
public void init11() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO).contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T11").initialSchema(1)
            .upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        // no upgrade happened, so no upgrade log entries are expected
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(), 0,
                logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
/**
 * Builds {@link RDBOptions} for a test run.
 *
 * @param dropDBAfterTest whether the created tables should be dropped when the store is closed
 * @param tablePrefix table-name prefix isolating this test's tables
 * @return the configured options
 */
private RDBOptions getOptions(boolean dropDBAfterTest, String tablePrefix) {
    // fixed: parameter was misspelled "dropDBAFterTest" while the body referenced
    // "dropDBAfterTest", so the method did not compile
    return new RDBOptions().dropTablesOnClose(dropDBAfterTest).tablePrefix(tablePrefix);
}
/**
 * Convenience constructor: builds an {@link RDBBlobStore} on top of the given
 * {@link DataSource} with the default {@link RDBOptions}.
 *
 * @param ds the data source providing JDBC connections
 */
public RDBBlobStore(DataSource ds) {
    this(ds, new RDBOptions());
}
@Test public void init0then2() { RDBOptions op = new RDBOptions().tablePrefix("T0T2").initialSchema(0).upgradeToSchema(0).dropTablesOnClose(true); RDBDocumentStore rdb0 = null; RDBDocumentStore rdb1 = null; try { rdb0 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), op); RDBTableMetaData meta0 = rdb0.getTable(Collection.NODES); assertFalse(meta0.hasVersion()); rdb1 = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), new RDBOptions().tablePrefix("T0T2").initialSchema(0).upgradeToSchema(2)); RDBTableMetaData meta1 = rdb1.getTable(Collection.NODES); assertTrue(meta1.hasVersion()); UpdateOp testInsert = new UpdateOp(Utils.getIdFromPath("/foo"), true); testInsert.set(NodeDocument.SD_TYPE, 123L); assertTrue(rdb1.create(Collection.NODES, Collections.singletonList(testInsert))); // check that old instance can read a new entry NodeDocument check = rdb0.find(Collection.NODES, Utils.getIdFromPath("/foo")); assertNotNull(check); assertEquals(123L, check.get(NodeDocument.SD_TYPE)); } finally { if (rdb1 != null) { rdb1.dispose(); } if (rdb0 != null) { rdb0.dispose(); } } }
/**
 * Creates a {@link DocumentMK.Builder} wired to an RDB connection using a
 * unique, time-based table prefix; the tables are dropped when the store is
 * closed.
 *
 * @param db the data source the builder should connect to
 * @return a builder configured with the test clock and the RDB connection
 * @throws Exception if builder setup fails
 */
protected DocumentMK.Builder newBuilder(DataSource db) throws Exception {
    String prefix = "T" + Long.toHexString(System.currentTimeMillis());
    RDBOptions opt = new RDBOptions().tablePrefix(prefix).dropTablesOnClose(true);
    // fixed: the "db" parameter was previously ignored in favor of an outer
    // "dataSource" reference, so callers could not control the connection used
    return new DocumentMK.Builder().clock(getTestClock()).setRDBConnection(db, opt);
}
/**
 * Convenience constructor: builds an {@link RDBBlobStore} on top of the given
 * {@link DataSource} with the default {@link RDBOptions}.
 *
 * @param ds the data source providing JDBC connections
 */
public RDBBlobStore(DataSource ds) {
    this(ds, new RDBOptions());
}
/**
 * Verifies upgrading from schema 0 to schema 1 in a single store creation:
 * the NODES table gains the VERSION column and one "to DB level 1" log entry
 * is emitted per table.
 */
@Test
public void init01() {
    LogCustomizer logCustomizer = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO).contains("to DB level 1").create();
    logCustomizer.starting();
    RDBOptions options = new RDBOptions().tablePrefix("T01").initialSchema(0)
            .upgradeToSchema(1).dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        // one upgrade message per table is expected
        assertEquals("unexpected # of log entries: " + logCustomizer.getLogs(),
                RDBDocumentStore.getTableNames().size(), logCustomizer.getLogs().size());
    } finally {
        logCustomizer.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
/**
 * Creates a {@link NodeStore} backed by an RDB document store using a unique,
 * time-based table prefix and a persistent cache; tables are dropped on close.
 * The created data source is remembered so it can be disposed together with
 * the returned store.
 */
@Override
public NodeStore createNodeStore() {
    String tablePrefix = "T" + Long.toHexString(System.currentTimeMillis());
    RDBOptions rdbOptions = new RDBOptions().tablePrefix(tablePrefix).dropTablesOnClose(true);
    this.jdbcUrl = pUrl.replace("{fname}", fname);
    DataSource dataSource = RDBDataSourceFactory.forJdbcUrl(jdbcUrl, pUser, pPasswd);
    NodeStore store = new DocumentMK.Builder()
            .setPersistentCache("target/persistentCache,time")
            .setRDBConnection(dataSource, rdbOptions)
            .getNodeStore();
    this.dataSources.put(store, dataSource);
    return store;
}