/**
 * Creates the {@code sqlgReadOnly} user (if absent) and grants it global
 * {@code SELECT} privileges, then flushes the privilege tables.
 * <p>
 * Fix: {@code FLUSH PRIVILEGES} produces no ResultSet, so the original
 * {@code executeQuery(...)} call throws an SQLException on MySQL drivers;
 * it must be issued via {@code execute(...)}.
 * <p>
 * NOTE(review): {@code GRANT ... IDENTIFIED BY} is removed in MySQL 8 — the
 * {@code CREATE USER} statement above already sets the password; confirm the
 * target server version supports this syntax.
 *
 * @param sqlgGraph graph whose current transaction's connection is used
 */
@Override
public void grantReadOnlyUserPrivilegesToSqlgSchemas(SqlgGraph sqlgGraph) {
    Connection conn = sqlgGraph.tx().getConnection();
    try (Statement statement = conn.createStatement()) {
        statement.execute("CREATE USER IF NOT EXISTS 'sqlgReadOnly'@'%' IDENTIFIED BY 'sqlgReadOnly'");
        statement.execute("GRANT SELECT ON *.* TO 'sqlgReadOnly'@'%' IDENTIFIED BY 'sqlgReadOnly'");
        // FLUSH PRIVILEGES returns no result set; executeQuery(...) would throw here.
        statement.execute("FLUSH PRIVILEGES");
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
}
}
/**
 * Benchmark: inserts 1000 "Person" vertices, each carrying the same ten
 * string properties ({@code a1=aaaaaaaaaa1} .. {@code a10=aaaaaaaaaa10}),
 * then commits once.
 *
 * @return the number of vertices inserted (always 1000)
 */
@Benchmark
public long insert1000VerticesWith10Properties() {
    Map<String, Object> properties = new HashMap<>();
    // Same fixed key/value pairs as before, built in a loop instead of ten puts.
    for (int p = 1; p <= 10; p++) {
        properties.put("a" + p, "aaaaaaaaaa" + p);
    }
    for (int count = 0; count < 1000; count++) {
        this.sqlgGraph.addVertex("Person", properties);
    }
    this.sqlgGraph.tx().commit();
    return 1000;
}
/**
 * Returns {@code true} if the transaction is in any batch mode
 * (NORMAL, STREAMING or STREAMING_WITH_LOCK).
 * <p>
 * Fix: the original expression relied on {@code &&} binding tighter than
 * {@code ||}, so the {@code supportsBatchMode()} guard applied only to the
 * NORMAL check — the streaming checks could report batch mode even on a
 * dialect without batch support. Parentheses make the guard cover all three.
 */
@SuppressWarnings("WeakerAccess")
public boolean isInBatchMode() {
    return supportsBatchMode()
            && (isInNormalBatchMode() || isInStreamingBatchMode() || isInStreamingWithLockBatchMode());
}
/**
 * Flushes the pending batch to the database without committing the transaction.
 *
 * @throws IllegalStateException if the transaction is not in batch mode
 */
public void flush() {
    if (!this.isInBatchMode()) {
        throw new IllegalStateException("Transaction must be in batch mode to flush");
    }
    logger.debug("flushing transaction!!!");
    BatchManager batchManager = this.getBatchManager();
    // Skip re-entrant flushes while the manager is already writing.
    if (!batchManager.isBusyFlushing()) {
        batchManager.flush();
    }
}
private static BatchManager.BatchModeType flushAndSetTxToNone(SqlgGraph sqlgGraph) { //topology elements can not be added in batch mode because on flushing the topology //needs to be queries and yet the elements are still in the cache. BatchManager.BatchModeType batchModeType = sqlgGraph.tx().getBatchModeType(); if (sqlgGraph.tx().isInBatchMode()) { batchModeType = sqlgGraph.tx().getBatchModeType(); sqlgGraph.tx().flush(); sqlgGraph.tx().batchMode(BatchManager.BatchModeType.NONE); } return batchModeType; }
/**
 * Executes a generated DROP/DELETE statement for the given query stack.
 * The prepared statement is registered with the transaction, which owns its
 * lifecycle (hence no try-with-resources here).
 * <p>
 * Fix: the original {@code if (distinctQueryStack.isEmpty()) ... else ...}
 * executed the identical statement in both branches; collapsed to one call.
 *
 * @param sqlgGraph         the graph providing the connection and transaction
 * @param sql               the SQL to execute
 * @param distinctQueryStack the query stack supplying bind parameters
 * @param deletedSchemaTable the schema/table being deleted (kept for signature compatibility)
 */
private static void executeDropQuery(SqlgGraph sqlgGraph, String sql, LinkedList<SchemaTableTree> distinctQueryStack, SchemaTable deletedSchemaTable) {
    // Batched elements must hit the database before a delete can see them.
    if (sqlgGraph.tx().isInBatchMode()) {
        sqlgGraph.tx().flush();
    }
    try {
        // A non-GRAPH_STEP head implies the delete is scoped by parent ids.
        if (!distinctQueryStack.isEmpty() && distinctQueryStack.peekFirst().getStepType() != SchemaTableTree.STEP_TYPE.GRAPH_STEP) {
            Preconditions.checkState(!distinctQueryStack.peekFirst().getParentIdsAndIndexes().isEmpty());
        }
        Connection conn = sqlgGraph.tx().getConnection();
        if (logger.isDebugEnabled()) {
            logger.debug(sql);
        }
        PreparedStatement preparedStatement = conn.prepareStatement(sql);
        sqlgGraph.tx().add(preparedStatement);
        int parameterCount = 1;
        SqlgUtil.setParametersOnStatement(sqlgGraph, distinctQueryStack, preparedStatement, parameterCount);
        // Both branches of the original if/else were identical; one call suffices.
        preparedStatement.execute();
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }
}
Connection conn = this.tx().getConnection(); ObjectNode result = this.mapper.createObjectNode(); ArrayNode dataNode = this.mapper.createArrayNode(); throw new RuntimeException(e); } finally { this.tx().rollback();
/**
 * Persists a stream of framed BDIO entries into the graph using Sqlg's
 * streaming batch mode, committing on completion and rolling back on error
 * or cancellation.
 * <p>
 * NOTE(review): the per-entry sort plus accumulator flush suggests nodes are
 * grouped by type so each streamed label is written contiguously — confirm
 * against {@code SqlgNodeAccumulator}.
 *
 * @param framedEntries the framed JSON-LD entries to persist
 * @return a publisher that completes when all entries are persisted
 */
@Override public Publisher<?> persistFramedEntries(Flowable<Map<String, Object>> framedEntries) { return framedEntries
        // Convert each framed entry into its graph node maps.
        .map(BdioDocument::toGraphNodes)
        // Sort nodes by type, fold them into an accumulator, and flush that batch.
        .map(nodes -> nodes.stream() .sorted(SqlgBlackDuckIoReader::nodeTypeOrder) .reduce(new SqlgNodeAccumulator(), SqlgNodeAccumulator::addNode, SqlgNodeAccumulator::combine) .flush())
        // Merge the per-entry accumulators into a single one.
        .reduce(SqlgNodeAccumulator::combine)
        .doOnSuccess(SqlgNodeAccumulator::finish)
        // Streaming batch mode must be on before the first element is written.
        .doOnSubscribe(x -> graph().tx().streamingBatchModeOn())
        .toFlowable()
        .doOnComplete(() -> graph().tx().commit())
        .doOnError(x -> graph().tx().rollback())
        .doOnCancel(() -> graph().tx().rollback()); }
this.sqlgGraph.tx().commit(); stopWatch.stop(); logger.debug("Time to createVertexLabel sqlg topology: " + stopWatch.toString()); if (!existSqlgSchema) { addPublicSchema(); this.sqlgGraph.tx().commit(); validateTopology(); this.sqlgGraph.tx().commit(); } catch (Exception e) { this.sqlgGraph.tx().rollback(); throw e;
public void fromNotifyJson(int pid, LocalDateTime notifyTimestamp) { try { ImmutablePair<Integer, LocalDateTime> p = new ImmutablePair<>(pid, notifyTimestamp); if (!this.ownPids.contains(p)) { List<Vertex> logs = this.sqlgGraph.topology().V() .hasLabel(SQLG_SCHEMA + "." + SQLG_SCHEMA_LOG) .has(SQLG_SCHEMA_LOG_TIMESTAMP, notifyTimestamp) .toList(); Preconditions.checkState(logs.size() == 1, "There must be one and only be one log, found %d", logs.size()); LocalDateTime timestamp = logs.get(0).value("timestamp"); Preconditions.checkState(timestamp.equals(notifyTimestamp), "notify log's timestamp does not match."); int backEndPid = logs.get(0).value("pid"); Preconditions.checkState(backEndPid == pid, "notify pids do not match."); ObjectNode log = logs.get(0).value("log"); fromNotifyJson(timestamp, log); } else { // why? we get notifications for our own things //this.ownPids.remove(p); } } finally { this.sqlgGraph.tx().rollback(); } }
@Override public <L, R> void bulkAddEdges(SqlgGraph sqlgGraph, SchemaTable out, SchemaTable in, String edgeLabel, Pair<String, String> idFields, Collection<Pair<L, R>> uids, Map<String, PropertyType> edgeColumns, Map<String, Object> edgePropertyMap) { if (!sqlgGraph.tx().isInStreamingBatchMode() && !sqlgGraph.tx().isInStreamingWithLockBatchMode()) { throw SqlgExceptions.invalidMode("Transaction must be in " + BatchManager.BatchModeType.STREAMING + " or " + BatchManager.BatchModeType.STREAMING_WITH_LOCK + " mode for bulkAddEdges"); logger.debug(sql.toString()); Connection conn = sqlgGraph.tx().getConnection(); try (PreparedStatement preparedStatement = conn.prepareStatement(sql.toString())) { preparedStatement.executeUpdate();
/**
 * Switches the transaction into the requested batch mode. A no-op when the
 * dialect does not support batching.
 *
 * @param batchModeType the batch mode to activate
 * @throws IllegalStateException for an unrecognized batch mode
 */
public void batchMode(BatchManager.BatchModeType batchModeType) {
    if (!supportsBatchMode()) {
        return;
    }
    if (batchModeType == BatchManager.BatchModeType.NONE) {
        // NONE requires an open transaction before resetting the batch manager.
        readWrite();
        this.threadLocalTx.get().getBatchManager().batchModeOn(BatchManager.BatchModeType.NONE);
    } else if (batchModeType == BatchManager.BatchModeType.NORMAL) {
        this.normalBatchModeOn();
    } else if (batchModeType == BatchManager.BatchModeType.STREAMING) {
        this.streamingBatchModeOn();
    } else if (batchModeType == BatchManager.BatchModeType.STREAMING_WITH_LOCK) {
        this.streamingWithLockBatchModeOn();
    } else {
        throw new IllegalStateException("unhandled BatchModeType " + batchModeType.name());
    }
}
throw new RuntimeException(e); this.sqlgTransaction = new SqlgTransaction(this, this.configuration.getBoolean("cache.vertices", false)); this.sqlgTransaction.setDefaultFetchSize(this.configuration.getInteger("fetch.size", this.sqlDialect.getDefaultFetchSize())); this.tx().readWrite(); this.tx().commit();
@Override protected void doCommit() throws TransactionException { if (!isOpen()) { return; if (supportsBatchMode() && this.threadLocalTx.get().getBatchManager().isInBatchMode()) { getBatchManager().flush(); connection.close(); } catch (Exception e) { this.rollback(); if (e instanceof RuntimeException) { throw (RuntimeException) e;
public void finish() throws NodeDoesNotExistException { graph().tx().commit(); doVacuumAnalyze(); graph().tx().commit(); doVacuumAnalyze(); graph().tx().streamingBatchModeOn(); for (Map.Entry<EdgeKey, Collection<Pair<Object, Object>>> edge : edges.asMap().entrySet()) { String edgeLabel = edge.getKey().edgeLabel; graph().tx().flush(); graph().tx().commit(); doVacuumAnalyze();
/**
 * Streams a single vertex with the given label and ordered properties.
 * Requires the transaction to be in streaming batch mode.
 *
 * @param label     the vertex label
 * @param keyValues ordered property map for the vertex
 */
public void streamVertex(String label, LinkedHashMap<String, Object> keyValues) {
    if (!this.tx().isInStreamingBatchMode()) {
        throw SqlgExceptions.invalidMode(TRANSACTION_MUST_BE_IN + this.tx().getBatchModeType().toString() + MODE_FOR_STREAM_VERTEX);
    }
    // Copy preserves insertion order; the label rides along as T.label.
    Map<Object, Object> withLabel = new LinkedHashMap<>(keyValues);
    withLabel.put(T.label, label);
    streamVertex(SqlgUtil.mapTokeyValues(withLabel));
}
/**
 * Adds an edge from this vertex to {@code inVertex}. In streaming batch mode
 * only one edge label may be streamed at a time; mixing labels without a
 * commit/flush is an error.
 *
 * @param label     the edge label
 * @param inVertex  the in (target) vertex
 * @param keyValues alternating property key/value pairs
 * @return the new edge
 */
@Override
public Edge addEdge(String label, Vertex inVertex, Object... keyValues) {
    this.sqlgGraph.tx().readWrite();
    boolean inStreamingMode = this.sqlgGraph.getSqlDialect().supportsBatchMode()
            && (this.sqlgGraph.tx().isInStreamingBatchMode() || this.sqlgGraph.tx().isInStreamingWithLockBatchMode());
    if (inStreamingMode) {
        SchemaTable currentEdgeLabel = this.sqlgGraph.tx().getBatchManager().getStreamingBatchModeEdgeSchemaTable();
        boolean sameLabel = currentEdgeLabel == null
                || currentEdgeLabel.getTable().substring(EDGE_PREFIX.length()).equals(label);
        if (!sameLabel) {
            throw new IllegalStateException("Streaming batch mode must occur for one label at a time. Expected \"" + currentEdgeLabel + "\" found \"" + label + "\". First commit the transaction or call SqlgGraph.flush() before streaming a different label");
        }
    }
    return addEdgeInternal(inStreamingMode, label, inVertex, keyValues);
}
private Iterator<Edge> internalEdges(Direction direction, String... labels) { this.sqlgGraph.tx().readWrite(); if (this.sqlgGraph.getSqlDialect().supportsBatchMode() && this.sqlgGraph.tx().isInBatchMode() && this.sqlgGraph.tx().getBatchManager().vertexIsCached(this)) { this.sqlgGraph.tx().flush(); } // need topology when we're a topology vertex GraphTraversalSource gts = Topology.SQLG_SCHEMA.equals(schema) ? this.sqlgGraph.topology() : this.sqlgGraph.traversal(); switch (direction) { case OUT: return gts.V(this).outE(labels); case IN: return gts.V(this).inE(labels); case BOTH: return gts.V(this).bothE(labels); } return Collections.emptyIterator(); }
/**
 * Adds a vertex with the given key/value pairs. Plain streaming batch mode is
 * rejected (use {@code streamVertex}); streaming-with-lock mode delegates to
 * the internal streaming path; otherwise the label's topology is ensured and
 * a new {@code SqlgVertex} is created.
 *
 * @param keyValues alternating property key/value pairs, optionally including T.label
 * @return the new vertex
 */
@Override
public Vertex addVertex(Object... keyValues) {
    if (this.tx().isInStreamingBatchMode()) {
        throw SqlgExceptions.invalidMode(String.format("Transaction is in %s, use streamVertex(Object ... keyValues)", this.tx().getBatchModeType().toString()));
    }
    if (this.tx().isInStreamingWithLockBatchMode()) {
        return internalStreamVertex(keyValues);
    }
    Triple<Map<String, PropertyType>, Map<String, Object>, Map<String, Object>> validated =
            SqlgUtil.validateVertexKeysValues(this.sqlDialect, keyValues);
    final Map<String, PropertyType> columns = validated.getLeft();
    final Pair<Map<String, Object>, Map<String, Object>> keyValuePair = Pair.of(validated.getMiddle(), validated.getRight());
    final String label = ElementHelper.getLabelValue(keyValues).orElse(Vertex.DEFAULT_LABEL);
    SchemaTable schemaTable = SchemaTable.from(this, label);
    this.tx().readWrite();
    VertexLabel vertexLabel = this.getTopology().ensureVertexLabelExist(schemaTable.getSchema(), schemaTable.getTable(), columns);
    // User-defined identifier columns must all be supplied when there is no ID primary key.
    if (!vertexLabel.hasIDPrimaryKey()) {
        Preconditions.checkArgument(columns.keySet().containsAll(vertexLabel.getIdentifiers()),
                "identifiers must be present %s", vertexLabel.getIdentifiers());
    }
    return new SqlgVertex(this, false, false, schemaTable.getSchema(), schemaTable.getTable(), keyValuePair);
}