/**
 * Creates a statement manager that batches SQL statements, sized by the
 * batch limit configured on the active database dictionary.
 *
 * @param store the JDBC store the manager operates against
 * @param conn  the connection statements will be prepared on
 * @return a batching-capable {@link PreparedStatementManager}
 */
protected PreparedStatementManager newPreparedStatementManager(
    JDBCStore store, Connection conn) {
    // The dictionary owns the batch-window size; delegate directly.
    return new BatchingPreparedStatementManagerImpl(store, conn,
        dict.getBatchLimit());
}
/** * Flush the given row immediately or deferred the flush in batch. */ protected void flushAndUpdate(RowImpl row) throws SQLException { if (isBatchDisabled(row)) { // if there were some statements batched before, then // we need to flush them out first before processing the // current non batch process. flushBatch(); super.flushAndUpdate(row); } else { // process the SQL statement, either execute it immediately or // batch it for later execution. batchOrExecuteRow(row); } }
// NOTE(review): truncated fragment — this looks like a partial, duplicated
// paste of a flushBatch() implementation (the method header is missing and
// the try block is never closed, so this cannot compile on its own).
// Presumably an accidental duplicate of the canonical flushBatch(); confirm
// against that method and remove this fragment. Identical copies appear
// elsewhere in this chunk.
List<RowImpl> batchedRows = getBatchedRows(); String batchedSql = getBatchedSql(); if (batchedRows == null) return; try { RowImpl onerow = null; ps = prepareStatement(batchedSql); if (batchSize == 1) { flushSingleRow(onerow, ps); } else { onerow = itr.next(); if (_batchLimit == 1) { flushSingleRow(onerow, ps); } else { if (count < _batchLimit || _batchLimit == -1) { if (ps != null) onerow.flush(ps, _dict, _store); addBatch(ps, onerow, count); count++; } else { int[] rtn = executeBatch(ps); checkUpdateCount(rtn, batchedRowsBaseIndex, ps); addBatch(ps, onerow, count);
private boolean isBatchDisabled(RowImpl row) { boolean rtnVal = true; int limit = getBatchLimit(); if ((limit < 0 || limit > 1) && !isBatchDisabled()) { OpenJPAStateManager sm = row.getPrimaryKey(); ClassMapping cmd = null; if (sm != null) cmd = (ClassMapping) sm.getMetaData(); Column[] autoAssign = null; if (row.getAction() == Row.ACTION_INSERT) autoAssign = row.getTable().getAutoAssignedColumns(); // validate batch capability rtnVal = _dict .validateBatchProcess(row, autoAssign, sm, cmd); setBatchDisabled(rtnVal); } return rtnVal; }
/**
 * Flushes all rows through the base implementation, then drains any
 * statements still pending in the batching statement manager.
 *
 * @param rowMgr the row manager holding the rows to flush
 * @param psMgr  the statement manager (expected to be batching-capable)
 * @param exceps collection accumulating exceptions raised during flush
 * @return the (possibly augmented) exception collection from the base flush
 */
protected Collection flush(RowManager rowMgr,
    PreparedStatementManager psMgr, Collection exceps) {
    Collection result = super.flush(rowMgr, psMgr, exceps);
    // The base flush may leave statements queued in the batch; push the
    // remainder out before returning.
    ((BatchingPreparedStatementManagerImpl) psMgr).flushBatch();
    return result;
}
}
/**
 * Executes a single row's statement immediately (outside any batch) and
 * reports a failure when the update count is not exactly one.
 *
 * @param row the row whose SQL is executed
 * @param ps  the prepared statement to populate, or {@code null}
 * @throws SQLException if execution fails, or if an insert affected an
 *         unexpected number of rows and no failed object is available
 */
private void flushSingleRow(RowImpl row, PreparedStatement ps)
    throws SQLException {
    if (ps != null)
        row.flush(ps, _dict, _store);
    int updated = executeUpdate(ps, row.getSQL(_dict), row);
    if (updated == 1)
        return;
    // Unexpected update count: surface driver warnings, then report the
    // failure either as an optimistic-lock violation or a hard error.
    logSQLWarnings(ps);
    Object failed = row.getFailedObject();
    if (failed != null) {
        _exceptions.add(new OptimisticException(failed));
    } else if (row.getAction() == Row.ACTION_INSERT) {
        throw new SQLException(_loc.get("update-failed-no-failed-obj",
            String.valueOf(updated), row.getSQL(_dict)).getMessage());
    }
}
// NOTE(review): garbled fragment — an isBatchDisabled(row) guard fused with
// a stray "default:" label; this does not compile. It appears to be residue
// from merging flushAndUpdate with the switch in batchOrExecuteRow; confirm
// against those methods and remove this line.
if (isBatchDisabled(row)) { flushBatch(); default: flushBatch();
// NOTE(review): garbled fragment — disjoint pieces of a flushBatch()
// implementation (dangling else branches, duplicated checkUpdateCount calls
// with a different arity than the copies elsewhere in this chunk); does not
// compile. Looks like an accidental partial paste; confirm against the
// canonical flushBatch() and remove.
flushSingleRow(onerow, ps); } else { onerow = (RowImpl) itr.next(); if (_batchLimit == 1) { flushSingleRow(onerow, ps); } else { if (count < _batchLimit || _batchLimit == -1) { checkUpdateCount(rtn, batchedRowsBaseIndex); checkUpdateCount(rtn, batchedRowsBaseIndex);
// NOTE(review): truncated fragment — byte-identical duplicate of the partial
// flushBatch() paste appearing earlier in this chunk (method header missing,
// try block never closed; cannot compile). Presumably accidental duplication;
// confirm against the canonical flushBatch() and remove.
List<RowImpl> batchedRows = getBatchedRows(); String batchedSql = getBatchedSql(); if (batchedRows == null) return; try { RowImpl onerow = null; ps = prepareStatement(batchedSql); if (batchSize == 1) { flushSingleRow(onerow, ps); } else { onerow = itr.next(); if (_batchLimit == 1) { flushSingleRow(onerow, ps); } else { if (count < _batchLimit || _batchLimit == -1) { if (ps != null) onerow.flush(ps, _dict, _store); addBatch(ps, onerow, count); count++; } else { int[] rtn = executeBatch(ps); checkUpdateCount(rtn, batchedRowsBaseIndex, ps); addBatch(ps, onerow, count);
private boolean isBatchDisabled(RowImpl row) { boolean rtnVal = true; int limit = getBatchLimit(); if ((limit < 0 || limit > 1) && !isBatchDisabled()) { OpenJPAStateManager sm = row.getPrimaryKey(); ClassMapping cmd = null; if (sm != null) cmd = (ClassMapping) sm.getMetaData(); Column[] autoAssign = null; if (row.getAction() == Row.ACTION_INSERT) autoAssign = row.getTable().getAutoAssignedColumns(); // validate batch capability rtnVal = _dict .validateBatchProcess(row, autoAssign, sm, cmd); setBatchDisabled(rtnVal); } return rtnVal; }
/**
 * Performs the standard flush and then forces out whatever the batching
 * statement manager still holds queued.
 *
 * @param rowMgr row manager supplying the rows to write
 * @param psMgr  statement manager; must be a
 *               {@code BatchingPreparedStatementManagerImpl}
 * @param exceps running collection of flush exceptions
 * @return the exception collection produced by the base flush
 */
protected Collection flush(RowManager rowMgr,
    PreparedStatementManager psMgr, Collection exceps) {
    final Collection flushed = super.flush(rowMgr, psMgr, exceps);
    BatchingPreparedStatementManagerImpl batching =
        (BatchingPreparedStatementManagerImpl) psMgr;
    // Drain the残 statements left queued by the base flush.
    batching.flushBatch();
    return flushed;
}
}
/**
 * Runs one row's statement directly (no batching). An update count other
 * than one is treated as a failure: warnings are logged, and either an
 * {@link OptimisticException} is queued or, for inserts with no failed
 * object, a {@link SQLException} is thrown.
 *
 * @param row the row to execute
 * @param ps  statement to bind parameters into; may be {@code null}
 * @throws SQLException on execution failure or unexpected insert count
 */
private void flushSingleRow(RowImpl row, PreparedStatement ps)
    throws SQLException {
    if (ps != null) {
        row.flush(ps, _dict, _store);
    }
    final int count = executeUpdate(ps, row.getSQL(_dict), row);
    if (count == 1) {
        return;
    }
    logSQLWarnings(ps);
    final Object failed = row.getFailedObject();
    if (failed != null) {
        // Optimistic-lock style failure: defer, don't throw.
        _exceptions.add(new OptimisticException(failed));
    } else if (row.getAction() == Row.ACTION_INSERT) {
        throw new SQLException(_loc.get("update-failed-no-failed-obj",
            String.valueOf(count), row.getSQL(_dict)).getMessage());
    }
}
// NOTE(review): truncated fragment — third byte-identical copy of the
// partial flushBatch() paste in this chunk (no method header, unclosed try;
// does not compile). Accidental duplication, presumably; confirm against the
// canonical flushBatch() and remove.
List<RowImpl> batchedRows = getBatchedRows(); String batchedSql = getBatchedSql(); if (batchedRows == null) return; try { RowImpl onerow = null; ps = prepareStatement(batchedSql); if (batchSize == 1) { flushSingleRow(onerow, ps); } else { onerow = itr.next(); if (_batchLimit == 1) { flushSingleRow(onerow, ps); } else { if (count < _batchLimit || _batchLimit == -1) { if (ps != null) onerow.flush(ps, _dict, _store); addBatch(ps, onerow, count); count++; } else { int[] rtn = executeBatch(ps); checkUpdateCount(rtn, batchedRowsBaseIndex, ps); addBatch(ps, onerow, count);
/** * Flush the given row immediately or deferred the flush in batch. */ protected void flushAndUpdate(RowImpl row) throws SQLException { if (isBatchDisabled(row)) { // if there were some statements batched before, then // we need to flush them out first before processing the // current non batch process. flushBatch(); super.flushAndUpdate(row); } else { // process the SQL statement, either execute it immediately or // batch it for later execution. batchOrExecuteRow(row); } }
private boolean isBatchDisabled(RowImpl row) { boolean rtnVal = true; int limit = getBatchLimit(); if ((limit < 0 || limit > 1) && !isBatchDisabled()) { OpenJPAStateManager sm = row.getPrimaryKey(); ClassMapping cmd = null; if (sm != null) cmd = (ClassMapping) sm.getMetaData(); Column[] autoAssign = null; if (row.getAction() == Row.ACTION_INSERT) autoAssign = row.getTable().getAutoAssignedColumns(); // validate batch capability rtnVal = _dict .validateBatchProcess(row, autoAssign, sm, cmd); setBatchDisabled(rtnVal); } return rtnVal; }
protected void batchOrExecuteRow(RowImpl row) throws SQLException { String sql = row.getSQL(_dict); if (_batchedSql == null) { // brand new SQL _batchedSql = sql; } else if (!sql.equals(_batchedSql)) { // SQL statements changed. switch (_batchedRows.size()) { case 0: break; case 1: // single entry in cache, direct SQL execution. try { super.flushAndUpdate((RowImpl) _batchedRows.get(0)); } finally { _batchedSql = null; _batchedRows.clear(); } break; default: // flush all entries in cache in batch. flushBatch(); } _batchedSql = sql; } _batchedRows.add(row); }
/**
 * Immediately executes the statement for one row. On an update count
 * other than one, driver warnings are logged and the failure is recorded:
 * as a queued {@link OptimisticException} when the row exposes a failed
 * object, or as a thrown {@link SQLException} for inserts without one.
 *
 * @param row row being written
 * @param ps  statement to flush parameters into, or {@code null}
 * @throws SQLException on execution failure or a bad insert count
 */
private void flushSingleRow(RowImpl row, PreparedStatement ps)
    throws SQLException {
    if (ps != null) {
        row.flush(ps, _dict, _store);
    }
    int affected = executeUpdate(ps, row.getSQL(_dict), row);
    if (affected != 1) {
        logSQLWarnings(ps);
        Object failedObject = row.getFailedObject();
        if (failedObject != null) {
            _exceptions.add(new OptimisticException(failedObject));
        } else if (row.getAction() == Row.ACTION_INSERT) {
            throw new SQLException(_loc.get("update-failed-no-failed-obj",
                String.valueOf(affected), row.getSQL(_dict)).getMessage());
        }
    }
}
/**
 * Returns a {@link PreparedStatementManager} that supports statement
 * batching, with the batch window taken from the database dictionary.
 *
 * @param store the backing JDBC store
 * @param conn  the connection to prepare statements on
 * @return a new batching statement manager
 */
protected PreparedStatementManager newPreparedStatementManager(
    JDBCStore store, Connection conn) {
    final int limit = dict.getBatchLimit();
    return new BatchingPreparedStatementManagerImpl(store, conn, limit);
}
// NOTE(review): truncated fragment — fourth byte-identical copy of the
// partial flushBatch() paste in this chunk (missing method header, unclosed
// try; does not compile). Presumably accidental duplication; confirm against
// the canonical flushBatch() and remove.
List<RowImpl> batchedRows = getBatchedRows(); String batchedSql = getBatchedSql(); if (batchedRows == null) return; try { RowImpl onerow = null; ps = prepareStatement(batchedSql); if (batchSize == 1) { flushSingleRow(onerow, ps); } else { onerow = itr.next(); if (_batchLimit == 1) { flushSingleRow(onerow, ps); } else { if (count < _batchLimit || _batchLimit == -1) { if (ps != null) onerow.flush(ps, _dict, _store); addBatch(ps, onerow, count); count++; } else { int[] rtn = executeBatch(ps); checkUpdateCount(rtn, batchedRowsBaseIndex, ps); addBatch(ps, onerow, count);
/** * Flush the given row immediately or deferred the flush in batch. */ protected void flushAndUpdate(RowImpl row) throws SQLException { if (isBatchDisabled(row)) { // if there were some statements batched before, then // we need to flush them out first before processing the // current non batch process. flushBatch(); super.flushAndUpdate(row); } else { // process the SQL statement, either execute it immediately or // batch it for later execution. batchOrExecuteRow(row); } }