@Override public Long getNextBatchId( DatabaseMeta dbm, Database ldb, String schemaName, String tableName, String fieldName ) throws KettleDatabaseException { // Always take off autocommit. ldb.setCommit( 10 ); // // Temporary work-around to handle batch-id from extended options // Eventually want this promoted to proper dialogs and such // Map<String, String> connectionExtraOptions = this.getExtraOptions(); String sequenceProp = this.getPluginId() + "." + SEQUENCE_FOR_BATCH_ID; String autoIncSQLProp = this.getPluginId() + "." + AUTOINCREMENT_SQL_FOR_BATCH_ID; if ( connectionExtraOptions != null ) { if ( this.supportsSequences() && connectionExtraOptions.containsKey( sequenceProp ) ) { return getNextBatchIdUsingSequence( connectionExtraOptions.get( sequenceProp ), schemaName, dbm, ldb ); } else if ( this.supportsAutoInc() && connectionExtraOptions.containsKey( autoIncSQLProp ) ) { return getNextBatchIdUsingAutoIncSQL( connectionExtraOptions.get( autoIncSQLProp ), dbm, ldb ); } } return getNextBatchIdUsingLockTables( dbm, ldb, schemaName, tableName, fieldName ); }
// Share variables with this scope, open the logging connection, and set the commit size
// (setCommit also turns off auto-commit on the connection — see similar usage elsewhere in this code).
db.shareVariablesWith( this ); db.connect(); db.setCommit( logCommitSize );
// Same connect pattern for the log database connection: share variables, connect,
// and disable auto-commit via the configured log commit size.
ldb.shareVariablesWith( this ); ldb.connect(); ldb.setCommit( logCommitSize );
private void connectDatabase( Database database ) throws KettleDatabaseException { database.shareVariablesWith( this ); if ( getTransMeta().isUsingUniqueConnections() ) { synchronized ( getTrans() ) { database.connect( getTrans().getTransactionId(), getPartitionID() ); } } else { database.connect( getPartitionID() ); } database.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit. if ( log.isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "DatabaseLookup.Log.ConnectedToDatabase" ) ); } } }
/**
 * Writes a START record for every step to the configured step logging table
 * and then purges old records from that table.
 *
 * @throws KettleException if any errors occur during logging
 */
protected void writeStepLogInformation() throws KettleException {
  StepLogTable stepLogTable = getTransMeta().getStepLogTable();
  Database logDb = null;
  try {
    logDb = createDataBase( stepLogTable.getDatabaseMeta() );
    logDb.shareVariablesWith( this );
    logDb.connect();
    logDb.setCommit( logCommitSize ); // also turns off auto-commit on the logging connection

    // One START record per step in the transformation.
    for ( StepMetaDataCombi step : getSteps() ) {
      logDb.writeLogRecord( stepLogTable, LogStatus.START, step, null );
    }
    logDb.cleanupLogRecords( stepLogTable );
  } catch ( Exception e ) {
    throw new KettleException( BaseMessages.getString(
      PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
  } finally {
    // disconnectDb is used here instead of a direct disconnect; presumably it is
    // null-safe since logDb may be null when createDataBase() fails.
    disconnectDb( logDb );
  }
}
/**
 * Writes a record with the given status to the job log table. When the job has
 * ended (status END), old log records are cleaned up as well.
 *
 * @param jobLogTable the job log table configuration
 * @param status      the log status to record
 * @throws KettleJobException      if the log record cannot be written
 * @throws KettleDatabaseException if the log connection cannot be created
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException, KettleDatabaseException {
  DatabaseMeta logConnection = jobLogTable.getDatabaseMeta();
  String tableName = jobLogTable.getActualTableName();
  boolean jobEnded = status.equals( LogStatus.END );

  Database logDb = createDataBase( logConnection );
  logDb.shareVariablesWith( this );
  try {
    logDb.connect();
    logDb.setCommit( logCommitSize ); // also turns off auto-commit
    logDb.writeLogRecord( jobLogTable, status, this, null );

    // Only purge historical records once the job is finished.
    if ( jobEnded ) {
      logDb.cleanupLogRecords( jobLogTable );
    }
  } catch ( KettleDatabaseException dbe ) {
    addErrors( 1 );
    throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
  } finally {
    if ( !logDb.isAutoCommit() ) {
      logDb.commitLog( true, jobLogTable );
    }
    logDb.disconnect();
  }
}
/**
 * Writes a START log record for every job entry copy to the job entry log table
 * (if one has been configured) and cleans up old records.
 *
 * @throws KettleException if writing to the log table fails
 */
protected void writeJobEntryLogInformation() throws KettleException {
  Database db = null;
  JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
  try {
    db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
    db.shareVariablesWith( this );
    db.connect();
    db.setCommit( logCommitSize ); // also turns off auto-commit on the logging connection
    for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
      db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
    }
    db.cleanupLogRecords( jobEntryLogTable );
  } catch ( Exception e ) {
    throw new KettleException( BaseMessages.getString( PKG,
      "Job.Exception.UnableToJobEntryInformationToLogTable" ), e );
  } finally {
    // FIX: db is null when createDataBase() throws. The previous unguarded
    // db.isAutoCommit()/db.disconnect() calls raised a NullPointerException
    // from the finally block, masking the original KettleException.
    if ( db != null ) {
      if ( !db.isAutoCommit() ) {
        db.commitLog( true, jobEntryLogTable );
      }
      db.disconnect();
    }
  }
}
// Log the successful connection, then apply the step's configured commit size
// (setCommit also disables auto-commit on the connection).
logDetailed( BaseMessages.getString( PKG, "CombinationLookup.Log.ConnectedToDB" ) ); data.db.setCommit( meta.getCommitSize() );
// A large commit size (9999) is used purely to switch off auto-commit;
// presumably no explicit commit batching is intended here — TODO confirm.
logDetailed( BaseMessages.getString( PKG, "DBProc.Log.AutoCommit" ) ); data.db.setCommit( 9999 );
// Standard logging-connection setup: share variables, connect, and disable
// auto-commit via the configured log commit size.
db.shareVariablesWith( this ); db.connect(); db.setCommit( logCommitSize );
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
  meta = (InsertUpdateMeta) smi;
  data = (InsertUpdateData) sdi;

  // Base-class initialization must succeed before we touch the database.
  if ( !super.init( smi, sdi ) ) {
    return false;
  }

  try {
    if ( meta.getDatabaseMeta() == null ) {
      logError( BaseMessages.getString( PKG, "InsertUpdate.Init.ConnectionMissing", getStepname() ) );
      return false;
    }

    data.db = new Database( this, meta.getDatabaseMeta() );
    data.db.shareVariablesWith( this );

    if ( getTransMeta().isUsingUniqueConnections() ) {
      // Shared transformation-wide connection: serialize on the Trans object
      // and join its transaction.
      synchronized ( getTrans() ) {
        data.db.connect( getTrans().getTransactionId(), getPartitionID() );
      }
    } else {
      data.db.connect( getPartitionID() );
    }

    data.db.setCommit( meta.getCommitSize( this ) ); // also turns off auto-commit
    return true;
  } catch ( KettleException ke ) {
    logError( BaseMessages.getString( PKG, "InsertUpdate.Log.ErrorOccurredDuringStepInitialize" ) + ke.getMessage() );
  }
  return false;
}
// Logging-connection setup: share variables, connect, and disable auto-commit
// via the configured log commit size.
db.shareVariablesWith( this ); db.connect(); db.setCommit( logCommitSize );
// The commit size value is irrelevant here; setCommit is called only for its
// side effect of disabling auto-commit.
data.db.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit.
// Apply the step's variable-resolved commit size (also disables auto-commit).
data.db.setCommit( meta.getCommitSize( this ) );
// Apply the step's variable-resolved commit size (also disables auto-commit).
data.db.setCommit( meta.getCommitSize( this ) );
// Disables auto-commit; the original author noted PostgreSQL required this —
// TODO confirm whether the workaround is still needed.
data.db.setCommit( 100 ); // needed for PGSQL it seems...
// Apply the step's configured commit size (also disables auto-commit).
data.db.setCommit( meta.getCommitSize() );
// Connect for this partition, then set the commit size captured in the step
// data (also disables auto-commit).
data.db.connect( getPartitionID() ); data.db.setCommit( data.commitSize );
// Log the successful connection, then apply the step's configured commit size
// (setCommit also disables auto-commit on the connection).
logDetailed( BaseMessages.getString( PKG, "DimensionLookup.Log.ConnectedToDB" ) ); data.db.setCommit( meta.getCommitSize() );