/**
 * Collects every database connection this job uses: the connections referenced by the
 * individual job entries, plus the connections backing the job log table and any extra
 * log tables.
 *
 * @return the set of used database connections; never null, and never contains null
 *         entries (a log table with no connection configured is skipped)
 */
private Set<DatabaseMeta> getUsedDatabaseMetas() {
  Set<DatabaseMeta> databaseMetas = new HashSet<DatabaseMeta>();
  for ( JobEntryCopy jobEntryCopy : getJobCopies() ) {
    DatabaseMeta[] dbs = jobEntryCopy.getEntry().getUsedDatabaseConnections();
    if ( dbs != null ) {
      for ( DatabaseMeta db : dbs ) {
        if ( db != null ) {
          databaseMetas.add( db );
        }
      }
    }
  }
  // The job log table (and any extra log table) may have no connection configured:
  // elsewhere in this code getDatabaseMeta() is always null-checked before use, so
  // guard here too instead of adding a null entry to the returned set.
  if ( jobLogTable.getDatabaseMeta() != null ) {
    databaseMetas.add( jobLogTable.getDatabaseMeta() );
  }
  for ( LogTableInterface logTable : getExtraLogTables() ) {
    if ( logTable.getDatabaseMeta() != null ) {
      databaseMetas.add( logTable.getDatabaseMeta() );
    }
  }
  return databaseMetas;
}
if ( connectionName.equals( jobLogTable.getDatabaseMeta().getName() ) ) { feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "The database connection used for logging is: "
monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLStatementsForJobLogTables" ) ); if ( jobLogTable.getDatabaseMeta() != null && !Utils.isEmpty( jobLogTable.getTableName() ) ) { Database db = new Database( this, jobLogTable.getDatabaseMeta() ); try { db.connect(); if ( sql != null && sql.length() > 0 ) { SQLStatement stat = new SQLStatement( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), jobLogTable.getDatabaseMeta(), sql ); stats.add( stat ); jobLogTable.getDatabaseMeta(), null ); stat.setError( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ErrorObtainingJobLogTableInfo" ) + dbe.getMessage() );
/**
 * Writes a log record for this job to the job log table; when the job has finished
 * (status END), old log records are also cleaned up according to the table's settings.
 *
 * @param jobLogTable the job log table definition to write the record to
 * @param status the current logging status of the job
 * @throws KettleJobException when the log record could not be written to the table
 * @throws KettleDatabaseException when connecting to or committing on the logging
 *         database fails
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status )
  throws KettleJobException, KettleDatabaseException {
  // Old records are only purged once the job has reached its end status.
  boolean purgeOldRecords = status.equals( LogStatus.END );
  String logTableName = jobLogTable.getActualTableName();
  DatabaseMeta logConnection = jobLogTable.getDatabaseMeta();

  Database logDb = createDataBase( logConnection );
  logDb.shareVariablesWith( this );
  try {
    logDb.connect();
    logDb.setCommit( logCommitSize );
    logDb.writeLogRecord( jobLogTable, status, this, null );
    if ( purgeOldRecords ) {
      logDb.cleanupLogRecords( jobLogTable );
    }
  } catch ( KettleDatabaseException dbe ) {
    addErrors( 1 );
    throw new KettleJobException( "Unable to end processing by writing log record to table " + logTableName, dbe );
  } finally {
    // Commit the log write before releasing the connection when not auto-committing.
    if ( !logDb.isAutoCommit() ) {
      logDb.commitLog( true, jobLogTable );
    }
    logDb.disconnect();
  }
}
DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta(); String schemaName = environmentSubstitute( jobMeta.getJobLogTable().getActualSchemaName() ); String tableName = environmentSubstitute( jobMeta.getJobLogTable().getActualTableName() ); String schemaAndTable = jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName ); Database ldb = new Database( this, logcon ); ldb.shareVariablesWith( this );
if ( jobMeta.getJobLogTable().getDatabaseMeta() != null && !Utils.isEmpty( jobMeta.getJobLogTable().getTableName() ) ) { jobGraph.addAllTabs();
/**
 * Persists the core attributes of the given job metadata onto the repository data node:
 * description/version/status, job-log-table settings, audit fields, all log tables and
 * the job's attributes map.
 *
 * @param rootNode the repository node the properties are written to
 * @param jobMeta the job metadata being saved
 * @throws KettleException when saving a log table or the attributes map fails
 */
private void saveJobDetails( DataNode rootNode, JobMeta jobMeta ) throws KettleException {
  rootNode.setProperty( PROP_EXTENDED_DESCRIPTION, jobMeta.getExtendedDescription() );
  rootNode.setProperty( PROP_JOB_VERSION, jobMeta.getJobversion() );
  // Negative status values are normalized to -1 and stored as a long.
  rootNode.setProperty( PROP_JOB_STATUS, jobMeta.getJobstatus() < 0 ? -1L : jobMeta.getJobstatus() );

  // Only store a reference to the logging connection when one is actually configured.
  if ( jobMeta.getJobLogTable().getDatabaseMeta() != null ) {
    DataNodeRef ref = new DataNodeRef( jobMeta.getJobLogTable().getDatabaseMeta().getObjectId().getId() );
    rootNode.setProperty( PROP_DATABASE_LOG, ref );
  }
  rootNode.setProperty( PROP_TABLE_NAME_LOG, jobMeta.getJobLogTable().getTableName() );

  rootNode.setProperty( PROP_CREATED_USER, jobMeta.getCreatedUser() );
  rootNode.setProperty( PROP_CREATED_DATE, jobMeta.getCreatedDate() );
  rootNode.setProperty( PROP_MODIFIED_USER, jobMeta.getModifiedUser() );
  rootNode.setProperty( PROP_MODIFIED_DATE, jobMeta.getModifiedDate() );
  rootNode.setProperty( PROP_USE_BATCH_ID, jobMeta.getJobLogTable().isBatchIdUsed() );
  rootNode.setProperty( PROP_PASS_BATCH_ID, jobMeta.isBatchIdPassed() );
  rootNode.setProperty( PROP_USE_LOGFIELD, jobMeta.getJobLogTable().isLogFieldUsed() );
  rootNode.setProperty( PROP_SHARED_FILE, jobMeta.getSharedObjectsFile() );
  rootNode.setProperty( PROP_LOG_SIZE_LIMIT, jobMeta.getJobLogTable().getLogSizeLimit() );

  // Save the logging tables too..
  //
  RepositoryAttributeInterface attributeInterface = new PurRepositoryAttribute( rootNode, jobMeta.getDatabases() );
  for ( LogTableInterface logTable : jobMeta.getLogTables() ) {
    logTable.saveToRepository( attributeInterface );
  }

  // Save the attributes map as well (the original comment said "Load" but this writes).
  //
  AttributesMapUtil.saveAttributesMap( rootNode, jobMeta );
}
.getJobLogTable().getDatabaseMeta() != null ? jobMeta.getJobLogTable().getDatabaseMeta().getObjectId() : -1L ); table.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_JOB_TABLE_NAME_LOG ), jobMeta if ( jobMeta.getJobLogTable().getDatabaseMeta() != null ) { repository.insertJobEntryDatabase( jobMeta.getObjectId(), null, jobMeta .getJobLogTable().getDatabaseMeta().getObjectId() );