/**
 * Registers this module's transaction machinery (commit process, kernel,
 * transaction registry and file listing) with the given dependency container
 * so that other components can resolve them later.
 *
 * @param dependencies container to publish the components into
 */
public void satisfyDependencies( Dependencies dependencies )
{
    dependencies.satisfyDependencies(
            transactionCommitProcess,
            kernel,
            kernelTransactions,
            fileListing );
}
}
/**
 * Publishes this module's transaction-log components — check pointer, log
 * files (and their information view), explicit-index ordering, transaction
 * store, rotation and appender — into the given dependency container.
 *
 * @param dependencies container to publish the components into
 */
public void satisfyDependencies( Dependencies dependencies )
{
    dependencies.satisfyDependencies(
            checkPointer,
            logFiles,
            logFiles.getLogFileInformation(),
            explicitIndexTransactionOrdering,
            logicalTransactionStore,
            logRotation,
            appender );
}
}
/**
 * Builds a dependency resolver pre-populated with the given index providers.
 *
 * @param providers index providers to register
 * @return a resolver from which each given provider can be resolved
 */
private static DependencyResolver buildIndexDependencies( IndexProvider... providers )
{
    Dependencies deps = new Dependencies();
    // Cast to Object[] so each provider is registered individually,
    // instead of the array itself being registered as one dependency.
    deps.satisfyDependencies( (Object[]) providers );
    return deps;
}
/**
 * Instantiates all kernel extensions discoverable on the classpath for the
 * given database, wiring them up with the supplied infrastructure components.
 * Extension factories that fail to load are ignored (see
 * {@code KernelExtensionFailureStrategies.ignore()}).
 *
 * @return the assembled, not-yet-started {@link DatabaseKernelExtensions}
 */
@SuppressWarnings( "unchecked" )
public static DatabaseKernelExtensions instantiateKernelExtensions( File databaseDirectory, FileSystemAbstraction fileSystem,
        Config config, LogService logService, PageCache pageCache, JobScheduler jobScheduler,
        RecoveryCleanupWorkCollector recoveryCollector, DatabaseInfo databaseInfo, Monitors monitors,
        TokenHolders tokenHolders )
{
    // Everything an extension factory may ask for gets published up front.
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies( fileSystem, config, logService, pageCache, recoveryCollector,
            monitors, jobScheduler, tokenHolders );

    // Service loading yields raw KernelExtensionFactory instances.
    @SuppressWarnings( "rawtypes" )
    Iterable kernelExtensions = Service.load( KernelExtensionFactory.class );

    KernelContext kernelContext = new SimpleKernelContext( databaseDirectory, databaseInfo, dependencies );
    return new DatabaseKernelExtensions( kernelContext, kernelExtensions, dependencies,
            KernelExtensionFailureStrategies.ignore() );
}
}
/**
 * Prepares the mocked data source and registers it with a started
 * {@code dataSourceManager} before each test.
 */
@BeforeEach
void setUp()
{
    Dependencies deps = new Dependencies();
    deps.satisfyDependencies( new StubIdGeneratorFactory() );
    deps.satisfyDependencies( fileSystem );

    // Stub out everything the manager reads off the data source.
    when( dataSource.getDependencyResolver() ).thenReturn( deps );
    when( dataSource.getDatabaseLayout() ).thenReturn( DatabaseLayout.of( new File( "database" ) ) );
    when( dataSource.getStoreId() ).thenReturn( StoreId.DEFAULT );

    dataSourceManager.start();
    dataSourceManager.register( dataSource );
}
/**
 * Verifies that stopping the data source still propagates a checkpointing
 * failure (as the cause of a LifecycleException) rather than swallowing it.
 */
@Test
public void shouldAlwaysShutdownLifeEvenWhenCheckPointingFails() throws Exception
{
    // Given
    FileSystemAbstraction fs = this.fs.get();
    PageCache pageCache = pageCacheRule.getPageCache( fs );
    DatabaseHealth databaseHealth = mock( DatabaseHealth.class );
    when( databaseHealth.isHealthy() ).thenReturn( true );
    IOException ex = new IOException( "boom!" );
    // assertHealthy is invoked on the checkpoint path; making it throw
    // simulates a checkpointing failure during stop().
    doThrow( ex ).when( databaseHealth ) .assertHealthy( IOException.class ); // <- this is a trick to simulate a failure during checkpointing
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies( databaseHealth );
    NeoStoreDataSource dataSource = dsRule.getDataSource( dir.databaseLayout(), fs, pageCache, dependencies );
    dataSource.start();
    try
    {
        // When
        dataSource.stop();
        fail( "it should have thrown" );
    }
    catch ( LifecycleException e )
    {
        // Then: the original IOException must surface as the cause.
        assertEquals( ex, e.getCause() );
    }
}
/**
 * Creates the community Cypher execution engine, publishing the query service
 * and compiler factory as dependencies along the way.
 *
 * @param deps     container to publish engine components into
 * @param graphAPI database the engine will execute queries against
 * @return the configured {@link QueryExecutionEngine}
 */
@Override
protected QueryExecutionEngine createEngine( Dependencies deps, GraphDatabaseAPI graphAPI )
{
    GraphDatabaseCypherService queryService = new GraphDatabaseCypherService( graphAPI );
    deps.satisfyDependency( queryService );

    DependencyResolver resolver = graphAPI.getDependencyResolver();
    LogService logService = resolver.resolveDependency( LogService.class );
    Monitors monitors = resolver.resolveDependency( Monitors.class );
    Config config = resolver.resolveDependency( Config.class );

    CypherConfiguration cypherConfig = CypherConfiguration.fromConfig( config );
    CypherPlannerConfiguration plannerConfig = cypherConfig.toCypherPlannerConfiguration( config );
    CypherRuntimeConfiguration runtimeConfig = cypherConfig.toCypherRuntimeConfiguration();

    LogProvider logProvider = logService.getInternalLogProvider();
    CommunityCompilerFactory compilerFactory =
            new CommunityCompilerFactory( queryService, monitors, logProvider, plannerConfig, runtimeConfig );
    // Single instance: use satisfyDependency for consistency with the
    // queryService registration above (the varargs overload was overkill).
    deps.satisfyDependency( compilerFactory );
    return createEngine( queryService, config, logProvider, compilerFactory );
}
// Publish the security provider's components through the platform's
// dependency container so other modules can resolve them by type.
platform.dependencies.satisfyDependencies( securityProvider.authManager() );
platform.dependencies.satisfyDependencies( securityProvider.userManagerSupplier() );
// Register single instances with satisfyDependency throughout; the first line
// previously used the varargs satisfyDependencies for one object, which is
// equivalent but inconsistent with the two lines below.
dependencies.satisfyDependency( dataSourceManager );
dependencies.satisfyDependency( logFiles );
dependencies.satisfyDependency( explicitIndexProviderLookup );
/**
 * Verifies that a failure while setting up store modules is rethrown as-is
 * from start() and is also logged as a warning by NeoStoreDataSource.
 */
@Test
public void logModuleSetUpError()
{
    Config config = Config.defaults();
    IdGeneratorFactory idGeneratorFactory = mock( IdGeneratorFactory.class );
    Throwable openStoresError = new RuntimeException( "Can't set up modules" );
    // Fail the very first id-generator creation to abort module setup.
    doThrow( openStoresError ).when( idGeneratorFactory ).create( any( File.class ), anyLong(), anyBoolean() );
    CommunityIdTypeConfigurationProvider idTypeConfigurationProvider = new CommunityIdTypeConfigurationProvider();
    AssertableLogProvider logProvider = new AssertableLogProvider();
    SimpleLogService logService = new SimpleLogService( logProvider, logProvider );
    PageCache pageCache = pageCacheRule.getPageCache( fs.get() );
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies( idGeneratorFactory, idTypeConfigurationProvider, config, logService );
    NeoStoreDataSource dataSource = dsRule.getDataSource( dir.databaseLayout(), fs.get(), pageCache, dependencies );
    try
    {
        dataSource.start();
        fail( "Exception expected" );
    }
    catch ( Exception e )
    {
        // The original setup error must be rethrown unchanged.
        assertEquals( openStoresError, e );
    }
    // And it must also have been logged as a warning with the error attached.
    logProvider.assertAtLeastOnce( inLog( NeoStoreDataSource.class ).warn(
            equalTo( "Exception occurred while setting up store modules. Attempting to close things down." ),
            equalTo( openStoresError ) ) );
}
/**
 * Initializes the import: sets up off-heap/caching infrastructure, computes
 * input size estimates, sanity-checks them, and publishes the created
 * components as dependencies before notifying the execution monitor.
 *
 * @param input source of nodes/relationships to import
 * @throws IOException on failure reading input while calculating estimates
 */
public void initialize( Input input ) throws IOException
{
    log.info( "Import starting" );
    startTime = currentTimeMillis();
    this.input = input;
    PageCacheArrayFactoryMonitor numberArrayFactoryMonitor = new PageCacheArrayFactoryMonitor();
    // Array factory picks heap/off-heap/page-cache backing automatically.
    numberArrayFactory = auto( neoStore.getPageCache(), storeDir, config.allowCacheAllocationOnHeap(), numberArrayFactoryMonitor );
    badCollector = input.badCollector();
    // Some temporary caches and indexes in the import
    idMapper = input.idMapper( numberArrayFactory );
    nodeRelationshipCache = new NodeRelationshipCache( numberArrayFactory, config.denseNodeThreshold() );
    Estimates inputEstimates = input.calculateEstimates( neoStore.getPropertyStore().newValueEncodedSizeCalculator() );
    // Sanity checking against estimates
    new EstimationSanityChecker( recordFormats, monitor ).sanityCheck( inputEstimates );
    new HeapSizeSanityChecker( monitor ).sanityCheck( inputEstimates, recordFormats, neoStore,
            nodeRelationshipCache.memoryEstimation( inputEstimates.numberOfNodes() ),
            idMapper.memoryEstimation( inputEstimates.numberOfNodes() ) );
    // Publish components so import stages can resolve them later.
    dependencies.satisfyDependencies( inputEstimates, idMapper, neoStore, nodeRelationshipCache, numberArrayFactoryMonitor );
    if ( neoStore.determineDoubleRelationshipRecordUnits( inputEstimates ) )
    {
        monitor.doubleRelationshipRecordUnitsEnabled();
    }
    executionMonitor.initialize( dependencies );
}
// Fresh monitors instance, registered together with the infrastructure
// components that dependents of this container will resolve.
Monitors monitors = new Monitors();
deps.satisfyDependencies( fileSystem, config, logService, storeIndexStoreView, pageCache, monitors,
        RecoveryCleanupWorkCollector.immediate() );
/**
 * Makes the transaction-handling components of this module (commit process,
 * kernel, transaction registry, file listing) resolvable from the supplied
 * container.
 *
 * @param dependencies container that receives the components
 */
public void satisfyDependencies( Dependencies dependencies )
{
    dependencies.satisfyDependencies( transactionCommitProcess, kernel, kernelTransactions, fileListing );
}
}
/**
 * Makes the transaction-log components of this module (check pointer, log
 * files and their information view, explicit-index ordering, transaction
 * store, rotation, appender) resolvable from the supplied container.
 *
 * @param dependencies container that receives the components
 */
public void satisfyDependencies( Dependencies dependencies )
{
    dependencies.satisfyDependencies( checkPointer, logFiles, logFiles.getLogFileInformation(),
            explicitIndexTransactionOrdering, logicalTransactionStore, logRotation, appender );
}
}
/**
 * Builds the server-side secure clustering pipeline wrapper, supplying the
 * SSL policy loaded from configuration as a dependency.
 *
 * @param config configuration to load the SSL policy from
 * @return a server pipeline wrapper backed by the secure pipeline factory
 */
@Override
protected PipelineWrapper createPipelineWrapper( Config config )
{
    SecureClusteringPipelineFactory pipelineFactory = new SecureClusteringPipelineFactory();
    Dependencies dependencies = new Dependencies();
    dependencies.satisfyDependencies( SslPolicyLoader.create( config, logProvider ) );
    return pipelineFactory.forServer( config, dependencies, logProvider );
}
}
// Publish the security provider's components through the platform's
// dependency container so other modules can resolve them by type.
platform.dependencies.satisfyDependencies( securityProvider.authManager() );
platform.dependencies.satisfyDependencies( securityProvider.userManagerSupplier() );
/**
 * Creates the community Cypher execution engine, publishing the query service
 * and compiler factory as dependencies along the way.
 *
 * @param deps     container to publish engine components into
 * @param graphAPI database the engine will execute queries against
 * @return the configured {@link QueryExecutionEngine}
 */
@Override
protected QueryExecutionEngine createEngine( Dependencies deps, GraphDatabaseAPI graphAPI )
{
    GraphDatabaseCypherService queryService = new GraphDatabaseCypherService( graphAPI );
    deps.satisfyDependency( queryService );

    DependencyResolver resolver = graphAPI.getDependencyResolver();
    LogService logService = resolver.resolveDependency( LogService.class );
    Monitors monitors = resolver.resolveDependency( Monitors.class );
    Config config = resolver.resolveDependency( Config.class );

    CypherConfiguration cypherConfig = CypherConfiguration.fromConfig( config );
    CypherPlannerConfiguration plannerConfig = cypherConfig.toCypherPlannerConfiguration( config );
    CypherRuntimeConfiguration runtimeConfig = cypherConfig.toCypherRuntimeConfiguration();

    LogProvider logProvider = logService.getInternalLogProvider();
    CommunityCompilerFactory compilerFactory =
            new CommunityCompilerFactory( queryService, monitors, logProvider, plannerConfig, runtimeConfig );
    // Single instance: use satisfyDependency for consistency with the
    // queryService registration above (the varargs overload was overkill).
    deps.satisfyDependency( compilerFactory );
    return createEngine( queryService, config, logProvider, compilerFactory );
}
/**
 * Initializes the import: sets up off-heap/caching infrastructure, computes
 * input size estimates, sanity-checks them, and publishes the created
 * components as dependencies before notifying the execution monitor.
 *
 * @param input source of nodes/relationships to import
 * @throws IOException on failure reading input while calculating estimates
 */
public void initialize( Input input ) throws IOException
{
    log.info( "Import starting" );
    startTime = currentTimeMillis();
    this.input = input;
    PageCacheArrayFactoryMonitor numberArrayFactoryMonitor = new PageCacheArrayFactoryMonitor();
    // Array factory picks heap/off-heap/page-cache backing automatically.
    numberArrayFactory = auto( neoStore.getPageCache(), storeDir, config.allowCacheAllocationOnHeap(), numberArrayFactoryMonitor );
    badCollector = input.badCollector();
    // Some temporary caches and indexes in the import
    idMapper = input.idMapper( numberArrayFactory );
    nodeRelationshipCache = new NodeRelationshipCache( numberArrayFactory, config.denseNodeThreshold() );
    Estimates inputEstimates = input.calculateEstimates( neoStore.getPropertyStore().newValueEncodedSizeCalculator() );
    // Sanity checking against estimates
    new EstimationSanityChecker( recordFormats, monitor ).sanityCheck( inputEstimates );
    new HeapSizeSanityChecker( monitor ).sanityCheck( inputEstimates, recordFormats, neoStore,
            nodeRelationshipCache.memoryEstimation( inputEstimates.numberOfNodes() ),
            idMapper.memoryEstimation( inputEstimates.numberOfNodes() ) );
    // Publish components so import stages can resolve them later.
    dependencies.satisfyDependencies( inputEstimates, idMapper, neoStore, nodeRelationshipCache, numberArrayFactoryMonitor );
    if ( neoStore.determineDoubleRelationshipRecordUnits( inputEstimates ) )
    {
        monitor.doubleRelationshipRecordUnitsEnabled();
    }
    executionMonitor.initialize( dependencies );
}
// Fresh monitors instance, registered together with the infrastructure
// components that dependents of this container will resolve.
Monitors monitors = new Monitors();
deps.satisfyDependencies( fileSystem, config, logService, storeIndexStoreView, pageCache, monitors,
        RecoveryCleanupWorkCollector.immediate() );
// NOTE(review): fragment — the opening of the state-machine constructor call
// lies outside this view; only its trailing arguments and the subsequent
// registration are visible here. Code left untouched.
config.get( state_machine_apply_max_batch_size ), logProvider );
dependencies.satisfyDependencies( replicatedTxStateMachine );