Refine search
@Test void shouldReturnSameLoggerForSameContext() { // Given FormattedLogProvider logProvider = FormattedLogProvider.toOutputStream( new ByteArrayOutputStream() ); // Then FormattedLog log = logProvider.getLog( "test context" ); assertThat( logProvider.getLog( "test context" ), sameInstance( log ) ); }
/**
 * Creates a page cache backed by the given file system, using an 8M default size
 * unless {@code pagecache_memory} is configured explicitly.
 *
 * @param fileSystem file system that page cache will be based on
 * @param pageCacheTracer global page cache tracer
 * @param pageCursorTracerSupplier supplier of thread local (transaction local) page cursor tracer that will provide
 * thread local page cache statistics
 * @param config page cache configuration
 * @param versionContextSupplier version context supplier
 * @param jobScheduler page cache job scheduler
 * @return created page cache instance
 */
public static PageCache createPageCache( FileSystemAbstraction fileSystem, PageCacheTracer pageCacheTracer,
        PageCursorTracerSupplier pageCursorTracerSupplier, Config config, VersionContextSupplier versionContextSupplier,
        JobScheduler jobScheduler )
{
    // Fall back to an 8M page cache unless the user configured one explicitly
    config.augmentDefaults( GraphDatabaseSettings.pagecache_memory, "8M" );
    // Internal page cache messages go to stderr, stamped in the configured database time zone
    ZoneId zoneId = config.get( GraphDatabaseSettings.db_timezone ).getZoneId();
    FormattedLogProvider stderrLogProvider = FormattedLogProvider.withZoneId( zoneId ).toOutputStream( System.err );
    ConfiguringPageCacheFactory factory = new ConfiguringPageCacheFactory( fileSystem, config, pageCacheTracer,
            pageCursorTracerSupplier, stderrLogProvider.getLog( PageCache.class ), versionContextSupplier, jobScheduler );
    return factory.getOrCreatePageCache();
}
}
/**
 * Creates a {@link FormattedLogProvider} instance that writes messages to {@link PrintWriter}s obtained from the specified
 * {@link Supplier}. The PrintWriter is obtained from the Supplier before every log message is written.
 *
 * @param writerSupplier A supplier for a {@link PrintWriter} to write to
 * @return A {@link FormattedLogProvider} instance that writes to the specified PrintWriter
 */
public FormattedLogProvider toPrintWriter( Supplier<PrintWriter> writerSupplier )
{
    // Carries this builder's settings (zone, context rendering, levels, auto-flush) onto the new provider
    return new FormattedLogProvider( writerSupplier, zoneId, renderContext, levels, defaultLevel, autoFlush );
}
}
public File dumpState( Locks lm, LockWorker... workers ) throws IOException { FileOutputStream out = new FileOutputStream( file, false ); FormattedLogProvider logProvider = FormattedLogProvider.withoutAutoFlush().toOutputStream( out ); try { // * locks held by the lock manager lm.accept( new DumpLocksVisitor( logProvider.getLog( LockWorkFailureDump.class ) ) ); // * rag manager state; // * workers state Log log = logProvider.getLog( getClass() ); for ( LockWorker worker : workers ) { // - what each is doing and have up to now log.info( "Worker %s", worker ); } return file; } finally { out.flush(); out.close(); } } }
checkGraph = config.get( ConsistencyCheckSettings.consistency_check_graph ); checkIndexes = config.get( ConsistencyCheckSettings.consistency_check_indexes ); checkLabelScanStore = config.get( ConsistencyCheckSettings.consistency_check_label_scan_store ); FormattedLogProvider.withZoneId( logTimeZone ).toOutputStream( System.out ), fileSystem, verbose, reportDir.toFile(), new ConsistencyFlags( checkGraph, checkIndexes, checkLabelScanStore, checkPropertyOwners ) );
/**
 * Runs a full consistency check against the test database and fails the test if the
 * check is unsuccessful or cannot be completed.
 */
private void assertDatabaseConsistent()
{
    LogProvider logProvider = FormattedLogProvider.toOutputStream( System.out );
    try
    {
        ConsistencyCheckService.Result result = new ConsistencyCheckService().runFullConsistencyCheck(
                testDirectory.databaseLayout(), Config.defaults(), ProgressMonitorFactory.textual( System.err ),
                logProvider, false );
        assertTrue( result.isSuccessful() );
    }
    catch ( ConsistencyCheckIncompleteException e )
    {
        // Keep the original exception as the cause instead of flattening it to a message
        // (fail( e.getMessage() ) would discard the stack trace). JUnit's fail() throws
        // AssertionError as well, so callers observe a compatible failure.
        throw new AssertionError( "Consistency check did not complete", e );
    }
}
}
config.put( GraphDatabaseSettings.store_internal_log_path.name(), internalLogFile.getAbsolutePath() ); LogProvider userLogProvider = FormattedLogProvider.withZoneId( logZoneIdFrom( config ) ).toOutputStream( logOutputStream ); GraphDatabaseDependencies dependencies = GraphDatabaseDependencies.newDependencies() .userLogProvider( userLogProvider ); dependencies = dependencies.kernelExtensions( kernelExtensions ); Config dbConfig = Config.defaults( config ); GraphFactory graphFactory = createGraphFactory( dbConfig ); boolean httpAndHttpsDisabled = dbConfig.enabledHttpConnectors().isEmpty();
@Override public void start() { Map<String, String> opts = new HashMap<>(); // Neo4j 3.x opts.put(DBMS_CONNECTOR_HTTP_TYPE, HTTP_TYPE); opts.put(DBMS_CONNECTOR_HTTP_ENABLED, Boolean.TRUE.toString()); opts.put(DBMS_CONNECTOR_HTTP_LISTEN_ADDRESS, embeddedNeo4jConfiguration.getListenAddress() + ":" + embeddedNeo4jConfiguration.getHttpPort()); opts.put(DBMS_CONNECTOR_BOLT_ENABLED, Boolean.TRUE.toString()); opts.put(DBMS_CONNECTOR_BOLT_LISTEN_ADDRESS, embeddedNeo4jConfiguration.getListenAddress() + ":" + embeddedNeo4jConfiguration.getBoltPort()); Config defaults = Config.defaults(opts); FormattedLogProvider logProvider = FormattedLogProvider.withDefaultLogLevel(Level.INFO).toOutputStream(System.out); final GraphDatabaseDependencies graphDatabaseDependencies = GraphDatabaseDependencies.newDependencies().userLogProvider(logProvider); GraphFactory graphFactory = (config, dependencies) -> (GraphDatabaseFacade) graphDatabaseService; communityNeoServer = new CommunityNeoServer(defaults, graphFactory, graphDatabaseDependencies); communityNeoServer.start(); }
@BeforeClass
public static void setupServer() throws IOException
{
    // Capture all server log output in an in-memory stream so tests can inspect it
    out = new ByteArrayOutputStream();
    server = ServerHelper.createNonPersistentServer( FormattedLogProvider.toOutputStream( out ) );
}
/**
 * Builds a page cache for this rule, letting per-call {@code pageCacheConfig} settings
 * override the rule's base settings and falling back to NULL tracers.
 */
private PageCache createPageCache( FileSystemAbstraction fs, PageCacheConfig pageCacheConfig, Config config )
{
    PageCacheTracer tracer = selectConfig( baseConfig.tracer, pageCacheConfig.tracer, PageCacheTracer.NULL );
    PageCursorTracerSupplier cursorSupplier = selectConfig( baseConfig.pageCursorTracerSupplier,
            pageCacheConfig.pageCursorTracerSupplier, PageCursorTracerSupplier.NULL );
    // Default to an 8M page cache unless configured explicitly
    config.augmentDefaults( GraphDatabaseSettings.pagecache_memory, "8M" );
    FormattedLogProvider stderrLogProvider = FormattedLogProvider.toOutputStream( System.err );
    initializeJobScheduler();
    ConfiguringPageCacheFactory factory = new ConfiguringPageCacheFactory( fs, config, tracer, cursorSupplier,
            stderrLogProvider.getLog( PageCache.class ), EmptyVersionContextSupplier.EMPTY, jobScheduler );
    return factory.getOrCreatePageCache();
}
}
@Override
public void outputFileCreated( OutputStream newStream )
{
    // A fresh log file was rotated in: build a provider over the new stream,
    // record the event in it, and hand the provider to the rotation listener.
    FormattedLogProvider logProvider = internalLogBuilder.toOutputStream( newStream );
    logProvider.getLog( StoreLogService.class ).info( "Opened new internal log file" );
    rotationListener.accept( logProvider );
}
/**
 * Returns direct access to the underlying stores, lazily building and caching it on first use.
 */
public DirectStoreAccess directStoreAccess()
{
    if ( directStoreAccess == null )
    {
        DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
        PageCache pageCache = getPageCache( fileSystem );
        StoreAccess nativeStores = new StoreAccess( fileSystem, pageCache, directory ).initialize();
        Config config = new Config();
        OperationalMode mode = OperationalMode.single;
        LuceneLabelScanStoreBuilder labelScanStoreBuilder = new LuceneLabelScanStoreBuilder( directory,
                nativeStores.getRawNeoStores(), fileSystem, config, mode,
                FormattedLogProvider.toOutputStream( System.out ) );
        directStoreAccess = new DirectStoreAccess( nativeStores, labelScanStoreBuilder.build(),
                createIndexes( fileSystem, config, mode ) );
    }
    return directStoreAccess;
}
/**
 * Builds an asynchronous HTTP request log writing to a size-rotated file.
 *
 * @param fs file system abstraction the log file is created on
 * @param logTimeZone time zone used to stamp log entries
 * @param logFile path of the request log file
 * @param rotationSize size threshold (bytes) at which the file is rotated
 * @param rotationKeepNumber number of rotated files to keep
 * @throws IOException if the rotating output stream cannot be created
 */
public AsyncRequestLog( FileSystemAbstraction fs, ZoneId logTimeZone, String logFile, long rotationSize,
        int rotationKeepNumber ) throws IOException
{
    // Dedicated daemon threads perform file rotation in the background
    NamedThreadFactory threadFactory = new NamedThreadFactory( "HTTP-Log-Rotator", true );
    ExecutorService rotationExecutor = Executors.newCachedThreadPool( threadFactory );
    outputSupplier = new RotatingFileOutputStreamSupplier( fs, new File( logFile ), rotationSize, 0,
            rotationKeepNumber, rotationExecutor );
    FormattedLogProvider logProvider = FormattedLogProvider.withZoneId( logTimeZone )
            .toOutputStream( outputSupplier );
    // Entries are handed off to a single background writer thread via AsyncEvents,
    // so request handling never blocks on log I/O
    asyncLogProcessingExecutor = Executors.newSingleThreadExecutor( new NamedThreadFactory( "HTTP-Log-Writer" ) );
    asyncEventProcessor = new AsyncEvents<>( this, this );
    AsyncLogProvider asyncLogProvider = new AsyncLogProvider( asyncEventProcessor, logProvider );
    log = asyncLogProvider.getLog( "REQUEST" );
}
@Override public void start() { tempDirectory = createTempDirectory(); Map<String, String> opts = new HashMap<>(); // Neo4j 2.x opts.put(DBMS_SECURITY_AUTH_ENABLED, Boolean.FALSE.toString()); opts.put(ORG_NEO_4J_SERVER_WEBSERVER_ADDRESS, embeddedNeo4jConfiguration.getListenAddress()); opts.put(ORG_NEO_4J_SERVER_WEBSERVER_PORT, Integer.toString(embeddedNeo4jConfiguration.getHttpPort())); // Neo4j 2.x/3.x String sslDir = tempDirectory.toFile().getAbsolutePath() + "neo4j-home/"; opts.put(ServerSettings.tls_key_file.name(), sslDir + "/ssl/snakeoil.key"); opts.put(ServerSettings.tls_certificate_file.name(), sslDir + "/ssl/snakeoil.cert"); Config defaults = new Config(opts); // Config.empty().with(opts); FormattedLogProvider logProvider = FormattedLogProvider.withDefaultLogLevel(Level.INFO).toOutputStream(System.out); GraphDatabaseDependencies graphDatabaseDependencies = GraphDatabaseDependencies.newDependencies().userLogProvider(logProvider); Database.Factory factory = new Database.Factory() { @Override public Database newDatabase(Config config, GraphDatabaseFacadeFactory.Dependencies dependencies) { return new WrappedDatabase((GraphDatabaseAPI) graphDatabaseService); } }; communityNeoServer = new CommunityNeoServer(defaults, factory, graphDatabaseDependencies, logProvider); communityNeoServer.start(); }
@Override
@Nonnull
public AdminCommand create( Path homeDir, Path configDir, OutsideWorld outsideWorld )
{
    // NEO4J_DEBUG in the environment switches console logging from silent to debug
    Level logLevel = System.getenv().get( "NEO4J_DEBUG" ) != null ? Level.DEBUG : Level.NONE;
    LogProvider logProvider = FormattedLogProvider.withDefaultLogLevel( logLevel )
            .toOutputStream( outsideWorld.outStream() );
    Monitors monitors = new Monitors();
    OnlineBackupContextBuilder contextBuilder = new OnlineBackupContextBuilder( homeDir, configDir );
    BackupModule backupModule = new BackupModule( outsideWorld, logProvider, monitors );
    // Highest-priority provider wins; failing to find any is a configuration error
    BackupSupportingClassesFactoryProvider bestProvider =
            getProvidersByPriority().findFirst().orElseThrow( noProviderException() );
    BackupSupportingClassesFactory supportingClassesFactory = bestProvider.getFactory( backupModule );
    BackupStrategyCoordinatorFactory coordinatorFactory = new BackupStrategyCoordinatorFactory( backupModule );
    return new OnlineBackupCommand( outsideWorld, contextBuilder, supportingClassesFactory, coordinatorFactory );
}
/**
 * Parses the tool arguments, validates the store state and runs a full consistency check.
 *
 * @param args raw command-line arguments
 * @return the result of the consistency check
 * @throws ToolFailureException if the store is unusable or the check cannot complete
 */
ConsistencyCheckService.Result run( String... args ) throws ToolFailureException
{
    Args parsedArgs = Args.withFlags( VERBOSE ).parse( args );
    File storeDir = determineStoreDirectory( parsedArgs );
    Config tuningConfiguration = readConfiguration( parsedArgs );
    boolean verbose = isVerbose( parsedArgs );
    DatabaseLayout databaseLayout = DatabaseLayout.of( storeDir );
    // Refuse to run against a store that is in a state unfit for checking
    checkDbState( databaseLayout, tuningConfiguration );
    ZoneId logTimeZone = tuningConfiguration.get( GraphDatabaseSettings.db_timezone ).getZoneId();
    LogProvider logProvider = FormattedLogProvider.withZoneId( logTimeZone ).toOutputStream( systemOut );
    try
    {
        return consistencyCheckService.runFullConsistencyCheck( databaseLayout, tuningConfiguration,
                ProgressMonitorFactory.textual( systemError ), logProvider, fs, verbose,
                new ConsistencyFlags( tuningConfiguration ) );
    }
    catch ( ConsistencyCheckIncompleteException e )
    {
        throw new ToolFailureException( "Check aborted due to exception", e );
    }
}
/** * The test case is basically loads of concurrent CREATE/DELETE NODE or sometimes just CREATE, keeping the created node in an array * for dedicated deleter threads to pick up and delete as fast as they can see them. This concurrently with large creation transactions. */ @Test public void shouldStressIt() throws Throwable { // given Race race = new Race().withMaxDuration( 5, TimeUnit.SECONDS ); AtomicReferenceArray<Node> nodeHeads = new AtomicReferenceArray<>( NUMBER_OF_CREATORS ); for ( int i = 0; i < NUMBER_OF_CREATORS; i++ ) { race.addContestant( creator( nodeHeads, i ) ); } race.addContestants( NUMBER_OF_DELETORS, deleter( nodeHeads ) ); // when race.go(); // then DatabaseLayout dbLayout = db.databaseLayout(); db.shutdownAndKeepStore(); assertTrue( new ConsistencyCheckService().runFullConsistencyCheck( dbLayout, defaults(), NONE, toOutputStream( System.out ), false, new ConsistencyFlags( true, true, true, false ) ).isSuccessful() ); }
config.put( GraphDatabaseSettings.store_internal_log_path.name(), internalLogFile.getAbsolutePath() ); LogProvider userLogProvider = FormattedLogProvider.withZoneId( logZoneIdFrom( config ) ).toOutputStream( logOutputStream ); GraphDatabaseDependencies dependencies = GraphDatabaseDependencies.newDependencies() .userLogProvider( userLogProvider ); dependencies = dependencies.kernelExtensions( kernelExtensions ); Config dbConfig = Config.defaults( config ); GraphFactory graphFactory = createGraphFactory( dbConfig ); boolean httpAndHttpsDisabled = dbConfig.enabledHttpConnectors().isEmpty();
/**
 * Entry point: dumps stack (and optionally heap, when the {@code HEAP} flag is set)
 * information for running processes whose names contain any of the given filter strings.
 *
 * @param args command-line arguments; orphan arguments are name filters,
 *             {@code DIR} selects the dump directory (default {@code "data"})
 * @throws Exception if dumping fails
 */
public static void main( String[] args ) throws Exception
{
    // Tolerate a null argument array (e.g. when invoked programmatically)
    Args arg = Args.withFlags( HEAP ).parse( args == null ? new String[0] : args );
    boolean doHeapDump = arg.getBoolean( HEAP, false, true );
    // Preferred toArray idiom: pass an empty array and let the collection size the result
    String[] containing = arg.orphans().toArray( new String[0] );
    String dumpDir = arg.get( DIR, "data" );
    new DumpProcessInformation( FormattedLogProvider.toOutputStream( System.out ), new File( dumpDir ) )
            .dumpRunningProcesses( doHeapDump, containing );
}
@Override
public void rotationError( Exception e, OutputStream outStream )
{
    // Rotation failed: report the failure (with the exception) into whatever stream is still usable.
    // NOTE(review): this is logged at info level even though it describes an error — confirm intentional.
    FormattedLogProvider logProvider = internalLogBuilder.toOutputStream( outStream );
    logProvider.getLog( StoreLogService.class ).info( "Rotation of internal log file failed:", e );
} } );