/**
 * Builds and starts a {@link LogCustomizer} capturing {@link AsyncIndexUpdate}
 * messages at the given level. Callers are responsible for invoking
 * {@code finished()} when done.
 */
private static LogCustomizer createLogCustomizer(Level level) {
    LogCustomizer customizer = LogCustomizer
            .forLogger(AsyncIndexUpdate.class.getName())
            .enable(level)
            .filter(level)
            .create();
    customizer.starting();
    return customizer;
}
@Test
public void multipleQueryRuns() {
    // With a tracking threshold of 'trackEvery', running 'executions' queries
    // should emit exactly executions / trackEvery deprecation messages.
    final int executions = 16;
    final int trackEvery = 5;
    final int numTraces = executions / trackEvery;
    OrderedPropertyIndexProvider.setThreshold(trackEvery);
    List<String> expectedLogs = Collections.nCopies(numTraces, OrderedIndex.DEPRECATION_MESSAGE);
    custom.starting();
    try {
        for (int i = 0; i < executions; i++) {
            executeQuery("SELECT * FROM [oak:Unstructured]", SQL2);
        }
        assertThat(custom.getLogs(), is(expectedLogs));
    } finally {
        // Always detach the custom appender so a failure here cannot leak
        // captured state into subsequent tests.
        custom.finished();
    }
}
}
@Test
public void testExactMatch() {
    // Only a message that exactly equals "Test Message" (case-sensitive,
    // no extra text) should be captured.
    LogCustomizer custom = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .exactlyMatches("Test Message")
            .create();
    try {
        custom.starting();
        LOG.info("test message");
        LOG.info("test message 1");
        LOG.info("1 test message");
        // None of the near-miss messages may match.
        assertTrue(custom.getLogs().isEmpty());
        LOG.info("Test Message");
        // Re-fetch the log list instead of relying on the previously returned
        // reference aliasing the customizer's live buffer.
        assertEquals(1, custom.getLogs().size());
    } finally {
        custom.finished();
    }
}
@Test
public void singleQueryRun() {
    // A single query against the deprecated ordered index must log exactly
    // one deprecation message.
    custom.starting();
    try {
        executeQuery("SELECT * FROM [oak:Unstructured]", SQL2);
        List<String> logs = custom.getLogs();
        assertEquals(1, logs.size());
        assertThat(logs, hasItem(OrderedIndex.DEPRECATION_MESSAGE));
    } finally {
        // Detach the appender even if an assertion above fails.
        custom.finished();
    }
}
/**
 * Runs the DataStoreCommand with intentionally bad arguments and verifies
 * that the expected message (first entry of {@code assertMsg}) is logged by
 * {@code logger} at INFO level.
 *
 * @param argList   command-line arguments to pass to {@link DataStoreCommand}
 * @param assertMsg expected log fragments; only the first entry is checked
 * @param logger    class whose logger output is captured
 */
public static void testIncorrectParams(List<String> argList, ArrayList<String> assertMsg, Class<?> logger) {
    LogCustomizer customLogs = LogCustomizer
            .forLogger(logger.getName())
            .enable(Level.INFO)
            .filter(Level.INFO)
            .contains(assertMsg.get(0))
            .create();
    customLogs.starting();
    try {
        DataStoreCommand cmd = new DataStoreCommand();
        try {
            cmd.execute(argList.toArray(new String[0]));
        } catch (Exception e) {
            // The command is expected to fail on bad params; the assertion
            // below is on the logged message, not the exception.
            log.error("", e);
        }
        Assert.assertNotNull(customLogs.getLogs().get(0));
    } finally {
        // Guarantee the appender is detached even when the assertion fails.
        customLogs.finished();
    }
}
@Test
public void checkMark() throws Exception {
    // Capture TRACE output of the collector while a full GC run marks blobs.
    String gcRoot = folder.newFolder().getAbsolutePath();
    LogCustomizer gcLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState dsState = setUp(true, 10);
    log.info("{} blobs available : {}", dsState.blobsPresent.size(), dsState.blobsPresent);
    gcLogs.starting();
    ThreadPoolExecutor gcExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    MarkSweepGarbageCollector collector = init(0, gcExecutor, gcRoot);
    collector.collectGarbage(true);
    gcLogs.finished();
    // The marked references written under gcRoot must cover all present blobs.
    assertBlobReferences(dsState.blobsPresent, gcRoot);
}
@Test
public void checkMark() throws Exception {
    // Enable TRACE capture on the collector, then run a full GC and verify
    // the reference records written to the GC root folder.
    LogCustomizer gcLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState dsState = setUp(10);
    log.info("{} blobs available : {}", dsState.blobsPresent.size(), dsState.blobsPresent);
    gcLogs.starting();
    ThreadPoolExecutor gcExecutor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String gcRoot = folder.newFolder().getAbsolutePath();
    MarkSweepGarbageCollector collector = init(0, gcExecutor, gcRoot);
    collector.collectGarbage(true);
    gcLogs.finished();
    assertBlobReferenceRecords(dsState.blobsPresent, gcRoot);
}
@Before public void prepare() throws Exception { // Capture logs customLogs = LogCustomizer .forLogger(UploadStagingCache.class.getName()) .enable(Level.INFO) .filter(Level.INFO) .contains("Uploads in progress on close [0]") .create(); customLogs.starting(); super.prepare(); }
@Test public void testUpgradeCompromisedSerializedMap() throws IOException { // Close the init setup closer.close(); // Create pre-upgrade load File home = folder.newFolder(); File pendingUploadsFile = new File(home, DataStoreCacheUpgradeUtils.UPLOAD_MAP); createGibberishLoad(home, pendingUploadsFile); LogCustomizer lc = LogCustomizer.forLogger(DataStoreCacheUpgradeUtils.class.getName()) .filter(Level.WARN) .enable(Level.WARN) .create(); lc.starting(); // Start init(2, new TestStagingUploader(folder.newFolder()), home); assertThat(lc.getLogs().toString(), containsString("Error in reading pending uploads map")); }
@Test
public void testLogs1() {
    // With DEBUG enabled, a single debug statement must be captured.
    LogCustomizer custom = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .enable(Level.DEBUG).create();
    try {
        custom.starting();
        LOG.debug("test message");
        List<String> logs = custom.getLogs();
        // assertEquals gives an expected/actual diff on failure,
        // unlike assertTrue(size() == 1).
        assertEquals(1, logs.size());
        assertThat("logs were recorded by custom logger", logs.toString(),
                containsString("test message"));
    } finally {
        custom.finished();
    }
}
@Test
public void testLogs2() {
    // DEBUG is enabled on the logger, but the capture filter is INFO,
    // so a debug statement must not be recorded.
    LogCustomizer capture = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .enable(Level.DEBUG)
            .filter(Level.INFO)
            .create();
    try {
        capture.starting();
        LOG.debug("test message");
        assertTrue(capture.getLogs().isEmpty());
    } finally {
        capture.finished();
    }
}
@Before
public void before() throws Exception {
    // Virtual clock lets the test control revision timestamps deterministically.
    clock = new Clock.Virtual();
    long now = System.currentTimeMillis();
    clock.waitUntil(now);
    Revision.setClock(clock);
    // Synchronous background ops (asyncDelay = 0) driven by the virtual clock.
    ns = new DocumentMK.Builder()
            .setAsyncDelay(0)
            .clock(clock)
            .getNodeStore();
    logCustomizer.starting();
}
@Test
public void checkConsistencyPathLogging() throws Exception {
    // A consistency check (no sweep: maxAge = 1 day) should produce two
    // blob-reference record files under the GC root.
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    setUp(false);
    customLogs.starting();
    try {
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
        MarkSweepGarbageCollector gcObj = init(86400, executor, rootFolder);
        gcObj.checkConsistency();
    } finally {
        // Detach the TRACE appender even if the consistency check throws.
        customLogs.finished();
    }
    assertBlobReferenceRecords(2, rootFolder);
}
@Test
public void checkGcPathLogging() throws Exception {
    // A full GC run (maxAge = 0) should produce one blob-reference record
    // file under the GC root.
    String rootFolder = folder.newFolder().getAbsolutePath();
    LogCustomizer customLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    setUp(false);
    customLogs.starting();
    try {
        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the TRACE appender even if garbage collection throws.
        customLogs.finished();
    }
    assertBlobReferenceRecords(1, rootFolder);
}
@Test
public void warnOnQueueFull() throws RepositoryException, InterruptedException, ExecutionException {
    // Filling the observation queue must trigger the "queue full" warning.
    LogCustomizer customLogs = LogCustomizer.forLogger(ChangeProcessor.class.getName())
            .filter(Level.WARN)
            .contains(OBS_QUEUE_FULL_WARN)
            .create();
    observationManager.addEventListener(listener, NODE_ADDED, TEST_PATH, true, null, null, false);
    try {
        customLogs.starting();
        try {
            addNodeToFillObsQueue();
            assertTrue("Observation queue full warning must get logged",
                    customLogs.getLogs().size() > 0);
        } finally {
            // Detach the appender even if the assertion fails.
            customLogs.finished();
        }
    } finally {
        observationManager.removeEventListener(listener);
    }
}
@Test
public void logWarnWhenSeekingBackAfterRead() throws Exception {
    // Backward seeks on a streaming index file are expensive and must be
    // warned about; reads and forward seeks must stay silent.
    byte[] fileBytes = writeFile();
    LogCustomizer logRecorder = LogCustomizer
            .forLogger(OakStreamingIndexFile.class.getName()).enable(Level.WARN)
            .contains("Seeking back on streaming index file").create();
    NodeBuilder fooBuilder = builder.child("foo");
    try (OakStreamingIndexFile readFile = new OakStreamingIndexFile("foo", fooBuilder,
            "dirDetails", modeDependantBlobFactory.getNodeBuilderBlobFactory(fooBuilder))) {
        logRecorder.starting();
        byte[] readBytes = new byte[fileBytes.length];
        readFile.readBytes(readBytes, 0, 10);
        assertEquals("Don't log for simple reads", 0, logRecorder.getLogs().size());
        readFile.seek(12);
        assertEquals("Don't log for forward seeks", 0, logRecorder.getLogs().size());
        readFile.seek(2);
        assertEquals("Log warning for backward seeks", 1, logRecorder.getLogs().size());
    } finally {
        // Original only called finished() after a successful run; a failing
        // assertion or read would leak the appender. Always detach it.
        logRecorder.finished();
    }
}
@Test
public void withIndexMultipleNodes() throws RepositoryException, CommitFailedException {
    // Committing 'nodes' indexed nodes with a tracking threshold should emit
    // ceil(nodes / threshold) deprecation messages.
    final int threshold = 5;
    final int nodes = 16;
    final int traces = 1 + (nodes - 1) / threshold;
    OrderedPropertyIndexEditorProvider.setThreshold(threshold);
    final List<String> expected = Collections.nCopies(traces, DEPRECATION_MESSAGE);
    NodeBuilder root = EMPTY_NODE.builder();
    createIndexDef(root);
    custom.starting();
    try {
        for (int i = 0; i < nodes; i++) {
            NodeState before = root.getNodeState();
            root.child("n" + i).setProperty(indexedProperty, "dead" + i);
            NodeState after = root.getNodeState();
            root = hook.processCommit(before, after, CommitInfo.EMPTY).builder();
        }
        assertThat(custom.getLogs(), is(expected));
    } finally {
        // Detach the appender even if a commit or the assertion fails.
        custom.finished();
    }
    // The deprecated index must not have produced any index content.
    assertFalse(root.getChildNode(INDEX_DEFINITIONS_NAME).getChildNode(indexName)
            .getChildNode(INDEX_CONTENT_NODE_NAME).exists());
}
@Test
public void withIndexDefSingleNode() throws RepositoryException, CommitFailedException {
    // One commit touching an indexed property should log exactly one
    // deprecation message and must not write any index content.
    NodeBuilder root = EMPTY_NODE.builder();
    createIndexDef(root);
    NodeState before = root.getNodeState();
    root.child("n1").setProperty(indexedProperty, "dead");
    NodeState after = root.getNodeState();
    custom.starting();
    try {
        root = hook.processCommit(before, after, CommitInfo.EMPTY).builder();
        assertEquals(1, custom.getLogs().size());
        assertThat(custom.getLogs(), hasItem(DEPRECATION_MESSAGE));
    } finally {
        custom.finished();
    }
    // Navigate by the index container NAME (as withIndexMultipleNodes does);
    // the original used INDEX_DEFINITIONS_NODE_TYPE — a node-type constant —
    // as a node name, making the exists() assertion vacuously true.
    NodeBuilder b = root.getChildNode(IndexConstants.INDEX_DEFINITIONS_NAME)
            .getChildNode(indexName).getChildNode(IndexConstants.INDEX_CONTENT_NODE_NAME);
    assertFalse("nothing should have been touched under the actual index", b.exists());
}
@Test
public void init12() {
    // Upgrading schema 1 -> 2 must log one "to DB level 2" entry per DDL
    // statement per table.
    LogCustomizer upgradeLogs = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO)
            .contains("to DB level 2")
            .create();
    upgradeLogs.starting();
    RDBOptions options = new RDBOptions()
            .tablePrefix("T12")
            .initialSchema(1)
            .upgradeToSchema(2)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasSplitDocs());
        final int statementsPerTable = 5;
        int expectedEntries = statementsPerTable * RDBDocumentStore.getTableNames().size();
        assertEquals("unexpected # of log entries: " + upgradeLogs.getLogs(),
                expectedEntries, upgradeLogs.getLogs().size());
    } finally {
        upgradeLogs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}
@Test
public void init11() {
    // Creating at schema 1 with target schema 1 performs no upgrade, so no
    // "to DB level 1" entries may be logged.
    LogCustomizer upgradeLogs = LogCustomizer.forLogger(RDBDocumentStore.class.getName())
            .enable(Level.INFO)
            .contains("to DB level 1")
            .create();
    upgradeLogs.starting();
    RDBOptions options = new RDBOptions()
            .tablePrefix("T11")
            .initialSchema(1)
            .upgradeToSchema(1)
            .dropTablesOnClose(true);
    RDBDocumentStore store = null;
    try {
        store = new RDBDocumentStore(this.ds, new DocumentMK.Builder(), options);
        RDBTableMetaData nodesMeta = store.getTable(Collection.NODES);
        assertEquals(options.getTablePrefix() + "_NODES", nodesMeta.getName());
        assertTrue(nodesMeta.hasVersion());
        assertEquals("unexpected # of log entries: " + upgradeLogs.getLogs(),
                0, upgradeLogs.getLogs().size());
    } finally {
        upgradeLogs.finished();
        if (store != null) {
            store.dispose();
        }
    }
}