@Test public void testLogs2() {
    // DEBUG is enabled on the logger, but the customizer filters at INFO,
    // so a debug-level message must not be captured.
    LogCustomizer customizer = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .enable(Level.DEBUG).filter(Level.INFO).create();
    try {
        customizer.starting();
        LOG.debug("test message");
        assertTrue(customizer.getLogs().isEmpty());
    } finally {
        customizer.finished();
    }
}
@Test public void multipleQueryRuns() {
    final int executions = 16;
    final int trackEvery = 5;
    final int numTraces = executions / trackEvery;
    OrderedPropertyIndexProvider.setThreshold(trackEvery);
    // One deprecation message is expected for every 'trackEvery' query executions.
    List<String> expectedLogs = Collections.nCopies(numTraces, OrderedIndex.DEPRECATION_MESSAGE);
    custom.starting();
    try {
        for (int i = 0; i < executions; i++) {
            executeQuery("SELECT * FROM [oak:Unstructured]", SQL2);
        }
        assertThat(custom.getLogs(), is(expectedLogs));
    } finally {
        // Detach the log appender even if the assertion fails, so the
        // customizer does not keep capturing into later tests.
        custom.finished();
    }
}
}
@Test public void checkMark() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    // Capture TRACE output of the collector to verify the marked blob references.
    LogCustomizer customLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState state = setUp(true, 10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the pool threads even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferences(state.blobsPresent, rootFolder);
}
@Test public void testUpgradeCompromisedSerializedMap() throws IOException { // Close the init setup closer.close(); // Create pre-upgrade load File home = folder.newFolder(); File pendingUploadsFile = new File(home, DataStoreCacheUpgradeUtils.UPLOAD_MAP); createGibberishLoad(home, pendingUploadsFile); LogCustomizer lc = LogCustomizer.forLogger(DataStoreCacheUpgradeUtils.class.getName()) .filter(Level.WARN) .enable(Level.WARN) .create(); lc.starting(); // Start init(2, new TestStagingUploader(folder.newFolder()), home); assertThat(lc.getLogs().toString(), containsString("Error in reading pending uploads map")); }
/**
 * Collects, in order, the captured log entries that announce a deletion batch.
 */
private List<String> getDeleteMessages() {
    List<String> deleteLogs = Lists.newArrayList();
    for (String entry : logCustomizer.getLogs()) {
        if (!entry.startsWith("Proceeding to delete [")) {
            continue;
        }
        deleteLogs.add(entry);
    }
    return deleteLogs;
}
// Test fixture setup. Pins a virtual clock at the current wall-clock time so
// the test controls time progression, installs it as the Revision clock
// BEFORE building the node store, then creates a DocumentNodeStore with
// background operations disabled (asyncDelay = 0) and starts log capture.
// NOTE: the statement order matters — the clock must be set before the store
// is constructed.
@Before public void before() throws Exception { clock = new Clock.Virtual(); clock.waitUntil(System.currentTimeMillis()); Revision.setClock(clock); ns = new DocumentMK.Builder().setAsyncDelay(0).clock(clock).getNodeStore(); logCustomizer.starting(); }
// Teardown: detach the customizer's appender so log capture does not leak
// into subsequent tests.
@After public void after() { logCustomizer.finished(); }
/**
 * Returns a builder that customizes the root logger; convenience wrapper
 * around {@code forLogger(ROOT_LOGGER_NAME)}.
 *
 * @return a builder targeting the root logger
 */
public static LogCustomizerBuilder forRootLogger() { return forLogger(ROOT_LOGGER_NAME); }
@Test public void checkMark() throws Exception {
    // Capture TRACE output of the collector to verify the marked blob references.
    LogCustomizer customLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    DataStoreState state = setUp(10);
    log.info("{} blobs available : {}", state.blobsPresent.size(), state.blobsPresent);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    String rootFolder = folder.newFolder().getAbsolutePath();
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the pool threads even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferenceRecords(state.blobsPresent, rootFolder);
}
@Test public void noRunWhenClosed() throws Exception {
    NodeStore store = new MemoryNodeStore();
    IndexEditorProvider provider = new PropertyIndexEditorProvider();

    AsyncIndexUpdate async = new AsyncIndexUpdate("async", store, provider);
    async.run();
    async.close();

    LogCustomizer lc = createLogCustomizer(Level.WARN);
    try {
        // Running a closed updater must only emit a warning, not execute.
        async.run();
        assertEquals(1, lc.getLogs().size());
        assertThat(lc.getLogs().get(0), containsString("Could not acquire run permit"));
    } finally {
        // Detach the appender and close the updater even when an assertion
        // fails; the original leaked both on failure.
        lc.finished();
        async.close();
    }
}
/**
 * Extracts the first captured group of {@code REGEX}, parsed as an int,
 * from every captured log message that matches, in log order.
 */
private Iterable<Integer> getUpdates() {
    Pattern pattern = Pattern.compile(REGEX);
    List<Integer> counts = new ArrayList<>();
    for (String line : logCustomizer.getLogs()) {
        Matcher matcher = pattern.matcher(line);
        if (!matcher.find()) {
            continue;
        }
        counts.add(Integer.parseInt(matcher.group(1)));
    }
    return counts;
}
logCustomizer.starting(); final AtomicBoolean running = new AtomicBoolean(true); Thread bgThread = new Thread(new Runnable() {
// Teardown: detach the customizer's appender so log capture does not leak
// into subsequent tests.
@After public void after() { logCustomizer.finished(); }
@Test public void testExactMatch() {
    // Only messages that are byte-for-byte equal to "Test Message" (case
    // sensitive) may be captured.
    LogCustomizer custom = LogCustomizer
            .forLogger("org.apache.jackrabbit.oak.commons.junit.LogCustomizerTest")
            .exactlyMatches("Test Message")
            .create();
    try {
        custom.starting();
        LOG.info("test message");
        LOG.info("test message 1");
        LOG.info("1 test message");
        assertTrue(custom.getLogs().isEmpty());
        LOG.info("Test Message");
        // Re-fetch the logs after emitting the matching message: the original
        // asserted on a List obtained BEFORE the log call, which only works if
        // getLogs() happens to return a live view of the capture buffer.
        assertEquals(1, custom.getLogs().size());
    } finally {
        custom.finished();
    }
}
Set<String> cvLogs; try { logMergingNodeStateDiff.starting(); logConflictValidator.starting(); deleteChangedNodeOps("node1"); } finally { mnsdLogs = Sets.newHashSet(logMergingNodeStateDiff.getLogs()); cvLogs = Sets.newHashSet(logConflictValidator.getLogs()); logMergingNodeStateDiff.finished(); logConflictValidator.finished();
@Test public void checkGcPathLogging() throws Exception {
    String rootFolder = folder.newFolder().getAbsolutePath();
    // Capture TRACE output of the collector to verify the GC path records.
    LogCustomizer customLogs = LogCustomizer
            .forLogger(MarkSweepGarbageCollector.class.getName())
            .enable(Level.TRACE)
            .filter(Level.TRACE)
            .create();
    setUp(false);
    customLogs.starting();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(10);
    try {
        MarkSweepGarbageCollector gcObj = init(0, executor, rootFolder);
        gcObj.collectGarbage(true);
    } finally {
        // Detach the appender and release the pool threads even if GC throws;
        // the original leaked both on failure.
        customLogs.finished();
        executor.shutdown();
    }
    assertBlobReferenceRecords(1, rootFolder);
}