@Test public void do_not_stop_run_if_success_rate_is_greater_than_circuit_breaker() { IntStream.range(0, 10).forEach(i -> insertItem(FOO_TYPE, "" + i)); advanceInTime(); // 10 docs to process, by groups of 5. // Each group successfully recovers 4 docs --> below 30% of failures --> continue run PartiallyFailingIndexer indexer = new PartiallyFailingIndexer(FOO_TYPE, 4, 4, 2); MapSettings settings = new MapSettings() .setProperty("sonar.search.recovery.loopLimit", "5"); underTest = newRecoveryIndexer(settings.asConfig(), indexer); underTest.recover(); assertThatLogsDoNotContain(ERROR, "too many failures"); assertThatQueueHasSize(0); assertThat(indexer.indexed).hasSize(10); assertThat(indexer.called).hasSize(10 + 2 /* retries */); }
@Test
public void failing_always_on_same_document_does_not_generate_infinite_loop() {
  // One poisonous item plus 10 healthy ones in the queue.
  EsQueueDto poisonousItem = insertItem(FOO_TYPE, "buggy");
  IntStream.range(0, 10).forEach(i -> insertItem(FOO_TYPE, "" + i));
  advanceInTime();

  // The indexer fails on the same element forever; recovery must give up
  // on it for this run instead of looping indefinitely.
  FailingAlwaysOnSameElementIndexer failingIndexer = new FailingAlwaysOnSameElementIndexer(FOO_TYPE, poisonousItem);
  underTest = newRecoveryIndexer(failingIndexer);

  underTest.recover();

  assertThatLogsContain(ERROR, "Elasticsearch recovery - too many failures [1/1 documents], waiting for next run");
  // Only the poisonous item remains queued.
  assertThatQueueHasSize(1);
}
@Test public void stop_run_if_too_many_failures() { IntStream.range(0, 10).forEach(i -> insertItem(FOO_TYPE, "" + i)); advanceInTime(); // 10 docs to process, by groups of 3. // The first group successfully recovers only 1 docs --> above 30% of failures --> stop run PartiallyFailingIndexer indexer = new PartiallyFailingIndexer(FOO_TYPE, 1); MapSettings settings = new MapSettings() .setProperty("sonar.search.recovery.loopLimit", "3"); underTest = newRecoveryIndexer(settings.asConfig(), indexer); underTest.recover(); assertThatLogsContain(ERROR, "Elasticsearch recovery - too many failures [2/3 documents], waiting for next run"); assertThatQueueHasSize(9); // The indexer must have been called once and only once. assertThat(indexer.called).hasSize(3); }
@Test public void hard_failures_are_logged_and_do_not_stop_recovery_scheduling() throws Exception { insertItem(FOO_TYPE, "f1"); HardFailingFakeIndexer indexer = new HardFailingFakeIndexer(FOO_TYPE); advanceInTime(); underTest = newRecoveryIndexer(indexer); underTest.start(); // all runs fail, but they are still scheduled // -> waiting for 2 runs while (indexer.called.size() < 2) { Thread.sleep(1L); } underTest.stop(); // No rows treated assertThatQueueHasSize(1); assertThatLogsContain(ERROR, "Elasticsearch recovery - fail to recover documents"); }
@Test public void soft_failures_are_logged_and_do_not_stop_recovery_scheduling() throws Exception { insertItem(FOO_TYPE, "f1"); SoftFailingFakeIndexer indexer = new SoftFailingFakeIndexer(FOO_TYPE); advanceInTime(); underTest = newRecoveryIndexer(indexer); underTest.start(); // all runs fail, but they are still scheduled // -> waiting for 2 runs while (indexer.called.size() < 2) { Thread.sleep(1L); } underTest.stop(); // No rows treated assertThatQueueHasSize(1); assertThatLogsContain(INFO, "Elasticsearch recovery - 1 documents processed [1 failures]"); }
@Test
public void successfully_recover_indexing_requests() {
  // Two queued items of one type, one item of another type.
  IndexType fooIndexType = new IndexType("foos", "foo");
  EsQueueDto fooItem1 = insertItem(fooIndexType, "f1");
  EsQueueDto fooItem2 = insertItem(fooIndexType, "f2");
  IndexType barIndexType = new IndexType("bars", "bar");
  EsQueueDto barItem = insertItem(barIndexType, "b1");

  SuccessfulFakeIndexer fooIndexer = new SuccessfulFakeIndexer(fooIndexType);
  SuccessfulFakeIndexer barIndexer = new SuccessfulFakeIndexer(barIndexType);
  advanceInTime();

  underTest = newRecoveryIndexer(fooIndexer, barIndexer);
  underTest.recover();

  assertThatQueueHasSize(0);
  assertThatLogsContain(INFO, "Elasticsearch recovery - 3 documents processed [0 failures]");

  // Each indexer is invoked exactly once, with the items of its own type.
  assertThat(fooIndexer.called).hasSize(1);
  assertThat(fooIndexer.called.get(0))
    .extracting(EsQueueDto::getUuid)
    .containsExactlyInAnyOrder(fooItem1.getUuid(), fooItem2.getUuid());
  assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 2 [foos/foo]");

  assertThat(barIndexer.called).hasSize(1);
  assertThat(barIndexer.called.get(0))
    .extracting(EsQueueDto::getUuid)
    .containsExactlyInAnyOrder(barItem.getUuid());
  assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 1 [bars/bar]");
}
@Test
public void recover_multiple_times_the_same_document() {
  // Three queue entries referencing the same document id.
  EsQueueDto firstItem = insertItem(FOO_TYPE, "f1");
  EsQueueDto secondItem = insertItem(FOO_TYPE, firstItem.getDocId());
  EsQueueDto thirdItem = insertItem(FOO_TYPE, firstItem.getDocId());
  advanceInTime();

  SuccessfulFakeIndexer fakeIndexer = new SuccessfulFakeIndexer(FOO_TYPE);
  underTest = newRecoveryIndexer(fakeIndexer);
  underTest.recover();

  assertThatQueueHasSize(0);
  // All three entries are handed to the indexer in a single call.
  assertThat(fakeIndexer.called).hasSize(1);
  assertThat(fakeIndexer.called.get(0)).extracting(EsQueueDto::getUuid)
    .containsExactlyInAnyOrder(firstItem.getUuid(), secondItem.getUuid(), thirdItem.getUuid());

  assertThatLogsContain(TRACE, "Elasticsearch recovery - processing 3 [foos/foo]");
  assertThatLogsContain(INFO, "Elasticsearch recovery - 3 documents processed [0 failures]");
}
@Test
public void unsupported_types_are_kept_in_queue_for_manual_fix_operation() {
  // Queue an item of a type that no registered indexer supports.
  insertItem(FOO_TYPE, "f1");
  ResilientIndexer mismatchedIndexer = new SuccessfulFakeIndexer(new IndexType("bars", "bar"));
  advanceInTime();

  underTest = newRecoveryIndexer(mismatchedIndexer);
  underTest.recover();

  // The unsupported item stays in the queue and the problem is logged.
  assertThatQueueHasSize(1);
  assertThatLogsContain(ERROR, "Elasticsearch recovery - ignore 1 items with unsupported type [foos/foo]");
}