@Test
public void onFinish_must_not_throw_any_exception_if_no_failure() {
  // One request that succeeded: no failures, so the fail-fast listener
  // must complete without throwing.
  IndexingResult result = new IndexingResult();
  result.incrementRequests();
  result.incrementSuccess();
  IndexingListener.FAIL_ON_ERROR.onFinish(result);
}
}
@Test
public void test_success() {
  // Two requests, both flagged as successful.
  underTest.incrementRequests();
  underTest.incrementRequests();
  underTest.incrementSuccess();
  underTest.incrementSuccess();
  // No failures, a 100% success ratio and an overall success status are expected.
  assertThat(underTest.getFailures()).isEqualTo(0);
  assertThat(underTest.getSuccess()).isEqualTo(2);
  assertThat(underTest.getTotal()).isEqualTo(2);
  assertThat(underTest.getSuccessRatio()).isEqualTo(1.0, DOUBLE_OFFSET);
  assertThat(underTest.isSuccess()).isTrue();
}
/**
 * Re-indexes es_queue items that are older than {@code minAgeInMs}, page by
 * page ({@code loopLimit} items per page). The loop stops early when a page's
 * success ratio drops to {@code CIRCUIT_BREAKER_IN_PERCENT} or below (the next
 * scheduled run will retry), or when a page results in zero requests.
 * All errors, including {@link Error}s, are logged and never propagated so the
 * recovery daemon keeps running.
 */
@VisibleForTesting
void recover() {
  try (DbSession dbSession = dbClient.openSession(false)) {
    Profiler profiler = Profiler.create(LOGGER).start();
    // only items that have been queued for at least minAgeInMs are eligible
    long beforeDate = system2.now() - minAgeInMs;
    IndexingResult result = new IndexingResult();
    Collection<EsQueueDto> items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
    while (!items.isEmpty()) {
      IndexingResult loopResult = new IndexingResult();
      // dispatch each group of same-typed items to its indexer
      groupItemsByType(items).asMap().forEach((type, typeItems) -> loopResult.add(doIndex(dbSession, type, typeItems)));
      result.add(loopResult);
      // circuit breaker: too many failures in this page, give up until next run
      if (loopResult.getSuccessRatio() <= CIRCUIT_BREAKER_IN_PERCENT) {
        LOGGER.error(LOG_PREFIX + "too many failures [{}/{} documents], waiting for next run", loopResult.getFailures(), loopResult.getTotal());
        break;
      }
      // nothing was processed in this page (e.g. only unsupported types): stop
      // to avoid looping forever on the same items
      if (loopResult.getTotal() == 0L) {
        break;
      }
      items = dbClient.esQueueDao().selectForRecovery(dbSession, beforeDate, loopLimit);
    }
    if (result.getTotal() > 0L) {
      profiler.stopInfo(LOG_PREFIX + format("%d documents processed [%d failures]", result.getTotal(), result.getFailures()));
    }
  } catch (Throwable t) {
    // deliberately broad: a recovery failure must never kill the daemon thread
    LOGGER.error(LOG_PREFIX + "fail to recover documents", t);
  }
}
@Override
public void onFinish(IndexingResult result) {
  // Nothing to do when every request succeeded.
  if (result.getFailures() <= 0) {
    return;
  }
  // At least one document could not be indexed: fail loudly so the caller aborts.
  throw new IllegalStateException(String.format("Unrecoverable indexation failures: %d errors among %d requests", result.getFailures(), result.getTotal()));
}
};
@Override
public IndexingResult index(DbSession dbSession, Collection<EsQueueDto> items) {
  // Nothing queued: report an empty result without touching the index.
  IndexingResult result = new IndexingResult();
  if (items.isEmpty()) {
    return result;
  }
  // Split the queue items per index type and let each dedicated
  // routine process its share, merging their outcomes.
  ListMultimap<IndexType, EsQueueDto> byType = groupItemsByType(items);
  result.add(doIndexRules(dbSession, byType.get(INDEX_TYPE_RULE)));
  result.add(doIndexRuleExtensions(dbSession, byType.get(INDEX_TYPE_RULE_EXTENSION)));
  return result;
}
@Override
public IndexingResult index(DbSession dbSession, Collection<EsQueueDto> items) {
  // Test double: remember every batch it was asked to index and report
  // one request per item, none of them marked as successful.
  called.add(items);
  IndexingResult result = new IndexingResult();
  for (EsQueueDto ignored : items) {
    result.incrementRequests();
  }
  return result;
}
}
@Override
public IndexingResult index(DbSession dbSession, Collection<EsQueueDto> items) {
  // Test double: capture the submitted items for later inspection and
  // return an empty IndexingResult (zero requests).
  this.calledItems = items;
  return new IndexingResult();
}
@Test
public void update_index_when_project_key_is_updated() {
  ComponentDto project = db.components().insertPrivateProject();
  IndexingResult result = indexProject(project, PROJECT_KEY_UPDATE);
  // The project document must be re-indexed with a single, successful request.
  assertThatIndexContainsOnly(project);
  assertThat(result.getTotal()).isEqualTo(1L);
  assertThat(result.getSuccess()).isEqualTo(1L);
}
private IndexingResult doIndexRuleProfiles(DbSession dbSession, Map<String, EsQueueDto> ruleProfileItems) { IndexingResult result = new IndexingResult(); for (Map.Entry<String, EsQueueDto> entry : ruleProfileItems.entrySet()) { String ruleProfileUUid = entry.getKey(); EsQueueDto item = entry.getValue(); IndexingResult profileResult; RulesProfileDto profile = dbClient.qualityProfileDao().selectRuleProfile(dbSession, ruleProfileUUid); if (profile == null) { // profile does not exist anymore in db --> related documents must be deleted from index rules/activeRule SearchRequestBuilder search = esClient.prepareSearch(INDEX_TYPE_ACTIVE_RULE) .setQuery(QueryBuilders.boolQuery().must(termQuery(FIELD_ACTIVE_RULE_PROFILE_UUID, ruleProfileUUid))); profileResult = BulkIndexer.delete(esClient, INDEX_TYPE_ACTIVE_RULE, search); } else { BulkIndexer bulkIndexer = createBulkIndexer(Size.REGULAR, IndexingListener.FAIL_ON_ERROR); bulkIndexer.start(); dbClient.activeRuleDao().scrollByRuleProfileForIndexing(dbSession, ruleProfileUUid, i -> bulkIndexer.add(newIndexRequest(i))); profileResult = bulkIndexer.stop(); } if (profileResult.isSuccess()) { deleteQueueDto(dbSession, item); } result.add(profileResult); } return result; }
/**
 * Verifies the resilience path: when ES refuses writes during
 * commitAndIndexIssues, issues stay in db and in es_queue, and are fully
 * indexed later by the recovery daemon once ES is writable again.
 */
@Test
public void indexing_errors_during_commitAndIndexIssues_are_recovered() {
  RuleDefinitionDto rule = db.rules().insert();
  ComponentDto project = db.components().insertPrivateProject(organization);
  ComponentDto file = db.components().insertComponent(newFileDto(project));
  // insert issues in db without committing
  IssueDto issue1 = IssueTesting.newIssue(rule, project, file);
  IssueDto issue2 = IssueTesting.newIssue(rule, project, file);
  db.getDbClient().issueDao().insert(db.getSession(), issue1, issue2);
  // index is read-only
  es.lockWrites(INDEX_TYPE_ISSUE);
  underTest.commitAndIndexIssues(db.getSession(), asList(issue1, issue2));
  // issues are persisted but not indexed
  assertThatIndexHasSize(0);
  assertThatDbHasOnly(issue1, issue2);
  assertThatEsQueueTableHasSize(2);
  // re-enable write on index
  es.unlockWrites(INDEX_TYPE_ISSUE);
  // emulate the recovery daemon
  IndexingResult result = recover();
  assertThatEsQueueTableHasSize(0);
  assertThatIndexHasOnly(issue1, issue2);
  assertThat(result.isSuccess()).isTrue();
  assertThat(result.getTotal()).isEqualTo(2L);
}
public void add(DeleteRequest request) {
  // Count the request first so the final IndexingResult totals include
  // this deletion, then queue it on the bulk processor.
  result.incrementRequests();
  bulkProcessor.add(request);
}
@Test
public void index_is_not_updated_when_creating_project() {
  // it's impossible to already have an issue on a project
  // that is being created, but it's just to verify that
  // indexing is disabled
  IssueDto issue = db.issues().insertIssue(organization);
  IndexingResult result = indexProject(issue.getProjectUuid(), ProjectIndexer.Cause.PROJECT_CREATION);
  // no indexing request is issued and the index stays empty
  assertThat(result.getTotal()).isEqualTo(0L);
  assertThatIndexHasSize(0);
}
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
  stopProfiler(request);
  // Collect the ids of the documents that were written so the listener
  // can acknowledge them; failed items are only logged.
  List<DocId> indexed = new ArrayList<>();
  for (BulkItemResponse bulkItem : response.getItems()) {
    if (!bulkItem.isFailed()) {
      result.incrementSuccess();
      indexed.add(new DocId(bulkItem.getIndex(), bulkItem.getType(), bulkItem.getId()));
      continue;
    }
    LOGGER.error("index [{}], type [{}], id [{}], message [{}]", bulkItem.getIndex(), bulkItem.getType(), bulkItem.getId(), bulkItem.getFailureMessage());
  }
  indexingListener.onSuccess(indexed);
}
@Override
public void onFinish(IndexingResult result) {
  // Keep the es_queue rows when anything failed: the recovery daemon
  // will pick them up later.
  if (!result.isSuccess()) {
    return;
  }
  // Everything was indexed: purge the processed queue items.
  dbClient.esQueueDao().delete(dbSession, items);
  dbSession.commit();
}
}
/**
 * Starts a bulk session: resets the result counters of any previous run
 * and notifies the size handler before the first request is added.
 */
public void start() {
  result.clear();
  sizeHandler.beforeStart(this);
}
/**
 * When ES is read-only, a project deletion fails but remains queued; the
 * recovery run keeps failing until writes are re-enabled, then succeeds.
 */
@Test
public void errors_during_project_deletion_are_recovered() {
  addIssueToIndex("P1", "I1");
  assertThatIndexHasSize(1);
  // index is read-only: the deletion request fails
  es.lockWrites(INDEX_TYPE_ISSUE);
  IndexingResult result = indexProject("P1", ProjectIndexer.Cause.PROJECT_DELETION);
  assertThat(result.getTotal()).isEqualTo(1L);
  assertThat(result.getFailures()).isEqualTo(1L);
  // index is still read-only, fail to recover
  result = recover();
  assertThat(result.getTotal()).isEqualTo(1L);
  assertThat(result.getFailures()).isEqualTo(1L);
  assertThatIndexHasSize(1);
  // once writable again, recovery removes the document
  es.unlockWrites(INDEX_TYPE_ISSUE);
  result = recover();
  assertThat(result.getTotal()).isEqualTo(1L);
  assertThat(result.getFailures()).isEqualTo(0L);
  assertThatIndexHasSize(0);
}
@Override
public IndexingResult index(DbSession dbSession, Collection<EsQueueDto> items) {
  // Partition queue items by the kind of identifier they carry: issue key
  // or project uuid. Unknown types are logged and skipped.
  ListMultimap<String, EsQueueDto> byIssueKey = ArrayListMultimap.create();
  ListMultimap<String, EsQueueDto> byProjectUuid = ArrayListMultimap.create();
  for (EsQueueDto item : items) {
    if (ID_TYPE_ISSUE_KEY.equals(item.getDocIdType())) {
      byIssueKey.put(item.getDocId(), item);
    } else if (ID_TYPE_PROJECT_UUID.equals(item.getDocIdType())) {
      byProjectUuid.put(item.getDocId(), item);
    } else {
      LOGGER.error("Unsupported es_queue.doc_id_type for issues. Manual fix is required: " + item);
    }
  }
  // Index each partition and merge the two outcomes.
  IndexingResult result = new IndexingResult();
  result.add(doIndexIssueItems(dbSession, byIssueKey));
  result.add(doIndexProjectItems(dbSession, byProjectUuid));
  return result;
}
/**
 * Delegates a batch of same-typed queue items to the indexer registered for
 * that type. Items of an unregistered type are logged and counted as an
 * empty result.
 */
private IndexingResult doIndex(DbSession dbSession, IndexType type, Collection<EsQueueDto> typeItems) {
  LOGGER.trace(LOG_PREFIX + "processing {} {}", typeItems.size(), type);
  ResilientIndexer indexer = indexersByType.get(type);
  if (indexer != null) {
    return indexer.index(dbSession, typeItems);
  }
  LOGGER.error(LOG_PREFIX + "ignore {} items with unsupported type {}", typeItems.size(), type);
  return new IndexingResult();
}
@Test
public void index_is_updated_when_deleting_project() {
  addIssueToIndex("P1", "I1");
  assertThatIndexHasSize(1);
  IndexingResult result = indexProject("P1", ProjectIndexer.Cause.PROJECT_DELETION);
  // one successful deletion request leaves the index empty
  assertThat(result.getTotal()).isEqualTo(1L);
  assertThat(result.getSuccess()).isEqualTo(1L);
  assertThatIndexHasSize(0);
}
/**
 * Processes the queue items that reference a rule profile, one profile at a
 * time: profiles still present in db are re-indexed, profiles deleted from db
 * have their active-rule documents removed from the index. The queue item is
 * deleted only when its profile was processed successfully.
 *
 * NOTE(review): this method appears duplicated elsewhere in the sources —
 * confirm whether the two copies should be unified.
 */
private IndexingResult doIndexRuleProfiles(DbSession dbSession, Map<String, EsQueueDto> ruleProfileItems) {
  IndexingResult result = new IndexingResult();
  for (Map.Entry<String, EsQueueDto> entry : ruleProfileItems.entrySet()) {
    String ruleProfileUUid = entry.getKey();
    EsQueueDto item = entry.getValue();
    IndexingResult profileResult;
    RulesProfileDto profile = dbClient.qualityProfileDao().selectRuleProfile(dbSession, ruleProfileUUid);
    if (profile == null) {
      // profile does not exist anymore in db --> related documents must be deleted from index rules/activeRule
      SearchRequestBuilder search = esClient.prepareSearch(INDEX_TYPE_ACTIVE_RULE)
        .setQuery(QueryBuilders.boolQuery().must(termQuery(FIELD_ACTIVE_RULE_PROFILE_UUID, ruleProfileUUid)));
      profileResult = BulkIndexer.delete(esClient, INDEX_TYPE_ACTIVE_RULE, search);
    } else {
      // profile still exists: re-index all its active rules, failing fast on errors
      BulkIndexer bulkIndexer = createBulkIndexer(Size.REGULAR, IndexingListener.FAIL_ON_ERROR);
      bulkIndexer.start();
      dbClient.activeRuleDao().scrollByRuleProfileForIndexing(dbSession, ruleProfileUUid, i -> bulkIndexer.add(newIndexRequest(i)));
      profileResult = bulkIndexer.stop();
    }
    // drop the queue item only once its profile has been fully processed
    if (profileResult.isSuccess()) {
      deleteQueueDto(dbSession, item);
    }
    result.add(profileResult);
  }
  return result;
}