/**
 * Creates a {@link BulkIndexer} that writes documents to the view index.
 *
 * @param bulkSize size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer newBulkIndexer(Size bulkSize, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_VIEW, bulkSize, listener);
}
}
/**
 * Creates a {@link BulkIndexer} that writes documents to the issue index.
 *
 * @param size size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer createBulkIndexer(Size size, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_ISSUE, size, listener);
}
}
/**
 * Creates a {@link BulkIndexer} that writes documents to the user index.
 *
 * @param bulkSize size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer newBulkIndexer(Size bulkSize, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_USER, bulkSize, listener);
}
/**
 * Creates a {@link BulkIndexer} that writes documents to the project-measures index.
 *
 * @param bulkSize size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer createBulkIndexer(Size bulkSize, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_PROJECT_MEASURES, bulkSize, listener);
}
/**
 * Creates a {@link BulkIndexer} that writes documents to the active-rule index.
 *
 * @param size size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer createBulkIndexer(Size size, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_ACTIVE_RULE, size, listener);
}
/**
 * Creates a {@link BulkIndexer} that writes documents to the rule index.
 *
 * @param bulkSize size profile used for the bulk requests
 * @param listener notified of indexing progress and results
 */
private BulkIndexer createBulkIndexer(Size bulkSize, IndexingListener listener) {
  return new BulkIndexer(esClient, INDEX_TYPE_RULE, bulkSize, listener);
}
/**
 * Indexes the given components synchronously. Exposed for tests only.
 *
 * @param docs components to convert and push to the component index
 */
@VisibleForTesting
void index(ComponentDto... docs) {
  BulkIndexer indexer = new BulkIndexer(esClient, INDEX_TYPE_COMPONENT, Size.REGULAR);
  indexer.start();
  for (ComponentDto dto : docs) {
    indexer.add(newIndexRequest(toDocument(dto)));
  }
  indexer.stop();
}
/**
 * Removes the documents of the given disabled components from the component index.
 *
 * @param projectUuid uuid used as the routing value of the deletions
 * @param disabledComponentUuids uuids of the documents to delete
 */
public void delete(String projectUuid, Collection<String> disabledComponentUuids) {
  BulkIndexer indexer = new BulkIndexer(esClient, INDEX_TYPE_COMPONENT, Size.REGULAR);
  indexer.start();
  for (String componentUuid : disabledComponentUuids) {
    indexer.addDeletion(INDEX_TYPE_COMPONENT, componentUuid, projectUuid);
  }
  indexer.stop();
}
private void index(Collection<IndexPermissions> authorizations, Stream<AuthorizationScope> scopes, Size bulkSize) { if (authorizations.isEmpty()) { return; } // index each authorization in each scope scopes.forEach(scope -> { IndexType indexType = scope.getIndexType(); BulkIndexer bulkIndexer = new BulkIndexer(esClient, indexType, bulkSize); bulkIndexer.start(); authorizations.stream() .filter(scope.getProjectPredicate()) .map(dto -> newIndexRequest(dto, indexType)) .forEach(bulkIndexer::add); bulkIndexer.stop(); }); }
/**
 * Indexes a single view document.
 * <p/>
 * The views lookup cache will be cleared.
 *
 * @param viewDoc the document to index
 */
public void index(ViewDoc viewDoc) {
  BulkIndexer indexer = new BulkIndexer(esClient, ViewIndexDefinition.INDEX_TYPE_VIEW, Size.REGULAR);
  indexer.start();
  doIndex(indexer, viewDoc, true);
  indexer.stop();
}
/**
 * Deletes all the documents matching the given search request. This method is blocking.
 * Index is refreshed, so docs are not searchable as soon as method is executed.
 * <p>
 * Note that the parameter indexType could be removed if progress logs are not needed.
 *
 * @param client the Elasticsearch client to execute the deletion with
 * @param indexType type of the target index, used for progress logging
 * @param searchRequest request selecting the documents to delete
 * @return the result of the bulk deletion
 */
public static IndexingResult delete(EsClient client, IndexType indexType, SearchRequestBuilder searchRequest) {
  BulkIndexer indexer = new BulkIndexer(client, indexType, Size.REGULAR);
  indexer.start();
  indexer.addDeletion(searchRequest);
  return indexer.stop();
}
/**
 * Indexes one view document per entry of the given map, resolving the projects
 * of each view from the database.
 *
 * @param dbSession db session used to load the projects of each view
 * @param viewAndProjectViewUuidMap view uuid mapped to its project-view uuid
 * @param needClearCache whether the views lookup cache must be cleared after indexing
 * @param bulkSize size profile used for the bulk requests
 */
private void index(DbSession dbSession, Map<String, String> viewAndProjectViewUuidMap, boolean needClearCache, Size bulkSize) {
  BulkIndexer indexer = new BulkIndexer(esClient, ViewIndexDefinition.INDEX_TYPE_VIEW, bulkSize);
  indexer.start();
  viewAndProjectViewUuidMap.forEach((viewUuid, projectViewUuid) -> {
    List<String> projects = dbClient.componentDao().selectProjectsFromView(dbSession, viewUuid, projectViewUuid);
    doIndex(indexer, new ViewDoc().setUuid(viewUuid).setProjects(projects), needClearCache);
  });
  indexer.stop();
}
@Test
public void index_nothing() {
  // starting and stopping without adding requests must leave the index empty
  BulkIndexer underTest = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR);
  underTest.start();
  underTest.stop();

  assertThat(count()).isZero();
}
/**
 * @param projectUuid the uuid of the project to analyze, or {@code null} if all content should be indexed.<br/>
 * <b>Warning:</b> only use {@code null} during startup.
 */
private void doIndexByProjectUuid(@Nullable String projectUuid, Size bulkSize) {
  BulkIndexer indexer = new BulkIndexer(esClient, INDEX_TYPE_COMPONENT, bulkSize);
  indexer.start();

  // scroll the components to index and feed them to the bulk as they arrive
  try (DbSession dbSession = dbClient.openSession(false)) {
    dbClient.componentDao()
      .scrollForIndexing(dbSession, projectUuid, context -> {
        ComponentDto dto = context.getResultObject();
        indexer.add(newIndexRequest(toDocument(dto)));
      });
  }

  indexer.stop();
}
@Test public void large_indexing() { // index has one replica assertThat(replicas()).isEqualTo(1); BulkIndexer indexer = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.LARGE); indexer.start(); // replicas are temporarily disabled assertThat(replicas()).isEqualTo(0); for (int i = 0; i < 10; i++) { indexer.add(newIndexRequest(i)); } IndexingResult result = indexer.stop(); assertThat(result.isSuccess()).isTrue(); assertThat(result.getSuccess()).isEqualTo(10); assertThat(result.getFailures()).isEqualTo(0); assertThat(result.getTotal()).isEqualTo(10); assertThat(count()).isEqualTo(10); // replicas are re-enabled assertThat(replicas()).isEqualTo(1); }
@Test
public void listener_is_called_on_successful_requests() {
  FakeListener listener = new FakeListener();
  BulkIndexer underTest = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR, listener);
  underTest.start();
  underTest.addDeletion(INDEX_TYPE_FAKE, "foo");
  underTest.stop();

  // the deletion must have been reported to the listener
  assertThat(listener.calledDocIds)
    .containsExactlyInAnyOrder(new DocId(INDEX_TYPE_FAKE, "foo"));
  assertThat(listener.calledResult.getSuccess()).isEqualTo(1);
  assertThat(listener.calledResult.getTotal()).isEqualTo(1);
}
@Test
public void listener_is_not_called_with_errors() {
  FakeListener listener = new FakeListener();
  BulkIndexer underTest = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR, listener);
  underTest.start();
  underTest.add(newIndexRequestWithDocId("foo"));
  // this request targets a missing index, so it must fail
  underTest.add(new IndexRequest("index_does_not_exist", "index_does_not_exist", "bar").source(emptyMap()));
  underTest.stop();

  // only the successful doc is reported, the failed one is not
  assertThat(listener.calledDocIds).containsExactly(new DocId(INDEX_TYPE_FAKE, "foo"));
  assertThat(listener.calledResult.getSuccess()).isEqualTo(1);
  assertThat(listener.calledResult.getTotal()).isEqualTo(2);
}
@Test public void index_documents() { BulkIndexer indexer = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR); indexer.start(); indexer.add(newIndexRequest(42)); indexer.add(newIndexRequest(78)); // request is not sent yet assertThat(count()).isEqualTo(0); // send remaining requests indexer.stop(); assertThat(count()).isEqualTo(2); }
@Test
public void listener_is_called_even_if_deleting_a_doc_that_does_not_exist() {
  FakeListener listener = new FakeListener();
  BulkIndexer underTest = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR, listener);
  underTest.start();
  underTest.add(newIndexRequestWithDocId("foo"));
  underTest.add(newIndexRequestWithDocId("bar"));
  underTest.stop();

  // both docs are reported, regardless of previous existence
  assertThat(listener.calledDocIds)
    .containsExactlyInAnyOrder(new DocId(INDEX_TYPE_FAKE, "foo"), new DocId(INDEX_TYPE_FAKE, "bar"));
  assertThat(listener.calledResult.getSuccess()).isEqualTo(2);
  assertThat(listener.calledResult.getTotal()).isEqualTo(2);
}
@Test
public void log_requests_when_TRACE_level_is_enabled() {
  logTester.setLevel(LoggerLevel.TRACE);

  BulkIndexer underTest = new BulkIndexer(es.client(), INDEX_TYPE_FAKE, Size.REGULAR, new FakeListener());
  underTest.start();
  underTest.add(newIndexRequestWithDocId("foo"));
  underTest.addDeletion(INDEX_TYPE_FAKE, "foo");
  underTest.add(newIndexRequestWithDocId("bar"));
  underTest.stop();

  // at least one TRACE line summarizes the bulk content
  assertThat(logTester.logs(LoggerLevel.TRACE)
    .stream()
    .anyMatch(log -> log.contains("Bulk[2 index requests on fakes/fake, 1 delete requests on fakes/fake]")))
    .isTrue();
}