/**
 * Builds an {@link IndexRepositoryImpl} backed by an in-memory RegionDirectory
 * and a mocked LuceneIndex whose only indexed field is "txt".
 */
private IndexRepositoryImpl createIndexRepo() throws IOException {
  // Raw map type kept deliberately — RegionDirectory's expected key/value
  // parameterization is declared elsewhere in the project.
  ConcurrentHashMap fileAndChunkRegion = new ConcurrentHashMap();
  RegionDirectory directory = new RegionDirectory(fileAndChunkRegion, fileSystemStats);

  IndexWriterConfig writerConfig = new IndexWriterConfig(analyzer);
  IndexWriter indexWriter = new IndexWriter(directory, writerConfig);

  LuceneIndex mockIndex = Mockito.mock(LuceneIndex.class);
  Mockito.when(mockIndex.getFieldNames()).thenReturn(new String[] {"txt"});

  // No user region, lock service, or lock name is needed for this test repo.
  return new IndexRepositoryImpl(region, indexWriter, mapper, indexStats, null, null, "",
      mockIndex);
}
@Override public int getAsInt() { if (isClosed() || !((BucketRegion) userRegion).getBucketAdvisor().isPrimary()) { stats.removeDocumentsSupplier(this); return 0; } try { return writer.numDocs(); } catch (AlreadyClosedException e) { // ignore return 0; } } }
/** * Test what happens when a bucket is destroyed. */ @Test public void destroyBucketShouldCreateNewIndexRepository() throws BucketNotFoundException, IOException { setUpMockBucket(0); IndexRepositoryImpl repo0 = (IndexRepositoryImpl) repoManager.getRepository(userRegion, 0, null); assertNotNull(repo0); checkRepository(repo0, 0); BucketRegion fileBucket0 = fileAndChunkBuckets.get(0); BucketRegion dataBucket0 = dataBuckets.get(0); // Simulate rebalancing of a bucket by marking the old bucket is destroyed // and creating a new bucket when(dataBucket0.isDestroyed()).thenReturn(true); setUpMockBucket(0); IndexRepositoryImpl newRepo0 = (IndexRepositoryImpl) repoManager.getRepository(userRegion, 0, null); assertNotEquals(repo0, newRepo0); checkRepository(newRepo0, 0); assertTrue(repo0.isClosed()); assertFalse(repo0.getWriter().isOpen()); }
/**
 * Asserts that the repository's directory is backed by the file-and-chunk
 * region of at least one of the given buckets, and that it uses the shared
 * serializer.
 */
protected void checkRepository(IndexRepositoryImpl repo0, int... bucketIds) {
  IndexWriter writer0 = repo0.getWriter();
  RegionDirectory dir0 = (RegionDirectory) writer0.getDirectory();

  boolean matchedABucket = false;
  for (int bucketId : bucketIds) {
    BucketTargetingMap expected =
        new BucketTargetingMap(fileAndChunkBuckets.get(bucketId), bucketId);
    if (expected.equals(dir0.getFileSystem().getFileAndChunkRegion())) {
      matchedABucket = true;
    }
  }
  assertTrue(matchedABucket);
  assertEquals(serializer, repo0.getSerializer());
}
/**
 * Creates a repository that ties a Lucene {@link IndexWriter} to the Geode
 * regions it indexes, and registers a document-count supplier with the stats.
 *
 * @param region the region backing the Lucene files
 * @param writer the Lucene writer this repository wraps
 * @param serializer converts region values into Lucene documents
 * @param stats index statistics; a document-count supplier is registered here
 * @param userRegion the region holding the user data being indexed
 * @param lockService lock service for coordinating access (may be null in tests)
 * @param lockName name of the lock within {@code lockService}
 * @param index the owning Lucene index
 * @throws IOException if the searcher manager cannot be created
 */
public IndexRepositoryImpl(Region<?, ?> region, IndexWriter writer, LuceneSerializer serializer,
    LuceneIndexStats stats, Region<?, ?> userRegion, DistributedLockService lockService,
    String lockName, LuceneIndex index) throws IOException {
  this.region = region;
  this.userRegion = userRegion;
  this.writer = writer;
  // NOTE: createSearchManager() runs mid-construction — presumably it reads
  // this.writer (assigned just above); keep this statement ordering.
  searcherManager = createSearchManager();
  this.serializer = serializer;
  this.stats = stats;
  documentCountSupplier = new DocumentCountSupplier();
  stats.addDocumentsSupplier(documentCountSupplier);
  this.lockService = lockService;
  this.lockName = lockName;
  this.index = index;
}
/**
 * Unlike the region-backed variant, a raw-index repository must write through
 * a plain Lucene NIOFSDirectory on the local filesystem.
 */
@Override
protected void checkRepository(IndexRepositoryImpl repo0, int... bucketId) {
  Directory directory = repo0.getWriter().getDirectory();
  assertTrue(directory instanceof NIOFSDirectory);
}
// Run the query against the single consolidated repository and capture its
// hits for comparison.
singleIndexRepo.query(query, 100, collector);
List<EntryScore<String>> singleResult = collector.getEntries().getHits();

// Run the same query against each distributed repository, keeping one
// collector per repository — presumably these are merged and compared against
// singleResult further down (not visible in this chunk).
distIR1.query(query, 100, collector1);
collectors.add(collector1);
distIR2.query(query, 100, collector2);
collectors.add(collector2);
distIR3.query(query, 100, collector3);
collectors.add(collector3);
/**
 * Builds an IndexRepository for a raw (non-region-backed) Lucene index on the
 * given bucket, replacing and cleaning up any previous repository.
 *
 * @param bucketId the bucket to build a repository for
 * @param serializer converts region values into Lucene documents
 * @param index the owning index; must be a {@link LuceneRawIndex}
 * @param userRegion the partitioned region holding the user data
 * @param oldRepository previous repository for this bucket, cleaned up if non-null
 * @param partitionedRepositoryManager unused here; part of the factory contract
 * @throws IOException if the index directory or writer cannot be created
 */
@Override
public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer,
    InternalLuceneIndex index, PartitionedRegion userRegion, IndexRepository oldRepository,
    PartitionedRepositoryManager partitionedRepositoryManager) throws IOException {
  // Fix: the original declared an unused local `final IndexRepository repo;`.
  if (oldRepository != null) {
    oldRepository.cleanup();
  }
  LuceneRawIndex indexForRaw = (LuceneRawIndex) index;
  BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);

  Directory dir;
  if (indexForRaw.withPersistence()) {
    // Persistent raw index: one uniquely-named on-disk directory per bucket.
    String bucketLocation = LuceneServiceImpl.getUniqueIndexName(index.getName(),
        index.getRegionPath() + "_" + bucketId);
    File location = new File(index.getName(), bucketLocation);
    // Fix: the original ignored mkdirs()'s return value. Fail fast here
    // instead of later inside NIOFSDirectory; the second exists() check
    // tolerates a concurrent creator winning the race.
    if (!location.exists() && !location.mkdirs() && !location.exists()) {
      throw new IOException("Unable to create index directory " + location);
    }
    dir = new NIOFSDirectory(location.toPath());
  } else {
    dir = new RAMDirectory();
  }

  IndexWriterConfig config = new IndexWriterConfig(indexForRaw.getAnalyzer());
  IndexWriter writer = new IndexWriter(dir, config);
  // Raw repositories have no file-and-chunk region (first arg null) and no
  // lock service/name.
  return new IndexRepositoryImpl(null, writer, serializer, indexForRaw.getIndexStats(),
      dataBucket, null, "", indexForRaw);
}
}
// Create the per-bucket writer over the directory prepared above.
IndexWriterConfig config = new IndexWriterConfig(indexForPR.getAnalyzer());
IndexWriter writer = new IndexWriter(dir, config);
// Region-backed repository: fileAndChunkBucket holds the Lucene files and
// dataBucket the user data; lockService/lockName are presumably used for
// bucket ownership coordination — see IndexRepositoryImpl.
repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer,
    indexForPR.getIndexStats(), dataBucket, lockService, lockName, indexForPR);
// NOTE(review): assigning success = false *after* the repository was built
// looks inverted — this flag is usually set true on the success path so a
// surrounding finally can clean up on failure. Confirm against the enclosing
// try/finally, which is not visible in this chunk.
success = false;