/**
 * Starting point to create a Directory instance.
 *
 * @param metadataCache contains the metadata of stored elements
 * @param chunksCache cache containing the bulk of the index; this is the larger part of data
 * @param distLocksCache cache to store locks; should be replicated and not using a persistent CacheStore
 * @param indexName identifies the index; you can store different indexes in the same set of caches using different identifiers
 * @return a BuildContext which can be used to tune options before creating the Directory
 */
public static BuildContext newDirectoryInstance(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) {
   // Argument null/configuration checks are performed by the DirectoryBuilderImpl constructor.
   return new DirectoryBuilderImpl(metadataCache, chunksCache, distLocksCache, indexName);
}
public DirectoryBuilderImpl(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) { this.metadataCache = checkValidConfiguration(checkNotNull(metadataCache, "metadataCache"), indexName); this.chunksCache = checkValidConfiguration(checkNotNull(chunksCache, "chunksCache"), indexName); this.distLocksCache = checkValidConfiguration(checkNotNull(distLocksCache, "distLocksCache"), indexName); this.indexName = checkNotNull(indexName, "indexName"); validateMetadataCache(metadataCache, indexName); }
/**
 * Replaces the default write LockFactory with the supplied one.
 *
 * @param lockFactory the LockFactory to use; must not be null
 * @return this, for method chaining
 */
@Override
public BuildContext overrideWriteLocker(LockFactory lockFactory) {
   // checkNotNull returns its argument, so validation and assignment collapse into one statement.
   this.lockFactory = checkNotNull(lockFactory, "lockFactory");
   return this;
}
/**
 * Creates the Directory, filling in a default for every collaborator that was
 * not explicitly overridden before this call.
 *
 * @return a new DirectoryLucene over the configured caches
 */
@Override
public Directory create() {
   // The three defaults are independent of each other, so the order of these guards is free.
   if (deleteExecutor == null) {
      deleteExecutor = new WithinThreadExecutor();
   }
   if (lockFactory == null) {
      lockFactory = makeDefaultLockFactory();
   }
   if (srl == null) {
      srl = makeDefaultSegmentReadLocker(metadataCache, chunksCache, distLocksCache, indexName, affinitySegmentId);
   }
   return new DirectoryLucene(metadataCache, chunksCache, distLocksCache, indexName, lockFactory, chunkSize, srl, writeFileListAsync, deleteExecutor, affinitySegmentId);
}
/**
 * Expects obtaining an index lock to fail with a CacheException because no
 * TransactionManager is associated with the 'lucene' cache (see the expected
 * exception message on the annotation).
 *
 * @throws IOException propagated from the Directory/lock API
 */
@Test(expectedExceptions = CacheException.class, expectedExceptionsMessageRegExp = "Failed looking up TransactionManager. Check if any transaction manager is associated with Infinispan cache: 'lucene'")
public void testLuceneIndexLockingWithCache() throws IOException {
   final String commonIndexName = "myIndex";
   // Wildcard type instead of the raw Cache: avoids raw-type usage warnings.
   Cache<?, ?> cache1 = cache(0, "lucene");
   LockFactory lockFactory = makeLockFactory();
   lockFactory.obtainLock(new DirectoryBuilderImpl(cache1, cache1, cache1, commonIndexName).create(), "myLock");
}
/**
 * Replaces the default executor used for delete operations.
 *
 * @param executor the Executor to use; must not be null
 * @return this, for method chaining
 */
@Override
public BuildContext deleteOperationsExecutor(Executor executor) {
   // checkNotNull hands back its argument, letting us validate and store in one step.
   this.deleteExecutor = checkNotNull(executor, "executor");
   return this;
}
/**
 * Builds the Directory instance. Any collaborator not overridden through the
 * BuildContext is replaced by its default before construction.
 *
 * @return a new DirectoryLucene backed by the configured caches
 */
@Override
public Directory create() {
   if (lockFactory == null) {
      // No write locker was overridden: use the default one.
      lockFactory = makeDefaultLockFactory();
   }
   if (srl == null) {
      // No segment read locker was overridden: build the default over the same caches.
      srl = makeDefaultSegmentReadLocker(metadataCache, chunksCache, distLocksCache, indexName, affinitySegmentId);
   }
   if (deleteExecutor == null) {
      // No executor was provided: run delete operations in the calling thread.
      deleteExecutor = new WithinThreadExecutor();
   }
   return new DirectoryLucene(metadataCache, chunksCache, distLocksCache, indexName, lockFactory, chunkSize, srl, writeFileListAsync, deleteExecutor, affinitySegmentId);
}
/**
 * Expects obtaining an index lock to fail with a CacheException once all
 * 'lucene' caches and their cache managers have been stopped (see the
 * expected exception message on the annotation).
 *
 * @throws IOException propagated from the Directory/lock API
 */
@Test(expectedExceptions = CacheException.class, expectedExceptionsMessageRegExp = "Failed looking up TransactionManager: the cache is not running")
public void testLuceneIndexLockingWithStoppedCache() throws IOException {
   final String commonIndexName = "myIndex";
   // Wildcard type instead of the raw Cache: avoids raw-type usage warnings.
   Cache<?, ?> cache1 = cache(0, "lucene");
   // Stop both cache instances and kill the managers so the lock lookup hits a dead cache.
   cache(0, "lucene").stop();
   cache(1, "lucene").stop();
   TestingUtil.killCacheManagers(cacheManagers);
   LockFactory lockFactory = makeLockFactory();
   lockFactory.obtainLock(new DirectoryBuilderImpl(cache1, cache1, cache1, commonIndexName).create(), "myLock");
}
/**
 * Creates the builder over the given caches. Each cache is rejected if null
 * and then checked for a configuration valid for the given index; the
 * metadata cache undergoes one extra validation step.
 *
 * @param metadataCache cache holding index metadata
 * @param chunksCache cache holding index chunk data
 * @param distLocksCache cache holding distributed locks
 * @param indexName identifier of the index inside the caches
 */
public DirectoryBuilderImpl(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) {
   // Order preserved: each cache is null-checked, then configuration-checked, one at a time.
   this.metadataCache = checkValidConfiguration(checkNotNull(metadataCache, "metadataCache"), indexName);
   this.chunksCache = checkValidConfiguration(checkNotNull(chunksCache, "chunksCache"), indexName);
   this.distLocksCache = checkValidConfiguration(checkNotNull(distLocksCache, "distLocksCache"), indexName);
   this.indexName = checkNotNull(indexName, "indexName");
   // Metadata cache gets a dedicated validation on top of the generic one.
   validateMetadataCache(metadataCache, indexName);
}
/**
 * Replaces the default SegmentReadLocker with the supplied one.
 *
 * @param srl the SegmentReadLocker to use; must not be null
 * @return this, for method chaining
 */
@Override
public BuildContext overrideSegmentReadLocker(SegmentReadLocker srl) {
   // checkNotNull returns its argument, so the null check and the assignment merge.
   this.srl = checkNotNull(srl, "srl");
   return this;
}
/**
 * Builds the Directory, picking the implementation that matches the detected
 * Lucene version. Defaults are applied for any collaborator the caller did
 * not override.
 *
 * @return a DirectoryLuceneV3, or a reflectively loaded DirectoryLuceneV4
 */
@Override
public Directory create() {
   if (lockFactory == null) {
      lockFactory = makeDefaultLockFactory(distLocksCache, indexName);
   }
   if (srl == null) {
      srl = makeDefaultSegmentReadLocker(metadataCache, chunksCache, distLocksCache, indexName);
   }
   // Lucene 3 implementation is on the compile-time classpath: construct it directly.
   if (LuceneVersionDetector.VERSION == 3) {
      return new DirectoryLuceneV3(metadataCache, chunksCache, indexName, lockFactory, chunkSize, srl);
   }
   // Lucene 4 implementation is loaded reflectively, presumably to avoid a hard
   // compile-time dependency on Lucene 4 classes — confirm against the module layout.
   final Class<?>[] constructorSignature = { Cache.class, Cache.class, String.class, LockFactory.class, int.class, SegmentReadLocker.class };
   try {
      return (Directory) DirectoryBuilderImpl.class.getClassLoader()
            .loadClass("org.infinispan.lucene.impl.DirectoryLuceneV4")
            .getConstructor(constructorSignature)
            .newInstance(metadataCache, chunksCache, indexName, lockFactory, chunkSize, srl);
   } catch (Exception e) {
      // Any reflective failure (class missing, ctor mismatch, invocation error) is rewrapped.
      throw log.failedToCreateLucene4Directory(e);
   }
}
/**
 * Starting point to create a Directory instance.
 *
 * @param metadataCache contains the metadata of stored elements
 * @param chunksCache cache containing the bulk of the index; this is the larger part of data
 * @param distLocksCache cache to store locks; should be replicated and not using a persistent CacheStore
 * @param indexName identifies the index; you can store different indexes in the same set of caches using
 *           different identifiers
 * @return a BuildContext which can be used to tune options before creating the Directory
 */
public static BuildContext newDirectoryInstance(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) {
   // Validate the supplied caches before handing them over to the builder.
   validateIndexCaches(indexName, metadataCache, chunksCache, distLocksCache);
   return new DirectoryBuilderImpl(metadataCache, chunksCache, distLocksCache, indexName);
}
public DirectoryBuilderImpl(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) { this.metadataCache = checkValidConfiguration(checkNotNull(metadataCache, "metadataCache"), indexName); this.chunksCache = checkValidConfiguration(checkNotNull(chunksCache, "chunksCache"), indexName); this.distLocksCache = checkValidConfiguration(checkNotNull(distLocksCache, "distLocksCache"), indexName); this.indexName = checkNotNull(indexName, "indexName"); validateMetadataCache(metadataCache, indexName); }
/**
 * Overrides the SegmentReadLocker used by the Directory being built.
 *
 * @param srl non-null SegmentReadLocker replacement
 * @return this builder, to allow chaining
 */
@Override
public BuildContext overrideSegmentReadLocker(SegmentReadLocker srl) {
   // Validation and storage combined: checkNotNull passes its argument through.
   this.srl = checkNotNull(srl, "srl");
   return this;
}
/**
 * Starting point to create a Directory instance.
 *
 * @param metadataCache contains the metadata of stored elements
 * @param chunksCache cache containing the bulk of the index; this is the larger part of data
 * @param distLocksCache cache to store locks; should be replicated and not using a persistent CacheStore
 * @param indexName identifies the index; you can store different indexes in the same set of caches using
 *           different identifiers
 * @return a BuildContext to configure and then create the Directory
 */
public static BuildContext newDirectoryInstance(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) {
   // Cache validation happens up front, before the builder is even constructed.
   validateIndexCaches(indexName, metadataCache, chunksCache, distLocksCache);
   return new DirectoryBuilderImpl(metadataCache, chunksCache, distLocksCache, indexName);
}
/**
 * Sets a custom SegmentReadLocker, replacing the default built by create().
 *
 * @param srl the locker to install; rejected if null
 * @return this builder for fluent chaining
 */
@Override
public BuildContext overrideSegmentReadLocker(SegmentReadLocker srl) {
   // One-step validate-and-assign; checkNotNull yields the checked value.
   this.srl = checkNotNull(srl, "srl");
   return this;
}
/**
 * Sets the executor on which delete operations will run, replacing the
 * within-thread default.
 *
 * @param executor non-null Executor for delete operations
 * @return this builder for fluent chaining
 */
@Override
public BuildContext deleteOperationsExecutor(Executor executor) {
   // checkNotNull passes the argument through, merging validation with the field write.
   this.deleteExecutor = checkNotNull(executor, "executor");
   return this;
}
/**
 * Installs a custom write LockFactory in place of the default.
 *
 * @param lockFactory non-null LockFactory replacement
 * @return this builder for fluent chaining
 */
@Override
public BuildContext overrideWriteLocker(LockFactory lockFactory) {
   // Single-step validate-and-store: checkNotNull returns its argument.
   this.lockFactory = checkNotNull(lockFactory, "lockFactory");
   return this;
}
/**
 * Overrides the LockFactory used to guard index writes.
 *
 * @param lockFactory the replacement factory; must not be null
 * @return this, enabling fluent configuration
 */
@Override
public BuildContext overrideWriteLocker(LockFactory lockFactory) {
   // Validate and assign in one statement via checkNotNull's pass-through return.
   this.lockFactory = checkNotNull(lockFactory, "lockFactory");
   return this;
}
/**
 * Builds the default SegmentReadLocker: a DistributedSegmentReadLocker over
 * the distributed-locks cache.
 *
 * @param metadataCache cache holding index metadata; must not be null
 * @param chunksCache cache holding index chunks; must not be null
 * @param distLocksCache cache holding the locks; must not be null
 * @param indexName name of the index; must not be null
 * @return a new DistributedSegmentReadLocker over the given caches
 */
private static SegmentReadLocker makeDefaultSegmentReadLocker(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName) {
   // Validate all four arguments, not just two: consistent with the builder constructor.
   checkNotNull(metadataCache, "metadataCache");
   checkNotNull(chunksCache, "chunksCache");
   checkNotNull(distLocksCache, "distLocksCache");
   checkNotNull(indexName, "indexName");
   // Narrowly scoped unchecked cast: DistributedSegmentReadLocker requires a
   // Cache<Object, Integer> view of the locks cache.
   @SuppressWarnings("unchecked")
   Cache<Object, Integer> locksCache = (Cache<Object, Integer>) distLocksCache;
   return new DistributedSegmentReadLocker(locksCache, chunksCache, metadataCache, indexName);
}