/**
 * A repository is closed once its backing user region has been destroyed,
 * or once its underlying index writer is no longer open.
 */
@Override
public boolean isClosed() {
    // Short-circuit: don't touch the writer if the region is already gone.
    if (userRegion.isDestroyed()) {
        return true;
    }
    return !writer.isOpen();
}
/**
 * With a writer cache sized for two indexes, requesting a third index must
 * evict (and close) the least-recently-used writer — here, "foo".
 */
@Test
public void testClosesOldestIndexWriterWhenCacheSizeIsExceeded() throws Throwable {
    addIndex( "bar" );
    addIndex( "baz" );
    Config config = Config.defaults( cacheSizeConfig() );
    dataSource = life.add( getLuceneDataSource( config ) );

    IndexIdentifier foo = identifier( "foo" );
    IndexIdentifier bar = identifier( "bar" );
    IndexIdentifier baz = identifier( "baz" );

    // Touch "foo" first so it becomes the oldest cache entry.
    IndexWriter writerForFoo = dataSource.getIndexSearcher( foo ).getWriter();
    dataSource.getIndexSearcher( bar );
    assertTrue( writerForFoo.isOpen() );

    // The third distinct index exceeds the cache size and evicts "foo".
    dataSource.getIndexSearcher( baz );
    assertFalse( writerForFoo.isOpen() );
}
/** * Test what happens when a bucket is destroyed. */ @Test public void destroyBucketShouldCreateNewIndexRepository() throws BucketNotFoundException, IOException { setUpMockBucket(0); IndexRepositoryImpl repo0 = (IndexRepositoryImpl) repoManager.getRepository(userRegion, 0, null); assertNotNull(repo0); checkRepository(repo0, 0); BucketRegion fileBucket0 = fileAndChunkBuckets.get(0); BucketRegion dataBucket0 = dataBuckets.get(0); // Simulate rebalancing of a bucket by marking the old bucket is destroyed // and creating a new bucket when(dataBucket0.isDestroyed()).thenReturn(true); setUpMockBucket(0); IndexRepositoryImpl newRepo0 = (IndexRepositoryImpl) repoManager.getRepository(userRegion, 0, null); assertNotEquals(repo0, newRepo0); checkRepository(newRepo0, 0); assertTrue(repo0.isClosed()); assertFalse(repo0.getWriter().isOpen()); }
/**
 * After "foo" is evicted from the writer cache, asking for it again must
 * yield a brand-new, open writer rather than the evicted (closed) one.
 */
@Test
public void testRecreatesWriterWhenRequestedAgainAfterCacheEviction() throws Throwable {
    addIndex( "bar" );
    addIndex( "baz" );
    Config config = Config.defaults( cacheSizeConfig() );
    dataSource = life.add( getLuceneDataSource( config ) );

    IndexIdentifier foo = identifier( "foo" );
    IndexIdentifier bar = identifier( "bar" );
    IndexIdentifier baz = identifier( "baz" );

    IndexWriter evictedWriter = dataSource.getIndexSearcher( foo ).getWriter();
    // Fill the cache so "foo" gets evicted.
    dataSource.getIndexSearcher( bar );
    dataSource.getIndexSearcher( baz );

    IndexWriter freshWriter = dataSource.getIndexSearcher( foo ).getWriter();
    assertNotSame( evictedWriter, freshWriter );
    assertTrue( freshWriter.isOpen() );
}
/**
 * Handles an {@link AlreadyClosedException} by failing the engine when it was
 * caused by a tragic event on either the index writer or the translog.
 *
 * @param ex the ACE that triggered this check (attached to the AssertionError
 *           when the close cannot be explained by a tragic event)
 * @return {@code true} if this call failed the engine, {@code false} if the
 *         engine was already failed/closed and nothing was done
 * @throws AssertionError if the engine is neither failed nor closed — an ACE
 *         outside a tragic event indicates a bug
 */
private boolean failOnTragicEvent(AlreadyClosedException ex) {
    final boolean engineFailed;
    // if we are already closed due to some tragic exception
    // we need to fail the engine. it might have already been failed before
    // but we are double-checking it's failed and closed
    if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
        final Exception tragicException;
        // failEngine needs an Exception; wrap non-Exception Throwables (e.g. Errors).
        if (indexWriter.getTragicException() instanceof Exception) {
            tragicException = (Exception) indexWriter.getTragicException();
        } else {
            tragicException = new RuntimeException(indexWriter.getTragicException());
        }
        failEngine("already closed by tragic event on the index writer", tragicException);
        engineFailed = true;
    } else if (translog.isOpen() == false && translog.getTragicException() != null) {
        failEngine("already closed by tragic event on the translog", translog.getTragicException());
        engineFailed = true;
    } else if (failedEngine.get() == null && isClosed.get() == false) {
        // we are closed but the engine is not failed yet? this smells like a bug - we only expect ACE if we are in a fatal case ie.
        // either translog or IW is closed by a tragic event or has closed itself. if that is not the case we are in a buggy state and
        // raise an assertion error
        throw new AssertionError("Unexpected AlreadyClosedException", ex);
    } else {
        engineFailed = false;
    }
    return engineFailed;
}
/**
 * Decides whether the engine should be failed because of {@code e}.
 * Defers to the superclass first; otherwise treats an
 * {@link AlreadyClosedException} as a potential tragic-event signal, and
 * fails the engine directly when {@code e} IS the tragic exception recorded
 * by a now-closed index writer or translog.
 *
 * @param source description of the operation that observed the exception
 * @param e      the exception under consideration (may be {@code null})
 * @return {@code true} if the engine was failed as a result of this call
 */
@Override
protected boolean maybeFailEngine(String source, Exception e) {
    boolean shouldFail = super.maybeFailEngine(source, e);
    if (shouldFail) {
        return true;
    }
    // Check for AlreadyClosedException -- ACE is a very special
    // exception that should only be thrown in a tragic event. we pass on the checks to failOnTragicEvent which will
    // throw and AssertionError if the tragic event condition is not met.
    if (e instanceof AlreadyClosedException) {
        return failOnTragicEvent((AlreadyClosedException)e);
    } else if (e != null
            && ((indexWriter.isOpen() == false && indexWriter.getTragicException() == e)
                || (translog.isOpen() == false && translog.getTragicException() == e))) {
        // this spot on - we are handling the tragic event exception here so we have to fail the engine
        // right away
        failEngine(source, e);
        return true;
    }
    return false;
}
/**
 * @return true if the writer is null or closed
 */
private boolean writerClosed() {
    if (writer == null) {
        return true;
    }
    return !writer.isOpen();
}
/**
 * Reports whether the underlying index writer is still open; this object is
 * "open" exactly when its writer is.
 */
@Override
public boolean isOpen() {
    return indexWriter.isOpen();
}
/**
 * Checks if a project index is open.
 *
 * @return True if the index is open. False otherwise.
 */
@Override
public boolean isOpen() {
    // A missing writer means the index was never opened (or has been torn down);
    // collapse the mutable-flag pattern into a single short-circuit expression.
    return indexWriter != null && indexWriter.isOpen();
}
/**
 * Commits any pending changes, closes the writer, and records the closed
 * state. A null or already-closed writer is left untouched.
 *
 * @throws IOException if the commit or close fails
 */
private void commitAndCloseWriter() throws IOException {
    if (indexWriter == null || !indexWriter.isOpen()) {
        return;
    }
    indexWriter.commit();
    indexWriter.close();
    closed.set(true);
}
private void reOpen() throws IOException { if (indexWriter != null && indexWriter.isOpen() && directory instanceof RAMDirectory) { // don't waste time reopening an in memory index return; } open(); }
/**
 * Opens (or re-opens) the Lucene index writer over the configured index
 * directory. A no-op when a live writer already exists.
 *
 * @throws StorageException if the directory or writer cannot be opened
 */
public void open() throws StorageException {
    if (this.luceneIndexWriter != null && this.luceneIndexWriter.isOpen()) {
        return; // already open — nothing to do
    }
    try {
        Directory directory = FSDirectory.open(this.indexDirectory);
        try {
            IndexWriterConfig conf = new IndexWriterConfig(analyzer);
            this.luceneIndexWriter = new IndexWriter(directory, conf);
        } catch (IOException e) {
            // Fix: don't leak the directory handle when writer creation fails
            // (e.g. a held write.lock) — the original left it open forever.
            directory.close();
            throw e;
        }
        this.isOpen = true;
    } catch (IOException e) {
        throw new StorageException(e.getMessage(), e);
    }
}
/**
 * Commits pending changes on the writer, swallowing and logging any failure;
 * does nothing once this engine is closed or the writer is gone.
 */
@Override
public void flush() {
    try {
        final boolean writable = !closed.get() && indexWriter != null && indexWriter.isOpen();
        if (writable) {
            indexWriter.commit();
        }
    } catch (Exception e) {
        // Best-effort flush: failures are logged, never propagated.
        OLogManager.instance().error(this, "Error on flushing Lucene index", e);
    }
}
/**
 * Returns the shared index writer, lazily (re)creating it when absent or
 * closed.
 *
 * @throws SailException if this index has already been closed
 * @throws IOException   if a new writer cannot be created
 */
public synchronized IndexWriter getIndexWriter() throws IOException {
    if (closed.get()) {
        throw new SailException("Index has been closed");
    }
    final boolean needsNewWriter = indexWriter == null || !indexWriter.isOpen();
    if (needsNewWriter) {
        IndexWriterConfig indexWriterConfig = getIndexWriterConfig();
        indexWriter = new IndexWriter(directory, indexWriterConfig);
    }
    return indexWriter;
}
/**
 * Quietly releases the searcher manager and then the index writer (the
 * latter only if it is still open). Safe to call more than once.
 */
@Override
public synchronized void close() {
    if (searcherManager != null) {
        IOUtils.closeQuietly(searcherManager);
        searcherManager = null;
    }
    final boolean writerOpen = indexWriter != null && indexWriter.isOpen();
    if (writerOpen) {
        IOUtils.closeQuietly(indexWriter);
    }
}
/**
 * Commit any changes if the state of the index requires it: refreshes the
 * reader and searcher only when a live writer exists and the index is DIRTY.
 */
public synchronized void maybeRefresh() {
    final boolean writerGone = this._writer == null || !this._writer.isOpen();
    if (writerGone || !isState(State.DIRTY)) {
        return;
    }
    try {
        LOGGER.debug("Reopen reader and searcher");
        this._reader.maybeRefresh();
        this._searcher.maybeRefresh();
        state(State.CLEAN);
    } catch (Throwable ex) {
        // Deliberately broad: a refresh failure must never propagate.
        LOGGER.error("Failed to reopen Index Searcher because of an I/O error", ex);
    }
}
/**
 * Quietly closes, in order: the searcher/taxonomy manager, the taxonomy
 * writer, and finally the index writer (only if still open).
 */
@Override
public synchronized void close() {
    if (searcherTaxonomyManager != null) {
        IOUtils.closeQuietly(searcherTaxonomyManager);
        searcherTaxonomyManager = null;
    }
    if (taxonomyWriter != null) {
        IOUtils.closeQuietly(taxonomyWriter);
    }
    final boolean writerStillOpen = indexWriter != null && indexWriter.isOpen();
    if (writerStillOpen) {
        IOUtils.closeQuietly(indexWriter);
    }
}
}
/**
 * Closes the index (when its writer is still open) and, for local paginated
 * storage, removes the on-disk index folder.
 *
 * @throws IOException if closing or deleting fails
 */
private void internalDelete() throws IOException {
    final boolean writerAlive = indexWriter != null && indexWriter.isOpen();
    if (writerAlive) {
        close();
    }
    final OAbstractPaginatedStorage underlying =
        (OAbstractPaginatedStorage) storage.getUnderlying();
    if (underlying instanceof OLocalPaginatedStorage) {
        // Only local storage has an on-disk folder to remove.
        deleteIndexFolder();
    }
}
@Override public void close() throws IOException { if (COLLECT_METRICS) { MetricUtils.removeAllMetricsThatStartWith(MetricRegistry.name(METRIC_PREFIX, name)); } committerThread.close(); // commit will fail if writer is closed if (writer.isOpen()) { // flush first writer.flush(); commit(); writer.close(); } searcherManager.close(); }
/**
 * Deletes this Lucene index: closes an open writer (dropping its data) and,
 * for local paginated storage, removes the index folder on disk. Any
 * {@link IOException} is rethrown wrapped in an {@code OStorageException}.
 */
@Override
public void delete() {
    try {
        updateLastAccess();
        openIfClosed();
        final boolean writerAlive = indexWriter != null && indexWriter.isOpen();
        if (writerAlive) {
            doClose(true);
        }
        final OAbstractPaginatedStorage underlying =
            (OAbstractPaginatedStorage) storage.getUnderlying();
        if (underlying instanceof OLocalPaginatedStorage) {
            deleteIndexFolder();
        }
    } catch (IOException e) {
        throw OException.wrapException(new OStorageException("Error during deletion of Lucene index " + name), e);
    }
}