@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
                                            long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    if (softDeleteEnabled == false) {
        throw new IllegalStateException("accessing changes snapshot requires soft-deletes enabled");
    }
    ensureOpen();
    refreshIfNeeded(source, toSeqNo);
    Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
    try {
        LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
            searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
        searcher = null; // ownership transferred to the snapshot; don't close it here
        return snapshot;
    } catch (Exception e) {
        try {
            maybeFailEngine("acquire changes snapshot", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        IOUtils.close(searcher); // no-op if ownership was handed off above
    }
}
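// A minimal caller-side sketch (hypothetical names, not from this file): the returned
// Translog.Snapshot is Closeable and hands back operations from next() until it
// returns null, so the consumer must close it (which also releases the searcher
// whose ownership was transferred above), e.g. with try-with-resources:
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("peer-recovery", mapperService, fromSeqNo, toSeqNo, true)) {
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
        // replay or ship op
    }
}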
final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
        String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
        long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
        if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) {
            logger.trace("start renewing sync commit [{}]", syncId);
            commitIndexWriter(indexWriter, translog, syncId);
            logger.debug("successfully sync committed. sync id [{}].", syncId);
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            renewed = true;
        }
    } catch (IOException ex) {
        maybeFailEngine("renew sync commit", ex);
        throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) {
        // refresh outside of the write lock; we have to refresh the internal searcher
        // here to ensure we release unreferenced segments.
        refresh("renew sync commit", SearcherScope.INTERNAL);
    }
    return renewed;
}
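// Sketch of a typical call site (an assumption based on how the engine reacts to
// finished merges, simplified): try to carry the existing sync id forward first,
// and only fall back to a real flush when new operations have arrived since the
// last commit, i.e. when tryRenewSyncCommit() returns false.
void onMergeCompleted() { // hypothetical hook name
    if (tryRenewSyncCommit() == false) {
        flush();
    }
}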
    return SyncedFlushResult.SUCCESS;
} catch (IOException ex) {
    maybeFailEngine("sync commit", ex);
    throw new EngineException(shardId, "failed to sync commit", ex);
    maybeFailEngine("flush", ex);
    throw ex;
} finally {
} catch (Exception e) {
    try {
        maybeFailEngine("force merge", e);
    } catch (Exception inner) {
        e.addSuppressed(inner);
private ExternalSearcherManager createSearcherManager(SearchFactory externalSearcherFactory) throws EngineException {
    boolean success = false;
    SearcherManager internalSearcherManager = null;
    try {
        try {
            final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
            internalSearcherManager = new SearcherManager(directoryReader,
                new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService()));
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            ExternalSearcherManager externalSearcherManager = new ExternalSearcherManager(internalSearcherManager, externalSearcherFactory);
            success = true;
            return externalSearcherManager;
        } catch (IOException e) {
            maybeFailEngine("start", e);
            try {
                indexWriter.rollback();
            } catch (IOException inner) { // iw is closed below
                e.addSuppressed(inner);
            }
            throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
        }
    } finally {
        if (success == false) { // release everything we created on a failure
            IOUtils.closeWhileHandlingException(internalSearcherManager, indexWriter);
        }
    }
}
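// For reference, a sketch of the standard Lucene acquire/release discipline that
// the managers created above are used with (searcher names are illustrative):
IndexSearcher indexSearcher = searcherManager.acquire();
try {
    // run queries against indexSearcher
} finally {
    searcherManager.release(indexSearcher); // always release; never close an acquired searcher
}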
    indexWriter.addDocument(doc);
} catch (Exception ex) {
    if (maybeFailEngine("noop", ex)) {
        throw ex;
} catch (RuntimeException | IOException e) {
    try {
        maybeFailEngine("index", e);
    } catch (Exception inner) {
        e.addSuppressed(inner);
    maybeFailEngine("realtime_get", e); // let's check whether the translog has failed with a tragic event
    throw new EngineException(shardId, "failed to read operation from translog", e);
        maybeFailEngine("index", e);
    } catch (Exception inner) {
        e.addSuppressed(inner);
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
                                            long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
    // TODO: Should we defer the refresh until we really need it?
    ensureOpen();
    refreshIfNeeded(source, toSeqNo);
    Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
    try {
        LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
            searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
        searcher = null;
        return snapshot;
    } catch (Exception e) {
        try {
            maybeFailEngine("acquire changes snapshot", e);
        } catch (Exception inner) {
            e.addSuppressed(inner);
        }
        throw e;
    } finally {
        IOUtils.close(searcher);
    }
}
final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        ensureCanFlush();
        String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
        if (syncId != null && translog.totalOperations() == 0 && indexWriter.hasUncommittedChanges()) {
            logger.trace("start renewing sync commit [{}]", syncId);
            commitIndexWriter(indexWriter, translog, syncId);
            logger.debug("successfully sync committed. sync id [{}].", syncId);
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            renewed = true;
        }
    } catch (IOException ex) {
        maybeFailEngine("renew sync commit", ex);
        throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) {
        // refresh outside of the write lock
        refresh("renew sync commit");
    }
    return renewed;
}
@Override
public void delete(Delete delete) throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        // NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
        innerDelete(delete);
    } catch (OutOfMemoryError | IllegalStateException | IOException t) {
        maybeFailEngine("delete", t);
        throw new DeleteFailedEngineException(shardId, delete, t);
    }
    maybePruneDeletedTombstones();
    checkVersionMapRefresh();
}
private SearcherManager createSearcherManager() throws EngineException {
    boolean success = false;
    SearcherManager searcherManager = null;
    try {
        try {
            final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId);
            searcherManager = new SearcherManager(directoryReader, searcherFactory);
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
            success = true;
            return searcherManager;
        } catch (IOException e) {
            maybeFailEngine("start", e);
            try {
                indexWriter.rollback();
            } catch (IOException e1) { // iw is closed below
                e.addSuppressed(e1);
            }
            throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
        }
    } finally {
        if (success == false) { // release everything we created on a failure
            IOUtils.closeWhileHandlingException(searcherManager, indexWriter);
        }
    }
}
private SearcherManager createSearcherManager() throws EngineException {
    boolean success = false;
    SearcherManager searcherManager = null;
    try {
        try {
            final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
            searcherManager = new SearcherManager(directoryReader, searcherFactory);
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
            success = true;
            return searcherManager;
        } catch (IOException e) {
            maybeFailEngine("start", e);
            try {
                indexWriter.rollback();
            } catch (IOException inner) { // iw is closed below
                e.addSuppressed(inner);
            }
            throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
        }
    } finally {
        if (success == false) { // release everything we created on a failure
            IOUtils.closeWhileHandlingException(searcherManager, indexWriter);
        }
    }
}
final boolean tryRenewSyncCommit() {
    boolean renewed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        ensureOpen();
        String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID);
        if (syncId != null && translog.totalOperations() == 0 && indexWriter.hasUncommittedChanges()) {
            logger.trace("start renewing sync commit [{}]", syncId);
            commitIndexWriter(indexWriter, translog, syncId);
            logger.debug("successfully sync committed. sync id [{}].", syncId);
            lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
            renewed = true;
        }
    } catch (IOException ex) {
        maybeFailEngine("renew sync commit", ex);
        throw new EngineException(shardId, "failed to renew sync commit", ex);
    }
    if (renewed) {
        // refresh outside of the write lock
        refresh("renew sync commit");
    }
    return renewed;
}
@Override
public boolean index(Index index) throws EngineException {
    final boolean created;
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (index.origin() == Operation.Origin.RECOVERY) {
            // Don't throttle recovery operations
            created = innerIndex(index);
        } else {
            try (Releasable r = throttle.acquireThrottle()) {
                created = innerIndex(index);
            }
        }
    } catch (OutOfMemoryError | IllegalStateException | IOException t) {
        maybeFailEngine("index", t);
        throw new IndexFailedEngineException(shardId, index.type(), index.id(), t);
    }
    checkVersionMapRefresh();
    return created;
}
@Override
public void create(Create create) throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
        ensureOpen();
        if (create.origin() == Operation.Origin.RECOVERY) {
            // Don't throttle recovery operations
            innerCreate(create);
        } else {
            try (Releasable r = throttle.acquireThrottle()) {
                innerCreate(create);
            }
        }
    } catch (OutOfMemoryError | IllegalStateException | IOException t) {
        maybeFailEngine("create", t);
        throw new CreateFailedEngineException(shardId, create.type(), create.id(), t);
    }
    checkVersionMapRefresh();
}
private void innerDelete(DeleteByQuery delete) throws EngineException {
    try {
        Query query = delete.query();
        if (delete.aliasFilter() != null) {
            query = new BooleanQuery.Builder()
                .add(query, Occur.MUST)
                .add(delete.aliasFilter(), Occur.FILTER)
                .build();
        }
        if (delete.nested()) {
            query = new IncludeNestedDocsQuery(query, delete.parentFilter());
        }
        indexWriter.deleteDocuments(query);
        translog.add(new Translog.DeleteByQuery(delete));
    } catch (Throwable t) {
        maybeFailEngine("delete_by_query", t);
        throw new DeleteByQueryFailedEngineException(shardId, delete, t);
    }
    // TODO: This is heavy, since we refresh, but we must do this because we don't know which documents were
    // in fact deleted (i.e., our versionMap isn't updated), so we must force a cutover to a new reader to
    // "see" the deletions:
    refresh("delete_by_query");
}