/**
 * Builds the {@link EngineConfig} used to (re)create this shard's engine, pulling all
 * collaborators (store, merge policy, analyzer, codec service, caches, translog config,
 * refresh listeners, ...) from the surrounding shard state.
 */
private EngineConfig newEngineConfig() {
    Sort indexSort = indexSortSupplier.get(); // presumably null when the index defines no sort -- TODO confirm
    return new EngineConfig(shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store,
        indexSettings.getMergePolicy(), mapperService.indexAnalyzer(), similarityService.similarity(mapperService),
        codecService, shardEventListener, indexCache.query(), cachingPolicy, translogConfig,
        IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
        // external refresh listeners first, then the internal metric-updating listener
        Collections.singletonList(refreshListeners),
        Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
        indexSort, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm, tombstoneDocSupplier());
}
private IndexWriterConfig getIndexWriterConfig() { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); MergePolicy mergePolicy = config().getMergePolicy(); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); iwc.setCodec(engineConfig.getCodec()); iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh if (config().getIndexSort() != null) { iwc.setIndexSort(config().getIndexSort());
final BiFunction<Long, Long, LocalCheckpointTracker> localCheckpointTrackerSupplier) { super(engineConfig); if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { updateAutoIdTimestamp(Long.MAX_VALUE, true); this.uidField = engineConfig.getIndexSettings().isSingleType() ? IdFieldMapper.NAME : UidFieldMapper.NAME; final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy( engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis() ); store.incRef(); boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); throttle = new IndexThrottle(); try { translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier()); assert translog.getGeneration() != null; this.translog = translog; this.softDeleteEnabled = engineConfig.getIndexSettings().isSoftDeleteEnabled(); this.softDeletesPolicy = newSoftDeletesPolicy(); this.combinedDeletionPolicy = for (ReferenceManager.RefreshListener listener: engineConfig.getExternalRefreshListener()) { this.externalSearcherManager.addListener(listener);
protected Engine(EngineConfig engineConfig) { Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine"); this.engineConfig = engineConfig; this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); // we use the engine class directly here to make sure all subclasses have the same logger name this.logger = Loggers.getLogger(Engine.class, engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); }
/**
 * Creates a searcher via the parent factory, then applies the engine-wide
 * search configuration (similarity, query cache and caching policy).
 */
@Override
public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
    final IndexSearcher configured = super.newSearcher(reader, previousReader);
    configured.setSimilarity(engineConfig.getSimilarity());
    configured.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
    configured.setQueryCache(engineConfig.getQueryCache());
    return configured;
}
}
/**
 * Wraps a raw directory reader for searching: first pins it to this shard, then
 * (when soft deletes are enabled) hides soft-deleted documents, and finally applies
 * the caller-supplied wrapper function.
 */
protected final DirectoryReader wrapReader(DirectoryReader reader,
                                           Function<DirectoryReader, DirectoryReader> readerWrapperFunction) throws IOException {
    // Pin the reader to this shard first so downstream consumers can identify its origin.
    DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId());
    if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
        // Filter out documents marked as soft-deleted.
        wrapped = new SoftDeletesDirectoryReaderWrapper(wrapped, Lucene.SOFT_DELETES_FIELD);
    }
    return readerWrapperFunction.apply(wrapped);
}
/**
 * Copies {@code config} into a new {@link EngineConfig}, overriding the open mode and the
 * global checkpoint supplier, substituting a fresh {@link CodecService}, and resetting the
 * internal refresh listeners to an empty list; every other value is carried over unchanged.
 */
public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, LongSupplier globalCheckpointSupplier) {
    return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(),
        config.getIndexSettings(), config.getWarmer(), config.getStore(), config.getMergePolicy(),
        config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener(),
        config.getQueryCache(), config.getQueryCachingPolicy(), config.getForceNewHistoryUUID(),
        config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(),
        // internal refresh listeners are deliberately dropped in the copy
        Collections.emptyList(),
        config.getIndexSort(), config.getTranslogRecoveryRunner(), config.getCircuitBreakerService(),
        globalCheckpointSupplier);
}
/**
 * Base engine constructor: validates that a store and a snapshot deletion policy are
 * present, then captures the shared collaborators from the supplied configuration.
 */
protected Engine(EngineConfig engineConfig) {
    Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine");
    Objects.requireNonNull(engineConfig.getDeletionPolicy(), "Snapshot deletion policy must be provided to the engine");
    this.engineConfig = engineConfig;
    this.shardId = engineConfig.getShardId();
    this.store = engineConfig.getStore();
    this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
        engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId());
    this.eventListener = engineConfig.getEventListener();
    this.deletionPolicy = engineConfig.getDeletionPolicy();
}
/** Returns the GC-deletes interval in milliseconds, read from the index settings. */
long getGcDeletesInMillis() { return engineConfig.getIndexSettings().getGcDeletesInMillis(); }
public VersionLessInternalEngine(EngineConfig engineConfig) throws EngineException { super(engineConfig); openMode = engineConfig.getOpenMode(); if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_beta1)) { maxUnsafeAutoIdTimestamp.set(engineConfig.getMaxUnsafeAutoIdTimestamp()); this.uidField = engineConfig.getIndexSettings().isSingleType() ? IdFieldMapper.NAME : UidFieldMapper.NAME; boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().relativeTimeInMillis(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); throttle = new IndexThrottle(); this.searcherFactory = new SearchFactory(logger, isClosed, engineConfig); if (engineConfig.getRefreshListeners() != null) { searcherManager.addListener(engineConfig.getRefreshListeners());
boolean success = false; try { this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis(); this.indexingService = engineConfig.getIndexingService(); this.warmer = engineConfig.getWarmer(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig()); throttle = new IndexThrottle(); this.searcherFactory = new SearchFactory(logger, isClosed, engineConfig); final Translog.TranslogGeneration translogGeneration; try { final boolean create = engineConfig.isCreate(); writer = createWriter(create); indexWriter = writer; translog = openTranslog(engineConfig, writer, create || skipInitialTranslogRecovery || engineConfig.forceNewTranslog()); translogGeneration = translog.getGeneration(); assert translogGeneration != null;
super(config); try { Store store = config.getStore(); store.incRef(); DirectoryReader reader = null; this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; reader = ElasticsearchDirectoryReader.wrap(open(directory), config.getShardId()); if (config.getIndexSettings().isSoftDeleteEnabled()) { reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); this.indexCommit = reader.getIndexCommit(); this.searcherManager = new SearcherManager(reader, new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); this.docsStats = docsStats(lastCommittedSegmentInfos); this.indexWriterLock = indexWriterLock;
/**
 * Base engine constructor (legacy variant using Guava-style {@code Preconditions}):
 * validates that a store and a snapshot deletion policy are present, then captures
 * the shared collaborators from the supplied configuration.
 */
protected Engine(EngineConfig engineConfig) {
    Preconditions.checkNotNull(engineConfig.getStore(), "Store must be provided to the engine");
    Preconditions.checkNotNull(engineConfig.getDeletionPolicy(), "Snapshot deletion policy must be provided to the engine");
    this.engineConfig = engineConfig;
    this.shardId = engineConfig.getShardId();
    this.store = engineConfig.getStore();
    this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
        engineConfig.getIndexSettings(), engineConfig.getShardId());
    this.failedEngineListener = engineConfig.getFailedEngineListener();
    this.deletionPolicy = engineConfig.getDeletionPolicy();
}
/**
 * Test support wiring: derives a deterministic per-shard {@link Random} from the test
 * seed and decides (with the configured probability) whether readers opened by this
 * engine are wrapped with the supplied {@code FilterDirectoryReader} class.
 */
public MockEngineSupport(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) {
    final Settings settings = config.getIndexSettings().getSettings();
    shardId = config.getShardId();
    filterCache = config.getQueryCache();
    filterCachingPolicy = config.getQueryCachingPolicy();
    // Seed the RNG from the index test seed so each run is reproducible.
    final long seed = config.getIndexSettings().getValue(ESIntegTestCase.INDEX_TEST_SEED_SETTING);
    final Random rnd = new Random(seed);
    final boolean shouldWrap = rnd.nextDouble() < WRAP_READER_RATIO.get(settings);
    if (logger.isTraceEnabled()) {
        logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]",
            this.getClass().getName(), shardId, seed, shouldWrap);
    }
    mockContext = new MockContext(rnd, shouldWrap, wrapper, settings);
    this.searcherCloseable = new SearcherCloseable();
    LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
    this.disableFlushOnClose = DISABLE_FLUSH_ON_CLOSE.get(settings);
}
private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); final String translogUUID = loadTranslogUUIDFromLastCommit(); // A translog checkpoint from 5.x index does not have translog_generation_key and Translog's ctor will read translog gen values // from translogDeletionPolicy. We need to bootstrap these values from the recovering commit before calling Translog ctor. if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0)) { final SegmentInfos lastCommitInfo = store.readLastCommittedSegmentsInfo(); final long minRequiredTranslogGen = Long.parseLong(lastCommitInfo.userData.get(Translog.TRANSLOG_GENERATION_KEY)); translogDeletionPolicy.setTranslogGenerationOfLastCommit(minRequiredTranslogGen); translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredTranslogGen); } // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier()); }
private void pruneDeletedTombstones() {
    /*
     * Two different trimming strategies are conceivable for GC deletes on primary and replicas.
     * Delete operations on a primary are remembered for at least one GC delete cycle and trimmed
     * periodically; this is, at the moment, the best we can do on primaries for user-facing APIs,
     * but that arbitrary time limit is problematic for replicas. On replicas we should trim only
     * deletes whose seqno is at most the local checkpoint, for the following reason.
     *
     * Suppose o1 and o2 are two operations on the same document with seq#(o1) < seq#(o2), and o2
     * arrives before o1 on the replica. o2 is processed normally since it arrives first; when o1
     * arrives it should be discarded:
     * - If seq#(o1) <= LCP, then it will not be added to Lucene, as it was already previously added.
     * - If seq#(o1) > LCP, then it depends on the nature of o2:
     *   *) If o2 is a delete then its seq# is recorded in the VersionMap, since seq#(o2) > seq#(o1) > LCP,
     *      so a lookup can find it and determine that o1 is stale.
     *   *) If o2 is an indexing then its seq# is either in Lucene (if refreshed) or the VersionMap
     *      (if not refreshed yet), so a real-time lookup can find it and determine that o1 is stale.
     *
     * Here we prefer to deploy a single trimming strategy, satisfying both constraints, on primary
     * and replicas alike because:
     * - It's simpler - no need to distinguish whether an engine is running in primary mode, replica
     *   mode, or being promoted.
     * - If a replica is subsequently promoted, user experience is maintained as that replica
     *   remembers deletes for the last GC cycle.
     *
     * However, the version map may consume less memory if we deployed two different trimming
     * strategies for primary and replicas.
     */
    final long nowMSec = engineConfig.getThreadPool().relativeTimeInMillis();
    final long maxTimestampToPrune = nowMSec - engineConfig.getIndexSettings().getGcDeletesInMillis();
    versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint());
    lastDeleteVersionPruneTimeMSec = nowMSec;
}
/**
 * Searcher factory bound to an engine: keeps the engine's warmer, shard id, logger
 * and closed-state flag so freshly created searchers can be configured and warmed.
 */
SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) {
    super(engineConfig);
    this.logger = logger;
    this.isEngineClosed = isEngineClosed;
    // Cache frequently accessed config values locally.
    warmer = engineConfig.getWarmer();
    shardId = engineConfig.getShardId();
}
Function<DirectoryReader, DirectoryReader> readerWrapperFunction) { super(config); this.searcherFactory = new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService()); try { Store store = config.getStore(); store.incRef(); DirectoryReader reader = null;
LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); if (randomBoolean()) { engine.config().setEnableGcDeletes(false);
if (useCompoundFile) { try { directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); } catch (IOException e) {