void startRecovery() {
    // Pin the store for the lifetime of this recovery so it cannot be closed underneath us;
    // the matching decRef happens when the recovery completes or is cancelled.
    store.incRef();
    // Record one more in-flight recovery.
    onGoingRecoveries.incrementAndGet();
}
@Override
public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
    // NOTE(review): flushFirst is ignored here — presumably this engine variant has nothing
    // pending to flush; confirm against the Engine contract.
    // Take a store reference that the returned commit ref releases on close.
    store.incRef();
    return new IndexCommitRef(indexCommit, store::decRef);
}
LocalShardSnapshot(IndexShard shard) {
    this.shard = shard;
    store = shard.store();
    // Reference the store before acquiring the commit so it stays open for the snapshot.
    store.incRef();
    boolean acquired = false;
    try {
        indexCommit = shard.acquireLastIndexCommit(true);
        acquired = true;
    } finally {
        if (acquired == false) {
            // acquiring the commit failed (exception in flight) — give back the
            // store reference taken above so we do not leak it
            store.decRef();
        }
    }
}
private void refreshLastCommittedSegmentInfos() {
    /*
     * we have to inc-ref the store here since if the engine is closed by a tragic event
     * we don't acquire the write lock and wait until we have exclusive access. This might also
     * dec the store reference which can essentially close the store and unless we can inc the reference
     * we can't use it.
     */
    store.incRef();
    try {
        // reread the last committed segment infos
        lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
    } catch (Exception e) {
        // Only warn/escalate while the engine is still open; after a close the store may
        // legitimately be in a state we can no longer read, so the failure is ignored.
        if (isClosed.get() == false) {
            try {
                logger.warn("failed to read latest segment infos on flush", e);
            } catch (Exception inner) {
                // a logging failure must not replace the original exception
                e.addSuppressed(inner);
            }
            if (Lucene.isCorruptionException(e)) {
                // index corruption is fatal for the flush — surface it to the caller
                throw new FlushFailedEngineException(shardId, e);
            }
        }
    } finally {
        // balance the incRef above regardless of outcome
        store.decRef();
    }
}
store.incRef(); try { if (failedEngine.get() != null) {
/**
 * Creates a new recovery target object that represents a recovery to the provided shard.
 *
 * @param indexShard                        local shard where we want to recover to
 * @param sourceNode                        source node of the recovery where we recover from
 * @param listener                          called when recovery is completed/failed
 * @param ensureClusterStateVersionCallback callback to ensure that the current node is at least on a cluster state with the provided
 *                                          version; necessary for primary relocation so that new primary knows about all other ongoing
 *                                          replica recoveries when replicating documents (see {@link RecoverySourceHandler})
 */
public RecoveryTarget(final IndexShard indexShard,
                      final DiscoveryNode sourceNode,
                      final PeerRecoveryTargetService.RecoveryListener listener,
                      final LongConsumer ensureClusterStateVersionCallback) {
    super("recovery_status");
    this.cancellableThreads = new CancellableThreads();
    this.recoveryId = idGenerator.incrementAndGet();
    this.listener = listener;
    this.logger = Loggers.getLogger(getClass(), indexShard.shardId());
    this.indexShard = indexShard;
    this.sourceNode = sourceNode;
    this.shardId = indexShard.shardId();
    // unique prefix so temp files from this recovery can be told apart (and cleaned up)
    this.tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + ".";
    this.store = indexShard.store();
    this.ensureClusterStateVersionCallback = ensureClusterStateVersionCallback;
    // make sure the store is not released until we are done.
    store.incRef();
    indexShard.recoveryStats().incCurrentAsTarget();
}
void sendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) throws Exception { store.incRef(); try {
mp.setUpgradeInProgress(true, upgradeOnlyAncientSegments); store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize try { if (onlyExpungeDeletes) {
store.incRef(); try { Engine engine;
store.incRef(); try { StopWatch stopWatch = new StopWatch().start();
store.incRef(); try { logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, metadata.name(), shardId);
store.incRef(); int indexIncrementalFileCount = 0; int indexTotalNumberOfFiles = 0;
void startRecovery() {
    // Hold a store reference while the recovery is running so the store cannot be
    // closed out from under it; released when the recovery finishes.
    store.incRef();
    // Bump the count of concurrently running recoveries.
    onGoingRecoveries.incrementAndGet();
}
void startRecovery() {
    // Keep the store alive for the duration of the recovery (paired with a decRef
    // when the recovery ends), then register it as in-flight.
    store.incRef();
    onGoingRecoveries.incrementAndGet();
}
try { Store store = config.getStore(); store.incRef(); DirectoryReader reader = null; Directory directory = store.directory();
@Override
public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
    // NOTE(review): flushFirst has no effect in this implementation — verify that is
    // intentional for this engine type.
    // The store reference taken here is handed to the commit ref and released on close.
    store.incRef();
    return new IndexCommitRef(indexCommit, store::decRef);
}
@Override
public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
    // Pin the store; the returned IndexCommitRef owns the reference and decRefs on close.
    // NOTE(review): flushFirst is unused here — presumably nothing to flush; confirm.
    store.incRef();
    return new IndexCommitRef(indexCommit, store::decRef);
}
store.incRef(); try { store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis() ); store.incRef(); IndexWriter writer = null; Translog translog = null;
SegmentInfos si = null; final Store store = indexShard.store(); store.incRef(); try { try {