@Override
public Lock obtainLock(String name) throws IOException {
  // Pure pass-through: the wrapped directory performs the actual locking.
  final Lock acquired = in.obtainLock(name);
  return acquired;
}
/**
 * Create a new CheckIndex on the directory.
 *
 * Obtains the writer lock ({@link IndexWriter#WRITE_LOCK_NAME}) up front so no
 * IndexWriter can modify the index while it is being checked, then delegates
 * to the (Directory, Lock) constructor.
 *
 * @throws IOException if the write lock cannot be obtained
 */
public CheckIndex(Directory dir) throws IOException { this(dir, dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)); }
@Override
public Lock obtainLock(String name) throws IOException {
  // Forward the request to the underlying directory implementation.
  return delegate.obtainLock(name);
}
@Override
public Lock obtainLock(String name) throws IOException {
  // Resolve which backing directory owns this file name, then lock there.
  final Directory target = getDirectory(name);
  return target.obtainLock(name);
}
/** Acquires write locks on all the directories; be sure * to match with a call to {@link IOUtils#close} in a * finally clause. */ private List<Lock> acquireWriteLocks(Directory... dirs) throws IOException { List<Lock> locks = new ArrayList<>(dirs.length); for(int i=0;i<dirs.length;i++) { boolean success = false; try { Lock lock = dirs[i].obtainLock(WRITE_LOCK_NAME); locks.add(lock); success = true; } finally { if (success == false) { // Release all previously acquired locks: // TODO: addSuppressed? it could be many... IOUtils.closeWhileHandlingException(locks); } } } return locks; }
// Polling wrapper around the delegate's obtainLock: retries every pollInterval
// millis until the lock is acquired or roughly lockWaitTimeout elapses. A
// timeout of LOCK_OBTAIN_WAIT_FOREVER keeps the loop retrying indefinitely.
// The FIRST LockObtainFailedException seen is preserved and becomes the cause
// of the eventual failure; interruption during sleep is rethrown as
// ThreadInterruptedException. Kept byte-identical: the retry/timeout logic is
// order-sensitive (sleepCount is incremented in the loop condition).
@Override public Lock obtainLock(String lockName) throws IOException { LockObtainFailedException failureReason = null; long maxSleepCount = lockWaitTimeout / pollInterval; long sleepCount = 0; do { try { return in.obtainLock(lockName); } catch (LockObtainFailedException failed) { if (failureReason == null) { failureReason = failed; } } try { Thread.sleep(pollInterval); } catch (InterruptedException ie) { throw new ThreadInterruptedException(ie); } } while (sleepCount++ < maxSleepCount || lockWaitTimeout == LOCK_OBTAIN_WAIT_FOREVER); // we failed to obtain the lock in the required time String reason = "Lock obtain timed out: " + this.toString(); if (failureReason != null) { reason += ": " + failureReason; } throw new LockObtainFailedException(reason, failureReason); }
// Acquire the index writer lock on directory `d` and stash it in a field.
// NOTE(review): fragment — the enclosing method/constructor is outside this
// excerpt; presumably the lock is held for the owner's lifetime — confirm.
writeLock = d.obtainLock(WRITE_LOCK_NAME);
// Hold the index write lock while iterating the pending entries, so no
// IndexWriter can concurrently modify the directory. NOTE(review): fragment —
// the loop body and the closing braces continue beyond this excerpt.
try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (Map.Entry<String, String> entry : entries) { String tempFile = entry.getKey();
/** * This method removes all lucene files from the given directory. It will first try to delete all commit points / segments * files to ensure broken commits or corrupted indices will not be opened in the future. If any of the segment files can't be deleted * this operation fails. */ public static void cleanLuceneIndex(Directory directory) throws IOException { try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (final String file : directory.listAll()) { if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { directory.deleteFile(file); // remove all segment_N files } } } try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... { // do nothing and close this will kick of IndexFileDeleter which will remove all pending files } }
// Acquire the per-node lock file on this data path and record the NodePath on
// success. NOTE(review): fragment — the enclosing try and the IOException
// handler's body continue beyond this excerpt.
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME); nodePaths[dirIndex] = new NodePath(dir); } catch (IOException e) {
// Take the writer lock, then scan the directory listing while counting
// segments files. NOTE(review): fragment — the loop body and the use of
// foundSegmentFiles continue beyond this excerpt.
try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { int foundSegmentFiles = 0; for (final String file : directory.listAll()) {
/** * Acquires, then releases, all {@code write.lock} files in the given * shard paths. The "write.lock" file is assumed to be under the shard * path's "index" directory as used by Elasticsearch. * * @throws LockObtainFailedException if any of the locks could not be acquired */ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... shardPaths) throws IOException { Lock[] locks = new Lock[shardPaths.length]; Directory[] dirs = new Directory[shardPaths.length]; try { for (int i = 0; i < shardPaths.length; i++) { // resolve the directory the shard actually lives in Path p = shardPaths[i].resolve("index"); // open a directory (will be immediately closed) on the shard's location dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING)); // create a lock for the "write.lock" file try { locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME); } catch (IOException ex) { throw new LockObtainFailedException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p, ex); } } } finally { IOUtils.closeWhileHandlingException(locks); IOUtils.closeWhileHandlingException(dirs); } }
@Override
public Lock obtainLock(String name) throws IOException {
  // No extra bookkeeping here — the wrapped directory does the locking.
  return in.obtainLock(name);
}
@Override
public Lock obtainLock(String name) throws IOException {
  // Simply forward to the delegate; no additional lock handling here.
  final Lock result = in.obtainLock(name);
  return result;
}
/**
 * Create a new CheckIndex on the directory.
 *
 * Acquires the writer lock ({@link IndexWriter#WRITE_LOCK_NAME}) first so the
 * index cannot be modified during checking, then delegates to the
 * (Directory, Lock) constructor.
 *
 * @throws IOException if the write lock cannot be obtained
 */
public CheckIndex(Directory dir) throws IOException { this(dir, dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)); }
// Optionally take the writer lock (null when obtainLock is false), then read
// the last committed segment infos and default the translog stats to all
// zeros when none were supplied. NOTE(review): fragment — the enclosing
// constructor/method continues beyond this excerpt.
indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null; this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats;
@Override
public Lock obtainLock(String name) throws IOException {
  // Route to whichever sub-directory is responsible for this name.
  return getDirectory(name).obtainLock(name);
}
// Open the index directory and take the write lock before doing the work that
// follows; both are released automatically by try-with-resources.
// NOTE(review): fragment — the body and closing braces continue beyond this
// excerpt.
try (Directory indexDir = indexDirectory) { try (Lock writeIndexLock = indexDir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
/**
 * Probes whether the index in {@code directory} is currently write-locked by
 * attempting to obtain (and immediately close) the writer lock.
 *
 * @return {@code true} if the lock could not be obtained, {@code false} otherwise
 * @throws IOException on any I/O failure other than lock contention
 */
public static boolean isLocked(Directory directory) throws IOException {
  try {
    directory.obtainLock(org.apache.lucene.index.IndexWriter.WRITE_LOCK_NAME).close();
  } catch (LockObtainFailedException failed) {
    return true;
  }
  return false;
}
/**
 * Returns whether the writer lock on {@code directory} is currently held,
 * by briefly obtaining and releasing it.
 */
private static boolean isLocked(Directory directory) throws IOException {
  boolean held;
  try (Lock probe = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
    held = false;
  } catch (LockObtainFailedException failed) {
    held = true;
  }
  return held;
}