/**
 * Registers a fileName in the listing of files belonging to this index.
 * No-op (and no cache write) when the name is already present.
 *
 * @param fileName the name of the file to register
 */
void addFileName(final String fileName) {
   writeLock.lock();
   try {
      final FileListCacheValue listing = getFileList();
      // only push the listing back to the cache when it actually changed
      if (listing.add(fileName)) {
         updateFileList(listing);
         if (trace) {
            log.trace("Updated file listing: added " + fileName);
         }
      }
   } finally {
      writeLock.unlock();
   }
}
long fileLength(final String name) { final FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L; //as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * Checks whether a file with the given name is part of this index.
 *
 * @param name the file name to test
 * @return {@code true} when the file is known to the file listing
 */
boolean fileExists(final String name) {
   final boolean present = fileOps.fileExists(name);
   return present;
}
String[] list() { final Set<String> files = fileOps.getFileList(); //Careful! if you think you can optimize this array allocation, think again. //The _files_ are a concurrent structure, its size could vary in parallel: //the array population and dimensioning need to be performed atomically //to avoid trailing null elements in the returned array. final String[] array = files.toArray(new String[0]); return array; }
/**
 * Removes a file from the index listing and schedules deletion of its
 * contents once no reader still holds it.
 *
 * @param name the file name to delete
 */
void deleteFile(final String name) {
   // drop the name from the listing first, then let the read-lock
   // machinery delete (or defer deletion of) the actual data
   fileOps.deleteFileName(name);
   readLocks.deleteOrReleaseReadLock(name);
   if (log.isDebugEnabled()) {
      log.debugf("Removed file: %s from index: %s", name, indexName);
   }
}
/**
 * Creates the low-level directory implementation backing a Lucene index
 * stored in Infinispan caches.
 * <p>
 * Both caches are narrowed to {@code AdvancedCache} with
 * {@code SKIP_INDEXING} so that writes to index data are never themselves
 * indexed.
 *
 * @param metadataCache cache holding per-file metadata
 * @param chunksCache cache holding the file content chunks
 * @param indexName the name identifying this index
 * @param chunkSize maximum size of each stored chunk; must be positive
 * @param readLocker coordinates deferred deletion of files still being read
 * @throws IllegalArgumentException if {@code chunkSize} is not positive
 */
@SuppressWarnings("unchecked") // caller supplies wildcard-typed caches; keys/values are controlled by this class
public DirectoryImplementor(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, String indexName, int chunkSize, SegmentReadLocker readLocker) {
   if (chunkSize <= 0)
      throw new IllegalArgumentException("chunkSize must be a positive integer");
   this.metadataCache = (AdvancedCache<FileCacheKey, FileMetadata>) metadataCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.chunksCache = (AdvancedCache<ChunkCacheKey, Object>) chunksCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.indexName = indexName;
   this.chunkSize = chunkSize;
   this.fileOps = new FileListOperations(this.metadataCache, indexName);
   this.readLocks = readLocker;
}
/**
 * Renames a file by copying all of its chunks under the new name, then
 * publishing metadata for the new name and finally dropping the old name.
 * The ordering matters: new chunks and metadata must be visible before the
 * old entries disappear, so concurrent readers never observe a gap.
 *
 * @param from the existing file name
 * @param to the new file name
 */
void renameFile(final String from, final String to) {
   final FileCacheKey fromKey = new FileCacheKey(indexName, from, affinitySegmentId);
   // NOTE(review): assumes 'from' exists — a missing entry would make
   // metadata null and fail on getBufferSize(); confirm callers guarantee it.
   final FileMetadata metadata = metadataCache.get(fromKey);
   final int bufferSize = metadata.getBufferSize();

   // preparation: copy all chunks to new keys, stopping at the first
   // missing chunk index (chunks are numbered contiguously from 0)
   int i = -1;
   Object ob;
   do {
      final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize, affinitySegmentId);
      ob = chunksCache.get(fromChunkKey);
      if (ob == null) {
         break;
      }
      final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize, affinitySegmentId);
      // IGNORE_RETURN_VALUES: the previous value is not needed, avoiding extra work on the put
      chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob);
   } while (true);

   // rename metadata first
   metadataCache.put(new FileCacheKey(indexName, to, affinitySegmentId), metadata);
   fileOps.removeAndAdd(from, to);

   // now trigger deletion of old file chunks:
   readLocks.deleteOrReleaseReadLock(from);
   if (trace) {
      log.tracef("Renamed file from: %s to: %s in index %s from %s", from, to, indexName, getAddress(metadataCache));
   }
}
/**
 * Flushes any pending buffers, publishes the updated file metadata and
 * registers the file name in the index listing.
 * <p>
 * Flush order depends on how far the output progressed: if writing never
 * advanced past the first chunk, only the current buffer is stored
 * (possibly resized); otherwise the retained first-chunk buffer is stored
 * at position 0 before the current (last) buffer.
 */
public void close() {
   if (currentChunkNumber==0) {
      //store current chunk, possibly resizing it
      storeCurrentBuffer(true);
   } else {
      //no need to resize first chunk, just store it:
      storeBufferAsChunk(this.firstChunkBuffer, 0);
      storeCurrentBuffer(true);
   }
   // release buffer references; this output must not be written to again
   buffer = null;
   firstChunkBuffer = null;
   // override existing file header with updated accesstime
   metadataCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(fileKey, file);
   // make the file visible in the directory listing only after its
   // metadata and chunks are stored
   fileOps.addFileName(this.fileKey.getFileName());
   if (trace) {
      log.tracef("Closed IndexOutput for %s", fileKey);
   }
}
/**
 * Lists the names of all files making up this index.
 *
 * @return an array with the current file names
 */
String[] list() {
   final String[] filenames = fileOps.listFilenames();
   return filenames;
}
/**
 * Checks whether the given name appears in this index's file listing.
 *
 * @param name the file name to test
 * @return {@code true} when the listing contains the name
 */
boolean fileExists(final String name) {
   // membership test against the shared file listing
   return fileOps.getFileList().contains(name);
}
/**
 * Removes a file from the index listing and schedules deletion of its
 * contents once no reader still holds it.
 *
 * @param name the file name to delete
 */
void deleteFile(final String name) {
   // unlist the name first, then let the read-lock machinery delete
   // (or defer deletion of) the backing data
   fileOps.deleteFileName(name);
   readLocks.deleteOrReleaseReadLock(name);
   if (log.isDebugEnabled()) {
      log.debugf("Removed file: %s from index: %s from %s", name, indexName, getAddress(chunksCache));
   }
}
/**
 * Creates the low-level directory implementation backing a Lucene index
 * stored in Infinispan caches.
 * <p>
 * All caches are narrowed to {@code AdvancedCache} with
 * {@code SKIP_INDEXING} so that writes to index data are never themselves
 * indexed.
 *
 * @param metadataCache cache holding per-file metadata
 * @param chunksCache cache holding the file content chunks
 * @param distLocksCache cache used for distributed lock bookkeeping
 * @param indexName the name identifying this index
 * @param chunkSize maximum size of each stored chunk; must be positive
 * @param readLocker coordinates deferred deletion of files still being read
 * @param fileListUpdatedAsync whether file-list updates may be applied asynchronously
 * @param affinitySegmentId segment id used for key affinity
 * @throws IllegalArgumentException if {@code chunkSize} is not positive
 */
@SuppressWarnings("unchecked") // caller supplies wildcard-typed caches; keys/values are controlled by this class
public DirectoryImplementor(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName, int chunkSize, SegmentReadLocker readLocker, boolean fileListUpdatedAsync, int affinitySegmentId) {
   this.affinitySegmentId = affinitySegmentId;
   if (chunkSize <= 0)
      throw new IllegalArgumentException("chunkSize must be a positive integer");
   this.metadataCache = (AdvancedCache<FileCacheKey, FileMetadata>) metadataCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.chunksCache = (AdvancedCache<ChunkCacheKey, Object>) chunksCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.distLocksCache = (AdvancedCache<Object, Integer>) distLocksCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.indexName = indexName;
   this.chunkSize = chunkSize;
   this.fileOps = new FileListOperations(this.metadataCache, indexName, fileListUpdatedAsync, this.affinitySegmentId);
   this.segmentsGenFileKey = new FileCacheKey(indexName, IndexFileNames.SEGMENTS, this.affinitySegmentId);
   this.readLocks = readLocker;
}
/**
 * Renames a file by copying all of its chunks under the new name, then
 * publishing metadata for the new name and finally dropping the old name.
 * The ordering matters: new chunks and metadata must be visible before the
 * old entries disappear, so concurrent readers never observe a gap.
 *
 * @param from the existing file name
 * @param to the new file name
 */
void renameFile(final String from, final String to) {
   final FileCacheKey fromKey = new FileCacheKey(indexName, from, affinitySegmentId);
   // NOTE(review): assumes 'from' exists — a missing entry would make
   // metadata null and fail on getBufferSize(); confirm callers guarantee it.
   final FileMetadata metadata = metadataCache.get(fromKey);
   final int bufferSize = metadata.getBufferSize();

   // preparation: copy all chunks to new keys, stopping at the first
   // missing chunk index (chunks are numbered contiguously from 0)
   int i = -1;
   Object ob;
   do {
      final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize, affinitySegmentId);
      ob = chunksCache.get(fromChunkKey);
      if (ob == null) {
         break;
      }
      final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize, affinitySegmentId);
      // IGNORE_RETURN_VALUES: the previous value is not needed, avoiding extra work on the put
      chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob);
   } while (true);

   // rename metadata first
   metadataCache.put(new FileCacheKey(indexName, to, affinitySegmentId), metadata);
   fileOps.removeAndAdd(from, to);

   // now trigger deletion of old file chunks:
   readLocks.deleteOrReleaseReadLock(from);
   if (trace) {
      log.tracef("Renamed file from: %s to: %s in index %s from %s", from, to, indexName, getAddress(metadataCache));
   }
}
/**
 * Flushes any pending buffers, publishes the updated file metadata and
 * registers the file name in the index listing.
 * <p>
 * Flush order depends on how far the output progressed: if writing never
 * advanced past the first chunk, only the current buffer is stored
 * (possibly resized); otherwise the retained first-chunk buffer is stored
 * at position 0 before the current (last) buffer.
 */
public void close() {
   if (currentChunkNumber==0) {
      //store current chunk, possibly resizing it
      storeCurrentBuffer(true);
   } else {
      //no need to resize first chunk, just store it:
      storeBufferAsChunk(this.firstChunkBuffer, 0);
      storeCurrentBuffer(true);
   }
   // release buffer references; this output must not be written to again
   buffer = null;
   firstChunkBuffer = null;
   // override existing file header with updated accesstime
   metadataCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(fileKey, file);
   // make the file visible in the directory listing only after its
   // metadata and chunks are stored
   fileOps.addFileName(this.fileKey.getFileName());
   if (trace) {
      log.tracef("Closed IndexOutput for %s", fileKey);
   }
}
/**
 * Lists the names of all files making up this index.
 *
 * @return an array with the current file names
 */
String[] list() {
   // delegate to the file-listing operations helper
   final String[] names = fileOps.listFilenames();
   return names;
}
/**
 * Optimized implementation performing a remove and an add against the file
 * listing under a single write lock, with at most one cache update.
 *
 * @param toRemove the file name to drop from the listing
 * @param toAdd the file name to register in the listing
 */
public void removeAndAdd(final String toRemove, final String toAdd) {
   writeLock.lock();
   try {
      final FileListCacheValue listing = getFileList();
      // only push the listing back to the cache when something changed
      if (listing.addAndRemove(toAdd, toRemove)) {
         updateFileList(listing);
         if (trace) {
            log.trace("Updated file listing: added " + toAdd + " and removed " + toRemove);
         }
      }
   } finally {
      writeLock.unlock();
   }
}
/**
 * @return an array containing all names of existing "files"
 */
public String[] listFilenames() {
   readLock.lock();
   try {
      // snapshot the listing into an array while holding the read lock
      final FileListCacheValue listing = getFileList();
      return listing.toArray();
   } finally {
      readLock.unlock();
   }
}
long fileLength(final String name) { final FileMetadata fileMetadata = fileOps.getFileMetadata(name); if (fileMetadata == null) { return 0L; //as in FSDirectory (RAMDirectory throws an exception instead) } else { return fileMetadata.getSize(); } }
/**
 * {@inheritDoc}
 */
@Override
public void deleteFile(String name) {
   // fail fast if this directory was already closed
   ensureOpen();
   // unlist the name, then let the read-lock machinery delete
   // (or defer deletion of) the backing data
   fileOps.deleteFileName(name);
   readLocks.deleteOrReleaseReadLock(name);
   if (log.isDebugEnabled()) {
      log.debugf("Removed file: %s from index: %s", name, indexName);
   }
}
/**
 * Creates the low-level directory implementation backing a Lucene index
 * stored in Infinispan caches.
 * <p>
 * All caches are narrowed to {@code AdvancedCache} with
 * {@code SKIP_INDEXING} so that writes to index data are never themselves
 * indexed.
 *
 * @param metadataCache cache holding per-file metadata
 * @param chunksCache cache holding the file content chunks
 * @param distLocksCache cache used for distributed lock bookkeeping
 * @param indexName the name identifying this index
 * @param chunkSize maximum size of each stored chunk; must be positive
 * @param readLocker coordinates deferred deletion of files still being read
 * @param fileListUpdatedAsync whether file-list updates may be applied asynchronously
 * @param affinitySegmentId segment id used for key affinity
 * @throws IllegalArgumentException if {@code chunkSize} is not positive
 */
@SuppressWarnings("unchecked") // caller supplies wildcard-typed caches; keys/values are controlled by this class
public DirectoryImplementor(Cache<?, ?> metadataCache, Cache<?, ?> chunksCache, Cache<?, ?> distLocksCache, String indexName, int chunkSize, SegmentReadLocker readLocker, boolean fileListUpdatedAsync, int affinitySegmentId) {
   this.affinitySegmentId = affinitySegmentId;
   if (chunkSize <= 0)
      throw new IllegalArgumentException("chunkSize must be a positive integer");
   this.metadataCache = (AdvancedCache<FileCacheKey, FileMetadata>) metadataCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.chunksCache = (AdvancedCache<ChunkCacheKey, Object>) chunksCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.distLocksCache = (AdvancedCache<Object, Integer>) distLocksCache.getAdvancedCache().withFlags(Flag.SKIP_INDEXING);
   this.indexName = indexName;
   this.chunkSize = chunkSize;
   this.fileOps = new FileListOperations(this.metadataCache, indexName, fileListUpdatedAsync, this.affinitySegmentId);
   this.segmentsGenFileKey = new FileCacheKey(indexName, IndexFileNames.SEGMENTS, this.affinitySegmentId);
   this.readLocks = readLocker;
}