/**
 * Creates an IndexInput over the file identified by the given context,
 * capturing the chunk cache, the file key, the configured chunk size,
 * the total file length and the read-lock handler.
 */
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   chunksCache = ctx.chunksCache;
   fileKey = ctx.fileKey;
   filename = ctx.fileKey.getFileName();
   chunkSize = ctx.fileMetadata.getBufferSize();
   fileLength = ctx.fileMetadata.getSize();
   readLocks = ctx.readLocks;
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
IndexInputContext openInput(final String name) throws IOException { final FileCacheKey fileKey = new FileCacheKey(indexName, name); final FileMetadata fileMetadata = metadataCache.get(fileKey); if (fileMetadata == null) { throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } else if (fileMetadata.getSize() <= fileMetadata.getBufferSize()) { //files smaller than chunkSize don't need a readLock return new IndexInputContext(chunksCache, fileKey, fileMetadata, null); } else { boolean locked = readLocks.acquireReadLock(name); if (!locked) { // safest reaction is to tell this file doesn't exist anymore. throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } return new IndexInputContext(chunksCache, fileKey, fileMetadata, readLocks); } }
/**
 * Opens an IndexInput over a file small enough to fit in a single chunk:
 * the whole content (chunk 0) is fetched eagerly into {@code buffer}.
 * A missing chunk is treated as an empty file.
 */
public SingleChunkIndexInput(final IndexInputContext iic) {
   super(iic.fileKey.getFileName());
   final ChunkCacheKey key = new ChunkCacheKey(iic.fileKey.getIndexName(), iic.fileKey.getFileName(), 0, iic.fileMetadata.getBufferSize(), iic.affinitySegmentId);
   final byte[] data = (byte[]) iic.chunksCache.get(key);
   buffer = (data == null) ? Util.EMPTY_BYTE_ARRAY : data;
   bufferPosition = 0;
}
/**
 * Opens an IndexInput over a file small enough to fit in a single chunk:
 * the whole content (chunk 0) is fetched eagerly into {@code buffer}.
 * A missing chunk is treated as an empty file.
 */
public SingleChunkIndexInput(final IndexInputContext iic) {
   super(iic.fileKey.getFileName());
   final ChunkCacheKey key = new ChunkCacheKey(iic.fileKey.getIndexName(), iic.fileKey.getFileName(), 0, iic.fileMetadata.getBufferSize());
   final byte[] data = (byte[]) iic.chunksCache.get(key);
   buffer = (data == null) ? new byte[0] : data;
   bufferPosition = 0;
}
/**
 * Opens an IndexInput over a file small enough to fit in a single chunk:
 * chunk 0 is loaded eagerly into {@code buffer}, a missing chunk is
 * treated as an empty file.
 */
public SingleChunkIndexInput(final IndexInputContext iic) {
   super(iic.fileKey.getFileName());
   final ChunkCacheKey firstChunkKey = new ChunkCacheKey(iic.fileKey.getIndexName(), iic.fileKey.getFileName(), 0, iic.fileMetadata.getBufferSize(), iic.affinitySegmentId);
   final byte[] content = (byte[]) iic.chunksCache.get(firstChunkKey);
   buffer = (content == null) ? new byte[0] : content;
   bufferPosition = 0;
}
/**
 * Creates an IndexInput over the file identified by the given context,
 * capturing the chunk cache, file key, chunk size, total file length,
 * read-lock handler and affinity segment id.
 */
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   this.fileKey = ctx.fileKey;
   this.filename = ctx.fileKey.getFileName();
   this.chunksCache = ctx.chunksCache;
   this.readLocks = ctx.readLocks;
   this.affinitySegmentId = ctx.affinitySegmentId;
   this.chunkSize = ctx.fileMetadata.getBufferSize();
   this.fileLength = ctx.fileMetadata.getSize();
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
public InfinispanIndexInput(final IndexInputContext ctx) { super(ctx.fileKey.getFileName()); this.chunksCache = ctx.chunksCache; this.fileKey = ctx.fileKey; this.chunkSize = ctx.fileMetadata.getBufferSize(); this.fileLength = ctx.fileMetadata.getSize(); this.readLocks = ctx.readLocks; this.affinitySegmentId = ctx.affinitySegmentId; this.filename = fileKey.getFileName(); if (trace) { log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName()); } }
/** * {@inheritDoc} */ @Override public IndexInput openInput(String name) throws IOException { final FileCacheKey fileKey = new FileCacheKey(indexName, name); FileMetadata fileMetadata = metadataCache.get(fileKey); if (fileMetadata == null) { throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } else if (fileMetadata.getSize() <= fileMetadata.getBufferSize()) { //files smaller than chunkSize don't need a readLock IndexInputContext iic = new IndexInputContext(chunksCache, fileKey, fileMetadata, null); return new SingleChunkIndexInput(iic); } else { boolean locked = readLocks.acquireReadLock(name); if (!locked) { // safest reaction is to tell this file doesn't exist anymore. throw new FileNotFoundException("Error loading metadata for index file: " + fileKey); } IndexInputContext iic = new IndexInputContext(chunksCache, fileKey, fileMetadata, readLocks); return new InfinispanIndexInputV3(iic); } }
/**
 * Loading a file through the adaptor must produce FileMetadata whose size
 * matches the source and whose buffer size is the auto-chunking value.
 */
public void testAutoChunkingOnLargeFiles() throws IOException {
   final Directory directory = createMockDirectory();
   final FileCacheKey cacheKey = new FileCacheKey(INDEX_NAME, FILE_NAME, segmentId);
   final DirectoryLoaderAdaptor adaptor = new DirectoryLoaderAdaptor(directory, INDEX_NAME, AUTO_BUFFER, -1);
   final Object loaded = adaptor.load(cacheKey);
   AssertJUnit.assertTrue(loaded instanceof FileMetadata);
   final FileMetadata metadata = (FileMetadata) loaded;
   AssertJUnit.assertEquals(TEST_SIZE, metadata.getSize());
   AssertJUnit.assertEquals(AUTO_BUFFER, metadata.getBufferSize());
}
void renameFile(final String from, final String to) { final FileCacheKey fromKey = new FileCacheKey(indexName, from, affinitySegmentId); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize, affinitySegmentId); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize, affinitySegmentId); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to, affinitySegmentId), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (trace) { log.tracef("Renamed file from: %s to: %s in index %s from %s", from, to, indexName, getAddress(metadataCache)); } }
void renameFile(final String from, final String to) { final FileCacheKey fromKey = new FileCacheKey(indexName, from); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (log.isTraceEnabled()) { log.tracef("Renamed file from: %s to: %s in index %s", from, to, indexName); } }
void renameFile(final String from, final String to) { final FileCacheKey fromKey = new FileCacheKey(indexName, from, affinitySegmentId); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { final ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize, affinitySegmentId); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } final ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize, affinitySegmentId); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to, affinitySegmentId), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (trace) { log.tracef("Renamed file from: %s to: %s in index %s from %s", from, to, indexName, getAddress(metadataCache)); } }
public void renameFile(String from, String to) { ensureOpen(); final FileCacheKey fromKey = new FileCacheKey(indexName, from); final FileMetadata metadata = metadataCache.get(fromKey); final int bufferSize = metadata.getBufferSize(); // preparation: copy all chunks to new keys int i = -1; Object ob; do { ChunkCacheKey fromChunkKey = new ChunkCacheKey(indexName, from, ++i, bufferSize); ob = chunksCache.get(fromChunkKey); if (ob == null) { break; } ChunkCacheKey toChunkKey = new ChunkCacheKey(indexName, to, i, bufferSize); chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES).put(toChunkKey, ob); } while (true); // rename metadata first metadataCache.put(new FileCacheKey(indexName, to), metadata); fileOps.removeAndAdd(from, to); // now trigger deletion of old file chunks: readLocks.deleteOrReleaseReadLock(from); if (log.isTraceEnabled()) { log.tracef("Renamed file from: %s to: %s in index %s", from, to, indexName); } }
/** * For a given FileCacheKey return the total size of all chunks related to the file. * * @param fileCacheKey * the key to the file to inspect * @param cache * the cache storing the chunks * @return the total size adding all found chunks up */ public static long deepCountFileSize(FileCacheKey fileCacheKey, Cache cache, int affinitySegmentId) { String indexName = fileCacheKey.getIndexName(); String fileName = fileCacheKey.getFileName(); long accumulator = 0; FileMetadata metadata = (FileMetadata) cache.get(fileCacheKey); int bufferSize = metadata.getBufferSize(); for (int i = 0;; i++) { ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, fileName, i, bufferSize, affinitySegmentId); byte[] buffer = (byte[]) cache.get(chunkKey); if (buffer == null) { AssertJUnit.assertFalse(cache.containsKey(chunkKey)); return accumulator; } else { assert buffer.length > 0; //check we don't store useless data accumulator += buffer.length; } } }
// Remove the file's metadata entry; the returned value tells how many chunks to delete.
final FileMetadata file = (FileMetadata) metadataCache.remove(key); if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
   final int bufferSize = file.getBufferSize();
   // Delete every chunk belonging to the file, keyed by index and buffer size.
   for (int i = 0; i < file.getNumberOfChunks(); i++) { ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, filename, i, bufferSize);
// Remove the file's metadata entry; the returned value tells how many chunks to delete.
final FileMetadata file = (FileMetadata) metadataCache.remove(key); if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
   final int bufferSize = file.getBufferSize();
   // Skip return values on removal: the old chunk content is not needed.
   AdvancedCache<?, ?> chunksCacheNoReturn = chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES);
   for (int i = 0; i < file.getNumberOfChunks(); i++) {
/**
 * Verifies the file exists and has a specified value for readLock;
 * consider that null should be interpreted as value 1.
 */
public static void assertFileExistsHavingRLCount(Cache cache, String fileName, String indexName, int expectedReadcount, int chunkSize, boolean expectRegisteredInFat, int affinitySegmentId) {
   // the file list ("FAT") must exist and agree on whether the file is registered
   final FileListCacheValue fileList = (FileListCacheValue) cache.get(new FileListCacheKey(indexName, affinitySegmentId));
   assertNotNull(fileList);
   assertTrue(fileList.contains(fileName) == expectRegisteredInFat);
   final FileMetadata metadata = (FileMetadata) cache.get(new FileCacheKey(indexName, fileName, affinitySegmentId));
   assertNotNull(metadata);
   final long totalFileSize = metadata.getSize();
   // every full chunk implied by the file size must be present in the cache
   final int fullChunks = (int) (totalFileSize / chunkSize);
   for (int chunk = 0; chunk < fullChunks; chunk++) {
      assertNotNull(cache.get(new ChunkCacheKey(indexName, fileName, chunk, metadata.getBufferSize(), affinitySegmentId)));
   }
   final FileReadLockKey readLockKey = new FileReadLockKey(indexName, fileName, affinitySegmentId);
   final Object value = cache.get(readLockKey);
   if (expectedReadcount <= 1) {
      // an absent read-lock entry counts as the default value of 1
      assertTrue("readlock value is " + value, value == null || Integer.valueOf(1).equals(value));
   }
   else {
      assertNotNull(value);
      assertTrue(value instanceof Integer);
      final int actualCount = (Integer) value;
      assertEquals(actualCount, expectedReadcount);
   }
}
// Remove the file's metadata entry; the returned value tells how many chunks to delete.
final FileMetadata file = (FileMetadata) metadataCache.remove(key); if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
   final int bufferSize = file.getBufferSize();
   // Skip return values on removal: the old chunk content is not needed.
   AdvancedCache<?, ?> chunksCacheNoReturn = chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES);
   for (int i = 0; i < file.getNumberOfChunks(); i++) {