@Test
public void testFileMetaData() {
   FileMetadata data1 = new FileMetadata(1024);
   FileMetadata data2 = new FileMetadata(2048);
   FileMetadata data3 = new FileMetadata(1024);
   FileMetadata data4 = data1;

   assert !data1.equals(new FileCacheKey("testIndex", "testFile", -1));
   AssertJUnit.assertNotNull(data1);
   assert data1.equals(data4);
   assert data1.equals(data3);

   data3.setSize(2048);
   assert !data1.equals(data3);
   assert !data1.equals(data2);

   AssertJUnit.assertEquals("FileMetadata{ size=" + data1.getSize() + '}', data1.toString());
}
public InfinispanIndexInput(final IndexInputContext ctx) {
   super(ctx.fileKey.getFileName());
   this.chunksCache = ctx.chunksCache;
   this.fileKey = ctx.fileKey;
   this.chunkSize = ctx.fileMetadata.getBufferSize();
   this.fileLength = ctx.fileMetadata.getSize();
   this.readLocks = ctx.readLocks;
   this.filename = fileKey.getFileName();
   if (trace) {
      log.tracef("Opened new IndexInput for file:%s in index: %s", filename, fileKey.getIndexName());
   }
}
private boolean isWritingOnLastChunk() {
   final int lastChunkNumber = file.getNumberOfChunks() - 1;
   return currentChunkNumber >= lastChunkNumber;
}
/**
 * Load implementation for FileCacheKey: must return the metadata of the
 * requested file.
 */
private FileMetadata loadIntern(final FileCacheKey key) throws IOException {
   final String fileName = key.getFileName();
   final long fileLength = directory.fileLength(fileName);
   // We're forcing the buffer size of a to-be-read segment to the full file size:
   final int bufferSize = (int) Math.min(fileLength, (long) autoChunkSize);
   final FileMetadata meta = new FileMetadata(bufferSize);
   meta.setSize(fileLength);
   return meta;
}
/**
 * Load implementation for FileCacheKey: must return the metadata of the
 * requested file.
 */
private FileMetadata loadIntern(final FileCacheKey key) throws IOException {
   final String fileName = key.getFileName();
   final long fileModified = directory.fileModified(fileName);
   final long fileLength = directory.fileLength(fileName);
   // We're forcing the buffer size of a to-be-read segment to the full file size:
   final int bufferSize = (int) Math.min(fileLength, (long) autoChunkSize);
   final FileMetadata meta = new FileMetadata(bufferSize);
   meta.setLastModified(fileModified);
   meta.setSize(fileLength);
   return meta;
}
@Override
public long length() {
   return file.getSize();
}
@Test
public void roundingTest() {
   FileMetadata m = new FileMetadata(10);
   AssertJUnit.assertEquals(0, m.getNumberOfChunks());
   m.setSize(10);
   AssertJUnit.assertEquals(1, m.getNumberOfChunks());
   m.setSize(11);
   AssertJUnit.assertEquals(2, m.getNumberOfChunks());

   m = new FileMetadata(11);
   m.setSize(11);
   AssertJUnit.assertEquals(1, m.getNumberOfChunks());
   m.setSize(22);
   AssertJUnit.assertEquals(2, m.getNumberOfChunks());
   m.setSize(31);

   m = new FileMetadata(10);
   m.setSize(31);
   AssertJUnit.assertEquals(4, m.getNumberOfChunks());
}
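// The test above exercises the chunk-count rounding: a file of `size` bytes split into
// chunks of `bufferSize` bytes needs ceil(size / bufferSize) chunks. A minimal sketch of
// that calculation (an illustration only, not necessarily FileMetadata's actual code):
static int expectedNumberOfChunks(long size, int bufferSize) {
   // integer division, rounded up when the size is not an exact multiple of the chunk size
   return (int) (size / bufferSize) + (size % bufferSize == 0 ? 0 : 1);
}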
final FileMetadata file = (FileMetadata) metadataCache.remove(key);
if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
   final int bufferSize = file.getBufferSize();
   AdvancedCache<?, ?> chunksCacheNoReturn = chunksCache.withFlags(Flag.IGNORE_RETURN_VALUES);
   for (int i = 0; i < file.getNumberOfChunks(); i++) {
      ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, fileName, i, bufferSize, affinitySegmentId);
      if (trace) log.tracef("deleting chunk: %s", chunkKey);

if (file != null && file.isMultiChunked()) {
   FileReadLockKey readLockKey = new FileReadLockKey(indexName, fileName, affinitySegmentId);
   if (trace) log.tracef("deleting readlock: %s", readLockKey);
public SingleChunkIndexInput(final IndexInputContext iic) {
   super(iic.fileKey.getFileName());
   ChunkCacheKey key = new ChunkCacheKey(iic.fileKey.getIndexName(), iic.fileKey.getFileName(), 0, iic.fileMetadata.getBufferSize(), iic.affinitySegmentId);
   byte[] b = (byte[]) iic.chunksCache.get(key);
   if (b == null) {
      buffer = new byte[0];
   }
   else {
      buffer = b;
   }
   bufferPosition = 0;
}
@Override
public FileMetadata readObject(ObjectInput input) throws IOException {
   long size = UnsignedNumeric.readUnsignedLong(input);
   int bufferSize = UnsignedNumeric.readUnsignedInt(input);
   return new FileMetadata(size, bufferSize);
}
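// For symmetry with readObject above, a plausible writeObject counterpart is sketched below.
// It assumes FileMetadata exposes getSize() and getBufferSize() (both used elsewhere in these
// snippets); the exact externalizer shipped with the codebase may differ.
@Override
public void writeObject(ObjectOutput output, FileMetadata metadata) throws IOException {
   UnsignedNumeric.writeUnsignedLong(output, metadata.getSize());
   UnsignedNumeric.writeUnsignedInt(output, metadata.getBufferSize());
}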
final FileMetadata file = (FileMetadata) metadataCache.remove(key);
if (file != null) { //during optimization of index a same file could be deleted twice, so you could see a null here
   final int bufferSize = file.getBufferSize();
   for (int i = 0; i < file.getNumberOfChunks(); i++) {
      ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, filename, i, bufferSize);
      if (trace) log.tracef("deleting chunk: %s", chunkKey);
/**
 * Evaluates if the file is potentially being stored fragmented into multiple chunks;
 * when it's a single chunk we don't need to apply readlocks.
 * @param filename the name of the file in the index
 * @return true if it is definitely fragmented, or if it's possibly fragmented.
 */
private boolean isMultiChunked(final String filename) {
   final FileCacheKey fileCacheKey = new FileCacheKey(indexName, filename, affinitySegmentId);
   final FileMetadata fileMetadata = metadataCache.get(fileCacheKey);
   if (fileMetadata == null) {
      //This might happen under high load when the metadata is being written
      //using putAsync; in such a case we return true as it's the safest option.
      //Skipping the readlocks is just a performance optimisation, and this
      //condition is extremely rare.
      return true;
   }
   else {
      return fileMetadata.isMultiChunked();
   }
}
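// On the FileMetadata side, isMultiChunked() presumably just compares the file size to the
// chunk buffer size; a hedged sketch (an assumption, not necessarily the shipped implementation):
public boolean isMultiChunked() {
   return size > bufferSize; // a file larger than one chunk's buffer spans multiple chunks
}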
long fileLength(final String name) {
   final FileMetadata fileMetadata = fileOps.getFileMetadata(name);
   if (fileMetadata == null) {
      return 0L; //as in FSDirectory (RAMDirectory throws an exception instead)
   }
   else {
      return fileMetadata.getSize();
   }
}
public SingleChunkIndexInput(final IndexInputContext iic) {
   super(iic.fileKey.getFileName());
   ChunkCacheKey key = new ChunkCacheKey(iic.fileKey.getIndexName(), iic.fileKey.getFileName(), 0, iic.fileMetadata.getBufferSize(), iic.affinitySegmentId);
   byte[] b = (byte[]) iic.chunksCache.get(key);
   if (b == null) {
      buffer = Util.EMPTY_BYTE_ARRAY;
   }
   else {
      buffer = b;
   }
   bufferPosition = 0;
}