/**
 * Guess in how many chunks we should split this file. Should return the same value consistently
 * for the same file (segments are immutable) so that a full segment can be rebuilt from the upper
 * layers without anyone actually specifying the chunks numbers.
 */
private int figureChunksNumber(String fileName) throws IOException {
   // Delegate to the three-argument overload, sizing chunks by the configured auto chunk size.
   return figureChunksNumber(fileName, directory.fileLength(fileName), autoChunkSize);
}
/**
 * Loads all "entries" from the CacheLoader; considering this is actually a Lucene index,
 * that's going to transform segments in entries in a specific order, simplest entries first.
 *
 * @param entriesCollector loaded entries are collected in this set
 * @param maxEntries to limit amount of entries loaded
 * @param marshaller used to build the {@link MarshalledEntry} instances
 */
protected <K, V> void loadAllEntries(final Set<MarshalledEntry<K, V>> entriesCollector, final int maxEntries, StreamingMarshaller marshaller) {
   // Respect the cap even when the caller pre-populated the collector.
   int existingElements = entriesCollector.size();
   int toLoadElements = maxEntries - existingElements;
   if (toLoadElements <= 0) {
      return;
   }
   HashSet<IndexScopedKey> keysCollector = new HashSet<>();
   // Type-safe empty set instead of the raw Collections.EMPTY_SET constant (avoids unchecked warning).
   loadSomeKeys(keysCollector, Collections.<IndexScopedKey>emptySet(), toLoadElements);
   for (IndexScopedKey key : keysCollector) {
      Object value = load(key);
      if (value != null) {
         // The key/value types aren't statically K/V here, so the raw construction is
         // deliberate; the suppression is scoped to this single declaration.
         @SuppressWarnings("unchecked")
         MarshalledEntry<K, V> cacheEntry = new MarshalledEntryImpl(key, value, null, marshaller);
         entriesCollector.add(cacheEntry);
      }
   }
}
/**
 * Closes every Lucene directory adaptor that was opened by this loader.
 */
@Override
public void stop() {
   for (DirectoryLoaderAdaptor adaptor : openDirectories.values()) {
      adaptor.close();
   }
}
/**
 * Verifies chunked loading: full-size buffers for all but the final chunk,
 * and a remainder-sized buffer for the last one.
 */
public void testSmallChunkLoading() throws IOException {
   final Directory dir = createMockDirectory();
   final DirectoryLoaderAdaptor adaptor = new DirectoryLoaderAdaptor(dir, INDEX_NAME, AUTO_BUFFER, -1);
   // First chunk: a full buffer is expected.
   Object chunk = adaptor.load(new ChunkCacheKey(INDEX_NAME, FILE_NAME, 0, AUTO_BUFFER, segmentId));
   AssertJUnit.assertTrue(chunk instanceof byte[]);
   AssertJUnit.assertEquals(AUTO_BUFFER, ((byte[]) chunk).length);
   // A middle chunk: also a full buffer.
   chunk = adaptor.load(new ChunkCacheKey(INDEX_NAME, FILE_NAME, 5, AUTO_BUFFER, segmentId));
   AssertJUnit.assertTrue(chunk instanceof byte[]);
   AssertJUnit.assertEquals(AUTO_BUFFER, ((byte[]) chunk).length);
   // The last chunk holds only the remainder of the file.
   final int lastChunk = (int) (TEST_SIZE / AUTO_BUFFER);
   final long lastChunkSize = TEST_SIZE % AUTO_BUFFER;
   AssertJUnit.assertEquals(9, lastChunkSize);
   chunk = adaptor.load(new ChunkCacheKey(INDEX_NAME, FILE_NAME, lastChunk, AUTO_BUFFER, segmentId));
   AssertJUnit.assertTrue(chunk instanceof byte[]);
   AssertJUnit.assertEquals(lastChunkSize, ((byte[]) chunk).length);
}
/**
 * Delegates containment checks for file metadata keys to the enclosing adaptor.
 */
@Override
public Boolean visit(final FileCacheKey key) throws IOException {
   return containsKeyIntern(key);
}
/**
 * Loads the value for an index-scoped key from the matching Lucene directory.
 * Keys of any other type are ignored (logged and mapped to {@code null}).
 */
@Override
public MarshalledEntry load(final Object key) {
   if (!(key instanceof IndexScopedKey)) {
      log.cacheLoaderIgnoringKey(key);
      return null;
   }
   final IndexScopedKey indexKey = (IndexScopedKey) key;
   final Object value = getDirectory(indexKey).load(indexKey);
   return value == null ? null : ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, null);
}
/**
 * Looks up the Directory adapter if it's already known, or attempts to initialize indexes.
 */
private DirectoryLoaderAdaptor getDirectory(final String indexName) {
   // Optimistic unsynchronized read first. NOTE(review): this double-checked pattern is
   // only safe if openDirectories is a concurrent map — confirm the field's declaration.
   DirectoryLoaderAdaptor adapter = openDirectories.get(indexName);
   if (adapter == null) {
      synchronized (openDirectories) {
         // Re-check under the lock so only one thread opens each index.
         adapter = openDirectories.get(indexName);
         if (adapter == null) {
            // Each index lives in its own sub-directory under the configured root.
            final File path = new File(this.rootDirectory, indexName);
            final FSDirectory directory = openLuceneDirectory(path);
            adapter = new DirectoryLoaderAdaptor(directory, indexName, autoChunkSize, affinitySegmentId);
            openDirectories.put(indexName, adapter);
         }
      }
   }
   return adapter;
}
/**
 * Asks the matching Lucene directory whether it holds the given key.
 * Non index-scoped keys are logged and reported as absent.
 */
@Override
public boolean contains(final Object key) {
   if (!(key instanceof IndexScopedKey)) {
      log.cacheLoaderIgnoringKey(key);
      return false;
   }
   final IndexScopedKey indexKey = (IndexScopedKey) key;
   return getDirectory(indexKey).containsKey(indexKey);
}
@Override
public Void call() throws Exception {
   try {
      // Drain the whole directory into memory first, then feed each entry to the task.
      final HashSet<MarshalledEntry> allInternalEntries = new HashSet<>();
      dir.loadAllEntries(allInternalEntries, Integer.MAX_VALUE, ctx.getMarshaller());
      for (MarshalledEntry me : allInternalEntries) {
         // Honour cooperative cancellation requested through the task context.
         if (taskContext.isStopped()) break;
         if (filter == null || filter.accept(me.getKey())) {
            task.processEntry(me, taskContext);
         }
      }
      return null;
   } catch (Exception e) {
      // Log before rethrowing so parallel-store failures show up in the server log.
      log.errorExecutingParallelStoreTask(e);
      throw e;
   }
} });
/**
 * Loads every key from the index into the collector, skipping the excluded set.
 *
 * @param keysCollector the Set where to add loaded keys to
 * @param keysToExclude Could be null!
 * @throws org.infinispan.persistence.spi.PersistenceException
 */
protected void loadAllKeys(final HashSet<IndexScopedKey> keysCollector, final Set<IndexScopedKey> keysToExclude) {
   // Integer.MAX_VALUE effectively means "no cap": load everything.
   loadSomeKeys(keysCollector, keysToExclude, Integer.MAX_VALUE);
}
/**
 * Verifies that loading a FileCacheKey yields the file's metadata (size and
 * auto-chosen buffer size), not the file's bytes.
 */
public void testAutoChunkingOnLargeFiles() throws IOException {
   final Directory dir = createMockDirectory();
   final FileCacheKey metadataKey = new FileCacheKey(INDEX_NAME, FILE_NAME, segmentId);
   final DirectoryLoaderAdaptor adaptor = new DirectoryLoaderAdaptor(dir, INDEX_NAME, AUTO_BUFFER, -1);
   final Object loaded = adaptor.load(metadataKey);
   AssertJUnit.assertTrue(loaded instanceof FileMetadata);
   final FileMetadata metadata = (FileMetadata) loaded;
   AssertJUnit.assertEquals(TEST_SIZE, metadata.getSize());
   AssertJUnit.assertEquals(AUTO_BUFFER, metadata.getBufferSize());
}
/**
 * Delegates containment checks for chunk keys to the enclosing adaptor.
 */
@Override
public Boolean visit(final ChunkCacheKey key) throws IOException {
   return containsKeyIntern(key);
}
/**
 * Resolves the directory adaptor for an index-scoped key and loads its value;
 * any other key type is logged and treated as a miss.
 */
@Override
public MarshalledEntry load(final Object key) {
   if (!(key instanceof IndexScopedKey)) {
      log.cacheLoaderIgnoringKey(key);
      return null;
   }
   final IndexScopedKey indexKey = (IndexScopedKey) key;
   final Object value = getDirectory(indexKey).load(indexKey);
   if (value == null) {
      return null;
   }
   return ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, null);
}
/**
 * Looks up the Directory adapter if it's already known, or attempts to initialize indexes.
 */
private DirectoryLoaderAdaptor getDirectory(final String indexName) {
   // Fast path: unsynchronized read. NOTE(review): safe only if openDirectories is a
   // concurrent map — the field declaration is outside this view, confirm it.
   DirectoryLoaderAdaptor adapter = openDirectories.get(indexName);
   if (adapter == null) {
      synchronized (openDirectories) {
         // Double-checked: re-read under the lock so each index is opened at most once.
         adapter = openDirectories.get(indexName);
         if (adapter == null) {
            // Indexes are laid out as sub-directories of the configured root directory.
            final File path = new File(this.rootDirectory, indexName);
            final FSDirectory directory = openLuceneDirectory(path);
            adapter = new DirectoryLoaderAdaptor(directory, indexName, autoChunkSize, affinitySegmentId);
            openDirectories.put(indexName, adapter);
         }
      }
   }
   return adapter;
}
/**
 * Reports whether the Lucene directory backing the key's index contains it.
 * Keys that are not index-scoped are logged and reported absent.
 */
@Override
public boolean contains(final Object key) {
   if (key instanceof IndexScopedKey) {
      final IndexScopedKey indexKey = (IndexScopedKey) key;
      final DirectoryLoaderAdaptor adaptor = getDirectory(indexKey);
      return adaptor.containsKey(indexKey);
   }
   log.cacheLoaderIgnoringKey(key);
   return false;
}
@Override
public Void call() throws Exception {
   try {
      // Load every entry up-front, then push each one through the processing task.
      final HashSet<MarshalledEntry> allInternalEntries = new HashSet<MarshalledEntry>();
      dir.loadAllEntries(allInternalEntries, Integer.MAX_VALUE, ctx.getMarshaller());
      for (MarshalledEntry me : allInternalEntries) {
         // Honour cooperative cancellation requested through the task context.
         if (taskContext.isStopped()) break;
         if (filter == null || filter.shouldLoadKey(me.getKey())) {
            task.processEntry(me, taskContext);
         }
      }
      return null;
   } catch (Exception e) {
      // Log before rethrowing so parallel-store failures show up in the server log.
      log.errorExecutingParallelStoreTask(e);
      throw e;
   }
} });
/**
 * Guess in how many chunks we should split this file. Should return the same value consistently
 * for the same file (segments are immutable) so that a full segment can be rebuilt from the upper
 * layers without anyone actually specifying the chunks numbers.
 */
private int figureChunksNumber(String fileName) throws IOException {
   final long length = directory.fileLength(fileName);
   // Hand off to the overload that applies the configured auto chunk size.
   return figureChunksNumber(fileName, length, autoChunkSize);
}
/**
 * Loads all "entries" from the CacheLoader; considering this is actually a Lucene index,
 * that's going to transform segments in entries in a specific order, simplest entries first.
 *
 * @param entriesCollector loaded entries are collected in this set
 * @param maxEntries to limit amount of entries loaded
 * @param marshaller used to build the {@link MarshalledEntry} instances
 * @throws PersistenceException
 */
protected void loadAllEntries(final HashSet<MarshalledEntry> entriesCollector, final int maxEntries, StreamingMarshaller marshaller) {
   // Respect the cap even when the caller pre-populated the collector.
   int existingElements = entriesCollector.size();
   int toLoadElements = maxEntries - existingElements;
   if (toLoadElements <= 0) {
      return;
   }
   HashSet<IndexScopedKey> keysCollector = new HashSet<>();
   // Type-safe empty set instead of the raw Collections.EMPTY_SET constant (avoids unchecked warning).
   loadSomeKeys(keysCollector, Collections.<IndexScopedKey>emptySet(), toLoadElements);
   for (IndexScopedKey key : keysCollector) {
      Object value = load(key);
      if (value != null) {
         MarshalledEntry cacheEntry = new MarshalledEntryImpl(key, value, null, marshaller);
         entriesCollector.add(cacheEntry);
      }
   }
}
/**
 * Forwards chunk-key containment checks to the outer adaptor's internal lookup.
 */
@Override
public Boolean visit(final ChunkCacheKey chunkKey) throws IOException {
   return containsKeyIntern(chunkKey);
}
/**
 * Loads the value behind an index-scoped key via its directory adaptor and wraps it
 * in a MarshalledEntry; other key types are logged and yield {@code null}.
 */
@Override
public MarshalledEntry load(final Object key) {
   if (!(key instanceof IndexScopedKey)) {
      log.cacheLoaderIgnoringKey(key);
      return null;
   }
   final IndexScopedKey indexKey = (IndexScopedKey) key;
   final DirectoryLoaderAdaptor adaptor = getDirectory(indexKey);
   final Object value = adaptor.load(indexKey);
   return value != null ? ctx.getMarshalledEntryFactory().newMarshalledEntry(key, value, null) : null;
}