public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.put(repo, info, buf.getWriteBuffer());
}

public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.remove(repo, chunk, buf.getWriteBuffer());
}
public void add(ObjectIndexKey objId, ObjectInfo info, WriteBuffer buffer)
		throws DhtException {
	// During addition, the cache is not populated. This prevents a
	// race condition when the cache is cold. Readers need to scan
	// the database and ensure the oldest ObjectInfo is loaded into
	// the cache in order to allow PackChunk to break delta cycles.
	//
	// This does have a small performance penalty, as recently added
	// objects are often read not long after they were written. But
	// without good multi-system transaction support between the
	// cache and the underlying storage we cannot do better.
	//
	db.add(objId, info, ((CacheBuffer) buffer).getWriteBuffer());
}
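// Illustrative interleaving (an interpretation added here, not from the
// original source) of the race the method above avoids. If add() also
// populated the cache, a writer and a cold-cache reader could interleave:
//
//   writer: db.add(objId, infoB)          -- a second copy of the object
//   reader: scans db, sees only infoA     -- infoB not yet visible
//   writer: cache.put(objId, [infoB])     -- write-time population
//   reader: cache.put(objId, [infoA])     -- clobbers the writer's entry
//
// Either thread can overwrite the other's cache entry, so the cached
// list may lack the oldest ObjectInfo that PackChunk needs to break
// delta cycles. Leaving population to readers keeps the database scan
// the single source of truth.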
public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.remove(objId, chunk, buf.getWriteBuffer());

	// TODO This suffers from a race condition. The removal from the
	// cache can occur before the database update takes place, and a
	// concurrent reader might re-populate the cache with the stale data.
	//
	buf.remove(ns.key(objId));
}
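// A possible mitigation (a sketch, not the author's fix): defer the
// cache invalidation until the database write buffer flushes, as the
// cached-pack methods below already do:
//
//   buf.removeAfterFlush(ns.key(objId));
//
// This narrows rather than closes the window: a reader that loaded the
// stale row before the flush could still re-populate the cache after
// the deferred removal runs.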
public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.put(repo, info, buf.getWriteBuffer());
	buf.removeAfterFlush(nsCachedPack.key(repo));
}

public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.remove(repo, key, buf.getWriteBuffer());
	buf.removeAfterFlush(nsCachedPack.key(repo));
}
}
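// A minimal, standalone sketch (hypothetical names, not the JGit API) of
// the remove-after-flush pattern used above: invalidations are queued
// while writes accumulate in the buffer, and applied only once the
// backing store has flushed, so the cache is never purged ahead of a
// database row that still holds the old value.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class DeferredInvalidationBuffer {
	private final Map<String, byte[]> cache = new ConcurrentHashMap<>();
	private final List<String> deferredRemovals = new ArrayList<>();
	private final List<Runnable> pendingWrites = new ArrayList<>();

	void put(String key, byte[] value) {
		cache.put(key, value); // immediate: the new value is safe to serve
	}

	void remove(String key) {
		cache.remove(key); // immediate: can race with the buffered write
	}

	void removeAfterFlush(String key) {
		deferredRemovals.add(key); // applied only once flush() completes
	}

	void queueWrite(Runnable write) {
		pendingWrites.add(write);
	}

	void flush() {
		// Push all buffered writes to the backing store first.
		for (Runnable write : pendingWrites)
			write.run();
		pendingWrites.clear();
		// Only now drop the entries whose backing rows have changed.
		for (String key : deferredRemovals)
			cache.remove(key);
		deferredRemovals.clear();
	}
}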
public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	buf.remove(nsChunk.key(key));
	buf.remove(nsMeta.key(key));
	db.remove(key, buf.getWriteBuffer());
}
public void put(PackChunk.Members chunk, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.put(chunk, buf.getWriteBuffer());

	// Only store fragmented meta. This is all callers should ask for.
	if (chunk.hasMeta() && chunk.getMeta().getFragmentCount() != 0) {
		buf.put(nsMeta.key(chunk.getChunkKey()),
				chunk.getMeta().toByteArray());
	}

	if (chunk.hasChunkData())
		buf.put(nsChunk.key(chunk.getChunkKey()), encode(chunk));
	else
		buf.removeAfterFlush(nsChunk.key(chunk.getChunkKey()));
}
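// Reading of the branches above (an interpretation, not in the original):
// meta is cached only when getFragmentCount() != 0 because readers
// consult the cached meta solely to reassemble fragmented chunks. And
// when put() carries no chunk data, any previously cached encoding of
// this chunk may no longer match the updated row, so it is invalidated,
// but only after the database write flushes (the same removeAfterFlush
// pattern used for cached packs above).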