/**
 * Create a write buffer that writes through to both the underlying
 * database and the cache service.
 *
 * @return a {@link CacheBuffer} wrapping a fresh database write buffer.
 */
public CacheBuffer newWriteBuffer() {
	return new CacheBuffer(database.newWriteBuffer(), client, options);
}
}
/**
 * Schedule removal of an object's index entry from the database.
 * <p>
 * Note the cache is deliberately NOT touched here; see the TODO below.
 *
 * @param objId
 *            the object whose index entry is being removed.
 * @param chunk
 *            chunk the entry pointed to.
 * @param buffer
 *            buffer to enqueue the removal into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void remove(ObjectIndexKey objId, ChunkKey chunk, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.remove(objId, chunk, buf.getWriteBuffer());

	// TODO This suffers from a race condition. The removal from the
	// cache can occur before the database update takes place, and a
	// concurrent reader might re-populate the cache with the stale data.
	//
	// buf.remove(ns.key(objId));
}
/**
 * Schedule storing (or replacing) a key in the cache.
 *
 * @param key
 *            key to store.
 * @param value
 *            new value to store.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(CacheKey key, byte[] value) throws DhtException {
	CacheService.Change store = CacheService.Change.put(key, value);
	modify(store);
}
/**
 * Store cached pack information, invalidating the cached pack list.
 *
 * @param repo
 *            repository owning the cached pack.
 * @param info
 *            description of the cached pack.
 * @param buffer
 *            buffer to enqueue the write into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer cache = (CacheBuffer) buffer;
	db.put(repo, info, cache.getWriteBuffer());
	// Drop the cached pack list only after the database write lands,
	// so readers re-fetch a list that includes this pack.
	cache.removeAfterFlush(nsCachedPack.key(repo));
}
/**
 * Schedule any cache change.
 *
 * @param op
 *            the cache operation.
 * @throws DhtException
 *             a prior flush failed.
 */
public void modify(CacheService.Change op) throws DhtException {
	byte[] data = op.getData();
	int size = op.getKey().getBytes().length
			+ (data != null ? data.length : 0);

	// If the buffer cannot absorb this operation, ship it directly.
	if (!add(size)) {
		client.modify(singleton(op), wrap(none, size));
		return;
	}

	if (pending == null)
		pending = newList();
	pending.add(op);
	queued(size);
}
public void put(PackChunk.Members chunk, WriteBuffer buffer) throws DhtException { CacheBuffer buf = (CacheBuffer) buffer; db.put(chunk, buf.getWriteBuffer()); // Only store fragmented meta. This is all callers should ask for. if (chunk.hasMeta() && chunk.getMeta().getFragmentCount() != 0) { buf.put(nsMeta.key(chunk.getChunkKey()), chunk.getMeta().toByteArray()); } if (chunk.hasChunkData()) buf.put(nsChunk.key(chunk.getChunkKey()), encode(chunk)); else buf.removeAfterFlush(nsChunk.key(chunk.getChunkKey())); }
/**
 * Store chunk information for a repository; the write goes straight
 * through to the database (chunk info is not cached).
 *
 * @param repo
 *            repository owning the chunk.
 * @param info
 *            chunk description to store.
 * @param buffer
 *            buffer to enqueue the write into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer cache = (CacheBuffer) buffer;
	db.put(repo, info, cache.getWriteBuffer());
}
/**
 * Flush the buffered cache operations to the cache service.
 * <p>
 * {@code pending} is cleared only after {@code client.modify} has been
 * invoked; if it throws, the queued operations remain referenced.
 *
 * @param bytes
 *            number of bytes being flushed, released via the callback.
 * @throws DhtException
 *             the cache service rejected the operations.
 */
@Override
protected void startQueuedOperations(int bytes) throws DhtException {
	client.modify(pending, wrap(none, bytes));
	pending = null;
}
/**
 * Schedule a removal only after the underlying database flushes.
 * <p>
 * Unlike {@link #remove(CacheKey)}, these removals are buffered until the
 * application calls {@link #flush()} and aren't sent to the cache service
 * until after the underlying database flush() operation is completed
 * successfully.
 *
 * @param key
 *            key to remove.
 */
public void removeAfterFlush(CacheKey key) {
	CacheService.Change removal = CacheService.Change.remove(key);
	if (afterFlush == null)
		afterFlush = newList();
	afterFlush.add(removal);
}
/**
 * Schedule any cache change.
 *
 * @param op
 *            the cache operation.
 * @throws DhtException
 *             a prior flush failed.
 */
public void modify(CacheService.Change op) throws DhtException {
	// Account for both the key and (when present) the value payload.
	int cost = op.getKey().getBytes().length;
	byte[] payload = op.getData();
	if (payload != null)
		cost += payload.length;

	if (add(cost)) {
		// Buffered: queue locally until the next flush.
		if (pending == null)
			pending = newList();
		pending.add(op);
		queued(cost);
	} else {
		// Too large to buffer: send immediately as a single change.
		client.modify(singleton(op), wrap(none, cost));
	}
}
public void put(PackChunk.Members chunk, WriteBuffer buffer) throws DhtException { CacheBuffer buf = (CacheBuffer) buffer; db.put(chunk, buf.getWriteBuffer()); // Only store fragmented meta. This is all callers should ask for. if (chunk.hasMeta() && chunk.getMeta().getFragmentCount() != 0) { buf.put(nsMeta.key(chunk.getChunkKey()), chunk.getMeta().toByteArray()); } if (chunk.hasChunkData()) buf.put(nsChunk.key(chunk.getChunkKey()), encode(chunk)); else buf.removeAfterFlush(nsChunk.key(chunk.getChunkKey())); }
/**
 * Record a cached pack for a repository.
 * <p>
 * The repository's cached pack list in the cache service is invalidated
 * after the database flush, forcing readers to reload it.
 *
 * @param repo
 *            repository owning the cached pack.
 * @param info
 *            description of the cached pack.
 * @param buffer
 *            buffer to enqueue the write into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(RepositoryKey repo, CachedPackInfo info, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	db.put(repo, info, buf.getWriteBuffer());
	buf.removeAfterFlush(nsCachedPack.key(repo));
}
/**
 * Pass chunk information straight through to the database.
 *
 * @param repo
 *            repository owning the chunk.
 * @param info
 *            chunk description to store.
 * @param buffer
 *            buffer to enqueue the write into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(RepositoryKey repo, ChunkInfo info, WriteBuffer buffer)
		throws DhtException {
	db.put(repo, info, ((CacheBuffer) buffer).getWriteBuffer());
}
/**
 * Send all buffered cache changes to the cache service.
 * <p>
 * The order here is deliberate: {@code pending} is nulled only after
 * {@code client.modify} returns, so a failed send leaves the list intact.
 *
 * @param bytes
 *            total size of the queued operations; released by the
 *            wrapped callback when the service acknowledges them.
 * @throws DhtException
 *             the cache service rejected the operations.
 */
@Override
protected void startQueuedOperations(int bytes) throws DhtException {
	client.modify(pending, wrap(none, bytes));
	pending = null;
}
/**
 * Schedule a removal only after the underlying database flushes.
 * <p>
 * Unlike {@link #remove(CacheKey)}, these removals are buffered until the
 * application calls {@link #flush()} and aren't sent to the cache service
 * until after the underlying database flush() operation is completed
 * successfully.
 *
 * @param key
 *            key to remove.
 */
public void removeAfterFlush(CacheKey key) {
	if (afterFlush == null) {
		// Lazily created; most buffers never queue deferred removals.
		afterFlush = newList();
	}
	afterFlush.add(CacheService.Change.remove(key));
}
/**
 * Schedule removal of a chunk from the database and the cache.
 *
 * @param key
 *            chunk to remove.
 * @param buffer
 *            buffer to enqueue the removals into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void remove(ChunkKey key, WriteBuffer buffer) throws DhtException {
	CacheBuffer buf = (CacheBuffer) buffer;
	// NOTE(review): the cache entries are dropped before the database
	// removal flushes. This looks like the same race flagged in the
	// object-index remove TODO: a concurrent reader could repopulate
	// the cache with stale data in the window between these calls —
	// confirm whether removeAfterFlush() should be used here instead.
	buf.remove(nsChunk.key(key));
	buf.remove(nsMeta.key(key));
	db.remove(key, buf.getWriteBuffer());
}
/**
 * Schedule removal of a cached pack, invalidating the cached pack list.
 *
 * @param repo
 *            repository owning the cached pack.
 * @param key
 *            cached pack to remove.
 * @param buffer
 *            buffer to enqueue the removal into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void remove(RepositoryKey repo, CachedPackKey key, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer cache = (CacheBuffer) buffer;
	db.remove(repo, key, cache.getWriteBuffer());
	// Invalidate the repository's cached pack list once the database
	// removal has flushed, so readers reload an up-to-date list.
	cache.removeAfterFlush(nsCachedPack.key(repo));
}
}
/**
 * Schedule storing (or replacing) a key in the cache.
 *
 * @param key
 *            key to store.
 * @param value
 *            new value to store.
 * @throws DhtException
 *             a prior flush failed.
 */
public void put(CacheKey key, byte[] value) throws DhtException {
	// A put is just a modify carrying the new value.
	modify(CacheService.Change.put(key, value));
}
/**
 * Schedule removal of a chunk's association with a repository; passed
 * straight through to the database (no cache entry exists for this).
 *
 * @param repo
 *            repository owning the chunk.
 * @param chunk
 *            chunk to disassociate.
 * @param buffer
 *            buffer to enqueue the removal into; must be a
 *            {@link CacheBuffer} created by this database.
 * @throws DhtException
 *             a prior flush failed.
 */
public void remove(RepositoryKey repo, ChunkKey chunk, WriteBuffer buffer)
		throws DhtException {
	CacheBuffer cache = (CacheBuffer) buffer;
	db.remove(repo, chunk, cache.getWriteBuffer());
}
/**
 * Create a write buffer that writes through to both the underlying
 * database and the cache service.
 *
 * @return a {@link CacheBuffer} wrapping a fresh database write buffer.
 */
public CacheBuffer newWriteBuffer() {
	WriteBuffer dbBuffer = database.newWriteBuffer();
	return new CacheBuffer(dbBuffer, client, options);
}
}