/**
 * Creates a hints buffer backed by the supplied slab of memory.
 *
 * @param slab the pre-allocated byte buffer that hint entries will be written into
 */
private HintsBuffer(ByteBuffer slab)
{
    this.slab = slab;

    // the three bookkeeping structures are independent; initialise them fresh
    appendOrder = new OpOrder();
    offsets = new ConcurrentHashMap<>();
    position = new AtomicInteger();
}
/**
 * Schedules reclamation of the given memtable's memory: issues a read barrier
 * against {@code readOrdering}, then waits for that barrier (and discards the
 * memtable) on {@code reclaimExecutor} so the caller is never blocked.
 */
private void reclaim(final Memtable memtable)
{
    // issue a read barrier for reclaiming the memory, and offload the wait to another thread
    final OpOrder.Barrier readBarrier = readOrdering.newBarrier();
    readBarrier.issue();
    reclaimExecutor.execute(new WrappedRunnable()
    {
        public void runMayThrow() throws InterruptedException, ExecutionException
        {
            // block until every read operation started before the barrier has
            // finished; only then is it safe to release the memtable's memory
            readBarrier.await();
            memtable.setDiscarded();
        }
    });
}
} // closing brace of the enclosing class (carried in this chunk)
/**
 * Stops further allocation in this buffer by pushing allocatePosition past the
 * buffer's capacity, recording the previous position in endOfBuffer so sync()
 * knows where the valid data ends. Idempotent: only one caller's CAS can win.
 */
void discardUnusedTail()
{
    // We guard this with the OpOrdering instead of synchronised due to potential dead-lock with ACLSM.advanceAllocatingFrom()
    // Ensures endOfBuffer update is reflected in the buffer end position picked up by sync().
    // This actually isn't strictly necessary, as currently all calls to discardUnusedTail are executed either by the thread
    // running sync or within a mutation already protected by this OpOrdering, but to prevent future potential mistakes,
    // we duplicate the protection here so that the contract between discardUnusedTail() and sync() is more explicit.
    try (OpOrder.Group group = appendOrder.start())
    {
        while (true)
        {
            int prev = allocatePosition.get();
            int next = endOfBuffer + 1;
            if (prev >= next)
            {
                // Already stopped allocating, might also be closed.
                assert buffer == null || prev == buffer.capacity() + 1;
                return;
            }
            if (allocatePosition.compareAndSet(prev, next))
            {
                // Stopped allocating now. Can only succeed once, no further allocation or discardUnusedTail can succeed.
                endOfBuffer = prev;
                assert buffer != null && next == buffer.capacity() + 1;
                return;
            }
            // CAS lost to a concurrent allocation; re-read and retry
        }
    }
}
private static int estimateRowOverhead(final int count) { // calculate row overhead try (final OpOrder.Group group = new OpOrder().start()) { int rowOverhead; MemtableAllocator allocator = MEMORY_POOL.newAllocator(); ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>(); final Object val = new Object(); for (int i = 0 ; i < count ; i++) partitions.put(allocator.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER), group), val); double avgSize = ObjectSizes.measureDeep(partitions) / (double) count; rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize)); rowOverhead -= ObjectSizes.measureDeep(new LongToken(0)); rowOverhead += AtomicBTreePartition.EMPTY_SIZE; allocator.setDiscarding(); allocator.setDiscarded(); return rowOverhead; } }
/** * Wait for any appends or discardUnusedTail() operations started before this method was called */ void waitForModifications() { // issue a barrier and wait for it appendOrder.awaitNewBarrier(); }
private static int estimateRowOverhead(final int count) { // calculate row overhead final OpOrder.Group group = new OpOrder().start(); int rowOverhead; MemtableAllocator allocator = MEMORY_POOL.newAllocator(); ConcurrentNavigableMap<RowPosition, Object> rows = new ConcurrentSkipListMap<>(); final Object val = new Object(); for (int i = 0 ; i < count ; i++) rows.put(allocator.clone(new BufferDecoratedKey(new LongToken((long) i), ByteBufferUtil.EMPTY_BYTE_BUFFER), group), val); double avgSize = ObjectSizes.measureDeep(rows) / (double) count; rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize)); rowOverhead -= ObjectSizes.measureDeep(new LongToken((long) 0)); rowOverhead += AtomicBTreeColumns.EMPTY_SIZE; allocator.setDiscarding(); allocator.setDiscarded(); return rowOverhead; } }
/** * Wait for any appends started before this method was called. */ void waitForModifications() { appendOrder.awaitNewBarrier(); // issue a barrier and wait for it }
void discardUnusedTail() { // We guard this with the OpOrdering instead of synchronised due to potential dead-lock with ACLSM.advanceAllocatingFrom() // Ensures endOfBuffer update is reflected in the buffer end position picked up by sync(). // This actually isn't strictly necessary, as currently all calls to discardUnusedTail are executed either by the thread // running sync or within a mutation already protected by this OpOrdering, but to prevent future potential mistakes, // we duplicate the protection here so that the contract between discardUnusedTail() and sync() is more explicit. try (OpOrder.Group group = appendOrder.start()) { while (true) { int prev = allocatePosition.get(); int next = endOfBuffer + 1; if (prev >= next) { // Already stopped allocating, might also be closed. assert buffer == null || prev == buffer.capacity() + 1; return; } if (allocatePosition.compareAndSet(prev, next)) { // Stopped allocating now. Can only succeed once, no further allocation or discardUnusedTail can succeed. endOfBuffer = prev; assert buffer != null && next == buffer.capacity() + 1; return; } } } }
/**
 * Empirically estimates the per-partition bookkeeping overhead, in bytes, of a
 * memtable entry: inserts {@code count} minimal keys into a skip-list map,
 * deep-measures the whole structure, and averages.
 *
 * @param count number of sample partitions to allocate and measure
 * @return estimated overhead in bytes per partition entry
 */
private static int estimateRowOverhead(final int count)
{
    // calculate row overhead
    try (final OpOrder.Group group = new OpOrder().start())
    {
        int rowOverhead;
        MemtableAllocator allocator = MEMORY_POOL.newAllocator();
        ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>();
        final Object val = new Object();
        for (int i = 0 ; i < count ; i++)
            partitions.put(allocator.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER), group), val);
        double avgSize = ObjectSizes.measureDeep(partitions) / (double) count;
        // round up to a whole byte unless we're within 0.05 of the floor
        rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize));
        // exclude the token measured as part of each key; include the empty partition's fixed cost
        rowOverhead -= ObjectSizes.measureDeep(new LongToken(0));
        rowOverhead += AtomicBTreePartition.EMPTY_SIZE;
        // return the sampled memory to the pool
        allocator.setDiscarding();
        allocator.setDiscarded();
        return rowOverhead;
    }
}
/**
 * Issues a fresh barrier against this order and blocks until all operations
 * started before it have completed.
 */
public void awaitNewBarrier()
{
    Barrier b = newBarrier();
    b.issue();
    b.await();
}
/**
 * Wait for any appends started before this method was called.
 */
void waitForModifications()
{
    appendOrder.awaitNewBarrier(); // issue a barrier and wait for it
}
/**
 * Creates a hints buffer backed by the supplied slab of memory, with fresh
 * write-position, per-host offset, and append-ordering bookkeeping.
 *
 * @param slab the pre-allocated byte buffer that hint entries are written into
 */
private HintsBuffer(ByteBuffer slab)
{
    this.slab = slab;
    position = new AtomicInteger();
    offsets = new ConcurrentHashMap<>();
    appendOrder = new OpOrder();
}
/**
 * Halts allocation in this buffer by CASing allocatePosition past the buffer
 * capacity, storing the last valid position in endOfBuffer for sync().
 * Concurrent-safe and idempotent.
 */
void discardUnusedTail()
{
    // We guard this with the OpOrdering instead of synchronised due to potential dead-lock with ACLSM.advanceAllocatingFrom()
    // Ensures endOfBuffer update is reflected in the buffer end position picked up by sync().
    // This actually isn't strictly necessary, as currently all calls to discardUnusedTail are executed either by the thread
    // running sync or within a mutation already protected by this OpOrdering, but to prevent future potential mistakes,
    // we duplicate the protection here so that the contract between discardUnusedTail() and sync() is more explicit.
    try (OpOrder.Group group = appendOrder.start())
    {
        while (true)
        {
            int prev = allocatePosition.get();
            int next = endOfBuffer + 1;
            if (prev >= next)
            {
                // Already stopped allocating, might also be closed.
                assert buffer == null || prev == buffer.capacity() + 1;
                return;
            }
            if (allocatePosition.compareAndSet(prev, next))
            {
                // Stopped allocating now. Can only succeed once, no further allocation or discardUnusedTail can succeed.
                endOfBuffer = prev;
                assert buffer != null && next == buffer.capacity() + 1;
                return;
            }
            // lost the CAS to a concurrent allocation; retry
        }
    }
}
private static int estimateRowOverhead(final int count) { // calculate row overhead try (final OpOrder.Group group = new OpOrder().start()) { int rowOverhead; MemtableAllocator allocator = MEMORY_POOL.newAllocator(); ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>(); final Object val = new Object(); for (int i = 0 ; i < count ; i++) partitions.put(allocator.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER), group), val); double avgSize = ObjectSizes.measureDeep(partitions) / (double) count; rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize)); rowOverhead -= ObjectSizes.measureDeep(new LongToken(0)); rowOverhead += AtomicBTreePartition.EMPTY_SIZE; allocator.setDiscarding(); allocator.setDiscarded(); return rowOverhead; } }
/**
 * Issues a fresh barrier against this order and blocks until every operation
 * started before the barrier has completed.
 */
public void awaitNewBarrier()
{
    Barrier barrier = newBarrier();
    barrier.issue();
    barrier.await();
}
/**
 * Wait for any appends or discardUnusedTail() operations started before this method was called
 */
void waitForModifications()
{
    // issue a barrier and wait for it
    appendOrder.awaitNewBarrier();
}
/**
 * Wraps the given memory slab as a hints buffer and initialises its
 * bookkeeping: the write position, the per-host offset map, and the
 * ordering used to track in-flight appends.
 *
 * @param slab pre-allocated backing storage for hint entries
 */
private HintsBuffer(ByteBuffer slab)
{
    this.slab = slab;
    this.position = new AtomicInteger();
    this.offsets = new ConcurrentHashMap<>();
    this.appendOrder = new OpOrder();
}
/**
 * Halts allocation in this buffer by CASing allocatePosition past the buffer's
 * capacity, recording the previous position in discardedTailFrom. Idempotent:
 * once allocatePosition equals capacity + 1, subsequent calls return at once.
 */
void discardUnusedTail()
{
    // we guard this with the OpOrdering instead of synchronised due to potential dead-lock with CLSM.advanceAllocatingFrom()
    // this actually isn't strictly necessary, as currently all calls to discardUnusedTail occur within a block
    // already protected by this OpOrdering, but to prevent future potential mistakes, we duplicate the protection here
    // so that the contract between discardUnusedTail() and sync() is more explicit.
    try (OpOrder.Group group = appendOrder.start())
    {
        while (true)
        {
            int prev = allocatePosition.get();
            // we set allocatePosition past buffer.capacity() to make sure we always set discardedTailFrom
            int next = buffer.capacity() + 1;
            if (prev == next)
                return;
            if (allocatePosition.compareAndSet(prev, next))
            {
                discardedTailFrom = prev;
                return;
            }
            // CAS lost to a concurrent allocation; re-read and retry
        }
    }
}
private static int estimateRowOverhead(final int count) { // calculate row overhead try (final OpOrder.Group group = new OpOrder().start()) { int rowOverhead; MemtableAllocator allocator = MEMORY_POOL.newAllocator(); ConcurrentNavigableMap<PartitionPosition, Object> partitions = new ConcurrentSkipListMap<>(); final Object val = new Object(); for (int i = 0 ; i < count ; i++) partitions.put(allocator.clone(new BufferDecoratedKey(new LongToken(i), ByteBufferUtil.EMPTY_BYTE_BUFFER), group), val); double avgSize = ObjectSizes.measureDeep(partitions) / (double) count; rowOverhead = (int) ((avgSize - Math.floor(avgSize)) < 0.05 ? Math.floor(avgSize) : Math.ceil(avgSize)); rowOverhead -= ObjectSizes.measureDeep(new LongToken(0)); rowOverhead += AtomicBTreePartition.EMPTY_SIZE; allocator.setDiscarding(); allocator.setDiscarded(); return rowOverhead; } }
/**
 * Convenience method: creates and issues a new barrier, then blocks the
 * calling thread until all work ordered before that barrier is finished.
 */
public void awaitNewBarrier()
{
    final Barrier issued = newBarrier();
    issued.issue();
    issued.await();
}