/** * @param tag Tag to initialize RW lock. * @return Relative pointer of the allocated page. * @throws GridOffHeapOutOfMemoryException If failed to allocate. */ private long allocateFreePage(int tag) throws GridOffHeapOutOfMemoryException { long limit = region.address() + region.size(); while (true) { long lastIdx = GridUnsafe.getLongVolatile(null, lastAllocatedIdxPtr); // Check if we have enough space to allocate a page. if (pagesBase + (lastIdx + 1) * sysPageSize > limit) return INVALID_REL_PTR; if (GridUnsafe.compareAndSwapLong(null, lastAllocatedIdxPtr, lastIdx, lastIdx + 1)) { long absPtr = pagesBase + lastIdx * sysPageSize; assert lastIdx <= PageIdUtils.MAX_PAGE_NUM : lastIdx; long pageIdx = fromSegmentIndex(idx, lastIdx); assert pageIdx != INVALID_REL_PTR; writePageId(absPtr, pageIdx); GridUnsafe.putLong(absPtr, PAGE_MARKER); rwLock.init(absPtr + LOCK_OFFSET, tag); allocatedPages.incrementAndGet(); memMetrics.updateTotalAllocatedPages(1L); return pageIdx; } } }
/** * @param pageId Page ID. * @return Relative pointer of the allocated page. * @throws GridOffHeapOutOfMemoryException If failed to allocate new free page. */ private long allocateFreePage(long pageId) throws GridOffHeapOutOfMemoryException { long limit = region.address() + region.size(); while (true) { long lastIdx = GridUnsafe.getLong(lastAllocatedIdxPtr); // Check if we have enough space to allocate a page. if (pagesBase + (lastIdx + 1) * sysPageSize > limit) return INVALID_REL_PTR; if (GridUnsafe.compareAndSwapLong(null, lastAllocatedIdxPtr, lastIdx, lastIdx + 1)) { long absPtr = pagesBase + lastIdx * sysPageSize; assert (lastIdx & SEGMENT_INDEX_MASK) == 0L; long relative = relative(lastIdx); assert relative != INVALID_REL_PTR; PageHeader.initNew(absPtr, relative); rwLock.init(absPtr + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); return relative; } } }
// Initialize the page read-write lock, using the tag extracted from the page ID.
rwLock.init(absPtr + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId));
/** * @param tag Tag to initialize RW lock. * @return Relative pointer of the allocated page. * @throws GridOffHeapOutOfMemoryException If failed to allocate. */ private long allocateFreePage(int tag) throws GridOffHeapOutOfMemoryException { long limit = region.address() + region.size(); while (true) { long lastIdx = GridUnsafe.getLongVolatile(null, lastAllocatedIdxPtr); // Check if we have enough space to allocate a page. if (pagesBase + (lastIdx + 1) * sysPageSize > limit) return INVALID_REL_PTR; if (GridUnsafe.compareAndSwapLong(null, lastAllocatedIdxPtr, lastIdx, lastIdx + 1)) { long absPtr = pagesBase + lastIdx * sysPageSize; assert lastIdx <= PageIdUtils.MAX_PAGE_NUM : lastIdx; long pageIdx = fromSegmentIndex(idx, lastIdx); assert pageIdx != INVALID_REL_PTR; writePageId(absPtr, pageIdx); GridUnsafe.putLong(absPtr, PAGE_MARKER); rwLock.init(absPtr + LOCK_OFFSET, tag); allocatedPages.incrementAndGet(); memMetrics.updateTotalAllocatedPages(1L); return pageIdx; } } }
/** * @param pageId Page ID. * @return Relative pointer of the allocated page. * @throws GridOffHeapOutOfMemoryException If failed to allocate new free page. */ private long allocateFreePage(long pageId) throws GridOffHeapOutOfMemoryException { long limit = region.address() + region.size(); while (true) { long lastIdx = GridUnsafe.getLong(lastAllocatedIdxPtr); // Check if we have enough space to allocate a page. if (pagesBase + (lastIdx + 1) * sysPageSize > limit) return INVALID_REL_PTR; if (GridUnsafe.compareAndSwapLong(null, lastAllocatedIdxPtr, lastIdx, lastIdx + 1)) { long absPtr = pagesBase + lastIdx * sysPageSize; assert (lastIdx & SEGMENT_INDEX_MASK) == 0L; long relative = relative(lastIdx); assert relative != INVALID_REL_PTR; PageHeader.initNew(absPtr, relative); rwLock.init(absPtr + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); return relative; } } }
// Initialize the page read-write lock, using the tag extracted from the page ID.
rwLock.init(absPtr + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId));