/**
 * Open a database, creating it if necessary.
 */
static _LocalDatabase open(DatabaseConfig config) throws IOException {
    // Work against a private copy so later caller-side mutations of the
    // config cannot affect initialization.
    DatabaseConfig copy = config.clone();
    _LocalDatabase database = new _LocalDatabase(copy, OPEN_REGULAR);
    database.finishInit(copy);
    return database;
}
@Override public Runnable deleteIndex(Index index) throws IOException { // Design note: This is a Database method instead of an Index method because it offers // an extra degree of safety. See notes in renameIndex. return accessTree(index).drop(false); }
/**
 * Returns a new or recycled _Node instance, latched exclusively and marked
 * dirty. Caller must hold commit lock.
 *
 * @return exclusively latched node, already registered in the node map
 * @throws IOException if a dirty node cannot be allocated
 */
_Node allocDirtyFragmentNode() throws IOException {
    _Node node = allocDirtyNode();
    // Publish the node into the id-to-node map so it can be found by id.
    nodeMapPut(node);
    // NOTE(review): the /*P*/ markers below appear to be build-time
    // preprocessor directives (alternate page-format code paths) — do not
    // reformat or remove them; confirm against the project's build tooling.
    /*P*/ // [
    // node.type(TYPE_FRAGMENT);
    /*P*/ // ]
    return node;
}
/**
 * Removes the identified node from the node map, loading it from storage
 * when it is not currently cached.
 *
 * @param nodeId must not be zero
 * @return non-null _Node with exclusive latch held
 * @throws IOException if the node must be read and the read fails
 */
private _Node removeInode(long nodeId) throws IOException {
    _Node node = nodeMapGetAndRemove(nodeId);
    if (node == null) {
        // Not in the map: allocate a latched, unevictable node and fill it
        // by reading the page contents from storage.
        node = allocLatchedNode(nodeId, _NodeContext.MODE_UNEVICTABLE);
        // NOTE(review): /*P*/ markers look like build-time preprocessor
        // directives — preserve them verbatim; confirm with build tooling.
        /*P*/ // [
        // node.type(TYPE_FRAGMENT);
        /*P*/ // ]
        readNode(node, nodeId);
    }
    return node;
}
int voffset = remainder; while (true) { _Node node = allocDirtyFragmentNode(); try { encodeInt48LE(newValue, poffset, node.mId); close(e); } else { try { deleteFragment(decodeUnsignedInt48LE(newValue, poffset)); close(e); int voffset = 0; while (true) { _Node node = allocDirtyFragmentNode(); try { encodeInt48LE(newValue, poffset, node.mId); p_clear(page, remainder, pageSize(page)); break; close(e); } else { try { deleteFragment(decodeUnsignedInt48LE(newValue, poffset));
long page = inode.mPage; level--; long levelCap = db.levelCap(level); childNode = db.allocDirtyFragmentNode(); if (partial) { childNode = db.nodeMapLoadFragmentExclusive(childNodeId, partial); try { if (!db.markFragmentDirty(childNode)) { db.close(e); throw e; } finally {
final _Tree tree = accessTree(index); idKey = newKey(KEY_TYPE_INDEX_ID, tree.mIdBytes); trashIdKey = newKey(KEY_TYPE_TRASH_ID, tree.mIdBytes); oldNameKey = newKey(KEY_TYPE_INDEX_NAME, oldName); newNameKey = newKey(KEY_TYPE_INDEX_NAME, newName); txn = newNoRedoTransaction(redoTxnId); try { txn.lockExclusive(mRegistryKeyMap.mId, idKey); if (redoTxnId == 0 && (redo = txnRedoWriter()) != null) { long commitPos;
long levelCap = levelCap(level); int childNodeCount = childNodeCount(vlength, levelCap); _Node childNode = allocDirtyFragmentNode(); p_int48PutLE(page, poffset, childNode.mId); poffset += 6; p_copyFromArray(value, voffset, childPage, 0, len); p_clear(childPage, len, pageSize(childPage)); childNode.releaseExclusive(); } else { try { writeMultilevelFragments(level, childNode, value, voffset, len); } finally { childNode.releaseExclusive(); p_clear(page, poffset, pageSize(page));
_Node node = nodeMapGet(nodeId); node = allocLatchedNode(nodeId); node.mId = nodeId; _Node existing = nodeMapPutIfAbsent(node); if (existing == null) { break; readNode(node, nodeId); nodeMapRemove(node); node.mId = 0; node.releaseExclusive();
off += 6; len -= 6; deleteFragment(nodeId); _Node inode = removeInode(inodeId); int levels = calculateInodeLevels(vLen); deleteMultilevelFragments(levels, inode, vLen);
final long vLen = _LocalDatabase.decodeFullFragmentedValueLength(header, page, loc); _Node inode = mDatabase.nodeMapLoadFragment(inodeId); int level = mDatabase.calculateInodeLevels(vLen); long levelCap = mDatabase.levelCap(level); long childNodeId = p_uint48GetLE(inode.mPage, ((int) (pos / levelCap)) * 6); inode.releaseShared(); return 0; inode = mDatabase.nodeMapLoadFragment(childNodeId); pos %= levelCap;
_Node node = nodeMapLoadFragment(nodeId); pagesRead++; try { long page = node.mPage; pLen = Math.min((int) vLen, pageSize(page)); if (value != null) { p_copyToArray(page, 0, value, vOff, pLen); _Node inode = nodeMapLoadFragment(inodeId); pagesRead++; int levels = calculateInodeLevels(vLen); pagesRead += readMultilevelFragments(levels, inode, value, vOff, vLen);
final long fLen = _LocalDatabase.decodeFullFragmentedValueLength(fHeader, page, loc); byte[] fullValue = db.reconstruct(page, fHeaderLoc, vLen); int max = db.mMaxFragmentedEntrySize - (vHeaderLoc - kHeaderLoc); newValue = db.fragment(fullValue, fullValue.length, max, 0); } catch (Throwable e) { node.releaseExclusive(); rightNode = db.allocDirtyFragmentNode(); p_clear(rightNode.mPage, fInlineLen, pageSize); shrinkage = 2 + fInlineLen - 6; db.deleteNode(rightNode, true); db.close(e); _Node inode; try { inode = db.allocDirtyFragmentNode(); } catch (Throwable e) { node.releaseExclusive();
final _Node fNode = mDatabase.nodeMapLoadFragment(fNodeId); p_copyToArray(fNode.mPage, fNodeOff, b, bOff, amt); fNode.releaseShared(); } else { _LocalDatabase db = mDatabase; final _Node inode = db.nodeMapLoadFragment(inodeId); final int levels = db.calculateInodeLevels(vLen); readMultilevelFragments(pos, levels, inode, b, bOff, bLen); int levels = mDatabase.calculateInodeLevels(vLen - inlineLen); if (mDatabase.levelCap(levels) < vLen) { int newLevels = mDatabase.calculateInodeLevels(vLen); if (newLevels <= levels) { throw new AssertionError(); _Node upper = mDatabase.allocDirtyFragmentNode(); long upage = upper.mPage; p_int48PutLE(upage, 0, inode.mId); final _Node fNode = mDatabase.allocDirtyFragmentNode(); try { p_int48PutLE(page, loc, fNode.mId); db.nodeMapLoadFragmentExclusive(fNodeId, amt < pageSize(page)); try { if (db.markFragmentDirty(fNode)) {
/** * @param loc location of root inode in the page * @return dirtied root inode with exclusive latch held */ private static _Node prepareMultilevelWrite(_LocalDatabase db, long page, int loc) throws IOException { final _Node inode; final long inodeId = p_uint48GetLE(page, loc); if (inodeId == 0) { // Writing into a sparse value. Allocate a node and point to it. inode = db.allocDirtyFragmentNode(); p_clear(inode.mPage, 0, pageSize(db, inode.mPage)); } else { inode = db.nodeMapLoadFragmentExclusive(inodeId, true); try { if (!db.markFragmentDirty(inode)) { // Already dirty, so no need to update the pointer. return inode; } } catch (Throwable e) { inode.releaseExclusive(); throw e; } } p_int48PutLE(page, loc, inode.mId); return inode; }
_Node child = db.allocDirtyNode(); db.nodeMapPut(child); p_copy(newRootPage, 0, child.mPage, 0, db.pageSize()); } else { newRootPage = child.mPage;
newNode = db.allocDirtyNode(_NodeUsageList.MODE_UNEVICTABLE); } catch (DatabaseFullException e) { db.capacityLimitOverride(-1); try { newNode = db.allocDirtyNode(_NodeUsageList.MODE_UNEVICTABLE); } finally { db.capacityLimitOverride(0); db.nodeMapPut(newNode);
long dest = db.removeSparePage(); db.addSparePage(dest); dest = page; } else { db.addSparePage(page); mPage = dest;
applyCachePrimer(config); _Tree trashed = openNextTrashedTree(null); recoveryComplete(config.mReplRecoveryStartNanos); initialCheckpoint = true; if (txns != null) { new Thread(() -> { invokeRecoveryHandler(txns, mRedoWriter); }).start(); mRecoveredTransactions = null;
applyCachePrimer(config); _Tree trashed = openNextTrashedTree(null); throw e; recoveryComplete(config.mReplRecoveryStartNanos); initialCheckpoint = true;