/**
 * Checks the lock ownership for the given key.
 *
 * <p><i>Note: This method is intended for advanced use cases.</i>
 *
 * @return {@link LockResult#UNOWNED UNOWNED}, {@link LockResult#OWNED_SHARED OWNED_SHARED},
 * {@link LockResult#OWNED_UPGRADABLE OWNED_UPGRADABLE}, or
 * {@link LockResult#OWNED_EXCLUSIVE OWNED_EXCLUSIVE}
 */
public final LockResult lockCheck(long indexId, byte[] key) {
    // Compute the key hash once and delegate the ownership check to the manager.
    int hash = hash(indexId, key);
    return manager().check(this, indexId, key, hash);
}
/** * Returns true if a shared lock can be granted for the given key. Caller must hold the * node latch which contains the key. * * @param locker optional locker */ final boolean isAvailable(LockOwner locker, long indexId, byte[] key, int hash) { // Note that no LockHT latch is acquired. The current thread is not required to // immediately observe the activity of other threads acting upon the same lock. If // another thread has just acquired an exclusive lock, it must still acquire the node // latch before any changes can be made. return getLockHT(hash).isAvailable(locker, indexId, key, hash); }
/**
 * Attempts to acquire a shared lock for the thread-local locker, using the default
 * timeout. Returns the locker on success; otherwise throws.
 *
 * @throws LockFailureException if the shared lock could not be acquired
 */
final Locker lockSharedLocal(long indexId, byte[] key, int hash)
    throws LockFailureException
{
    Locker locker = localLocker();
    LockResult result = getLockHT(hash)
        .tryLock(TYPE_SHARED, locker, indexId, key, hash, mDefaultTimeoutNanos);
    if (!result.isHeld()) {
        throw locker.failed(TYPE_SHARED, result, mDefaultTimeoutNanos);
    }
    return locker;
}
/**
 * Returns the hash code for the current key, computing and caching it on first use.
 * A cached value of zero means "not yet computed"; if the true hash happens to be
 * zero it is recomputed on each call, which is harmless redundant work.
 */
protected final int keyHash() {
    int h = mKeyHash;
    if (h == 0) {
        h = LockManager.hash(mTree.mId, mKey);
        mKeyHash = h;
    }
    return h;
}
/**
 * Acquire a shared lock, with infinite timeout, but don't push the lock into the owned
 * lock stack. Returns the lock which was acquired, or null if already owned.
 */
final Lock lockSharedNoPush(long indexId, byte[] key) throws LockFailureException {
    int hash = hash(indexId, key);
    LockManager.LockHT table = mManager.getLockHT(hash);

    Lock lock;
    LockResult result;
    // The hash table latch guards access to the Lock entry while the attempt is made.
    table.acquireExclusive();
    try {
        lock = table.lockAccess(indexId, key, hash);
        result = lock.tryLockShared(table, this, -1);
    } finally {
        table.releaseExclusive();
    }

    if (result.isHeld()) {
        // ACQUIRED means newly acquired; any other held result means already owned.
        if (result == LockResult.ACQUIRED) {
            return lock;
        }
        return null;
    }
    throw failed(TYPE_SHARED, result, -1);
}
// NOTE(review): this fragment looks truncated and garbled — the trailing
// "mLockManager.isAvailable(local, mId, key, keyHash = LockManager.hash(mId, key)))"
// expression is duplicated and the parentheses are unbalanced; the enclosing method is
// not visible here, so reconstruct against the original source before changing it.
// NOTE(review): local.lockMode() is dereferenced before the "local != null" check in
// the else branch — if local can be null this throws NPE first; confirm with callers.
int lockType = local.lockMode().repeatable; if (lockType != 0) { int hash = LockManager.hash(mId, key); local.lock(lockType, mId, key, hash, local.mLockTimeoutNanos); } else { if ((local != null && local.lockMode() != LockMode.READ_COMMITTED) || mLockManager.isAvailable (local, mId, key, keyHash = LockManager.hash(mId, key))) mLockManager.isAvailable(local, mId, key, keyHash = LockManager.hash(mId, key)))
/**
 * Returns true if a shared lock can be granted for the given key. Caller must hold the
 * node latch which contains the key.
 *
 * @param locker optional locker
 */
final boolean isLockAvailable(Locker locker, byte[] key, int hash) {
    // Delegate to the lock manager, scoped to this index's id.
    final long indexId = mId;
    return mLockManager.isAvailable(locker, indexId, key, hash);
}
/**
 * Acquires an exclusive lock on the given key for the thread-local locker, scoped to
 * this index. Delegates to the lock manager.
 *
 * @throws LockFailureException if the exclusive lock could not be acquired
 */
final Locker lockExclusiveLocal(byte[] key, int hash) throws LockFailureException {
    final long indexId = mId;
    return mLockManager.lockExclusiveLocal(indexId, key, hash);
}
/**
 * Acquires a shared lock on the given key for the thread-local locker, scoped to this
 * index. Delegates to the lock manager.
 *
 * @throws LockFailureException if the shared lock could not be acquired
 */
final Locker lockSharedLocal(byte[] key, int hash) throws LockFailureException {
    final long indexId = mId;
    return mLockManager.lockSharedLocal(indexId, key, hash);
}
/**
 * Builds a DeadlockSet with one OwnerInfo entry per lock in mLocks, preserving
 * iteration order.
 *
 * @param lockType type of lock requested; TYPE_SHARED, TYPE_UPGRADABLE, or TYPE_EXCLUSIVE
 * @param hash hash of lock key requested
 */
DeadlockSet newDeadlockSet(int lockType, int hash) {
    final LockManager manager = mOrigin.mManager;
    DeadlockSet.OwnerInfo[] infos = new DeadlockSet.OwnerInfo[mLocks.size()];

    int slot = 0;
    for (Lock lock : mLocks) {
        DeadlockSet.OwnerInfo info = new DeadlockSet.OwnerInfo();
        info.mIndexId = lock.mIndexId;

        // Resolve the index name when the index is still open.
        Index ix = manager.indexById(info.mIndexId);
        if (ix != null) {
            info.mIndexName = ix.getName();
        }

        // Defensively copy the key so the set does not alias the live lock's key.
        byte[] key = lock.mKey;
        info.mKey = (key == null) ? null : key.clone();

        info.mAttachment = lock.findOwnerAttachment(mOrigin, lockType, hash);
        infos[slot++] = info;
    }

    return new DeadlockSet(infos);
}
/**
 * Moves the cursor by the given amount of entries. A zero amount performs no movement
 * and simply reports the lock ownership of the current key, if any.
 */
@Override
public final LockResult skip(long amount) throws IOException {
    if (amount == 0) {
        // No movement: report ownership of the current key under the transaction.
        LocalTransaction txn = mTxn;
        if (txn == null || txn == Transaction.BOGUS) {
            return LockResult.UNOWNED;
        }
        byte[] key = mKey;
        if (key == null) {
            return LockResult.UNOWNED;
        }
        return txn.mManager.check(txn, mTree.mId, key, keyHash());
    }

    try {
        CursorFrame frame = leafSharedNotSplit();
        if (amount > 0) {
            // Skip all but the final step, then take the last step with next().
            if (amount > 1) {
                frame = skipNextGap(frame, amount - 1, null);
                if (frame == null) {
                    return LockResult.UNOWNED;
                }
            }
            return next(mTxn, frame);
        }
        // Negative amount: mirror of the positive case, moving backwards.
        if (amount < -1) {
            frame = skipPreviousGap(frame, -1 - amount, null);
            if (frame == null) {
                return LockResult.UNOWNED;
            }
        }
        return previous(mTxn, frame);
    } catch (Throwable e) {
        throw handleException(e, false);
    }
}
// Register the entry as a ghost with the lock manager, associating the frame with the
// lock for this key. NOTE(review): ghosted() is defined elsewhere — confirm its exact
// cleanup semantics in LockManager before relying on this comment.
tree.mLockManager.ghosted(tree.mId, key, keyHash, frame);
// Shut down the lock manager. NOTE(review): fragment of a larger teardown sequence not
// visible here — presumably part of database close; verify ordering against the
// surrounding shutdown code.
mLockManager.close();
/**
 * Replays a store operation against the given index, holding an exclusive lock on the
 * key for the duration of the store.
 *
 * <p>Latch/lock ordering is deliberate: the op latch is acquired, then the exclusive
 * key lock, and only then is another task thread allowed to run (nextTask). The op
 * latch is intentionally NOT released when an exception escapes — see the comment at
 * the release site below.
 *
 * @return false, to prevent RedoDecoder from looping back
 */
@Override
public boolean store(long indexId, byte[] key, byte[] value) throws IOException {
    Index ix = getIndex(indexId);

    // Allow side-effect free operations to be performed before acquiring latch.
    mOpLatch.acquireShared();

    // Locks must be acquired in their original order to avoid
    // deadlock, so don't allow another task thread to run yet.
    Locker locker = mDatabase.mLockManager.localLocker();
    locker.lockExclusive(indexId, key, INFINITE_TIMEOUT);

    // Allow another task thread to run while operation completes.
    nextTask();

    try {
        // Retry loop: the shared index reference may be concurrently closed by the
        // user, in which case it is re-opened and the store is attempted again.
        while (ix != null) {
            try {
                ix.store(Transaction.BOGUS, key, value);
                break;
            } catch (ClosedIndexException e) {
                // User closed the shared index reference, so re-open it.
                ix = openIndex(indexId, null);
            }
        }
    } finally {
        // Always release the key lock, even if the store failed.
        locker.scopeUnlockAll();
    }

    // Only release if no exception.
    mOpLatch.releaseShared();

    notifyStore(ix, key, value);

    // Return false to prevent RedoDecoder from looping back.
    return false;
}
/**
 * Returns the hash code for the current key, computing and caching it on first use.
 * A stored value of zero means "not yet computed"; if the true hash is zero it is
 * recomputed on every call, which is harmless redundant work.
 */
protected final int keyHash() {
    int hash = mKeyHash;
    if (hash == 0) {
        mKeyHash = hash = LockManager.hash(mTree.mId, mKey);
    }
    return hash;
}
/**
 * Attempts to acquire an exclusive lock for the thread-local locker, using the default
 * timeout. Returns the locker on success; otherwise throws.
 *
 * @throws LockFailureException if the exclusive lock could not be acquired
 */
final Locker lockExclusiveLocal(long indexId, byte[] key, int hash)
    throws LockFailureException
{
    Locker locker = localLocker();
    LockResult result = getLockHT(hash)
        .tryLock(TYPE_EXCLUSIVE, locker, indexId, key, hash, mDefaultTimeoutNanos);
    if (result.isHeld()) {
        return locker;
    }
    // Fixed: Locker.failed takes (type, result, timeout) — as called by the shared
    // variant and by the NoPush methods; the stray trailing hash argument is removed.
    throw locker.failed(TYPE_EXCLUSIVE, result, mDefaultTimeoutNanos);
}
/**
 * Acquire an upgradable lock, with infinite timeout, but don't push the lock into the
 * owned lock stack. Returns the lock which was acquired, or null if already owned.
 */
final Lock lockUpgradableNoPush(long indexId, byte[] key) throws LockFailureException {
    int hash = hash(indexId, key);
    LockManager.LockHT table = mManager.getLockHT(hash);

    Lock lock;
    LockResult result;
    // The hash table latch guards access to the Lock entry while the attempt is made.
    table.acquireExclusive();
    try {
        lock = table.lockAccess(indexId, key, hash);
        result = lock.tryLockUpgradable(table, this, -1);
    } finally {
        table.releaseExclusive();
    }

    if (!result.isHeld()) {
        throw failed(TYPE_UPGRADABLE, result, -1);
    }
    // ACQUIRED means newly acquired; any other held result means already owned.
    return result == LockResult.ACQUIRED ? lock : null;
}
// NOTE(review): this fragment looks truncated and garbled — the trailing
// "mLockManager.isAvailable(local, mId, key, keyHash = LockManager.hash(mId, key)))"
// expression is duplicated and the parentheses are unbalanced; the enclosing method is
// not visible here, so reconstruct against the original source before changing it.
// NOTE(review): local.lockMode() is dereferenced before the "local != null" check in
// the else branch — if local can be null this throws NPE first; confirm with callers.
int lockType = local.lockMode().repeatable; if (lockType != 0) { int hash = LockManager.hash(mId, key); local.lock(lockType, mId, key, hash, local.mLockTimeoutNanos); } else { if ((local != null && local.lockMode() != LockMode.READ_COMMITTED) || mLockManager.isAvailable (local, mId, key, keyHash = LockManager.hash(mId, key))) mLockManager.isAvailable(local, mId, key, keyHash = LockManager.hash(mId, key)))
/**
 * Returns true if a shared lock can be granted for the given key. Caller must hold the
 * node latch which contains the key.
 *
 * @param locker optional locker
 */
final boolean isLockAvailable(Locker locker, byte[] key, int hash) {
    // Delegate to the lock manager, scoped to this index's id.
    final long indexId = mId;
    return mLockManager.isAvailable(locker, indexId, key, hash);
}