/**
 * Hook invoked before a transactional read of {@code field} on {@code obj}.
 *
 * <p>Two strategies, selected by {@code vr} (presumably "visible reads enabled"
 * — TODO confirm against the class's configuration):
 * <ul>
 *   <li>Visible read: acquire the entry's lock in read mode so writers can see
 *       this reader, and register the acquisition in the write set so the lock
 *       can be dropped later (by us or by a transaction that kills us).</li>
 *   <li>Invisible read: only record the lock-table hash and the current lock
 *       word/timestamp; validation happens later in the matching onReadAccess.</li>
 * </ul>
 *
 * <p>May throw (via {@code LockTable.lock}/{@code checkLock} or
 * {@code KILLED_EXCEPTION}) to abort the transaction.
 */
@Override public void beforeReadAccess(Object obj, long field) {
    if (vr) {
        readHash = LockTable.hash(obj, field);
        // Lock entry in read mode (might throw an exception).
        readLock = LockTable.lock(this, readHash, id, false);
        // NOTE(review): readLock >= 0 appears to mean "newly acquired" (a
        // negative value seems to mean we already held it) — confirm against
        // LockTable.lock's contract.
        if (readLock >= 0) {
            synchronized (writeSet) {
                // Mutual exclusion on write set to allow other transaction to drop locks:
                // a killer holds this monitor while releasing our locks, so the
                // status check below is race-free with respect to being killed.
                if ((status.get() & STATUS_MASK) != TX_ACTIVE) {
                    // We have been killed: drop the lock we just acquired — it is
                    // not yet in the write set, so the killer could not release it.
                    LockTable.setAndReleaseLock(readHash, readLock);
                    // Abort.
                    throw KILLED_EXCEPTION;
                }
                // Add to write set (for being able to drop the lock later).
                writeSet.addRead(readHash, obj, field, readLock);
            }
        }
    } else {
        readHash = LockTable.hash(obj, field);
        // Invisible read: just sample the lock word; checkLock may throw if the
        // field is locked by a conflicting transaction.
        readLock = LockTable.checkLock(this, readHash, id);
    }
}
/**
 * Release one unit of the lock held on {@code ref} with the given
 * {@code qualifier} within {@code group}.
 *
 * @return 0 if no matching lock was found (or the group does not exist),
 *         1 if a lock unit was released.
 */
synchronized int unlockReference(LockTable lset, Lockable ref, Object qualifier, Object group) {
    // Look for locks matching our reference and qualifier.
    HashMap dl = (HashMap) groups.get(group);
    if (dl == null) return 0;
    // lset.unlockReference removes the Lock from dl when its count drops to
    // zero there; it returns the group's Lock entry, or null if none matched
    // — presumably, confirm against LockTable.unlockReference.
    Lock lockInGroup = lset.unlockReference(this, ref, qualifier, dl);
    if (lockInGroup == null) { return 0; }
    if (lockInGroup.getCount() == 1) {
        // Last unit of this lock was released; if that emptied the group,
        // dismantle the group as well.
        if (dl.isEmpty()) {
            groups.remove(group);
            // Recycle the now-empty map (saveGroup appears to pool it — TODO confirm).
            saveGroup(dl);
            // Removing the callback group re-arms the limit callback.
            if ((callbackGroup != null) && group.equals(callbackGroup)) { nextLimitCall = limit; }
        }
        return 1;
    }
    // The lock item will be left in the group: decrement its per-group count
    // and re-insert it (it keys on itself in the group map).
    lockInGroup.count--;
    dl.put(lockInGroup, lockInGroup);
    return 1;
}
@Override public void beforeReadAccess(Object obj, long field, int advice) { ReadFieldAccess next = readSet.getNext(); currentReadFieldAccess = next; next.init(obj, field, advice); // Check the read is still valid lastReadLock = LockTable.checkLock(next.hashCode(), localClock); }
/**
 * Hook invoked after the raw field read; decides whether the value just read
 * must be replaced by a value from this transaction's own write set, or
 * re-read to obtain a snapshot-consistent value.
 *
 * @return {@code true} if the caller must use {@code readValue} instead of the
 *         value it read directly, {@code false} if the direct read stands.
 * @throws KILLED_EXCEPTION / EXTEND_FAILURE_EXCEPTION (pre-built exceptions)
 *         to abort the transaction.
 */
private boolean onReadAccess(Object obj, long field, Type type) {
    if (vr) {
        // Visible read: we hold the lock (acquired in beforeReadAccess).
        if (readLock == LockTable.LOCKED_WRITE) {
            // We already own that lock in write mode: serve from the write set.
            WriteFieldAccess w = writeSet.get(readHash, obj, field);
            if (w == null) return false;
            readValue = w.getValue();
            return true;
        } else {
            // We already own that lock in read mode: direct read is safe.
            return false;
        }
    } else {
        // Invisible read: validate against the snapshot interval.
        if ((status.get() & STATUS_MASK) != TX_ACTIVE) {
            // We have been killed: abort.
            throw KILLED_EXCEPTION;
        }
        if (readLock == LockTable.LOCKED_WRITE) {
            // We already own that lock in write mode: serve from the write set.
            WriteFieldAccess w = writeSet.get(readHash, obj, field);
            if (w == null) return false;
            readValue = w.getValue();
            return true;
        } else if (readLock == LockTable.LOCKED_READ) {
            // We already own that lock in read mode.
            return false;
        }
        // b tracks whether we had to re-read the field (caller must then use readValue).
        boolean b = false;
        while (true) {
            // readLock holds the location's version; valid while <= endTime.
            while (readLock <= endTime) {
                // Re-read timestamp (check for race with a concurrent writer
                // between the field read and this validation).
                long lock = LockTable.checkLock(this, readHash, id);
                if (lock != readLock) {
                    // Version moved under us: re-read the field and re-validate.
                    readLock = lock;
                    readValue = Field.getValue(obj, field, type);
                    b = true;
                    continue;
                }
                // We have read a valid value (in snapshot).
                if (readWriteHint) {
                    // Save to read set (read-only transactions skip this).
                    readSet.add(obj, field, readHash, lock);
                }
                return b;
            }
            // Version is newer than our snapshot: try to extend the snapshot.
            if (!(readWriteHint && extend())) {
                throw EXTEND_FAILURE_EXCEPTION;
            }
        }
    }
}
private WriteFieldAccess onReadAccess0(Object obj, long field, int advice) { ReadFieldAccess current = currentReadFieldAccess; int hash = current.hashCode(); // Check the read is still valid LockTable.checkLock(hash, localClock, lastReadLock); // Check if it is already included in the write set return writeSet.contains(current); }
/**
 * Hook invoked on a transactional write: acquires the entry's lock in write
 * mode and records the write in the write set.
 *
 * <p>A transaction started with a read-only hint cannot write: the hint is
 * flipped to read-write for future runs and the transaction is restarted via
 * {@code READ_ONLY_FAILURE_EXCEPTION}.
 *
 * @throws KILLED_EXCEPTION / WRITE_FAILURE_EXCEPTION / READ_ONLY_FAILURE_EXCEPTION
 *         (pre-built exceptions) to abort or restart the transaction.
 */
private void onWriteAccess(Object obj, long field, Object value, Type type) {
    if (!readWriteHint) {
        // Change hint to read-write so the retry takes the writing path.
        readWriteMarkers.insert(atomicBlockId, true);
        throw READ_ONLY_FAILURE_EXCEPTION;
    }
    int hash = LockTable.hash(obj, field);
    // Lock entry in write mode (might throw an exception).
    // NOTE(review): timestamp < 0 appears to mean "we already owned the lock";
    // >= 0 is the previous version of a freshly acquired lock — confirm
    // against LockTable.lock's contract.
    long timestamp = LockTable.lock(this, hash, id, true);
    synchronized (writeSet) {
        // Mutual exclusion on write set to allow other transaction to drop locks:
        // the status check is race-free with a killer holding this monitor.
        if ((status.get() & STATUS_MASK) != TX_ACTIVE) {
            // We have been killed.
            if (timestamp >= 0) {
                // Drop the lock we just acquired (not yet in the write set, so
                // the killer could not release it for us).
                LockTable.setAndReleaseLock(hash, timestamp);
            }
            // Abort.
            throw KILLED_EXCEPTION;
        }
        if (timestamp < 0) {
            // We already own that lock: just append another write to the entry.
            writeSet.appendWrite(hash, obj, field, value, type);
        } else {
            // Freshly acquired lock.
            if (timestamp > endTime) {
                // Handle write-after-read: the location changed after our
                // snapshot; if we previously read it, the snapshot is broken.
                if (readSet.contains(obj, field)) {
                    // Abort (and release the lock so others can proceed).
                    LockTable.setAndReleaseLock(hash, timestamp);
                    throw WRITE_FAILURE_EXCEPTION;
                }
                // We delay validation until later (although we could already validate once here).
            }
            // Add to write set.
            writeSet.addWrite(hash, obj, field, value, type, timestamp);
        }
    }
}
/**
 * Unlock every lock held by {@code group}, then dismantle the group.
 * No-op if the group does not exist.
 */
synchronized void unlockGroup(LockTable lset, Object group) {
    // Detach the group's lock map up front; nothing to do if absent.
    HashMap heldLocks = (HashMap) groups.remove(group);
    if (heldLocks == null) {
        return;
    }
    // Release each lock the group was holding.
    Iterator it = heldLocks.keySet().iterator();
    while (it.hasNext()) {
        lset.unlock((Lock) it.next(), 0);
    }
    // Removing the callback group re-arms the limit callback.
    if ((callbackGroup != null) && group.equals(callbackGroup)) {
        nextLimitCall = limit;
    }
    saveGroup(heldLocks);
}
/**
 * Resolve a lock conflict with transaction {@code other} via the contention
 * manager, possibly killing the other transaction.
 *
 * @param other id of the conflicting transaction (index into {@code threads})
 * @param type  kind of conflict being arbitrated
 * @param hash  lock-table hash of the contended entry
 * @param lock  lock word observed by the caller at conflict time
 * @return {@code true} if we won the arbitration (caller may retry),
 *         {@code false} if there is no contention manager or we did not win.
 */
public boolean conflict(int other, ConflictType type, int hash, long lock) {
    if (cm != null) {
        Context tx = threads.get(other);
        if (cm.arbitrate(this, tx, type) == ContentionManager.KILL_OTHER) {
            // We win.
            synchronized (tx.writeSet) {
                // Mutual exclusion on the victim's write set while we drop its
                // locks; also makes the ownership re-check below race-free.
                if (lock == LockTable.readLock(hash)) {
                    // The other transaction still owns the lock (it did not
                    // commit/abort in the meantime): kill it.
                    kill(tx);
                }
            }
            return true;
        }
    }
    return false;
}
/**
 * Unlock all locks in the group whose {@code Lockable} matches {@code key};
 * if every lock matched (i.e. the group is fully drained), the group itself
 * is removed and its map recycled.
 */
synchronized void unlockGroup(LockTable lset, Object group, Matchable key) {
    HashMap dl = (HashMap) groups.get(group);
    if (dl == null) return; // no group at all
    boolean allUnlocked = true;
    for (Iterator e = dl.keySet().iterator(); e.hasNext(); ) {
        Lock lock = (Lock) e.next();
        if (!key.match(lock.getLockable())) {
            // This lock survives; the group cannot be dismantled.
            allUnlocked = false;
            continue;
        }
        lset.unlock(lock, 0);
        // Iterator.remove keeps the traversal safe while shrinking the map.
        e.remove();
    }
    if (allUnlocked) {
        // Every lock matched and was released: drop and recycle the group.
        groups.remove(group);
        saveGroup(dl);
        // Removing the callback group re-arms the limit callback.
        if ((callbackGroup != null) && group.equals(callbackGroup)) {
            nextLimitCall = limit;
        }
    }
}