// are protected by the lightweight
// lock itself), and any heavy_monitor
// structures attached to it.
-# define HEAVY 2 // There may be heavyweight locks
- // associated with this cache entry.
+# define HEAVY 2 // Heavyweight locks associated with this
+ // hash entry may be held.
// The lightweight entry is still valid,
// if the leading bits of the address
// field are nonzero.
- // Set if heavy_count is > 0 .
+ // If the LOCKED bit is clear, then this is
+ // set exactly when heavy_count is > 0 .
// Stored redundantly so a single
// compare-and-swap works in the easy case.
+ // If HEAVY is not set, it is safe to use
+ // an available lightweight lock entry
+ // without checking if there is an existing
+ // heavyweight lock for the same object.
+ // (There may be one, but it won't be held
+ // or waited for.)
# define REQUEST_CONVERSION 4 // The lightweight lock is held. But
// one or more other threads have tried
// to acquire the lock, and hence request
// conversion to heavyweight status.
+ // The heavyweight lock is already allocated.
+ // Threads requesting conversion are
+ // waiting on the condition variable associated
+ // with the heavyweight lock.
+ // Not used for conversion due to
+ // Object.wait() calls.
# define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
volatile _Jv_ThreadId_t light_thr_id;
// Thr_id of holder of lightweight lock.
volatile unsigned short light_count;
// Number of times the lightweight lock
// is held minus one. Zero if lightweight
- // lock is not held.
+ // lock is not held. Only updated by
+ // lightweight lock holder or, in one
+ // case, while holding the LOCKED bit in
+ // a state in which there can be no
+ // lightweight lock holder.
unsigned short heavy_count; // Total number of times heavyweight locks
// associated with this hash entry are held
// or waiting to be acquired.
                                // Threads in wait() are included even though
// they have temporarily released the lock.
+ // Protected by LOCKED bit.
+ // Threads requesting conversion to heavyweight
+ // status are also included.
struct heavy_lock * heavy_locks;
// Chain of heavy locks. Protected
// by lockbit for he. Locks may
fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
"\tlight_thr_id = 0x%lx, light_count = %d, "
"heavy_count = %d\n\theavy_locks:", he,
- he - light_locks, he -> address, he -> light_thr_id,
+ he - light_locks, (unsigned long)(he -> address),
+ (unsigned long)(he -> light_thr_id),
he -> light_count, he -> heavy_count);
print_hl_list(he -> heavy_locks);
fprintf(stderr, "\n");
}
#endif /* LOCK_DEBUG */
+#ifdef LOCK_LOG
+ // Log locking operations. For debugging only.
+ // Logging is intended to be as unintrusive as possible.
+ // Log calls are made after an operation completes, and hence
+ // may not completely reflect actual synchronization ordering.
+ // The choice of events to log is currently a bit haphazard.
+ // The intent is that if we have to track down any other bugs
+  // in this code, we extend the logging as appropriate.
+ typedef enum
+ {
+ ACQ_LIGHT, ACQ_LIGHT2, ACQ_HEAVY, ACQ_HEAVY2, PROMOTE, REL_LIGHT,
+ REL_HEAVY, REQ_CONV, PROMOTE2, WAIT_START, WAIT_END, NOTIFY, NOTIFY_ALL
+ } event_type;
+
+ struct lock_history
+ {
+ event_type tp;
+ obj_addr_t addr; // Often includes flags.
+ _Jv_ThreadId_t thr;
+ };
+
+ const int LOG_SIZE = 128; // Power of 2.
+
+ lock_history lock_log[LOG_SIZE];
+
+ volatile obj_addr_t log_next = 0;
+ // Next location in lock_log.
+ // Really an int, but we need compare_and_swap.
+
+ static void add_log_entry(event_type t, obj_addr_t a, _Jv_ThreadId_t th)
+ {
+ obj_addr_t my_entry;
+ obj_addr_t next_entry;
+ do
+ {
+ my_entry = log_next;
+ next_entry = ((my_entry + 1) & (LOG_SIZE - 1));
+ }
+ while (!compare_and_swap(&log_next, my_entry, next_entry));
+ lock_log[my_entry].tp = t;
+ lock_log[my_entry].addr = a;
+ lock_log[my_entry].thr = th;
+ }
+
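+  // A minimal sketch of one way the log might be inspected from a
+  // debugger or at exit; the dump_lock_log name is hypothetical and
+  // assumes only the lock_log, log_next, and LOG_SIZE definitions
+  // above.  Like the logging itself it is unsynchronized, and slots
+  // that were never written print as zeroes.
+  static void dump_lock_log()
+  {
+    // log_next is the slot to be written next, so once the log has
+    // wrapped, the oldest surviving entry starts there.
+    obj_addr_t first = log_next;
+    for (int i = 0; i < LOG_SIZE; ++i)
+      {
+        const lock_history& e = lock_log[(first + i) & (LOG_SIZE - 1)];
+        fprintf(stderr, "%d: event %d, addr 0x%lx, thr 0x%lx\n",
+                i, (int)(e.tp), (unsigned long)(e.addr),
+                (unsigned long)(e.thr));
+      }
+  }
+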
+# define LOG(t, a, th) add_log_entry(t, a, th)
+#else /* !LOCK_LOG */
+# define LOG(t, a, th)
+#endif
+
static bool mp = false;     // Known multiprocessor.
// Wait for roughly 2^n units, touching as little memory as possible.
// Remove all heavy locks on the list. Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
+// FIXME: Why does this unlock the hash entry? I think that
+// could now be done more cleanly in MonitorExit.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
// Count fields are set correctly. Heavy_count was also zero,
// but can change asynchronously.
// This path is hopefully both fast and the most common.
+ LOG(ACQ_LIGHT, addr, self);
return;
}
address = he -> address;
}
else
{
+ JvAssert(!(address & LOCKED));
      // Lightweight lock is held, but by someone else.
// Spin a few times. This avoids turning this into a heavyweight
// lock if the current holder is about to release it.
+ // FIXME: Does this make sense on a uniprocessor, where
+ // it actually yields? It's probably cheaper to convert.
for (unsigned int i = 0; i < N_SPINS; ++i)
{
- if ((he -> address & ~LOCKED) != (address & ~LOCKED)) goto retry;
+ if ((he -> address & ~LOCKED) != address) goto retry;
spin(i);
}
- address &= ~LOCKED;
if (!compare_and_swap(&(he -> address), address, address | LOCKED ))
{
wait_unlocked(he);
          JvAssert(he -> address == (address | LOCKED));
release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
// release lock on he
+ LOG(REQ_CONV, (address | REQUEST_CONVERSION | HEAVY), self);
while ((he -> address & ~FLAGS) == (address & ~FLAGS))
{
// Once converted, the lock has to retain heavyweight
}
}
obj_addr_t was_heavy = (address & HEAVY);
- address &= ~LOCKED;
- if (!compare_and_swap(&(he -> address), address, (address | LOCKED )))
+ if ((address & LOCKED) ||
+ !compare_and_swap(&(he -> address), address, (address | LOCKED )))
{
wait_unlocked(he);
goto retry;
// Can't convert a nonexistent lightweight lock.
heavy_lock *hl;
hl = (was_heavy? find_heavy(addr, he) : 0);
+ // The CAS succeeded, so was_heavy is still accurate.
if (0 == hl)
{
	  // It is OK to use the lightweight lock, since either the
// heavyweight lock does not exist, or none of the
- // heavyweight locks currently exist. Future threads
+ // heavyweight locks are currently in use. Future threads
// trying to acquire the lock will see the lightweight
// one first and use that.
he -> light_thr_id = self; // OK, since nobody else can hold
JvAssert(he -> light_count == 0);
JvAssert(was_heavy == (he -> address & HEAVY));
release_set(&(he -> address), (addr | was_heavy));
+ LOG(ACQ_LIGHT2, addr | was_heavy, self);
}
else
{
++ (he -> heavy_count);
JvAssert(0 == (address & ~HEAVY));
release_set(&(he -> address), HEAVY);
+ LOG(ACQ_HEAVY, addr | was_heavy, self);
_Jv_MutexLock(&(hl->si.mutex));
keep_live(addr);
}
heavy_lock *hl = get_heavy(addr, he);
++ (he -> heavy_count);
release_set(&(he -> address), address | HEAVY);
+ LOG(ACQ_HEAVY2, address | HEAVY, self);
_Jv_MutexLock(&(hl->si.mutex));
keep_live(addr);
}
he -> light_thr_id = INVALID_THREAD_ID;
if (compare_and_swap_release(&(he -> address), address,
address & HEAVY))
- return;
+ {
+ LOG(REL_LIGHT, address & HEAVY, self);
+ return;
+ }
else
{
he -> light_thr_id = light_thr_id; // Undo prior damage.
# ifdef LOCK_DEBUG
fprintf(stderr, "Lightweight lock held by other thread\n\t"
"light_thr_id = 0x%lx, self = 0x%lx, "
- "address = 0x%lx, pid = %d\n",
- light_thr_id, self, address, getpid());
+ "address = 0x%lx, heavy_count = %d, pid = %d\n",
+ light_thr_id, self, (unsigned long)address,
+ he -> heavy_count, getpid());
print_he(he);
for(;;) {}
# endif
// lock.
he -> light_thr_id = INVALID_THREAD_ID;
release_set(&(he -> address), HEAVY);
+ LOG(PROMOTE, address, self);
// lightweight lock now unused.
_Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
_Jv_MutexUnlock(&(hl->si.mutex));
print_he(he);
for(;;) {}
# endif
+ release_set(&(he -> address), address);
throw new java::lang::IllegalMonitorStateException(
JvNewStringLatin1("current thread not owner"));
}
release_set(&(he -> address), address);
_Jv_MutexUnlock(&(hl->si.mutex));
}
+ LOG(REL_HEAVY, addr, self);
keep_live(addr);
}
obj_addr_t address;
unsigned hash = JV_SYNC_HASH(addr);
hash_entry * he = light_locks + hash;
- _Jv_ThreadId_t self = _Jv_ThreadSelf();
JvAssert(!(addr & FLAGS));
+ address = he -> address;
+ // Try it the easy way first:
+ if (address == 0) return true;
+ _Jv_ThreadId_t self = _Jv_ThreadSelf();
+ if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
+ // Fails if entry is LOCKED.
+ // I can't asynchronously become or stop being the holder.
+ return he -> light_thr_id != self;
retry:
// Acquire the hash table entry lock
- address = ((he -> address) & ~LOCKED);
+ address &= ~LOCKED;
if (!compare_and_swap(&(he -> address), address, address | LOCKED))
{
wait_unlocked(he);
bool not_mine;
- if (!(address & ~FLAGS))
- not_mine = true;
- else if ((address & ~FLAGS) == addr)
+ if ((address & ~FLAGS) == addr)
not_mine = (he -> light_thr_id != self);
else
{
wait_unlocked(he);
goto retry;
}
- // address does not have the lock bit set. We hold the lock on he.
+ // address did not have the lock bit set. We now hold the lock on he.
if ((address & ~FLAGS) == addr)
{
// Convert to heavyweight.
// Again release the he lock after acquiring the mutex.
he -> light_thr_id = INVALID_THREAD_ID;
release_set(&(he -> address), HEAVY); // lightweight lock now unused.
+ LOG(PROMOTE2, addr, self);
if (address & REQUEST_CONVERSION)
- _Jv_CondNotify (&(hl->si.condition), &(hl->si.mutex));
+ _Jv_CondNotifyAll (&(hl->si.condition), &(hl->si.mutex));
+ // Since we do this before we do a CondWait, we guarantee that
+ // threads waiting on requested conversion are awoken before
+ // a real wait on the same condition variable.
+ // No other notification can occur in the interim, since
+ // we hold the heavy lock, and notifications are made
+ // without acquiring it.
}
else /* We should hold the heavyweight lock. */
{
}
JvAssert(address & HEAVY);
}
+ LOG(WAIT_START, addr, self);
switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
{
case _JV_NOT_OWNER:
if (Thread::interrupted ())
throw new InterruptedException;
}
+ LOG(WAIT_END, addr, self);
}
void
hl = find_heavy(addr, he);
// Hl can't disappear since we point to the underlying object.
// It's important that we release the lock bit before the notify, since
- // otherwise we will try to wake up thee target while we still hold the
+ // otherwise we will try to wake up the target while we still hold the
// bit. This results in lock bit contention, which we don't handle
// terribly well.
release_set(&(he -> address), address); // unlock
("current thread not owner"));
return;
}
+ // We know that we hold the heavyweight lock at this point,
+ // and the lightweight lock is not in use.
result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
+ LOG(NOTIFY, addr, self);
keep_live(addr);
if (__builtin_expect (result, 0))
throw new IllegalMonitorStateException(JvNewStringLatin1
("current thread not owner"));
}
result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
+ LOG(NOTIFY_ALL, addr, self);
if (__builtin_expect (result, 0))
throw new IllegalMonitorStateException(JvNewStringLatin1
("current thread not owner"));