// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions.
//
//go:linkname lock
//go:linkname unlock
//go:linkname noteclear
//go:linkname notewakeup
//go:linkname notesleep
//go:linkname notetsleep
//go:linkname notetsleepg

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
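//
// As a rough sketch (the real definitions live in the OS-specific files
// such as os_linux.go, and details vary by kernel and platform), the Linux
// versions are thin wrappers around the futex(2) system call, approximately:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		if ns < 0 {
//			futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, nil, nil, 0)
//			return
//		}
//		var ts timespec
//		ts.setNsec(ns)
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(&ts), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}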

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect the mutex's state.
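//
// As a summary sketch of the transitions below (added for clarity):
//
//	lock, fast path:  unlocked -> locked           (speculative Xchg)
//	lock, contended:  unlocked -> locked/sleeping  (CAS while spinning),
//	                  any state -> sleeping        (Xchg before futexsleep)
//	unlock:           locked/sleeping -> unlocked  (Xchg; if the old state
//	                  was sleeping, futexwakeup one waiter)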

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	lockWithRank(l, getLockRank(l))
}
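
// (Hedged note: lockWithRank, defined outside this file, records
// lock-ordering metadata when the runtime is built with static lock
// ranking and otherwise reduces to a plain call to lock2.)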

func lock2(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
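	// (Hedged note: procyield busy-waits on-CPU, executing a pause/yield
	// instruction per iteration, while osyield in the passive phase below
	// makes a sched_yield-style system call so the kernel can run another
	// thread; both are defined per OS/arch outside this file.)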
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	unlockWithRank(l)
}

func unlock2(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	// 	gp.stackguard0 = stackPreempt
	// }
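	// (Hedged note: in the gc toolchain's runtime the block above re-arms
	// the goroutine preemption request once the last runtime lock is
	// released; it stays commented out here because gccgo does not
	// implement preemption via stackguard0/stackPreempt.)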
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
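
// A sketch of how the note primitives pair up elsewhere in the runtime
// (illustrative only; notesleep must run on g0, and each note allows at
// most one wakeup between noteclear calls):
//
//	noteclear(&n)    // arm the note
//	// ... another thread eventually calls notewakeup(&n) ...
//	notesleep(&n)    // returns once notewakeup has run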

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
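	// Each pass through the loop below recomputes the remaining ns from
	// the fixed deadline, so spurious futex wakeups shorten rather than
	// extend the total sleep.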
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}

func beforeIdle(int64) (*g, bool) {
	return nil, false
}

func checkTimeouts() {}