1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // GOMAXPROCS=10 go test
// HammerSemaphore exercises the runtime semaphore s in a tight loop for
// the given number of iterations.
// NOTE(review): interior lines are elided in this view — the matching
// acquire call, the loop close, and the completion send on cdone are not
// visible here; confirm against the full file.
21 func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
22 for i := 0; i < loops; i++ {
24 Runtime_Semrelease(s, false, 0)
// TestSemaphore starts 10 HammerSemaphore goroutines of 1000 iterations
// each, then waits for all 10 to signal completion on c.
// NOTE(review): the initialization of s and c and the receive in the
// second loop are elided in this view.
29 func TestSemaphore(t *testing.T) {
33 for i := 0; i < 10; i++ {
34 go HammerSemaphore(s, 1000, c)
36 for i := 0; i < 10; i++ {
// BenchmarkUncontendedSemaphore measures semaphore throughput with a
// single hammering call (no contention). The done channel is buffered
// so the completion send cannot block.
41 func BenchmarkUncontendedSemaphore(b *testing.B) {
44 HammerSemaphore(s, b.N, make(chan bool, 2))
// BenchmarkContendedSemaphore splits b.N iterations across two goroutines
// under GOMAXPROCS(2) so the semaphore is actively contended.
// The defer restores the previous GOMAXPROCS value when the benchmark ends.
47 func BenchmarkContendedSemaphore(b *testing.B) {
52 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
55 go HammerSemaphore(s, b.N/2, c)
56 go HammerSemaphore(s, b.N/2, c)
// HammerMutex repeatedly exercises the Mutex m for loops iterations.
// NOTE(review): the lock/unlock body and the completion send on cdone are
// elided in this view; confirm against the full file.
61 func HammerMutex(m *Mutex, loops int, cdone chan bool) {
62 for i := 0; i < loops; i++ {
// TestMutex hammers a single Mutex from 10 goroutines while mutex
// profiling is enabled (fraction 1), exercising the profiling code paths
// along with the lock itself.
69 func TestMutex(t *testing.T) {
// A non-zero previous fraction is unexpected but only logged, not fatal,
// so the test still runs.
70 if n := runtime.SetMutexProfileFraction(1); n != 0 {
71 t.Logf("got mutexrate %d expected 0", n)
// Restore mutex profiling to disabled when the test finishes.
73 defer runtime.SetMutexProfileFraction(0)
76 for i := 0; i < 10; i++ {
77 go HammerMutex(m, 1000, c)
// NOTE(review): the drain of the completion channel inside this loop is
// elided in this view.
79 for i := 0; i < 10; i++ {
// misuseTests tables lock-misuse scenarios; each entry is executed in a
// subprocess (see the TESTMISUSE dispatcher and TestMutexMisuse below).
// NOTE(review): the struct's fields and the table entries are elided in
// this view.
84 var misuseTests = []struct {
// Subprocess dispatcher: when the binary is re-executed as
// "<binary> TESTMISUSE <name>", look up and run the matching misuse test.
// NOTE(review): the enclosing function declaration and several statements
// (including the call to the test function) are elided in this view.
155 if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
156 for _, test := range misuseTests {
157 if test.name == os.Args[2] {
// Deferred recover absorbs a panic from the test function, if any,
// so control can still reach the print below.
159 defer func() { recover() }()
162 fmt.Printf("test completed\n")
166 fmt.Printf("unknown test\n")
// TestMutexMisuse re-executes the test binary once per misuse scenario and
// verifies that the child process fails with output mentioning an
// "unlocked" lock.
171 func TestMutexMisuse(t *testing.T) {
// Skip on platforms that cannot exec subprocesses.
172 testenv.MustHaveExec(t)
173 for _, test := range misuseTests {
174 out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
// The child is expected to fail (err != nil) AND to print "unlocked";
// anything else is a test failure.
175 if err == nil || !strings.Contains(string(out), "unlocked") {
176 t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
// TestMutexFairness checks that a goroutine contending for a heavily used
// Mutex eventually acquires it (within 10 seconds), i.e. the lock does not
// starve waiters.
// NOTE(review): the hammering goroutine's body, the mutex operations, and
// the select's success case are elided in this view.
181 func TestMutexFairness(t *testing.T) {
183 stop := make(chan bool)
188 time.Sleep(100 * time.Microsecond)
197 done := make(chan bool)
199 for i := 0; i < 10; i++ {
200 time.Sleep(100 * time.Microsecond)
// Fail if the contending goroutine cannot get the mutex within 10s.
208 case <-time.After(10 * time.Second):
209 t.Fatalf("can't acquire Mutex in 10 seconds")
// BenchmarkMutexUncontended gives each parallel worker its own Mutex so
// there is no lock sharing; the PaddedMutex wrapper presumably pads the
// lock to avoid false sharing between workers' cache lines.
// NOTE(review): the padding field and the worker loop body are elided in
// this view — confirm against the full file.
213 func BenchmarkMutexUncontended(b *testing.B) {
214 type PaddedMutex struct {
218 b.RunParallel(func(pb *testing.PB) {
// benchmarkMutex is the shared driver for the Mutex benchmarks below.
// slack and work select benchmark variants; the visible 100-iteration
// inner loop is per-iteration local work.
// NOTE(review): most of the body (lock/unlock, how slack/work gate the
// extra code) is elided in this view.
227 func benchmarkMutex(b *testing.B, slack, work bool) {
232 b.RunParallel(func(pb *testing.PB) {
238 for i := 0; i < 100; i++ {
// BenchmarkMutex: contended Mutex, no slack, no extra per-iteration work.
248 func BenchmarkMutex(b *testing.B) {
249 benchmarkMutex(b, false, false)
// BenchmarkMutexSlack: contended Mutex with slack enabled, no extra work.
252 func BenchmarkMutexSlack(b *testing.B) {
253 benchmarkMutex(b, true, false)
// BenchmarkMutexWork: contended Mutex with extra per-iteration work, no slack.
256 func BenchmarkMutexWork(b *testing.B) {
257 benchmarkMutex(b, false, true)
// BenchmarkMutexWorkSlack: contended Mutex with both slack and extra work.
260 func BenchmarkMutexWorkSlack(b *testing.B) {
261 benchmarkMutex(b, true, true)
// BenchmarkMutexNoSpin — the model is described in the in-code comment
// below. NOTE(review): the mutex operations, the use of acc0/acc1, and the
// Gosched-equivalent mechanism referenced at the bottom are elided in this
// view.
264 func BenchmarkMutexNoSpin(b *testing.B) {
265 // This benchmark models a situation where spinning in the mutex should be
266 // non-profitable and allows to confirm that spinning does not do harm.
267 // To achieve this we create excess of goroutines most of which do local work.
268 // These goroutines yield during local work, so that switching from
269 // a blocked goroutine to other goroutines is profitable.
270 // As a matter of fact, this benchmark still triggers some spinning in the mutex.
// acc0/acc1 presumably act as sinks so the local work is not optimized
// away — their use is not visible here; confirm against the full file.
272 var acc0, acc1 uint64
274 b.RunParallel(func(pb *testing.PB) {
// Per-goroutine scratch array (4096 uint64s) for the local-work phase.
276 var data [4 << 10]uint64
277 for i := 0; pb.Next(); i++ {
// Stride-4 pass over the scratch data; the computation inside is elided.
284 for i := 0; i < len(data); i += 4 {
287 // Elaborate way to say runtime.Gosched
288 // that does not put the goroutine onto global runq.
// BenchmarkMutexSpin — the model is described in the in-code comment
// below. NOTE(review): the mutex operations and the use of acc0/acc1 are
// elided in this view.
298 func BenchmarkMutexSpin(b *testing.B) {
299 // This benchmark models a situation where spinning in the mutex should be
300 // profitable. To achieve this we create a goroutine per-proc.
301 // These goroutines access considerable amount of local data so that
302 // unnecessary rescheduling is penalized by cache misses.
// acc0/acc1 presumably act as sinks so the local work is not optimized
// away — their use is not visible here; confirm against the full file.
304 var acc0, acc1 uint64
305 b.RunParallel(func(pb *testing.PB) {
// Per-goroutine scratch array (16384 uint64s, 128 KiB) — large enough
// that rescheduling to another proc costs cache misses.
306 var data [16 << 10]uint64
307 for i := 0; pb.Next(); i++ {
// Stride-4 pass over the scratch data; the computation inside is elided.
312 for i := 0; i < len(data); i += 4 {