libgo/go/runtime/debug_test.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO: This test could be implemented on all (most?) UNIXes if we
// added syscall.Tgkill more widely.

// We skip all of these tests under race mode because our test thread
// spends all of its time in the race runtime, which isn't a safe
// point.

// +build ignore_for_gccgo
// +build amd64
// +build linux
// +build !race

package runtime_test

import (
	"fmt"
	"io/ioutil"
	"regexp"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"syscall"
	"testing"
)

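// startDebugCallWorker starts a debugCallWorker goroutine pinned to
// its own OS thread, waits for it to report its *runtime.G, and
// returns that g along with a cleanup function that stops the worker
// and restores GOMAXPROCS and the GC percentage.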
func startDebugCallWorker(t *testing.T) (g *runtime.G, after func()) {
	// This can deadlock if run under a debugger because it
	// depends on catching SIGTRAP, which is usually swallowed by
	// a debugger.
	skipUnderDebugger(t)

	// This can deadlock if there aren't enough threads or if a GC
	// tries to interrupt an atomic loop (see issue #10958). We
	// use 8 Ps so there's room for the debug call worker,
	// something that's trying to preempt the call worker, and the
	// goroutine that's trying to stop the call worker.
	ogomaxprocs := runtime.GOMAXPROCS(8)
	ogcpercent := debug.SetGCPercent(-1)

	// ready is a buffered channel so debugCallWorker won't block
	// on sending to it. This makes it less likely we'll catch
	// debugCallWorker while it's in the runtime.
	ready := make(chan *runtime.G, 1)
	var stop uint32
	done := make(chan error)
	go debugCallWorker(ready, &stop, done)
	g = <-ready
	return g, func() {
		atomic.StoreUint32(&stop, 1)
		err := <-done
		if err != nil {
			t.Fatal(err)
		}
		runtime.GOMAXPROCS(ogomaxprocs)
		debug.SetGCPercent(ogcpercent)
	}
}

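// debugCallWorker locks itself to an OS thread, publishes its g on
// ready, then spins in debugCallWorker2 until *stop is set, reporting
// any failure on done.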
func debugCallWorker(ready chan<- *runtime.G, stop *uint32, done chan<- error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	ready <- runtime.Getg()

	x := 2
	debugCallWorker2(stop, &x)
	if x != 1 {
		done <- fmt.Errorf("want x = 1, got %d; register pointer not adjusted?", x)
	}
	close(done)
}

// Don't inline this function, since we want to test adjusting
// pointers in the arguments.
//
//go:noinline
func debugCallWorker2(stop *uint32, x *int) {
	for atomic.LoadUint32(stop) == 0 {
		// Strongly encourage x to live in a register so we
		// can test pointer register adjustment.
		*x++
	}
	*x = 1
}

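// debugCallTKill sends SIGTRAP to the thread with the given tid in
// this process. It is passed to InjectDebugCall, which uses it to
// interrupt the target goroutine's thread.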
func debugCallTKill(tid int) error {
	return syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGTRAP)
}

// skipUnderDebugger skips the current test when running under a
// debugger (specifically if this process has a tracer). This is
// Linux-specific.
func skipUnderDebugger(t *testing.T) {
	pid := syscall.Getpid()
	status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
	if err != nil {
		t.Logf("couldn't get proc tracer: %s", err)
		return
	}
	re := regexp.MustCompile(`TracerPid:\s+([0-9]+)`)
	sub := re.FindSubmatch(status)
	if sub == nil {
		t.Logf("couldn't find proc tracer PID")
		return
	}
	if string(sub[1]) == "0" {
		return
	}
	t.Skip("test will deadlock under a debugger")
}

func TestDebugCall(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call into the debugCallWorker goroutine and test
	// basic argument and result passing.
	var args struct {
		x    int
		yRet int
	}
	fn := func(x int) (yRet int) {
		return x + 1
	}
	args.x = 42
	if _, err := runtime.InjectDebugCall(g, fn, &args, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
	if args.yRet != 43 {
		t.Fatalf("want 43, got %d", args.yRet)
	}
}

func TestDebugCallLarge(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call with a large call frame.
	const N = 128
	var args struct {
		in  [N]int
		out [N]int
	}
	fn := func(in [N]int) (out [N]int) {
		for i := range in {
			out[i] = in[i] + 1
		}
		return
	}
	var want [N]int
	for i := range args.in {
		args.in[i] = i
		want[i] = i + 1
	}
	if _, err := runtime.InjectDebugCall(g, fn, &args, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
	if want != args.out {
		t.Fatalf("want %v, got %v", want, args.out)
	}
}

func TestDebugCallGC(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call that performs a GC.
	if _, err := runtime.InjectDebugCall(g, runtime.GC, nil, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
}

func TestDebugCallGrowStack(t *testing.T) {
	g, after := startDebugCallWorker(t)
	defer after()

	// Inject a call that grows the stack. debugCallWorker checks
	// for stack pointer breakage.
	if _, err := runtime.InjectDebugCall(g, func() { growStack(nil) }, nil, debugCallTKill, false); err != nil {
		t.Fatal(err)
	}
}

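// debugCallUnsafePointWorker spins at a point with no safe-points so
// TestDebugCallUnsafePoint can check that call injection is refused
// there.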
//go:nosplit
func debugCallUnsafePointWorker(gpp **runtime.G, ready, stop *uint32) {
	// The nosplit causes this function to not contain safe-points
	// except at calls.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	*gpp = runtime.Getg()

	for atomic.LoadUint32(stop) == 0 {
		atomic.StoreUint32(ready, 1)
	}
}

func TestDebugCallUnsafePoint(t *testing.T) {
	skipUnderDebugger(t)

	// This can deadlock if there aren't enough threads or if a GC
	// tries to interrupt an atomic loop (see issue #10958).
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	// Test that the runtime refuses call injection at unsafe points.
	var g *runtime.G
	var ready, stop uint32
	defer atomic.StoreUint32(&stop, 1)
	go debugCallUnsafePointWorker(&g, &ready, &stop)
	for atomic.LoadUint32(&ready) == 0 {
		runtime.Gosched()
	}

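	// The final true argument asks InjectDebugCall to fail with an
	// error rather than retry when the goroutine is stopped at an
	// unsafe point; that error is exactly what this test expects.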
	_, err := runtime.InjectDebugCall(g, func() {}, nil, debugCallTKill, true)
	if msg := "call not at safe point"; err == nil || err.Error() != msg {
		t.Fatalf("want %q, got %s", msg, err)
	}
}

func TestDebugCallPanic(t *testing.T) {
	skipUnderDebugger(t)

	// This can deadlock if there aren't enough threads.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(8))

	ready := make(chan *runtime.G)
	var stop uint32
	defer atomic.StoreUint32(&stop, 1)
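	// Start a target goroutine pinned to an OS thread that spins
	// until the deferred store above tells it to stop.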
	go func() {
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		ready <- runtime.Getg()
		for atomic.LoadUint32(&stop) == 0 {
		}
	}()
	g := <-ready

	p, err := runtime.InjectDebugCall(g, func() { panic("test") }, nil, debugCallTKill, false)
	if err != nil {
		t.Fatal(err)
	}
	if ps, ok := p.(string); !ok || ps != "test" {
		t.Fatalf("wanted panic %v, got %v", "test", p)
	}
}