// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
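
// Illustrative only (not in the original source): add is the runtime's
// unchecked pointer-arithmetic helper. For example, given a []uint64 s,
// the second element of its backing array can be reached with:
//
//	p := unsafe.Pointer(&s[0])
//	q := (*uint64)(add(p, unsafe.Sizeof(uint64(0)))) // points at s[1]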

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))
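
// A hedged usage sketch (not in the original source), following the doc
// comment above: a blocking primitive parks the current goroutine by
// recording it where a waker can find it, then lets the m run other work:
//
//	mcall(func(gp *g) {
//		enqueueWaiter(gp) // hypothetical: someone calls ready(gp) later
//		schedule()        // never returns into this closure
//	})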

// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		fn()
	} else if gp == mp.curg {
		fn1 := func(origg *g) {
			fn()
			gogo(origg)
		}
		mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
	} else {
		badsystemstack()
	}
}

var badsystemstackMsg = "fatal: systemstack called from unexpected goroutine"

//go:nosplit
//go:nowritebarrierrec
func badsystemstack() {
	sp := stringStructOf(&badsystemstackMsg)
	write(2, sp.str, int32(sp.len))
}

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

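// Illustrative only (not in the original source): a typical safe call site
// clears a region whose type is pointer-free, e.g. zeroing a byte buffer:
//
//	var buf [64]byte
//	memclrNoHeapPointers(unsafe.Pointer(&buf[0]), unsafe.Sizeof(buf))
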
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

//go:noescape
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)

//go:nosplit
func fastrand() uint32 {
	mp := getg().m
	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	s1, s0 := mp.fastrand[0], mp.fastrand[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	mp.fastrand[0], mp.fastrand[1] = s0, s1
	return s0 + s1
}
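
// Note (added commentary, not in the original source): the generator state
// lives in the per-m fastrand array, so fastrand needs no locks and each OS
// thread advances an independent xorshift64+ sequence.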

//go:nosplit
func fastrandn(n uint32) uint32 {
	// This is similar to fastrand() % n, but faster.
	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32(uint64(fastrand()) * uint64(n) >> 32)
}
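
// Worked example (illustrative, not in the original source): the reduction
// treats fastrand()/2^32 as a fraction in [0, 1) and scales it by n. For
// n = 10 and fastrand() = 1<<31 (exactly half the 32-bit range),
// uint64(1<<31)*10 >> 32 == 5, the midpoint of [0, 10).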

//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }

// in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
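
// For an in-file example of noescape in action, see systemstack above: it
// passes the address of its stack-allocated closure fn1 through noescape so
// that taking &fn1 does not force the closure to the heap. (This note is
// added commentary, not in the original source.)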

//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func setg(gg *g)

//extern __builtin_trap
func breakpoint()

func asminit() {}

//go:linkname reflectcall runtime.reflectcall
//go:noescape
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
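
// A hedged usage sketch (not in the original source), following the doc
// comment above: initialize first, fence, then publish the pointer.
//
//	p := (*T)(rawAlloc(n)) // hypothetical raw allocation
//	p.field = 42           // initialization writes
//	publicationBarrier()   // order the init writes before the publish
//	sharedSlot = p         // publication write; a reader that follows
//	                       // sharedSlot now observes initialized memory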

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// The implementation may be a compiler intrinsic; there is not
// necessarily code implementing this on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp()
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc() uintptr

//go:noescape
func getcallersp() uintptr // implemented as an intrinsic on all platforms

func asmcgocall(fn, arg unsafe.Pointer) int32 {
	throw("asmcgocall")
	return 0
}

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)
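
// Note (added commentary, not in the original source): in GCC's
// __builtin_prefetch(addr, rw, locality), rw 0 means a read is expected,
// and locality runs from 0 (no temporal locality, like x86 PREFETCHNTA)
// up to 3 (high temporal locality, like PREFETCHT0). The wrappers below
// map the familiar x86 prefetch flavors onto those locality levels.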

func prefetcht0(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 3)
}

func prefetcht1(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 2)
}

func prefetcht2(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 1)
}

func prefetchnta(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 0)
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}
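
// Worked example (illustrative, not in the original source): rounding 13 up
// to a multiple of 8 computes (13+7) &^ 7 == 20 &^ 7 == 16; adding a-1 and
// then clearing the low bits never overshoots by more than one multiple.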

// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	return true
}

func eqstring(x, y string) bool {
	a := stringStructOf(&x)
	b := stringStructOf(&y)
	if a.len != b.len {
		return false
	}
	if a.str == b.str {
		return true
	}
	return memequal(a.str, b.str, uintptr(a.len))
}

// For gccgo this is in the C code.
func osyield()

//extern __go_syscall6
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo runtime.setIsCgo
func setIsCgo() {
	iscgo = true
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES runtime.setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}

// Here for gccgo.
func errno() int

// For gccgo these are written in C.
func entersyscall()
func entersyscallblock()

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}

// Get signal trampoline, written in C.
func getSigtramp() uintptr

// The sa_handler field is generally hidden in a union, so use C accessors.
//go:noescape
func getSigactionHandler(*_sigaction) uintptr

//go:noescape
func setSigactionHandler(*_sigaction, uintptr)

// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns the fault address and, if available, the PC at which the
// signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)

// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)

// Implemented in C for gccgo.
func setRandomNumber(uint32)

// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
	return &sched
}

// Temporary for gccgo until we port proc.go.
//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
func getCgoHasExtraM() *bool {
	return &cgoHasExtraM
}

// Temporary for gccgo until we port proc.go.
//go:linkname getAllP runtime.getAllP
func getAllP() **p {
	return &allp[0]
}

// Temporary for gccgo until we port proc.go.
//go:linkname allocg runtime.allocg
func allocg() *g {
	return new(g)
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallglen runtime.getallglen
func getallglen() uintptr {
	return allglen
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallg runtime.getallg
func getallg(i int) *g {
	return allgs[i]
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallm runtime.getallm
func getallm() *m {
	return allm
}

// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr

// Temporary for gccgo until C code no longer needs it.
//go:nosplit
//go:linkname getPanicking runtime.getPanicking
func getPanicking() uint32 {
	return panicking
}

// Called by C code to set the number of CPUs.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}

// Called by C code to set the page size.
//go:linkname setpagesize runtime.setpagesize
func setpagesize(s uintptr) {
	if physPageSize == 0 {
		physPageSize = s
	}
}

// Called by C code during library initialization.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
	return &m0
}

// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_g0 runtime.runtime_g0
func runtime_g0() *g {
	return &g0
}

const uintptrMask = 1<<(8*sys.PtrSize) - 1
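
// For example (added commentary, not in the original source): on a 64-bit
// target sys.PtrSize is 8, so uintptrMask is 1<<64 - 1, i.e. all bits set.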

type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}
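
// Worked example (illustrative, not in the original source): ptrbit(10)
// loads byte i/8 = 1 of bytedata and extracts bit i%8 = 2, so bit 10 of
// the vector is (bytedata[1] >> 2) & 1.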

// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	if x {
		return 1
	}
	return 0
}

// abort crashes the runtime in situations where even throw might not
// work. In general it should do something a debugger will recognize
// (e.g., an INT3 on x86). A crash in abort is recognized by the
// signal handler, which will attempt to tear down the runtime
// immediately.
func abort()

// usestackmaps is true if stack map (precise stack scan) is enabled.
var usestackmaps bool

// probestackmaps detects whether there are stack maps.
//go:linkname probestackmaps runtime.probestackmaps
func probestackmaps() bool

// For the math/bits package for gccgo.
//go:linkname getDivideError runtime.getDivideError
func getDivideError() error {
	return divideError
}

// For the math/bits package for gccgo.
//go:linkname getOverflowError runtime.getOverflowError
func getOverflowError() error {
	return overflowError
}