libgo/go/runtime/proc.go
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 package runtime
6
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
11 )
12
13 // Functions called by C code.
14 //go:linkname main runtime.main
15 //go:linkname goparkunlock runtime.goparkunlock
16 //go:linkname newextram runtime.newextram
17 //go:linkname acquirep runtime.acquirep
18 //go:linkname releasep runtime.releasep
19 //go:linkname incidlelocked runtime.incidlelocked
20 //go:linkname schedinit runtime.schedinit
21 //go:linkname ready runtime.ready
22 //go:linkname gcprocs runtime.gcprocs
23 //go:linkname stopm runtime.stopm
24 //go:linkname handoffp runtime.handoffp
25 //go:linkname wakep runtime.wakep
26 //go:linkname stoplockedm runtime.stoplockedm
27 //go:linkname schedule runtime.schedule
28 //go:linkname execute runtime.execute
29 //go:linkname goexit1 runtime.goexit1
30 //go:linkname reentersyscall runtime.reentersyscall
31 //go:linkname reentersyscallblock runtime.reentersyscallblock
32 //go:linkname exitsyscall runtime.exitsyscall
33 //go:linkname gfget runtime.gfget
34 //go:linkname helpgc runtime.helpgc
35 //go:linkname kickoff runtime.kickoff
36 //go:linkname mstart1 runtime.mstart1
37 //go:linkname globrunqput runtime.globrunqput
38 //go:linkname pidleget runtime.pidleget
39
40 // Exported for test (see runtime/testdata/testprogcgo/dropm_stub.go).
41 //go:linkname getm runtime.getm
42
43 // Function called by misc/cgo/test.
44 //go:linkname lockedOSThread runtime.lockedOSThread
45
46 // C functions for thread and context management.
47 func newosproc(*m)
48 func malg(bool, bool, *unsafe.Pointer, *uintptr) *g
49 func resetNewG(*g, *unsafe.Pointer, *uintptr)
50 func gogo(*g)
51 func setGContext()
52 func makeGContext(*g, unsafe.Pointer, uintptr)
53 func getTraceback(me, gp *g)
54 func gtraceback(*g)
55 func _cgo_notify_runtime_init_done()
56 func alreadyInCallers() bool
57
58 // Functions created by the compiler.
59 //extern __go_init_main
60 func main_init()
61
62 //extern main.main
63 func main_main()
64
65 var buildVersion = sys.TheVersion
66
67 // Goroutine scheduler
68 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
69 //
70 // The main concepts are:
71 // G - goroutine.
72 // M - worker thread, or machine.
73 // P - processor, a resource that is required to execute Go code.
74 // M must have an associated P to execute Go code; however, it can be
75 // blocked or in a syscall without an associated P.
76 //
77 // Design doc at https://golang.org/s/go11sched.
78
79 // Worker thread parking/unparking.
80 // We need to balance between keeping enough running worker threads to utilize
81 // available hardware parallelism and parking excessive running worker threads
82 // to conserve CPU resources and power. This is not simple for two reasons:
83 // (1) scheduler state is intentionally distributed (in particular, per-P work
84 // queues), so it is not possible to compute global predicates on fast paths;
85 // (2) for optimal thread management we would need to know the future (don't park
86 // a worker thread when a new goroutine will be readied in the near future).
87 //
88 // Three rejected approaches that would work badly:
89 // 1. Centralize all scheduler state (would inhibit scalability).
90 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
91 // is a spare P, unpark a thread and hand off the P and the goroutine to it.
92 // This would lead to thread state thrashing, as the thread that readied the
93 // goroutine can be out of work the very next moment, at which point we would need to park it.
94 // It would also destroy locality of computation, as we want to keep
95 // dependent goroutines on the same thread; and it would introduce additional latency.
96 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
97 // idle P, but don't do handoff. This would lead to excessive thread parking/
98 // unparking as the additional threads will instantly park without discovering
99 // any work to do.
100 //
101 // The current approach:
102 // We unpark an additional thread when we ready a goroutine if (1) there is an
103 // idle P and (2) there are no "spinning" worker threads. A worker thread is considered
104 // spinning if it is out of local work and did not find work in the global run queue/
105 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
106 // Threads unparked this way are also considered spinning; we don't do goroutine
107 // handoff so such threads are out of work initially. Spinning threads do some
108 // spinning looking for work in per-P run queues before parking. If a spinning
109 // thread finds work it takes itself out of the spinning state and proceeds to
110 // execution. If it does not find work it takes itself out of the spinning state
111 // and then parks.
112 // If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
113 // new threads when readying goroutines. To compensate for that, if the last spinning
114 // thread finds work and stops spinning, it must unpark a new spinning thread.
115 // This approach smooths out unjustified spikes of thread unparking,
116 // but at the same time guarantees eventual maximal CPU parallelism utilization.
117 //
118 // The main implementation complication is that we need to be very careful during
119 // the spinning->non-spinning thread transition. This transition can race with the submission
120 // of a new goroutine, and one side or the other needs to unpark another worker
121 // thread. If they both fail to do that, we can end up with semi-persistent CPU
122 // underutilization. The general pattern for goroutine readying is: submit a goroutine
123 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
124 // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
125 // #StoreLoad-style memory barrier, check all per-P work queues for new work.
126 // Note that all this complexity does not apply to the global run queue, as we are not
127 // sloppy about thread unparking when submitting to the global queue. Also see comments
128 // for nmspinning manipulation.
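//
// A minimal sketch of the two patterns described above (illustrative only,
// not part of the original source; submitLocalRunq and recheckAllRunqs are
// hypothetical stand-ins for the real run queue operations):
//
//	// Goroutine readying:
//	submitLocalRunq(gp)                 // publish gp to the local work queue
//	// #StoreLoad-style memory barrier
//	if atomic.Load(&sched.nmspinning) == 0 && atomic.Load(&sched.npidle) != 0 {
//		wakep() // unpark a spinning worker so the new work is noticed
//	}
//
//	// Spinning -> non-spinning transition:
//	atomic.Xadd(&sched.nmspinning, -1)
//	// #StoreLoad-style memory barrier
//	if recheckAllRunqs() { // new work appeared during the race window
//		// become spinning again (or unpark another spinning thread)
//	}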
129
130 var (
131 m0 m
132 g0 g
133 )
134
135 // main_init_done is a signal used by cgocallbackg that initialization
136 // has been completed. It is made before _cgo_notify_runtime_init_done,
137 // so all cgo calls can rely on it existing. When main_init is complete,
138 // it is closed, meaning cgocallbackg can reliably receive from it.
139 var main_init_done chan bool
140
141 // runtimeInitTime is the nanotime() at which the runtime started.
142 var runtimeInitTime int64
143
144 // Value to use for signal mask for newly created M's.
145 var initSigmask sigset
146
147 // The main goroutine.
148 func main() {
149 g := getg()
150
151 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
152 // Using decimal instead of binary GB and MB because
153 // they look nicer in the stack overflow failure message.
154 if sys.PtrSize == 8 {
155 maxstacksize = 1000000000
156 } else {
157 maxstacksize = 250000000
158 }
159
160 // Record when the world started.
161 runtimeInitTime = nanotime()
162
163 systemstack(func() {
164 newm(sysmon, nil)
165 })
166
167 // Lock the main goroutine onto this, the main OS thread,
168 // during initialization. Most programs won't care, but a few
169 // do require certain calls to be made by the main thread.
170 // Those can arrange for main.main to run in the main thread
171 // by calling runtime.LockOSThread during initialization
172 // to preserve the lock.
173 lockOSThread()
174
175 if g.m != &m0 {
176 throw("runtime.main not on m0")
177 }
178
179 // Defer unlock so that runtime.Goexit during init does the unlock too.
180 needUnlock := true
181 defer func() {
182 if needUnlock {
183 unlockOSThread()
184 }
185 }()
186
187 main_init_done = make(chan bool)
188 if iscgo {
189 _cgo_notify_runtime_init_done()
190 }
191
192 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
193 fn()
194 close(main_init_done)
195
196 needUnlock = false
197 unlockOSThread()
198
199 // For gccgo we have to wait until after main is initialized
200 // to enable GC, because initializing main registers the GC roots.
201 gcenable()
202
203 if isarchive || islibrary {
204 // A program compiled with -buildmode=c-archive or c-shared
205 // has a main, but it is not executed.
206 return
207 }
208 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
209 fn()
210 if raceenabled {
211 racefini()
212 }
213
214 // Make racy client programs work: if panicking on
215 // another goroutine at the same time as main returns,
216 // let the other goroutine finish printing the panic trace.
217 // Once it does, it will exit. See issue 3934.
218 if panicking != 0 {
219 gopark(nil, nil, "panicwait", traceEvGoStop, 1)
220 }
221
222 exit(0)
223 for {
224 var x *int32
225 *x = 0
226 }
227 }
228
229 // os_beforeExit is called from os.Exit(0).
230 //go:linkname os_beforeExit os.runtime_beforeExit
231 func os_beforeExit() {
232 if raceenabled {
233 racefini()
234 }
235 }
236
237 // start forcegc helper goroutine
238 func init() {
239 expectSystemGoroutine()
240 go forcegchelper()
241 }
242
243 func forcegchelper() {
244 setSystemGoroutine()
245
246 forcegc.g = getg()
247 for {
248 lock(&forcegc.lock)
249 if forcegc.idle != 0 {
250 throw("forcegc: phase error")
251 }
252 atomic.Store(&forcegc.idle, 1)
253 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
254 // this goroutine is explicitly resumed by sysmon
255 if debug.gctrace > 0 {
256 println("GC forced")
257 }
258 gcStart(gcBackgroundMode, true)
259 }
260 }
261
262 //go:nosplit
263
264 // Gosched yields the processor, allowing other goroutines to run. It does not
265 // suspend the current goroutine, so execution resumes automatically.
266 func Gosched() {
267 mcall(gosched_m)
268 }
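
// Illustrative use (not part of the original source): yield in a busy-wait
// loop so that other goroutines on the same P can make progress; workReady
// is a hypothetical predicate.
//
//	for !workReady() {
//		Gosched()
//	}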
269
270 // Puts the current goroutine into a waiting state and calls unlockf.
271 // If unlockf returns false, the goroutine is resumed.
272 // unlockf must not access this G's stack, as it may be moved between
273 // the call to gopark and the call to unlockf.
274 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
275 mp := acquirem()
276 gp := mp.curg
277 status := readgstatus(gp)
278 if status != _Grunning && status != _Gscanrunning {
279 throw("gopark: bad g status")
280 }
281 mp.waitlock = lock
282 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
283 gp.waitreason = reason
284 mp.waittraceev = traceEv
285 mp.waittraceskip = traceskip
286 releasem(mp)
287 // can't do anything that might move the G between Ms here.
288 mcall(park_m)
289 }
290
291 // Puts the current goroutine into a waiting state and unlocks the lock.
292 // The goroutine can be made runnable again by calling goready(gp).
293 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
294 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
295 }
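
// A minimal sketch of the park/ready pairing (illustrative only; l and
// waiterG are hypothetical package-level variables, and the waker runs on a
// different goroutine once the awaited condition holds):
//
//	// Waiter:
//	lock(&l)
//	waiterG = getg()
//	goparkunlock(&l, "example wait", traceEvGoBlock, 1)
//
//	// Waker:
//	goready(waiterG, 1)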
296
297 func goready(gp *g, traceskip int) {
298 systemstack(func() {
299 ready(gp, traceskip, true)
300 })
301 }
302
303 //go:nosplit
304 func acquireSudog() *sudog {
305 // Delicate dance: the semaphore implementation calls
306 // acquireSudog, acquireSudog calls new(sudog),
307 // new calls malloc, malloc can call the garbage collector,
308 // and the garbage collector calls the semaphore implementation
309 // in stopTheWorld.
310 // Break the cycle by doing acquirem/releasem around new(sudog).
311 // The acquirem/releasem increments m.locks during new(sudog),
312 // which keeps the garbage collector from being invoked.
313 mp := acquirem()
314 pp := mp.p.ptr()
315 if len(pp.sudogcache) == 0 {
316 lock(&sched.sudoglock)
317 // First, try to grab a batch from central cache.
318 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
319 s := sched.sudogcache
320 sched.sudogcache = s.next
321 s.next = nil
322 pp.sudogcache = append(pp.sudogcache, s)
323 }
324 unlock(&sched.sudoglock)
325 // If the central cache is empty, allocate a new sudog.
326 if len(pp.sudogcache) == 0 {
327 pp.sudogcache = append(pp.sudogcache, new(sudog))
328 }
329 }
330 n := len(pp.sudogcache)
331 s := pp.sudogcache[n-1]
332 pp.sudogcache[n-1] = nil
333 pp.sudogcache = pp.sudogcache[:n-1]
334 if s.elem != nil {
335 throw("acquireSudog: found s.elem != nil in cache")
336 }
337 releasem(mp)
338 return s
339 }
340
341 //go:nosplit
342 func releaseSudog(s *sudog) {
343 if s.elem != nil {
344 throw("runtime: sudog with non-nil elem")
345 }
346 if s.selectdone != nil {
347 throw("runtime: sudog with non-nil selectdone")
348 }
349 if s.next != nil {
350 throw("runtime: sudog with non-nil next")
351 }
352 if s.prev != nil {
353 throw("runtime: sudog with non-nil prev")
354 }
355 if s.waitlink != nil {
356 throw("runtime: sudog with non-nil waitlink")
357 }
358 if s.c != nil {
359 throw("runtime: sudog with non-nil c")
360 }
361 gp := getg()
362 if gp.param != nil {
363 throw("runtime: releaseSudog with non-nil gp.param")
364 }
365 mp := acquirem() // avoid rescheduling to another P
366 pp := mp.p.ptr()
367 if len(pp.sudogcache) == cap(pp.sudogcache) {
368 // Transfer half of local cache to the central cache.
369 var first, last *sudog
370 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
371 n := len(pp.sudogcache)
372 p := pp.sudogcache[n-1]
373 pp.sudogcache[n-1] = nil
374 pp.sudogcache = pp.sudogcache[:n-1]
375 if first == nil {
376 first = p
377 } else {
378 last.next = p
379 }
380 last = p
381 }
382 lock(&sched.sudoglock)
383 last.next = sched.sudogcache
384 sched.sudogcache = first
385 unlock(&sched.sudoglock)
386 }
387 pp.sudogcache = append(pp.sudogcache, s)
388 releasem(mp)
389 }
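
// Typical (illustrative) pairing, roughly as used by the channel and
// semaphore code; the steps in between are elided:
//
//	s := acquireSudog()
//	s.g = getg()
//	// ... link s into a wait queue, park, get woken ...
//	s.g = nil
//	releaseSudog(s)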
390
391 // funcPC returns the entry PC of the function f.
392 // It assumes that f is a func value. Otherwise the behavior is undefined.
393 // For gccgo note that this differs from the gc implementation; the gc
394 // implementation adds sys.PtrSize to the address of the interface
395 // value, but GCC's alias analysis decides that that can not be a
396 // reference to the second field of the interface, and in some cases
397 // it drops the initialization of the second field as a dead store.
398 //go:nosplit
399 func funcPC(f interface{}) uintptr {
400 i := (*iface)(unsafe.Pointer(&f))
401 return **(**uintptr)(i.data)
402 }
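
// For example (illustrative only):
//
//	pc := funcPC(Gosched) // entry PC of runtime.Gosched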
403
404 func lockedOSThread() bool {
405 gp := getg()
406 return gp.lockedm != nil && gp.m.lockedg != nil
407 }
408
409 var (
410 allgs []*g
411 allglock mutex
412 )
413
414 func allgadd(gp *g) {
415 if readgstatus(gp) == _Gidle {
416 throw("allgadd: bad status Gidle")
417 }
418
419 lock(&allglock)
420 allgs = append(allgs, gp)
421 allglen = uintptr(len(allgs))
422
423 // Grow GC rescan list if necessary.
424 if len(allgs) > cap(work.rescan.list) {
425 lock(&work.rescan.lock)
426 l := work.rescan.list
427 // Let append do the heavy lifting, but keep the
428 // length the same.
429 work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
430 unlock(&work.rescan.lock)
431 }
432 unlock(&allglock)
433 }
434
435 const (
436 // Number of goroutine ids to grab from sched.goidgen to the local per-P cache at once.
437 // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
438 _GoidCacheBatch = 16
439 )
440
441 // The bootstrap sequence is:
442 //
443 // call osinit
444 // call schedinit
445 // make & queue new G
446 // call runtime·mstart
447 //
448 // The new G calls runtime·main.
449 func schedinit() {
450 _m_ := &m0
451 _g_ := &g0
452 _m_.g0 = _g_
453 _m_.curg = _g_
454 _g_.m = _m_
455 setg(_g_)
456
457 sched.maxmcount = 10000
458
459 mallocinit()
460 mcommoninit(_g_.m)
461 alginit() // maps must not be used before this call
462
463 msigsave(_g_.m)
464 initSigmask = _g_.m.sigmask
465
466 goargs()
467 goenvs()
468 parsedebugvars()
469 gcinit()
470
471 sched.lastpoll = uint64(nanotime())
472 procs := ncpu
473 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
474 procs = n
475 }
476 if procs > _MaxGomaxprocs {
477 procs = _MaxGomaxprocs
478 }
479 if procresize(procs) != nil {
480 throw("unknown runnable goroutine during bootstrap")
481 }
482
483 if buildVersion == "" {
484 // Condition should never trigger. This code just serves
485 // to ensure runtime·buildVersion is kept in the resulting binary.
486 buildVersion = "unknown"
487 }
488 }
489
490 func dumpgstatus(gp *g) {
491 _g_ := getg()
492 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
493 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
494 }
495
496 func checkmcount() {
497 // sched lock is held
498 if sched.mcount > sched.maxmcount {
499 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
500 throw("thread exhaustion")
501 }
502 }
503
504 func mcommoninit(mp *m) {
505 _g_ := getg()
506
507 // g0 stack won't make sense for user (and is not necessarily unwindable).
508 if _g_ != _g_.m.g0 {
509 callers(1, mp.createstack[:])
510 }
511
512 mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
513 if mp.fastrand == 0 {
514 mp.fastrand = 0x49f6428a
515 }
516
517 lock(&sched.lock)
518 mp.id = sched.mcount
519 sched.mcount++
520 checkmcount()
521 mpreinit(mp)
522
523 // Add to allm so garbage collector doesn't free g->m
524 // when it is just in a register or thread-local storage.
525 mp.alllink = allm
526
527 // NumCgoCall() iterates over allm w/o schedlock,
528 // so we need to publish it safely.
529 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
530 unlock(&sched.lock)
531 }
532
533 // Mark gp ready to run.
534 func ready(gp *g, traceskip int, next bool) {
535 if trace.enabled {
536 traceGoUnpark(gp, traceskip)
537 }
538
539 status := readgstatus(gp)
540
541 // Mark runnable.
542 _g_ := getg()
543 _g_.m.locks++ // disable preemption because it can be holding p in a local var
544 if status&^_Gscan != _Gwaiting {
545 dumpgstatus(gp)
546 throw("bad g->status in ready")
547 }
548
549 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
550 casgstatus(gp, _Gwaiting, _Grunnable)
551 runqput(_g_.m.p.ptr(), gp, next)
552 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
553 wakep()
554 }
555 _g_.m.locks--
556 }
557
558 func gcprocs() int32 {
559 // Figure out how many CPUs to use during GC.
560 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
561 lock(&sched.lock)
562 n := gomaxprocs
563 if n > ncpu {
564 n = ncpu
565 }
566 if n > _MaxGcproc {
567 n = _MaxGcproc
568 }
569 if n > sched.nmidle+1 { // one M is currently running
570 n = sched.nmidle + 1
571 }
572 unlock(&sched.lock)
573 return n
574 }
575
576 func needaddgcproc() bool {
577 lock(&sched.lock)
578 n := gomaxprocs
579 if n > ncpu {
580 n = ncpu
581 }
582 if n > _MaxGcproc {
583 n = _MaxGcproc
584 }
585 n -= sched.nmidle + 1 // one M is currently running
586 unlock(&sched.lock)
587 return n > 0
588 }
589
590 func helpgc(nproc int32) {
591 _g_ := getg()
592 lock(&sched.lock)
593 pos := 0
594 for n := int32(1); n < nproc; n++ { // one M is currently running
595 if allp[pos].mcache == _g_.m.mcache {
596 pos++
597 }
598 mp := mget()
599 if mp == nil {
600 throw("gcprocs inconsistency")
601 }
602 mp.helpgc = n
603 mp.p.set(allp[pos])
604 mp.mcache = allp[pos].mcache
605 pos++
606 notewakeup(&mp.park)
607 }
608 unlock(&sched.lock)
609 }
610
611 // freezeStopWait is a large value that freezetheworld sets
612 // sched.stopwait to in order to request that all Gs permanently stop.
613 const freezeStopWait = 0x7fffffff
614
615 // freezing is set to non-zero if the runtime is trying to freeze the
616 // world.
617 var freezing uint32
618
619 // Similar to stopTheWorld but best-effort and can be called several times.
620 // There is no reverse operation; it is used during crashing.
621 // This function must not lock any mutexes.
622 func freezetheworld() {
623 atomic.Store(&freezing, 1)
624 // stopwait and preemption requests can be lost
625 // due to races with concurrently executing threads,
626 // so try several times
627 for i := 0; i < 5; i++ {
628 // this should tell the scheduler to not start any new goroutines
629 sched.stopwait = freezeStopWait
630 atomic.Store(&sched.gcwaiting, 1)
631 // this should stop running goroutines
632 if !preemptall() {
633 break // no running goroutines
634 }
635 usleep(1000)
636 }
637 // to be sure
638 usleep(1000)
639 preemptall()
640 usleep(1000)
641 }
642
643 func isscanstatus(status uint32) bool {
644 if status == _Gscan {
645 throw("isscanstatus: Bad status Gscan")
646 }
647 return status&_Gscan == _Gscan
648 }
649
650 // All reads and writes of g's status go through readgstatus, casgstatus,
651 // castogscanstatus, casfrom_Gscanstatus.
652 //go:nosplit
653 func readgstatus(gp *g) uint32 {
654 return atomic.Load(&gp.atomicstatus)
655 }
656
657 // Ownership of gcscanvalid:
658 //
659 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
660 // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
661 //
662 // Otherwise, a second goroutine can lock the scan state by setting _Gscan
663 // in the status bit and then modify gcscanvalid, and then unlock the scan state.
664 //
665 // Note that the first condition implies an exception to the second:
666 // if a second goroutine changes gp's status to _Grunning|_Gscan,
667 // that second goroutine still does not have the right to modify gcscanvalid.
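//
// A minimal sketch of the locking protocol described above, for a goroutine
// other than a running gp (illustrative only; error handling elided):
//
//	s := readgstatus(gp)
//	if castogscanstatus(gp, s, s|_Gscan) {       // lock the scan state
//		gp.gcscanvalid = false               // now safe to modify
//		casfrom_Gscanstatus(gp, s|_Gscan, s) // unlock the scan state
//	}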
668
669 // The Gscanstatuses are acting like locks and this releases them.
670 // If it proves to be a performance hit we should be able to make these
671 // simple atomic stores but for now we are going to throw if
672 // we see an inconsistent state.
673 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
674 success := false
675
676 // Check that transition is valid.
677 switch oldval {
678 default:
679 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
680 dumpgstatus(gp)
681 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
682 case _Gscanrunnable,
683 _Gscanwaiting,
684 _Gscanrunning,
685 _Gscansyscall:
686 if newval == oldval&^_Gscan {
687 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
688 }
689 }
690 if !success {
691 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
692 dumpgstatus(gp)
693 throw("casfrom_Gscanstatus: gp->status is not in scan state")
694 }
695 }
696
697 // This will return false if the gp is not in the expected status and the cas fails.
698 // This acts like a lock acquire while casfrom_Gscanstatus acts like a lock release.
699 func castogscanstatus(gp *g, oldval, newval uint32) bool {
700 switch oldval {
701 case _Grunnable,
702 _Grunning,
703 _Gwaiting,
704 _Gsyscall:
705 if newval == oldval|_Gscan {
706 return atomic.Cas(&gp.atomicstatus, oldval, newval)
707 }
708 }
709 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
710 throw("castogscanstatus")
711 panic("not reached")
712 }
713
714 // If asked to move to or from a Gscanstatus, this will throw. Use castogscanstatus
715 // and casfrom_Gscanstatus instead.
716 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
717 // put it in the Gscan state is finished.
718 //go:nosplit
719 func casgstatus(gp *g, oldval, newval uint32) {
720 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
721 systemstack(func() {
722 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
723 throw("casgstatus: bad incoming values")
724 })
725 }
726
727 if oldval == _Grunning && gp.gcscanvalid {
728 // If oldval == _Grunning, then the actual status must be
729 // _Grunning or _Grunning|_Gscan; either way,
730 // we own gp.gcscanvalid, so it's safe to read.
731 // gp.gcscanvalid must not be true when we are running.
732 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
733 throw("casgstatus")
734 }
735
736 // See http://golang.org/cl/21503 for justification of the yield delay.
737 const yieldDelay = 5 * 1000
738 var nextYield int64
739
740 // loop if gp->atomicstatus is in a scan state giving
741 // GC time to finish and change the state to oldval.
742 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
743 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
744 systemstack(func() {
745 throw("casgstatus: waiting for Gwaiting but is Grunnable")
746 })
747 }
748 // Help GC if needed.
749 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
750 // gp.preemptscan = false
751 // systemstack(func() {
752 // gcphasework(gp)
753 // })
754 // }
755 // But meanwhile just yield.
756 if i == 0 {
757 nextYield = nanotime() + yieldDelay
758 }
759 if nanotime() < nextYield {
760 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
761 procyield(1)
762 }
763 } else {
764 osyield()
765 nextYield = nanotime() + yieldDelay/2
766 }
767 }
768 if newval == _Grunning && gp.gcscanvalid {
769 // Run queueRescan on the system stack so it has more space.
770 systemstack(func() { queueRescan(gp) })
771 }
772 }
773
774 // scang blocks until gp's stack has been scanned.
775 // It might be scanned by scang or it might be scanned by the goroutine itself.
776 // Either way, the stack scan has completed when scang returns.
777 func scang(gp *g, gcw *gcWork) {
778 // Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
779 // Nothing is racing with us now, but gcscandone might be set to true left over
780 // from an earlier round of stack scanning (we scan twice per GC).
781 // We use gcscandone to record whether the scan has been done during this round.
782 // It is important that the scan happens exactly once: if called twice,
783 // the installation of stack barriers will detect the double scan and die.
784
785 gp.gcscandone = false
786
787 // See http://golang.org/cl/21503 for justification of the yield delay.
788 const yieldDelay = 10 * 1000
789 var nextYield int64
790
791 // Endeavor to get gcscandone set to true,
792 // either by doing the stack scan ourselves or by coercing gp to scan itself.
793 // gp.gcscandone can transition from false to true when we're not looking
794 // (if we asked for preemption), so any time we lock the status using
795 // castogscanstatus we have to double-check that the scan is still not done.
796 loop:
797 for i := 0; !gp.gcscandone; i++ {
798 switch s := readgstatus(gp); s {
799 default:
800 dumpgstatus(gp)
801 throw("stopg: invalid status")
802
803 case _Gdead:
804 // No stack.
805 gp.gcscandone = true
806 break loop
807
808 case _Gcopystack:
809 // Stack being switched. Go around again.
810
811 case _Grunnable, _Gsyscall, _Gwaiting:
812 // Claim goroutine by setting scan bit.
813 // Racing with execution or readying of gp.
814 // The scan bit keeps them from running
815 // the goroutine until we're done.
816 if castogscanstatus(gp, s, s|_Gscan) {
817 if gp.scanningself {
818 // Don't try to scan the stack
819 // if the goroutine is going to do
820 // it itself.
821 restartg(gp)
822 break
823 }
824 if !gp.gcscandone {
825 scanstack(gp, gcw)
826 gp.gcscandone = true
827 }
828 restartg(gp)
829 break loop
830 }
831
832 case _Gscanwaiting:
833 // newstack is doing a scan for us right now. Wait.
834
835 case _Gscanrunning:
836 // checkPreempt is scanning. Wait.
837
838 case _Grunning:
839 // Goroutine running. Try to preempt execution so it can scan itself.
840 // The preemption handler (in newstack) does the actual scan.
841
842 // Optimization: if there is already a pending preemption request
843 // (from the previous loop iteration), don't bother with the atomics.
844 if gp.preemptscan && gp.preempt {
845 break
846 }
847
848 // Ask for preemption and self scan.
849 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
850 if !gp.gcscandone {
851 gp.preemptscan = true
852 gp.preempt = true
853 }
854 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
855 }
856 }
857
858 if i == 0 {
859 nextYield = nanotime() + yieldDelay
860 }
861 if nanotime() < nextYield {
862 procyield(10)
863 } else {
864 osyield()
865 nextYield = nanotime() + yieldDelay/2
866 }
867 }
868
869 gp.preemptscan = false // cancel scan request if no longer needed
870 }
871
872 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
873 func restartg(gp *g) {
874 s := readgstatus(gp)
875 switch s {
876 default:
877 dumpgstatus(gp)
878 throw("restartg: unexpected status")
879
880 case _Gdead:
881 // ok
882
883 case _Gscanrunnable,
884 _Gscanwaiting,
885 _Gscansyscall:
886 casfrom_Gscanstatus(gp, s, s&^_Gscan)
887 }
888 }
889
890 // stopTheWorld stops all P's from executing goroutines, interrupting
891 // all goroutines at GC safe points and recording reason as the reason
892 // for the stop. On return, only the current goroutine's P is running.
893 // stopTheWorld must not be called from a system stack and the caller
894 // must not hold worldsema. The caller must call startTheWorld when
895 // other P's should resume execution.
896 //
897 // stopTheWorld is safe for multiple goroutines to call at the
898 // same time. Each will execute its own stop, and the stops will
899 // be serialized.
900 //
901 // This is also used by routines that do stack dumps. If the system is
902 // in panic or being exited, this may not reliably stop all
903 // goroutines.
904 func stopTheWorld(reason string) {
905 semacquire(&worldsema, 0)
906 getg().m.preemptoff = reason
907 systemstack(stopTheWorldWithSema)
908 }
909
910 // startTheWorld undoes the effects of stopTheWorld.
911 func startTheWorld() {
912 systemstack(startTheWorldWithSema)
913 // worldsema must be held over startTheWorldWithSema to ensure
914 // gomaxprocs cannot change while worldsema is held.
915 semrelease(&worldsema)
916 getg().m.preemptoff = ""
917 }
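
// A typical (illustrative) pairing, as used by callers such as the garbage
// collector and the stack dump routines:
//
//	stopTheWorld("example reason")
//	// ... inspect or mutate state that requires the world to be stopped ...
//	startTheWorld()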
918
919 // Holding worldsema grants an M the right to try to stop the world
920 // and prevents gomaxprocs from changing concurrently.
921 var worldsema uint32 = 1
922
923 // stopTheWorldWithSema is the core implementation of stopTheWorld.
924 // The caller is responsible for acquiring worldsema and disabling
925 // preemption first, and then should call stopTheWorldWithSema on the system
926 // stack:
927 //
928 // semacquire(&worldsema, 0)
929 // m.preemptoff = "reason"
930 // systemstack(stopTheWorldWithSema)
931 //
932 // When finished, the caller must either call startTheWorld or undo
933 // these three operations separately:
934 //
935 // m.preemptoff = ""
936 // systemstack(startTheWorldWithSema)
937 // semrelease(&worldsema)
938 //
939 // It is allowed to acquire worldsema once and then execute multiple
940 // startTheWorldWithSema/stopTheWorldWithSema pairs.
941 // Other P's are able to execute between successive calls to
942 // startTheWorldWithSema and stopTheWorldWithSema.
943 // Holding worldsema causes any other goroutines invoking
944 // stopTheWorld to block.
945 func stopTheWorldWithSema() {
946 _g_ := getg()
947
948 // If we hold a lock, then we won't be able to stop another M
949 // that is blocked trying to acquire the lock.
950 if _g_.m.locks > 0 {
951 throw("stopTheWorld: holding locks")
952 }
953
954 lock(&sched.lock)
955 sched.stopwait = gomaxprocs
956 atomic.Store(&sched.gcwaiting, 1)
957 preemptall()
958 // stop current P
959 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
960 sched.stopwait--
961 // try to retake all P's in Psyscall status
962 for i := 0; i < int(gomaxprocs); i++ {
963 p := allp[i]
964 s := p.status
965 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
966 if trace.enabled {
967 traceGoSysBlock(p)
968 traceProcStop(p)
969 }
970 p.syscalltick++
971 sched.stopwait--
972 }
973 }
974 // stop idle P's
975 for {
976 p := pidleget()
977 if p == nil {
978 break
979 }
980 p.status = _Pgcstop
981 sched.stopwait--
982 }
983 wait := sched.stopwait > 0
984 unlock(&sched.lock)
985
986 // wait for remaining P's to stop voluntarily
987 if wait {
988 for {
989 // wait for 100us, then try to re-preempt in case of any races
990 if notetsleep(&sched.stopnote, 100*1000) {
991 noteclear(&sched.stopnote)
992 break
993 }
994 preemptall()
995 }
996 }
997
998 // sanity checks
999 bad := ""
1000 if sched.stopwait != 0 {
1001 bad = "stopTheWorld: not stopped (stopwait != 0)"
1002 } else {
1003 for i := 0; i < int(gomaxprocs); i++ {
1004 p := allp[i]
1005 if p.status != _Pgcstop {
1006 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1007 }
1008 }
1009 }
1010 if atomic.Load(&freezing) != 0 {
1011 // Some other thread is panicking. This can cause the
1012 // sanity checks above to fail if the panic happens in
1013 // the signal handler on a stopped thread. Either way,
1014 // we should halt this thread.
1015 lock(&deadlock)
1016 lock(&deadlock)
1017 }
1018 if bad != "" {
1019 throw(bad)
1020 }
1021 }
1022
1023 func mhelpgc() {
1024 _g_ := getg()
1025 _g_.m.helpgc = -1
1026 }
1027
1028 func startTheWorldWithSema() {
1029 _g_ := getg()
1030
1031 _g_.m.locks++ // disable preemption because it can be holding p in a local var
1032 gp := netpoll(false) // non-blocking
1033 injectglist(gp)
1034 add := needaddgcproc()
1035 lock(&sched.lock)
1036
1037 procs := gomaxprocs
1038 if newprocs != 0 {
1039 procs = newprocs
1040 newprocs = 0
1041 }
1042 p1 := procresize(procs)
1043 sched.gcwaiting = 0
1044 if sched.sysmonwait != 0 {
1045 sched.sysmonwait = 0
1046 notewakeup(&sched.sysmonnote)
1047 }
1048 unlock(&sched.lock)
1049
1050 for p1 != nil {
1051 p := p1
1052 p1 = p1.link.ptr()
1053 if p.m != 0 {
1054 mp := p.m.ptr()
1055 p.m = 0
1056 if mp.nextp != 0 {
1057 throw("startTheWorld: inconsistent mp->nextp")
1058 }
1059 mp.nextp.set(p)
1060 notewakeup(&mp.park)
1061 } else {
1062 // Start M to run P. Do not start another M below.
1063 newm(nil, p)
1064 add = false
1065 }
1066 }
1067
1068 // Wake up an additional proc in case we have excessive runnable goroutines
1069 // in local queues or in the global queue. If we don't, the proc will park itself.
1070 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
1071 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1072 wakep()
1073 }
1074
1075 if add {
1076 // If GC could have used another helper proc, start one now,
1077 // in the hope that it will be available next time.
1078 // It would have been even better to start it before the collection,
1079 // but doing so requires allocating memory, so it's tricky to
1080 // coordinate. This lazy approach works out in practice:
1081 // we don't mind if the first couple gc rounds don't have quite
1082 // the maximum number of procs.
1083 newm(mhelpgc, nil)
1084 }
1085 _g_.m.locks--
1086 }
1087
1088 // First function run by a new goroutine.
1089 // This is passed to makecontext.
1090 func kickoff() {
1091 gp := getg()
1092
1093 if gp.traceback != nil {
1094 gtraceback(gp)
1095 }
1096
1097 fv := gp.entry
1098 param := gp.param
1099 gp.entry = nil
1100 gp.param = nil
1101 fv(param)
1102 goexit1()
1103 }
1104
1105 // This is called from mstart.
1106 func mstart1() {
1107 _g_ := getg()
1108
1109 if _g_ != _g_.m.g0 {
1110 throw("bad runtime·mstart")
1111 }
1112
1113 asminit()
1114 minit()
1115
1116 // Install signal handlers; after minit so that minit can
1117 // prepare the thread to be able to handle the signals.
1118 if _g_.m == &m0 {
1119 // Create an extra M for callbacks on threads not created by Go.
1120 if iscgo && !cgoHasExtraM {
1121 cgoHasExtraM = true
1122 newextram()
1123 }
1124 initsig(false)
1125 }
1126
1127 if fn := _g_.m.mstartfn; fn != nil {
1128 fn()
1129 }
1130
1131 if _g_.m.helpgc != 0 {
1132 _g_.m.helpgc = 0
1133 stopm()
1134 } else if _g_.m != &m0 {
1135 acquirep(_g_.m.nextp.ptr())
1136 _g_.m.nextp = 0
1137 }
1138 schedule()
1139 }
1140
1141 // forEachP calls fn(p) for every P p when p reaches a GC safe point.
1142 // If a P is currently executing code, this will bring the P to a GC
1143 // safe point and execute fn on that P. If the P is not executing code
1144 // (it is idle or in a syscall), this will call fn(p) directly while
1145 // preventing the P from exiting its state. This does not ensure that
1146 // fn will run on every CPU executing Go code, but it acts as a global
1147 // memory barrier. GC uses this as a "ragged barrier."
1148 //
1149 // The caller must hold worldsema.
1150 //
1151 //go:systemstack
1152 func forEachP(fn func(*p)) {
1153 mp := acquirem()
1154 _p_ := getg().m.p.ptr()
1155
1156 lock(&sched.lock)
1157 if sched.safePointWait != 0 {
1158 throw("forEachP: sched.safePointWait != 0")
1159 }
1160 sched.safePointWait = gomaxprocs - 1
1161 sched.safePointFn = fn
1162
1163 // Ask all Ps to run the safe point function.
1164 for _, p := range allp[:gomaxprocs] {
1165 if p != _p_ {
1166 atomic.Store(&p.runSafePointFn, 1)
1167 }
1168 }
1169 preemptall()
1170
1171 // Any P entering _Pidle or _Psyscall from now on will observe
1172 // p.runSafePointFn == 1 and will call runSafePointFn when
1173 // changing its status to _Pidle/_Psyscall.
1174
1175 // Run safe point function for all idle Ps. sched.pidle will
1176 // not change because we hold sched.lock.
1177 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1178 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1179 fn(p)
1180 sched.safePointWait--
1181 }
1182 }
1183
1184 wait := sched.safePointWait > 0
1185 unlock(&sched.lock)
1186
1187 // Run fn for the current P.
1188 fn(_p_)
1189
1190 // Force Ps currently in _Psyscall into _Pidle and hand them
1191 // off to induce safe point function execution.
1192 for i := 0; i < int(gomaxprocs); i++ {
1193 p := allp[i]
1194 s := p.status
1195 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1196 if trace.enabled {
1197 traceGoSysBlock(p)
1198 traceProcStop(p)
1199 }
1200 p.syscalltick++
1201 handoffp(p)
1202 }
1203 }
1204
1205 // Wait for remaining Ps to run fn.
1206 if wait {
1207 for {
1208 // Wait for 100us, then try to re-preempt in
1209 // case of any races.
1210 //
1211 // Requires system stack.
1212 if notetsleep(&sched.safePointNote, 100*1000) {
1213 noteclear(&sched.safePointNote)
1214 break
1215 }
1216 preemptall()
1217 }
1218 }
1219 if sched.safePointWait != 0 {
1220 throw("forEachP: not done")
1221 }
1222 for i := 0; i < int(gomaxprocs); i++ {
1223 p := allp[i]
1224 if p.runSafePointFn != 0 {
1225 throw("forEachP: P did not run fn")
1226 }
1227 }
1228
1229 lock(&sched.lock)
1230 sched.safePointFn = nil
1231 unlock(&sched.lock)
1232 releasem(mp)
1233 }
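
// Illustrative use (not part of the original source): run a function on
// every P at a GC safe point, acting as a ragged barrier. The caller must
// hold worldsema and run on the system stack.
//
//	forEachP(func(pp *p) {
//		// pp is at a GC safe point (or idle / in a syscall) here
//	})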
1234
1235 // runSafePointFn runs the safe point function, if any, for this P.
1236 // This should be called like
1237 //
1238 // if getg().m.p.runSafePointFn != 0 {
1239 // runSafePointFn()
1240 // }
1241 //
1242 // runSafePointFn must be checked on any transition into _Pidle or
1243 // _Psyscall to avoid a race where forEachP sees that the P is running
1244 // just before the P goes into _Pidle/_Psyscall and neither forEachP
1245 // nor the P run the safe-point function.
1246 func runSafePointFn() {
1247 p := getg().m.p.ptr()
1248 // Resolve the race between forEachP running the safe-point
1249 // function on this P's behalf and this P running the
1250 // safe-point function directly.
1251 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1252 return
1253 }
1254 sched.safePointFn(p)
1255 lock(&sched.lock)
1256 sched.safePointWait--
1257 if sched.safePointWait == 0 {
1258 notewakeup(&sched.safePointNote)
1259 }
1260 unlock(&sched.lock)
1261 }
1262
1263 // Allocate a new m unassociated with any thread.
1264 // Can use p for allocation context if needed.
1265 // fn is recorded as the new m's m.mstartfn.
1266 //
1267 // This function is allowed to have write barriers even if the caller
1268 // isn't because it borrows _p_.
1269 //
1270 //go:yeswritebarrierrec
1271 func allocm(_p_ *p, fn func(), allocatestack bool) (mp *m, g0Stack unsafe.Pointer, g0StackSize uintptr) {
1272 _g_ := getg()
1273 _g_.m.locks++ // disable GC because it can be called from sysmon
1274 if _g_.m.p == 0 {
1275 acquirep(_p_) // temporarily borrow p for mallocs in this function
1276 }
1277 mp = new(m)
1278 mp.mstartfn = fn
1279 mcommoninit(mp)
1280
1281 mp.g0 = malg(allocatestack, false, &g0Stack, &g0StackSize)
1282 mp.g0.m = mp
1283
1284 if _p_ == _g_.m.p.ptr() {
1285 releasep()
1286 }
1287 _g_.m.locks--
1288
1289 return mp, g0Stack, g0StackSize
1290 }
1291
1292 // needm is called when a cgo callback happens on a
1293 // thread without an m (a thread not created by Go).
1294 // In this case, needm is expected to find an m to use
1295 // and return with m, g initialized correctly.
1296 // Since m and g are not set now (likely nil, but see below)
1297 // needm is limited in what routines it can call. In particular
1298 // it can only call nosplit functions (textflag 7) and cannot
1299 // do any scheduling that requires an m.
1300 //
1301 // In order to avoid needing heavy lifting here, we adopt
1302 // the following strategy: there is a stack of available m's
1303 // that can be stolen. Using compare-and-swap
1304 // to pop from the stack has ABA races, so we simulate
1305 // a lock by doing an exchange (via casp) to steal the stack
1306 // head and replace the top pointer with MLOCKED (1).
1307 // This serves as a simple spin lock that we can use even
1308 // without an m. The thread that locks the stack in this way
1309 // unlocks the stack by storing a valid stack head pointer.
1310 //
1311 // In order to make sure that there is always an m structure
1312 // available to be stolen, we maintain the invariant that there
1313 // is always one more than needed. At the beginning of the
1314 // program (if cgo is in use) the list is seeded with a single m.
1315 // If needm finds that it has taken the last m off the list, its job
1316 // is - once it has installed its own m so that it can do things like
1317 // allocate memory - to create a spare m and put it on the list.
1318 //
1319 // Each of these extra m's also has a g0 and a curg that are
1320 // pressed into service as the scheduling stack and current
1321 // goroutine for the duration of the cgo callback.
1322 //
1323 // When the callback is done with the m, it calls dropm to
1324 // put the m back on the list.
1325 //go:nosplit
1326 func needm(x byte) {
1327 if iscgo && !cgoHasExtraM {
1328 // Can happen if C/C++ code calls Go from a global ctor.
1329 // Can not throw, because scheduler is not initialized yet.
1330 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1331 exit(1)
1332 }
1333
1334 // Lock extra list, take head, unlock popped list.
1335 // nilokay=false is safe here because of the invariant above,
1336 // that the extra list always contains or will soon contain
1337 // at least one m.
1338 mp := lockextra(false)
1339
1340 // Set needextram when we've just emptied the list,
1341 // so that the eventual call into cgocallbackg will
1342 // allocate a new m for the extra list. We delay the
1343 // allocation until then so that it can be done
1344 // after exitsyscall makes sure it is okay to be
1345 // running at all (that is, there's no garbage collection
1346 // running right now).
1347 mp.needextram = mp.schedlink == 0
1348 unlockextra(mp.schedlink.ptr())
1349
1350 // Save and block signals before installing g.
1351 // Once g is installed, any incoming signals will try to execute,
1352 // but we won't have the sigaltstack settings and other data
1353 // set up appropriately until the end of minit, which will
1354 // unblock the signals. This is the same dance as when
1355 // starting a new m to run Go code via newosproc.
1356 msigsave(mp)
1357 sigblock()
1358
1359 // Install g (= m->curg).
1360 setg(mp.curg)
1361 atomic.Store(&mp.curg.atomicstatus, _Gsyscall)
1362 setGContext()
1363
1364 // Initialize this thread to use the m.
1365 asminit()
1366 minit()
1367 }
1368
1369 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1370
1371 // newextram allocates m's and puts them on the extra list.
1372 // It is called with a working local m, so that it can do things
1373 // like call schedlock and allocate.
1374 func newextram() {
1375 c := atomic.Xchg(&extraMWaiters, 0)
1376 if c > 0 {
1377 for i := uint32(0); i < c; i++ {
1378 oneNewExtraM()
1379 }
1380 } else {
1381 // Make sure there is at least one extra M.
1382 mp := lockextra(true)
1383 unlockextra(mp)
1384 if mp == nil {
1385 oneNewExtraM()
1386 }
1387 }
1388 }
1389
1390 // oneNewExtraM allocates an m and puts it on the extra list.
1391 func oneNewExtraM() {
1392 // Create extra goroutine locked to extra m.
1393 // The goroutine is the context in which the cgo callback will run.
1394 // The sched.pc will never be returned to, but setting it to
1395 // goexit makes clear to the traceback routines where
1396 // the goroutine stack ends.
1397 mp, g0SP, g0SPSize := allocm(nil, nil, true)
1398 gp := malg(true, false, nil, nil)
1399 gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
1400 gp.gcscandone = true
1401 gp.gcRescan = -1
1402
1403 // malg returns status as Gidle, change to Gdead before adding to allg
1404 // where GC will see it.
1405 // gccgo uses Gdead here, not Gsyscall, because the split
1406 // stack context is not initialized.
1407 casgstatus(gp, _Gidle, _Gdead)
1408 gp.m = mp
1409 mp.curg = gp
1410 mp.locked = _LockInternal
1411 mp.lockedg = gp
1412 gp.lockedm = mp
1413 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1414 // put on allg for garbage collector
1415 allgadd(gp)
1416
1417 // The context for gp will be set up in needm.
1418 // Here we need to set the context for g0.
1419 makeGContext(mp.g0, g0SP, g0SPSize)
1420
1421 // Add m to the extra list.
1422 mnext := lockextra(true)
1423 mp.schedlink.set(mnext)
1424 unlockextra(mp)
1425 }
1426
1427 // dropm is called when a cgo callback has called needm but is now
1428 // done with the callback and returning back into the non-Go thread.
1429 // It puts the current m back onto the extra list.
1430 //
1431 // The main expense here is the call to signalstack to release the
1432 // m's signal stack, and then the call to needm on the next callback
1433 // from this thread. It is tempting to try to save the m for next time,
1434 // which would eliminate both these costs, but there might not be
1435 // a next time: the current thread (which Go does not control) might exit.
1436 // If we saved the m for that thread, there would be an m leak each time
1437 // such a thread exited. Instead, we acquire and release an m on each
1438 // call. These should typically not be scheduling operations, just a few
1439 // atomics, so the cost should be small.
1440 //
1441 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
1442 // variable using pthread_key_create. Unlike the pthread keys we already use
1443 // on OS X, this dummy key would never be read by Go code. It would exist
1444 // only so that we could register a thread-exit-time destructor.
1445 // That destructor would put the m back onto the extra list.
1446 // This is purely a performance optimization. The current version,
1447 // in which dropm happens on each cgo call, is still correct too.
1448 // We may have to keep the current version on systems with cgo
1449 // but without pthreads, like Windows.
1450 //
1451 // CgocallBackDone calls this after releasing p, so no write barriers.
1452 //go:nowritebarrierrec
1453 func dropm() {
1454 // Clear m and g, and return m to the extra list.
1455 // After the call to setg we can only call nosplit functions
1456 // with no pointer manipulation.
1457 mp := getg().m
1458
1459 // Block signals before unminit.
1460 // Unminit unregisters the signal handling stack (but needs g on some systems).
1461 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
1462 // It's important not to try to handle a signal between those two steps.
1463 sigmask := mp.sigmask
1464 sigblock()
1465 unminit()
1466
1467 // gccgo sets the stack to Gdead here, because the splitstack
1468 // context is not initialized.
1469 atomic.Store(&mp.curg.atomicstatus, _Gdead)
1470 mp.curg.gcstack = 0
1471 mp.curg.gcnextsp = 0
1472
1473 mnext := lockextra(true)
1474 mp.schedlink.set(mnext)
1475
1476 setg(nil)
1477
1478 // Commit the release of mp.
1479 unlockextra(mp)
1480
1481 msigrestore(sigmask)
1482 }
1483
1484 // A helper function for EnsureDropM.
1485 func getm() uintptr {
1486 return uintptr(unsafe.Pointer(getg().m))
1487 }
1488
1489 var extram uintptr
1490 var extraMWaiters uint32
1491
1492 // lockextra locks the extra list and returns the list head.
1493 // The caller must unlock the list by storing a new list head
1494 // to extram. If nilokay is true, then lockextra will
1495 // return a nil list head if that's what it finds. If nilokay is false,
1496 // lockextra will keep waiting until the list head is no longer nil.
1497 //go:nosplit
1498 //go:nowritebarrierrec
1499 func lockextra(nilokay bool) *m {
1500 const locked = 1
1501
1502 incr := false
1503 for {
1504 old := atomic.Loaduintptr(&extram)
1505 if old == locked {
1506 yield := osyield
1507 yield()
1508 continue
1509 }
1510 if old == 0 && !nilokay {
1511 if !incr {
1512 // Add 1 to the number of threads
1513 // waiting for an M.
1514 // This is cleared by newextram.
1515 atomic.Xadd(&extraMWaiters, 1)
1516 incr = true
1517 }
1518 usleep(1)
1519 continue
1520 }
1521 if atomic.Casuintptr(&extram, old, locked) {
1522 return (*m)(unsafe.Pointer(old))
1523 }
1524 yield := osyield
1525 yield()
1526 continue
1527 }
1528 }
1529
1530 //go:nosplit
1531 //go:nowritebarrierrec
1532 func unlockextra(mp *m) {
1533 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1534 }
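
// Typical (illustrative) use of the extra-M list lock, as in needm above:
//
//	mp := lockextra(false)          // spin until a non-nil head is available
//	// ... use or detach mp ...
//	unlockextra(mp.schedlink.ptr()) // publish the new head, releasing the lock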
1535
1536 // Create a new m. It will start off with a call to fn, or else the scheduler.
1537 // fn needs to be static and not a heap allocated closure.
1538 // May run with m.p==nil, so write barriers are not allowed.
1539 //go:nowritebarrierrec
1540 func newm(fn func(), _p_ *p) {
1541 mp, _, _ := allocm(_p_, fn, false)
1542 mp.nextp.set(_p_)
1543 mp.sigmask = initSigmask
1544 newosproc(mp)
1545 }
1546
1547 // Stops execution of the current m until new work is available.
1548 // Returns with acquired P.
1549 func stopm() {
1550 _g_ := getg()
1551
1552 if _g_.m.locks != 0 {
1553 throw("stopm holding locks")
1554 }
1555 if _g_.m.p != 0 {
1556 throw("stopm holding p")
1557 }
1558 if _g_.m.spinning {
1559 throw("stopm spinning")
1560 }
1561
1562 retry:
1563 lock(&sched.lock)
1564 mput(_g_.m)
1565 unlock(&sched.lock)
1566 notesleep(&_g_.m.park)
1567 noteclear(&_g_.m.park)
1568 if _g_.m.helpgc != 0 {
1569 gchelper()
1570 _g_.m.helpgc = 0
1571 _g_.m.mcache = nil
1572 _g_.m.p = 0
1573 goto retry
1574 }
1575 acquirep(_g_.m.nextp.ptr())
1576 _g_.m.nextp = 0
1577 }
1578
1579 func mspinning() {
1580 // startm's caller incremented nmspinning. Set the new M's spinning.
1581 getg().m.spinning = true
1582 }
1583
1584 // Schedules some M to run the p (creates an M if necessary).
1585 // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing.
1586 // May run with m.p==nil, so write barriers are not allowed.
1587 // If spinning is set, the caller has incremented nmspinning and startm will
1588 // either decrement nmspinning or set m.spinning in the newly started M.
1589 //go:nowritebarrierrec
1590 func startm(_p_ *p, spinning bool) {
1591 lock(&sched.lock)
1592 if _p_ == nil {
1593 _p_ = pidleget()
1594 if _p_ == nil {
1595 unlock(&sched.lock)
1596 if spinning {
1597 // The caller incremented nmspinning, but there are no idle Ps,
1598 // so it's okay to just undo the increment and give up.
1599 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1600 throw("startm: negative nmspinning")
1601 }
1602 }
1603 return
1604 }
1605 }
1606 mp := mget()
1607 unlock(&sched.lock)
1608 if mp == nil {
1609 var fn func()
1610 if spinning {
1611 // The caller incremented nmspinning, so set m.spinning in the new M.
1612 fn = mspinning
1613 }
1614 newm(fn, _p_)
1615 return
1616 }
1617 if mp.spinning {
1618 throw("startm: m is spinning")
1619 }
1620 if mp.nextp != 0 {
1621 throw("startm: m has p")
1622 }
1623 if spinning && !runqempty(_p_) {
1624 throw("startm: p has runnable gs")
1625 }
1626 // The caller incremented nmspinning, so set m.spinning in the new M.
1627 mp.spinning = spinning
1628 mp.nextp.set(_p_)
1629 notewakeup(&mp.park)
1630 }
1631
1632 // Hands off P from syscall or locked M.
1633 // Always runs without a P, so write barriers are not allowed.
1634 //go:nowritebarrierrec
1635 func handoffp(_p_ *p) {
1636 // handoffp must start an M in any situation where
1637 // findrunnable would return a G to run on _p_.
1638
1639 // if it has local work, start it straight away
1640 if !runqempty(_p_) || sched.runqsize != 0 {
1641 startm(_p_, false)
1642 return
1643 }
1644 // if it has GC work, start it straight away
1645 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
1646 startm(_p_, false)
1647 return
1648 }
1649 // no local work, check that there are no spinning/idle M's,
1650 // otherwise our help is not required
1651 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
1652 startm(_p_, true)
1653 return
1654 }
1655 lock(&sched.lock)
1656 if sched.gcwaiting != 0 {
1657 _p_.status = _Pgcstop
1658 sched.stopwait--
1659 if sched.stopwait == 0 {
1660 notewakeup(&sched.stopnote)
1661 }
1662 unlock(&sched.lock)
1663 return
1664 }
1665 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
1666 sched.safePointFn(_p_)
1667 sched.safePointWait--
1668 if sched.safePointWait == 0 {
1669 notewakeup(&sched.safePointNote)
1670 }
1671 }
1672 if sched.runqsize != 0 {
1673 unlock(&sched.lock)
1674 startm(_p_, false)
1675 return
1676 }
1677 // If this is the last running P and nobody is polling the network,
1678 // we need to wake up another M to poll the network.
1679 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
1680 unlock(&sched.lock)
1681 startm(_p_, false)
1682 return
1683 }
1684 pidleput(_p_)
1685 unlock(&sched.lock)
1686 }
1687
1688 // Tries to add one more P to execute G's.
1689 // Called when a G is made runnable (newproc, ready).
1690 func wakep() {
1691 // be conservative about spinning threads
1692 if !atomic.Cas(&sched.nmspinning, 0, 1) {
1693 return
1694 }
1695 startm(nil, true)
1696 }
1697
1698 // Stops execution of the current m that is locked to a g until the g is runnable again.
1699 // Returns with acquired P.
1700 func stoplockedm() {
1701 _g_ := getg()
1702
1703 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
1704 throw("stoplockedm: inconsistent locking")
1705 }
1706 if _g_.m.p != 0 {
1707 // Schedule another M to run this p.
1708 _p_ := releasep()
1709 handoffp(_p_)
1710 }
1711 incidlelocked(1)
1712 // Wait until another thread schedules lockedg again.
1713 notesleep(&_g_.m.park)
1714 noteclear(&_g_.m.park)
1715 status := readgstatus(_g_.m.lockedg)
1716 if status&^_Gscan != _Grunnable {
1717 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
1718 dumpgstatus(_g_)
1719 throw("stoplockedm: not runnable")
1720 }
1721 acquirep(_g_.m.nextp.ptr())
1722 _g_.m.nextp = 0
1723 }
1724
1725 // Schedules the locked m to run the locked gp.
1726 // May run during STW, so write barriers are not allowed.
1727 //go:nowritebarrierrec
1728 func startlockedm(gp *g) {
1729 _g_ := getg()
1730
1731 mp := gp.lockedm
1732 if mp == _g_.m {
1733 throw("startlockedm: locked to me")
1734 }
1735 if mp.nextp != 0 {
1736 throw("startlockedm: m has p")
1737 }
1738 // directly handoff current P to the locked m
1739 incidlelocked(-1)
1740 _p_ := releasep()
1741 mp.nextp.set(_p_)
1742 notewakeup(&mp.park)
1743 stopm()
1744 }
1745
1746 // Stops the current m for stopTheWorld.
1747 // Returns when the world is restarted.
1748 func gcstopm() {
1749 _g_ := getg()
1750
1751 if sched.gcwaiting == 0 {
1752 throw("gcstopm: not waiting for gc")
1753 }
1754 if _g_.m.spinning {
1755 _g_.m.spinning = false
1756 // OK to just drop nmspinning here,
1757 // startTheWorld will unpark threads as necessary.
1758 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1759 throw("gcstopm: negative nmspinning")
1760 }
1761 }
1762 _p_ := releasep()
1763 lock(&sched.lock)
1764 _p_.status = _Pgcstop
1765 sched.stopwait--
1766 if sched.stopwait == 0 {
1767 notewakeup(&sched.stopnote)
1768 }
1769 unlock(&sched.lock)
1770 stopm()
1771 }
1772
1773 // Schedules gp to run on the current M.
1774 // If inheritTime is true, gp inherits the remaining time in the
1775 // current time slice. Otherwise, it starts a new time slice.
1776 // Never returns.
1777 //
1778 // Write barriers are allowed because this is called immediately after
1779 // acquiring a P in several places.
1780 //
1781 //go:yeswritebarrierrec
1782 func execute(gp *g, inheritTime bool) {
1783 _g_ := getg()
1784
1785 casgstatus(gp, _Grunnable, _Grunning)
1786 gp.waitsince = 0
1787 gp.preempt = false
1788 if !inheritTime {
1789 _g_.m.p.ptr().schedtick++
1790 }
1791 _g_.m.curg = gp
1792 gp.m = _g_.m
1793
1794 // Check whether the profiler needs to be turned on or off.
1795 hz := sched.profilehz
1796 if _g_.m.profilehz != hz {
1797 resetcpuprofiler(hz)
1798 }
1799
1800 if trace.enabled {
1801 // GoSysExit has to happen when we have a P, but before GoStart.
1802 // So we emit it here.
1803 if gp.syscallsp != 0 && gp.sysblocktraced {
1804 traceGoSysExit(gp.sysexitticks)
1805 }
1806 traceGoStart()
1807 }
1808
1809 gogo(gp)
1810 }
1811
1812 // Finds a runnable goroutine to execute.
1813 // Tries to steal from other P's, get g from global queue, poll network.
1814 func findrunnable() (gp *g, inheritTime bool) {
1815 _g_ := getg()
1816
1817 // The conditions here and in handoffp must agree: if
1818 // findrunnable would return a G to run, handoffp must start
1819 // an M.
1820
1821 top:
1822 _p_ := _g_.m.p.ptr()
1823 if sched.gcwaiting != 0 {
1824 gcstopm()
1825 goto top
1826 }
1827 if _p_.runSafePointFn != 0 {
1828 runSafePointFn()
1829 }
1830 if fingwait && fingwake {
1831 if gp := wakefing(); gp != nil {
1832 ready(gp, 0, true)
1833 }
1834 }
1835
1836 // local runq
1837 if gp, inheritTime := runqget(_p_); gp != nil {
1838 return gp, inheritTime
1839 }
1840
1841 // global runq
1842 if sched.runqsize != 0 {
1843 lock(&sched.lock)
1844 gp := globrunqget(_p_, 0)
1845 unlock(&sched.lock)
1846 if gp != nil {
1847 return gp, false
1848 }
1849 }
1850
1851 // Poll network.
1852 // This netpoll is only an optimization before we resort to stealing.
1853 // We can safely skip it if there is a thread blocked in netpoll already.
1854 // If there is any kind of logical race with that blocked thread
1855 // (e.g. it has already returned from netpoll, but does not set lastpoll yet),
1856 // this thread will do blocking netpoll below anyway.
1857 if netpollinited() && sched.lastpoll != 0 {
1858 if gp := netpoll(false); gp != nil { // non-blocking
1859 // netpoll returns list of goroutines linked by schedlink.
1860 injectglist(gp.schedlink.ptr())
1861 casgstatus(gp, _Gwaiting, _Grunnable)
1862 if trace.enabled {
1863 traceGoUnpark(gp, 0)
1864 }
1865 return gp, false
1866 }
1867 }
1868
1869 // Steal work from other P's.
1870 procs := uint32(gomaxprocs)
1871 if atomic.Load(&sched.npidle) == procs-1 {
1872 // Either GOMAXPROCS=1 or everybody, except for us, is idle already.
1873 // New work can appear from returning syscall/cgocall, network or timers.
1874 // None of these submit to local run queues, so there is no point in stealing.
1875 goto stop
1876 }
1877 // If number of spinning M's >= number of busy P's, block.
1878 // This is necessary to prevent excessive CPU consumption
1879 // when GOMAXPROCS>>1 but the program parallelism is low.
1880 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
1881 goto stop
1882 }
1883 if !_g_.m.spinning {
1884 _g_.m.spinning = true
1885 atomic.Xadd(&sched.nmspinning, 1)
1886 }
1887 for i := 0; i < 4; i++ {
1888 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
1889 if sched.gcwaiting != 0 {
1890 goto top
1891 }
1892 stealRunNextG := i > 2 // first look for ready queues with more than 1 g
1893 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
1894 return gp, false
1895 }
1896 }
1897 }
1898
1899 stop:
1900
1901 // We have nothing to do. If we're in the GC mark phase, it is safe to
1902 // scan and blacken objects, and there is mark work available, then run
1903 // idle-time marking rather than give up the P.
1904 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
1905 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
1906 gp := _p_.gcBgMarkWorker.ptr()
1907 casgstatus(gp, _Gwaiting, _Grunnable)
1908 if trace.enabled {
1909 traceGoUnpark(gp, 0)
1910 }
1911 return gp, false
1912 }
1913
1914 // return P and block
1915 lock(&sched.lock)
1916 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
1917 unlock(&sched.lock)
1918 goto top
1919 }
1920 if sched.runqsize != 0 {
1921 gp := globrunqget(_p_, 0)
1922 unlock(&sched.lock)
1923 return gp, false
1924 }
1925 if releasep() != _p_ {
1926 throw("findrunnable: wrong p")
1927 }
1928 pidleput(_p_)
1929 unlock(&sched.lock)
1930
1931 // Delicate dance: thread transitions from spinning to non-spinning state,
1932 // potentially concurrently with submission of new goroutines. We must
1933 // drop nmspinning first and then check all per-P queues again (with
1934 // #StoreLoad memory barrier in between). If we do it the other way around,
1935 // another thread can submit a goroutine after we've checked all run queues
1936 // but before we drop nmspinning; as a result nobody will unpark a thread
1937 // to run the goroutine.
1938 // If we discover new work below, we need to restore m.spinning as a signal
1939 // for resetspinning to unpark a new worker thread (because there can be more
1940 // than one starving goroutine). However, if after discovering new work
1941 // we also observe no idle Ps, it is OK to just park the current thread:
1942 // the system is fully loaded so no spinning threads are required.
1943 // Also see "Worker thread parking/unparking" comment at the top of the file.
1944 wasSpinning := _g_.m.spinning
1945 if _g_.m.spinning {
1946 _g_.m.spinning = false
1947 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1948 throw("findrunnable: negative nmspinning")
1949 }
1950 }
1951
1952 // check all runqueues once again
1953 for i := 0; i < int(gomaxprocs); i++ {
1954 _p_ := allp[i]
1955 if _p_ != nil && !runqempty(_p_) {
1956 lock(&sched.lock)
1957 _p_ = pidleget()
1958 unlock(&sched.lock)
1959 if _p_ != nil {
1960 acquirep(_p_)
1961 if wasSpinning {
1962 _g_.m.spinning = true
1963 atomic.Xadd(&sched.nmspinning, 1)
1964 }
1965 goto top
1966 }
1967 break
1968 }
1969 }
1970
1971 // Check for idle-priority GC work again.
1972 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
1973 lock(&sched.lock)
1974 _p_ = pidleget()
1975 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
1976 pidleput(_p_)
1977 _p_ = nil
1978 }
1979 unlock(&sched.lock)
1980 if _p_ != nil {
1981 acquirep(_p_)
1982 if wasSpinning {
1983 _g_.m.spinning = true
1984 atomic.Xadd(&sched.nmspinning, 1)
1985 }
1986 // Go back to idle GC check.
1987 goto stop
1988 }
1989 }
1990
1991 // poll network
1992 if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
1993 if _g_.m.p != 0 {
1994 throw("findrunnable: netpoll with p")
1995 }
1996 if _g_.m.spinning {
1997 throw("findrunnable: netpoll with spinning")
1998 }
1999 gp := netpoll(true) // block until new work is available
2000 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2001 if gp != nil {
2002 lock(&sched.lock)
2003 _p_ = pidleget()
2004 unlock(&sched.lock)
2005 if _p_ != nil {
2006 acquirep(_p_)
2007 injectglist(gp.schedlink.ptr())
2008 casgstatus(gp, _Gwaiting, _Grunnable)
2009 if trace.enabled {
2010 traceGoUnpark(gp, 0)
2011 }
2012 return gp, false
2013 }
2014 injectglist(gp)
2015 }
2016 }
2017 stopm()
2018 goto top
2019 }
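
// Illustrative sketch (not part of the runtime; queue, idle and park are
// hypothetical): the "delicate dance" above is the general pattern where
// the consumer announces idleness before it re-checks the queues and
// blocks, while the producer publishes work before it checks for idle
// consumers, so one side always observes the other. A minimal
// single-consumer analogue:
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//		"time"
//	)
//
//	var (
//		queue = make(chan int, 128) // stand-in for the run queues
//		idle  int32                 // stand-in for nmspinning/npidle
//		park  = make(chan struct{}, 1)
//	)
//
//	// submit publishes the work first, then looks for an idle consumer.
//	func submit(v int) {
//		queue <- v
//		if atomic.LoadInt32(&idle) > 0 {
//			select {
//			case park <- struct{}{}:
//			default:
//			}
//		}
//	}
//
//	// consume announces idleness first and only then re-checks the queue,
//	// mirroring the nmspinning decrement followed by the re-scan of all
//	// run queues in findrunnable.
//	func consume() {
//		for {
//			atomic.AddInt32(&idle, 1)
//			select {
//			case v := <-queue: // the late re-check catches racing submits
//				atomic.AddInt32(&idle, -1)
//				fmt.Println("got", v)
//			case <-park:
//				atomic.AddInt32(&idle, -1)
//			}
//		}
//	}
//
//	func main() {
//		go consume()
//		submit(1)
//		time.Sleep(50 * time.Millisecond)
//	}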
2020
2021 // pollWork returns true if there is non-background work this P could
2022 // be doing. This is a fairly lightweight check to be used for
2023 // background work loops, like idle GC. It checks a subset of the
2024 // conditions checked by the actual scheduler.
2025 func pollWork() bool {
2026 if sched.runqsize != 0 {
2027 return true
2028 }
2029 p := getg().m.p.ptr()
2030 if !runqempty(p) {
2031 return true
2032 }
2033 if netpollinited() && sched.lastpoll != 0 {
2034 if gp := netpoll(false); gp != nil {
2035 injectglist(gp)
2036 return true
2037 }
2038 }
2039 return false
2040 }
2041
2042 func resetspinning() {
2043 _g_ := getg()
2044 if !_g_.m.spinning {
2045 throw("resetspinning: not a spinning m")
2046 }
2047 _g_.m.spinning = false
2048 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2049 if int32(nmspinning) < 0 {
2050 throw("resetspinning: negative nmspinning")
2051 }
2052 // M wakeup policy is deliberately somewhat conservative, so check if we
2053 // need to wake up another P here. See "Worker thread parking/unparking"
2054 // comment at the top of the file for details.
2055 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2056 wakep()
2057 }
2058 }
2059
2060 // Injects the list of runnable G's into the scheduler.
2061 // Can run concurrently with GC.
2062 func injectglist(glist *g) {
2063 if glist == nil {
2064 return
2065 }
2066 if trace.enabled {
2067 for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
2068 traceGoUnpark(gp, 0)
2069 }
2070 }
2071 lock(&sched.lock)
2072 var n int
2073 for n = 0; glist != nil; n++ {
2074 gp := glist
2075 glist = gp.schedlink.ptr()
2076 casgstatus(gp, _Gwaiting, _Grunnable)
2077 globrunqput(gp)
2078 }
2079 unlock(&sched.lock)
2080 for ; n != 0 && sched.npidle != 0; n-- {
2081 startm(nil, false)
2082 }
2083 }
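
// Illustrative sketch (not part of the runtime; the node type is
// hypothetical): injectglist walks an intrusive list linked through
// schedlink, moving every element to the global queue and counting how
// many were moved so it knows how many idle Ps to kick. The same
// drain-and-count shape over a plain intrusive list:
//
//	package main
//
//	import "fmt"
//
//	type node struct {
//		val  int
//		next *node // plays the role of g.schedlink
//	}
//
//	// drain moves every node from list into dst and reports how many
//	// nodes were transferred.
//	func drain(list *node, dst *[]int) int {
//		n := 0
//		for ; list != nil; n++ {
//			nd := list
//			list = nd.next
//			*dst = append(*dst, nd.val)
//		}
//		return n
//	}
//
//	func main() {
//		list := &node{1, &node{2, &node{3, nil}}}
//		var global []int
//		fmt.Println(drain(list, &global), global) // 3 [1 2 3]
//	}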
2084
2085 // One round of scheduler: find a runnable goroutine and execute it.
2086 // Never returns.
2087 func schedule() {
2088 _g_ := getg()
2089
2090 if _g_.m.locks != 0 {
2091 throw("schedule: holding locks")
2092 }
2093
2094 if _g_.m.lockedg != nil {
2095 stoplockedm()
2096 execute(_g_.m.lockedg, false) // Never returns.
2097 }
2098
2099 top:
2100 if sched.gcwaiting != 0 {
2101 gcstopm()
2102 goto top
2103 }
2104 if _g_.m.p.ptr().runSafePointFn != 0 {
2105 runSafePointFn()
2106 }
2107
2108 var gp *g
2109 var inheritTime bool
2110 if trace.enabled || trace.shutdown {
2111 gp = traceReader()
2112 if gp != nil {
2113 casgstatus(gp, _Gwaiting, _Grunnable)
2114 traceGoUnpark(gp, 0)
2115 }
2116 }
2117 if gp == nil && gcBlackenEnabled != 0 {
2118 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2119 }
2120 if gp == nil {
2121 // Check the global runnable queue once in a while to ensure fairness.
2122 // Otherwise two goroutines can completely occupy the local runqueue
2123 // by constantly respawning each other.
2124 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2125 lock(&sched.lock)
2126 gp = globrunqget(_g_.m.p.ptr(), 1)
2127 unlock(&sched.lock)
2128 }
2129 }
2130 if gp == nil {
2131 gp, inheritTime = runqget(_g_.m.p.ptr())
2132 if gp != nil && _g_.m.spinning {
2133 throw("schedule: spinning with local work")
2134 }
2135
2136 // Because gccgo does not implement preemption as a stack check,
2137 // we need to check for preemption here for fairness.
2138 // Otherwise goroutines on the local queue may starve
2139 // goroutines on the global queue.
2140 // Since we preempt by storing the goroutine on the global
2141 // queue, this is the only place we need to check preempt.
2142 // This does not call checkPreempt because gp is not running.
2143 if gp != nil && gp.preempt {
2144 gp.preempt = false
2145 lock(&sched.lock)
2146 globrunqput(gp)
2147 unlock(&sched.lock)
2148 goto top
2149 }
2150 }
2151 if gp == nil {
2152 gp, inheritTime = findrunnable() // blocks until work is available
2153 }
2154
2155 // This thread is going to run a goroutine and is not spinning anymore,
2156 // so if it was marked as spinning we need to reset it now and potentially
2157 // start a new spinning M.
2158 if _g_.m.spinning {
2159 resetspinning()
2160 }
2161
2162 if gp.lockedm != nil {
2163 // Hands off own p to the locked m,
2164 // then blocks waiting for a new p.
2165 startlockedm(gp)
2166 goto top
2167 }
2168
2169 execute(gp, inheritTime)
2170 }
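
// Illustrative sketch (not part of the runtime; pick and the queues are
// hypothetical): the schedtick%61 check above occasionally prefers the
// global queue so that goroutines parked there cannot be starved by two
// goroutines that keep readying each other locally. The same shape as a
// stand-alone selection function:
//
//	package main
//
//	import "fmt"
//
//	func pick(tick int, local, global []string) (string, []string, []string) {
//		// Every 61st scheduling decision, look at the global queue first.
//		if tick%61 == 0 && len(global) > 0 {
//			return global[0], local, global[1:]
//		}
//		if len(local) > 0 {
//			return local[0], local[1:], global
//		}
//		if len(global) > 0 {
//			return global[0], local, global[1:]
//		}
//		return "", local, global
//	}
//
//	func main() {
//		local, global := []string{"a", "b"}, []string{"g"}
//		for tick := 60; tick <= 62; tick++ {
//			var got string
//			got, local, global = pick(tick, local, global)
//			fmt.Println(tick, got) // 60 a, 61 g, 62 b
//		}
//	}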
2171
2172 // dropg removes the association between m and the current goroutine m->curg (gp for short).
2173 // Typically a caller sets gp's status away from Grunning and then
2174 // immediately calls dropg to finish the job. The caller is also responsible
2175 // for arranging that gp will be restarted using ready at an
2176 // appropriate time. After calling dropg and arranging for gp to be
2177 // readied later, the caller can do other work but eventually should
2178 // call schedule to restart the scheduling of goroutines on this m.
2179 func dropg() {
2180 _g_ := getg()
2181
2182 setMNoWB(&_g_.m.curg.m, nil)
2183 setGNoWB(&_g_.m.curg, nil)
2184 }
2185
2186 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2187 unlock((*mutex)(lock))
2188 return true
2189 }
2190
2191 // park continuation on g0.
2192 func park_m(gp *g) {
2193 _g_ := getg()
2194
2195 if trace.enabled {
2196 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
2197 }
2198
2199 casgstatus(gp, _Grunning, _Gwaiting)
2200 dropg()
2201
2202 if _g_.m.waitunlockf != nil {
2203 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2204 ok := fn(gp, _g_.m.waitlock)
2205 _g_.m.waitunlockf = nil
2206 _g_.m.waitlock = nil
2207 if !ok {
2208 if trace.enabled {
2209 traceGoUnpark(gp, 2)
2210 }
2211 casgstatus(gp, _Gwaiting, _Grunnable)
2212 execute(gp, true) // Schedule it back, never returns.
2213 }
2214 }
2215 schedule()
2216 }
2217
2218 func goschedImpl(gp *g) {
2219 status := readgstatus(gp)
2220 if status&^_Gscan != _Grunning {
2221 dumpgstatus(gp)
2222 throw("bad g status")
2223 }
2224 casgstatus(gp, _Grunning, _Grunnable)
2225 dropg()
2226 lock(&sched.lock)
2227 globrunqput(gp)
2228 unlock(&sched.lock)
2229
2230 schedule()
2231 }
2232
2233 // Gosched continuation on g0.
2234 func gosched_m(gp *g) {
2235 if trace.enabled {
2236 traceGoSched()
2237 }
2238 goschedImpl(gp)
2239 }
2240
2241 func gopreempt_m(gp *g) {
2242 if trace.enabled {
2243 traceGoPreempt()
2244 }
2245 goschedImpl(gp)
2246 }
2247
2248 // Finishes execution of the current goroutine.
2249 func goexit1() {
2250 if trace.enabled {
2251 traceGoEnd()
2252 }
2253 mcall(goexit0)
2254 }
2255
2256 // goexit continuation on g0.
2257 func goexit0(gp *g) {
2258 _g_ := getg()
2259
2260 casgstatus(gp, _Grunning, _Gdead)
2261 if isSystemGoroutine(gp) {
2262 atomic.Xadd(&sched.ngsys, -1)
2263 gp.isSystemGoroutine = false
2264 }
2265 gp.m = nil
2266 gp.lockedm = nil
2267 _g_.m.lockedg = nil
2268 gp.entry = nil
2269 gp.paniconfault = false
2270 gp._defer = nil // should be nil already but just in case.
2271 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
2272 gp.writebuf = nil
2273 gp.waitreason = ""
2274 gp.param = nil
2275
2276 // Note that gp's stack scan is now "valid" because it has no
2277 // stack. We could dequeueRescan, but that takes a lock and
2278 // isn't really necessary.
2279 gp.gcscanvalid = true
2280 dropg()
2281
2282 if _g_.m.locked&^_LockExternal != 0 {
2283 print("invalid m->locked = ", _g_.m.locked, "\n")
2284 throw("internal lockOSThread error")
2285 }
2286 _g_.m.locked = 0
2287 gfput(_g_.m.p.ptr(), gp)
2288 schedule()
2289 }
2290
2291 // The goroutine g is about to enter a system call.
2292 // Record that it's not using the cpu anymore.
2293 // This is called only from the go syscall library and cgocall,
2294 // not from the low-level system calls used by the runtime.
2295 //
2296 // The entersyscall function is written in C, so that it can save the
2297 // current register context so that the GC will see it.
2298 // It calls reentersyscall.
2299 //
2300 // Syscall tracing:
2301 // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
2302 // If the syscall does not block, that is it, we do not emit any other events.
2303 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
2304 // when syscall returns we emit traceGoSysExit and when the goroutine starts running
2305 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
2306 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
2307 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
2308 // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
2309 // and we wait for the increment before emitting traceGoSysExit.
2310 // Note that the increment is done even if tracing is not enabled,
2311 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
2312 //
2313 //go:nosplit
2314 //go:noinline
2315 func reentersyscall(pc, sp uintptr) {
2316 _g_ := getg()
2317
2318 // Disable preemption because during this function g is in Gsyscall status,
2319 // but can have inconsistent g->sched, do not let GC observe it.
2320 _g_.m.locks++
2321
2322 _g_.syscallsp = sp
2323 _g_.syscallpc = pc
2324 casgstatus(_g_, _Grunning, _Gsyscall)
2325
2326 if trace.enabled {
2327 systemstack(traceGoSysCall)
2328 }
2329
2330 if atomic.Load(&sched.sysmonwait) != 0 {
2331 systemstack(entersyscall_sysmon)
2332 }
2333
2334 if _g_.m.p.ptr().runSafePointFn != 0 {
2335 // runSafePointFn may stack split if run on this stack
2336 systemstack(runSafePointFn)
2337 }
2338
2339 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2340 _g_.sysblocktraced = true
2341 _g_.m.mcache = nil
2342 _g_.m.p.ptr().m = 0
2343 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2344 if sched.gcwaiting != 0 {
2345 systemstack(entersyscall_gcwait)
2346 }
2347
2348 _g_.m.locks--
2349 }
2350
2351 func entersyscall_sysmon() {
2352 lock(&sched.lock)
2353 if atomic.Load(&sched.sysmonwait) != 0 {
2354 atomic.Store(&sched.sysmonwait, 0)
2355 notewakeup(&sched.sysmonnote)
2356 }
2357 unlock(&sched.lock)
2358 }
2359
2360 func entersyscall_gcwait() {
2361 _g_ := getg()
2362 _p_ := _g_.m.p.ptr()
2363
2364 lock(&sched.lock)
2365 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2366 if trace.enabled {
2367 traceGoSysBlock(_p_)
2368 traceProcStop(_p_)
2369 }
2370 _p_.syscalltick++
2371 if sched.stopwait--; sched.stopwait == 0 {
2372 notewakeup(&sched.stopnote)
2373 }
2374 }
2375 unlock(&sched.lock)
2376 }
2377
2378 // The same as reentersyscall(), but with a hint that the syscall is blocking.
2379 //go:nosplit
2380 func reentersyscallblock(pc, sp uintptr) {
2381 _g_ := getg()
2382
2383 _g_.m.locks++ // see comment in entersyscall
2384 _g_.throwsplit = true
2385 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2386 _g_.sysblocktraced = true
2387 _g_.m.p.ptr().syscalltick++
2388
2389 // Leave SP around for GC and traceback.
2390 _g_.syscallsp = sp
2391 _g_.syscallpc = pc
2392 casgstatus(_g_, _Grunning, _Gsyscall)
2393 systemstack(entersyscallblock_handoff)
2394
2395 _g_.m.locks--
2396 }
2397
2398 func entersyscallblock_handoff() {
2399 if trace.enabled {
2400 traceGoSysCall()
2401 traceGoSysBlock(getg().m.p.ptr())
2402 }
2403 handoffp(releasep())
2404 }
2405
2406 // The goroutine g exited its system call.
2407 // Arrange for it to run on a cpu again.
2408 // This is called only from the go syscall library, not
2409 // from the low-level system calls used by the runtime.
2410 //
2411 // Write barriers are not allowed because our P may have been stolen.
2412 //
2413 //go:nosplit
2414 //go:nowritebarrierrec
2415 func exitsyscall(dummy int32) {
2416 _g_ := getg()
2417
2418 _g_.m.locks++ // see comment in entersyscall
2419
2420 _g_.waitsince = 0
2421 oldp := _g_.m.p.ptr()
2422 if exitsyscallfast() {
2423 if _g_.m.mcache == nil {
2424 throw("lost mcache")
2425 }
2426 if trace.enabled {
2427 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2428 systemstack(traceGoStart)
2429 }
2430 }
2431 // There's a cpu for us, so we can run.
2432 _g_.m.p.ptr().syscalltick++
2433 // We need to cas the status and scan before resuming...
2434 casgstatus(_g_, _Gsyscall, _Grunning)
2435
2436 exitsyscallclear(_g_)
2437 _g_.m.locks--
2438 _g_.throwsplit = false
2439 return
2440 }
2441
2442 _g_.sysexitticks = 0
2443 if trace.enabled {
2444 // Wait till traceGoSysBlock event is emitted.
2445 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2446 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2447 osyield()
2448 }
2449 // We can't trace syscall exit right now because we don't have a P.
2450 // Tracing code can invoke write barriers that cannot run without a P.
2451 // So instead we remember the syscall exit time and emit the event
2452 // in execute when we have a P.
2453 _g_.sysexitticks = cputicks()
2454 }
2455
2456 _g_.m.locks--
2457
2458 // Call the scheduler.
2459 mcall(exitsyscall0)
2460
2461 if _g_.m.mcache == nil {
2462 throw("lost mcache")
2463 }
2464
2465 // Scheduler returned, so we're allowed to run now.
2466 // Delete the syscallsp information that we left for
2467 // the garbage collector during the system call.
2468 // Must wait until now because until gosched returns
2469 // we don't know for sure that the garbage collector
2470 // is not running.
2471 exitsyscallclear(_g_)
2472
2473 _g_.m.p.ptr().syscalltick++
2474 _g_.throwsplit = false
2475 }
2476
2477 //go:nosplit
2478 func exitsyscallfast() bool {
2479 _g_ := getg()
2480
2481 // Freezetheworld sets stopwait but does not retake P's.
2482 if sched.stopwait == freezeStopWait {
2483 _g_.m.mcache = nil
2484 _g_.m.p = 0
2485 return false
2486 }
2487
2488 // Try to re-acquire the last P.
2489 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2490 // There's a cpu for us, so we can run.
2491 exitsyscallfast_reacquired()
2492 return true
2493 }
2494
2495 // Try to get any other idle P.
2496 oldp := _g_.m.p.ptr()
2497 _g_.m.mcache = nil
2498 _g_.m.p = 0
2499 if sched.pidle != 0 {
2500 var ok bool
2501 systemstack(func() {
2502 ok = exitsyscallfast_pidle()
2503 if ok && trace.enabled {
2504 if oldp != nil {
2505 // Wait till traceGoSysBlock event is emitted.
2506 // This ensures consistency of the trace (the goroutine is started after it is blocked).
2507 for oldp.syscalltick == _g_.m.syscalltick {
2508 osyield()
2509 }
2510 }
2511 traceGoSysExit(0)
2512 }
2513 })
2514 if ok {
2515 return true
2516 }
2517 }
2518 return false
2519 }
2520
2521 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2522 // has successfully reacquired the P it was running on before the
2523 // syscall.
2524 //
2525 // This function is allowed to have write barriers because exitsyscall
2526 // has acquired a P at this point.
2527 //
2528 //go:yeswritebarrierrec
2529 //go:nosplit
2530 func exitsyscallfast_reacquired() {
2531 _g_ := getg()
2532 _g_.m.mcache = _g_.m.p.ptr().mcache
2533 _g_.m.p.ptr().m.set(_g_.m)
2534 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2535 if trace.enabled {
2536 // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
2537 // traceGoSysBlock for this syscall was already emitted,
2538 // but here we effectively retake the p from the new syscall running on the same p.
2539 systemstack(func() {
2540 // Denote blocking of the new syscall.
2541 traceGoSysBlock(_g_.m.p.ptr())
2542 // Denote completion of the current syscall.
2543 traceGoSysExit(0)
2544 })
2545 }
2546 _g_.m.p.ptr().syscalltick++
2547 }
2548 }
2549
2550 func exitsyscallfast_pidle() bool {
2551 lock(&sched.lock)
2552 _p_ := pidleget()
2553 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2554 atomic.Store(&sched.sysmonwait, 0)
2555 notewakeup(&sched.sysmonnote)
2556 }
2557 unlock(&sched.lock)
2558 if _p_ != nil {
2559 acquirep(_p_)
2560 return true
2561 }
2562 return false
2563 }
2564
2565 // exitsyscall slow path on g0.
2566 // Failed to acquire P, enqueue gp as runnable.
2567 //
2568 //go:nowritebarrierrec
2569 func exitsyscall0(gp *g) {
2570 _g_ := getg()
2571
2572 casgstatus(gp, _Gsyscall, _Grunnable)
2573 dropg()
2574 lock(&sched.lock)
2575 _p_ := pidleget()
2576 if _p_ == nil {
2577 globrunqput(gp)
2578 } else if atomic.Load(&sched.sysmonwait) != 0 {
2579 atomic.Store(&sched.sysmonwait, 0)
2580 notewakeup(&sched.sysmonnote)
2581 }
2582 unlock(&sched.lock)
2583 if _p_ != nil {
2584 acquirep(_p_)
2585 execute(gp, false) // Never returns.
2586 }
2587 if _g_.m.lockedg != nil {
2588 // Wait until another thread schedules gp and so m again.
2589 stoplockedm()
2590 execute(gp, false) // Never returns.
2591 }
2592 stopm()
2593 schedule() // Never returns.
2594 }
2595
2596 // exitsyscallclear clears GC-related information that we only track
2597 // during a syscall.
2598 func exitsyscallclear(gp *g) {
2599 // Garbage collector isn't running (since we are), so okay to
2600 // clear syscallsp.
2601 gp.syscallsp = 0
2602
2603 gp.gcstack = 0
2604 gp.gcnextsp = 0
2605 memclrNoHeapPointers(unsafe.Pointer(&gp.gcregs), unsafe.Sizeof(gp.gcregs))
2606 }
2607
2608 // Code generated by cgo, and some library code, calls syscall.Entersyscall
2609 // and syscall.Exitsyscall.
2610
2611 //go:linkname syscall_entersyscall syscall.Entersyscall
2612 //go:nosplit
2613 func syscall_entersyscall() {
2614 entersyscall(0)
2615 }
2616
2617 //go:linkname syscall_exitsyscall syscall.Exitsyscall
2618 //go:nosplit
2619 func syscall_exitsyscall() {
2620 exitsyscall(0)
2621 }
2622
2623 func beforefork() {
2624 gp := getg().m.curg
2625
2626 // Fork can hang if preempted with signals frequently enough (see issue 5517).
2627 // Ensure that we stay on the same M where we disable profiling.
2628 gp.m.locks++
2629 if gp.m.profilehz != 0 {
2630 resetcpuprofiler(0)
2631 }
2632 }
2633
2634 // Called from syscall package before fork.
2635 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
2636 //go:nosplit
2637 func syscall_runtime_BeforeFork() {
2638 systemstack(beforefork)
2639 }
2640
2641 func afterfork() {
2642 gp := getg().m.curg
2643
2644 hz := sched.profilehz
2645 if hz != 0 {
2646 resetcpuprofiler(hz)
2647 }
2648 gp.m.locks--
2649 }
2650
2651 // Called from syscall package after fork in parent.
2652 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
2653 //go:nosplit
2654 func syscall_runtime_AfterFork() {
2655 systemstack(afterfork)
2656 }
2657
2658 // Create a new g running fn passing arg as the single argument.
2659 // Put it on the queue of g's waiting to run.
2660 // The compiler turns a go statement into a call to this.
2661 //go:linkname newproc __go_go
2662 func newproc(fn uintptr, arg unsafe.Pointer) *g {
2663 _g_ := getg()
2664
2665 if fn == 0 {
2666 _g_.m.throwing = -1 // do not dump full stacks
2667 throw("go of nil func value")
2668 }
2669 _g_.m.locks++ // disable preemption because it can be holding p in a local var
2670
2671 _p_ := _g_.m.p.ptr()
2672 newg := gfget(_p_)
2673 var (
2674 sp unsafe.Pointer
2675 spsize uintptr
2676 )
2677 if newg == nil {
2678 newg = malg(true, false, &sp, &spsize)
2679 casgstatus(newg, _Gidle, _Gdead)
2680 newg.gcRescan = -1
2681 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
2682 } else {
2683 resetNewG(newg, &sp, &spsize)
2684 }
2685 newg.traceback = nil
2686
2687 if readgstatus(newg) != _Gdead {
2688 throw("newproc1: new g is not Gdead")
2689 }
2690
2691 // Store the C function pointer into entryfn, take the address
2692 // of entryfn, convert it to a Go function value, and store
2693 // that in entry.
2694 newg.entryfn = fn
2695 var entry func(unsafe.Pointer)
2696 *(*unsafe.Pointer)(unsafe.Pointer(&entry)) = unsafe.Pointer(&newg.entryfn)
2697 newg.entry = entry
2698
2699 newg.param = arg
2700 newg.gopc = getcallerpc(unsafe.Pointer(&fn))
2701 newg.startpc = fn
2702 // The stack is dirty from the argument frame, so queue it for
2703 // scanning. Do this before setting it to runnable so we still
2704 // own the G. If we're recycling a G, it may already be on the
2705 // rescan list.
2706 if newg.gcRescan == -1 {
2707 queueRescan(newg)
2708 } else {
2709 // The recycled G is already on the rescan list. Just
2710 // mark the stack dirty.
2711 newg.gcscanvalid = false
2712 }
2713 casgstatus(newg, _Gdead, _Grunnable)
2714
2715 if _p_.goidcache == _p_.goidcacheend {
2716 // Sched.goidgen is the last allocated id,
2717 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
2718 // At startup sched.goidgen=0, so main goroutine receives goid=1.
2719 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
2720 _p_.goidcache -= _GoidCacheBatch - 1
2721 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
2722 }
2723 newg.goid = int64(_p_.goidcache)
2724 _p_.goidcache++
2725 if trace.enabled {
2726 traceGoCreate(newg, newg.startpc)
2727 }
2728
2729 makeGContext(newg, sp, spsize)
2730
2731 runqput(_p_, newg, true)
2732
2733 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 {
2734 wakep()
2735 }
2736 _g_.m.locks--
2737 return newg
2738 }
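
// Illustrative sketch (not part of the runtime): the goid cache above
// hands out ids in per-P batches so that most goroutine creations do not
// touch sched.goidgen. Assuming a batch size of 16 (a stand-in for
// _GoidCacheBatch, whose value is not restated here), the arithmetic
// works out like this:
//
//	package main
//
//	import "fmt"
//
//	const batch = 16 // assumed stand-in for _GoidCacheBatch
//
//	var goidgen uint64 // last allocated id, as in sched.goidgen
//
//	// refill mirrors the cache refill in newproc: the add returns the new
//	// value, so the batch covers [old+1, old+batch].
//	func refill() (cache, end uint64) {
//		goidgen += batch // Xadd(&sched.goidgen, _GoidCacheBatch)
//		cache = goidgen - (batch - 1)
//		end = cache + batch
//		return
//	}
//
//	func main() {
//		c, e := refill()
//		fmt.Println(c, e) // 1 17: ids 1..16, so the main goroutine gets goid 1
//	}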
2739
2740 // expectedSystemGoroutines counts the number of goroutines expected
2741 // to mark themselves as system goroutines. After they mark themselves
2742 // by calling setSystemGoroutine, this is decremented. NumGoroutines
2743 // uses this to wait for all system goroutines to mark themselves
2744 // before it counts them.
2745 var expectedSystemGoroutines uint32
2746
2747 // expectSystemGoroutine is called when starting a goroutine that will
2748 // call setSystemGoroutine. It increments expectedSystemGoroutines.
2749 func expectSystemGoroutine() {
2750 atomic.Xadd(&expectedSystemGoroutines, +1)
2751 }
2752
2753 // waitForSystemGoroutines waits for all currently expected system
2754 // goroutines to register themselves.
2755 func waitForSystemGoroutines() {
2756 for atomic.Load(&expectedSystemGoroutines) > 0 {
2757 Gosched()
2758 osyield()
2759 }
2760 }
2761
2762 // setSystemGoroutine marks this goroutine as a "system goroutine".
2763 // In the gc toolchain this is done by comparing startpc to a list of
2764 // saved special PCs. In gccgo that approach does not work as startpc
2765 // is often a thunk that invokes the real function with arguments,
2766 // so the thunk address never matches the saved special PCs. Instead,
2767 // since there are only a limited number of "system goroutines",
2768 // we force each one to mark itself as special.
2769 func setSystemGoroutine() {
2770 getg().isSystemGoroutine = true
2771 atomic.Xadd(&sched.ngsys, +1)
2772 atomic.Xadd(&expectedSystemGoroutines, -1)
2773 }
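
// Illustrative sketch (not part of the runtime; the names are
// hypothetical): expectSystemGoroutine, setSystemGoroutine and
// waitForSystemGoroutines form a simple announce-then-fulfil counter, so
// a reader can wait until every announced goroutine has marked itself.
// The same shape with sync/atomic:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync/atomic"
//	)
//
//	var expected uint32
//
//	func expect() { atomic.AddUint32(&expected, 1) }
//	func fulfil() { atomic.AddUint32(&expected, ^uint32(0)) } // decrement
//
//	func waitAll() {
//		for atomic.LoadUint32(&expected) > 0 {
//			runtime.Gosched()
//		}
//	}
//
//	func main() {
//		expect()
//		go func() {
//			// ... mark ourselves as special, then:
//			fulfil()
//		}()
//		waitAll()
//		fmt.Println("all expected goroutines registered")
//	}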
2774
2775 // Put on gfree list.
2776 // If local list is too long, transfer a batch to the global list.
2777 func gfput(_p_ *p, gp *g) {
2778 if readgstatus(gp) != _Gdead {
2779 throw("gfput: bad status (not Gdead)")
2780 }
2781
2782 gp.schedlink.set(_p_.gfree)
2783 _p_.gfree = gp
2784 _p_.gfreecnt++
2785 if _p_.gfreecnt >= 64 {
2786 lock(&sched.gflock)
2787 for _p_.gfreecnt >= 32 {
2788 _p_.gfreecnt--
2789 gp = _p_.gfree
2790 _p_.gfree = gp.schedlink.ptr()
2791 gp.schedlink.set(sched.gfree)
2792 sched.gfree = gp
2793 sched.ngfree++
2794 }
2795 unlock(&sched.gflock)
2796 }
2797 }
2798
2799 // Get from gfree list.
2800 // If local list is empty, grab a batch from global list.
2801 func gfget(_p_ *p) *g {
2802 retry:
2803 gp := _p_.gfree
2804 if gp == nil && sched.gfree != nil {
2805 lock(&sched.gflock)
2806 for _p_.gfreecnt < 32 {
2807 if sched.gfree != nil {
2808 gp = sched.gfree
2809 sched.gfree = gp.schedlink.ptr()
2810 } else {
2811 break
2812 }
2813 _p_.gfreecnt++
2814 sched.ngfree--
2815 gp.schedlink.set(_p_.gfree)
2816 _p_.gfree = gp
2817 }
2818 unlock(&sched.gflock)
2819 goto retry
2820 }
2821 if gp != nil {
2822 _p_.gfree = gp.schedlink.ptr()
2823 _p_.gfreecnt--
2824 }
2825 return gp
2826 }
2827
2828 // Purge all cached G's from gfree list to the global list.
2829 func gfpurge(_p_ *p) {
2830 lock(&sched.gflock)
2831 for _p_.gfreecnt != 0 {
2832 _p_.gfreecnt--
2833 gp := _p_.gfree
2834 _p_.gfree = gp.schedlink.ptr()
2835 gp.schedlink.set(sched.gfree)
2836 sched.gfree = gp
2837 sched.ngfree++
2838 }
2839 unlock(&sched.gflock)
2840 }
2841
2842 // Breakpoint executes a breakpoint trap.
2843 func Breakpoint() {
2844 breakpoint()
2845 }
2846
2847 // dolockOSThread is called by LockOSThread and lockOSThread below
2848 // after they modify m.locked. Do not allow preemption during this call,
2849 // or else the m might be different in this function than in the caller.
2850 //go:nosplit
2851 func dolockOSThread() {
2852 _g_ := getg()
2853 _g_.m.lockedg = _g_
2854 _g_.lockedm = _g_.m
2855 }
2856
2857 //go:nosplit
2858
2859 // LockOSThread wires the calling goroutine to its current operating system thread.
2860 // Until the calling goroutine exits or calls UnlockOSThread, it will always
2861 // execute in that thread, and no other goroutine can.
2862 func LockOSThread() {
2863 getg().m.locked |= _LockExternal
2864 dolockOSThread()
2865 }
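
// A hedged usage example (not part of the runtime): LockOSThread is the
// public entry point into the lockedg/lockedm machinery above. A typical
// use is code that depends on per-thread OS state and must therefore keep
// running on the same kernel thread:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			runtime.LockOSThread()
//			defer runtime.UnlockOSThread()
//			// Anything here that relies on thread-local OS state
//			// (e.g. a thread-affine C library) sees a single thread.
//			close(done)
//		}()
//		<-done
//	}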
2866
2867 //go:nosplit
2868 func lockOSThread() {
2869 getg().m.locked += _LockInternal
2870 dolockOSThread()
2871 }
2872
2873 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
2874 // after they update m->locked. Do not allow preemption during this call,
2875 // or else the m might be different in this function than in the caller.
2876 //go:nosplit
2877 func dounlockOSThread() {
2878 _g_ := getg()
2879 if _g_.m.locked != 0 {
2880 return
2881 }
2882 _g_.m.lockedg = nil
2883 _g_.lockedm = nil
2884 }
2885
2886 //go:nosplit
2887
2888 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
2889 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
2890 func UnlockOSThread() {
2891 getg().m.locked &^= _LockExternal
2892 dounlockOSThread()
2893 }
2894
2895 //go:nosplit
2896 func unlockOSThread() {
2897 _g_ := getg()
2898 if _g_.m.locked < _LockInternal {
2899 systemstack(badunlockosthread)
2900 }
2901 _g_.m.locked -= _LockInternal
2902 dounlockOSThread()
2903 }
2904
2905 func badunlockosthread() {
2906 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
2907 }
2908
2909 func gcount() int32 {
2910 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
2911 for i := 0; ; i++ {
2912 _p_ := allp[i]
2913 if _p_ == nil {
2914 break
2915 }
2916 n -= _p_.gfreecnt
2917 }
2918
2919 // All these variables can be changed concurrently, so the result can be inconsistent.
2920 // But at least the current goroutine is running.
2921 if n < 1 {
2922 n = 1
2923 }
2924 return n
2925 }
2926
2927 func mcount() int32 {
2928 return sched.mcount
2929 }
2930
2931 var prof struct {
2932 lock uint32
2933 hz int32
2934 }
2935
2936 func _System() { _System() }
2937 func _ExternalCode() { _ExternalCode() }
2938 func _GC() { _GC() }
2939
2940 var _SystemPC = funcPC(_System)
2941 var _ExternalCodePC = funcPC(_ExternalCode)
2942 var _GCPC = funcPC(_GC)
2943
2944 // Called if we receive a SIGPROF signal.
2945 // Called by the signal handler, may run during STW.
2946 //go:nowritebarrierrec
2947 func sigprof(pc uintptr, gp *g, mp *m) {
2948 if prof.hz == 0 {
2949 return
2950 }
2951
2952 // Profiling runs concurrently with GC, so it must not allocate.
2953 // Set a trap in case the code does allocate.
2954 // Note that on windows, one thread takes profiles of all the
2955 // other threads, so mp is usually not getg().m.
2956 // In fact mp may not even be stopped.
2957 // See golang.org/issue/17165.
2958 getg().m.mallocing++
2959
2960 traceback := true
2961
2962 // If SIGPROF arrived while already fetching runtime callers
2963 // we can have trouble on older systems because the unwind
2964 // library calls dl_iterate_phdr which was not reentrant in
2965 // the past. alreadyInCallers checks for that.
2966 if gp == nil || alreadyInCallers() {
2967 traceback = false
2968 }
2969
2970 var stk [maxCPUProfStack]uintptr
2971 n := 0
2972 if traceback {
2973 var stklocs [maxCPUProfStack]location
2974 n = callers(0, stklocs[:])
2975
2976 for i := 0; i < n; i++ {
2977 stk[i] = stklocs[i].pc
2978 }
2979 }
2980
2981 if n <= 0 {
2982 // Normal traceback is impossible or has failed.
2983 // Account it against abstract "System" or "GC".
2984 n = 2
2985 stk[0] = pc
2986 if mp.preemptoff != "" || mp.helpgc != 0 {
2987 stk[1] = _GCPC + sys.PCQuantum
2988 } else {
2989 stk[1] = _SystemPC + sys.PCQuantum
2990 }
2991 }
2992
2993 if prof.hz != 0 {
2994 // Simple cas-lock to coordinate with setcpuprofilerate.
2995 for !atomic.Cas(&prof.lock, 0, 1) {
2996 osyield()
2997 }
2998 if prof.hz != 0 {
2999 cpuprof.add(stk[:n])
3000 }
3001 atomic.Store(&prof.lock, 0)
3002 }
3003 getg().m.mallocing--
3004 }
3005
3006 // Use global arrays rather than using up lots of stack space in the
3007 // signal handler. This is safe since while we are executing a SIGPROF
3008 // signal other SIGPROF signals are blocked.
3009 var nonprofGoStklocs [maxCPUProfStack]location
3010 var nonprofGoStk [maxCPUProfStack]uintptr
3011
3012 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3013 // and the signal handler collected a stack trace in sigprofCallers.
3014 // When this is called, sigprofCallersUse will be non-zero.
3015 // g is nil, and what we can do is very limited.
3016 //go:nosplit
3017 //go:nowritebarrierrec
3018 func sigprofNonGo(pc uintptr) {
3019 if prof.hz != 0 {
3020 n := callers(0, nonprofGoStklocs[:])
3021
3022 for i := 0; i < n; i++ {
3023 nonprofGoStk[i] = nonprofGoStklocs[i].pc
3024 }
3025
3026 if n <= 0 {
3027 n = 2
3028 nonprofGoStk[0] = pc
3029 nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
3030 }
3031
3032 // Simple cas-lock to coordinate with setcpuprofilerate.
3033 for !atomic.Cas(&prof.lock, 0, 1) {
3034 osyield()
3035 }
3036 if prof.hz != 0 {
3037 cpuprof.addNonGo(nonprofGoStk[:n])
3038 }
3039 atomic.Store(&prof.lock, 0)
3040 }
3041 }
3042
3043 // Arrange for the CPU profiler to sample hz times a second.
3044 func setcpuprofilerate_m(hz int32) {
3045 // Force sane arguments.
3046 if hz < 0 {
3047 hz = 0
3048 }
3049
3050 // Disable preemption, otherwise we can be rescheduled to another thread
3051 // that has profiling enabled.
3052 _g_ := getg()
3053 _g_.m.locks++
3054
3055 // Stop profiler on this thread so that it is safe to lock prof.
3056 // if a profiling signal came in while we had prof locked,
3057 // it would deadlock.
3058 resetcpuprofiler(0)
3059
3060 for !atomic.Cas(&prof.lock, 0, 1) {
3061 osyield()
3062 }
3063 prof.hz = hz
3064 atomic.Store(&prof.lock, 0)
3065
3066 lock(&sched.lock)
3067 sched.profilehz = hz
3068 unlock(&sched.lock)
3069
3070 if hz != 0 {
3071 resetcpuprofiler(hz)
3072 }
3073
3074 _g_.m.locks--
3075 }
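
// A hedged usage example (not part of the runtime): user code normally
// reaches this rate-setting path through runtime/pprof rather than by
// calling runtime.SetCPUProfileRate directly:
//
//	package main
//
//	import (
//		"os"
//		"runtime/pprof"
//	)
//
//	func main() {
//		f, err := os.Create("cpu.prof")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		if err := pprof.StartCPUProfile(f); err != nil { // turns on SIGPROF sampling
//			panic(err)
//		}
//		defer pprof.StopCPUProfile()
//		// ... workload to be profiled ...
//	}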
3076
3077 // Change number of processors. The world is stopped, sched is locked.
3078 // gcworkbufs are not being modified by either the GC or
3079 // the write barrier code.
3080 // Returns list of Ps with local work, they need to be scheduled by the caller.
3081 func procresize(nprocs int32) *p {
3082 old := gomaxprocs
3083 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
3084 throw("procresize: invalid arg")
3085 }
3086 if trace.enabled {
3087 traceGomaxprocs(nprocs)
3088 }
3089
3090 // update statistics
3091 now := nanotime()
3092 if sched.procresizetime != 0 {
3093 sched.totaltime += int64(old) * (now - sched.procresizetime)
3094 }
3095 sched.procresizetime = now
3096
3097 // initialize new P's
3098 for i := int32(0); i < nprocs; i++ {
3099 pp := allp[i]
3100 if pp == nil {
3101 pp = new(p)
3102 pp.id = i
3103 pp.status = _Pgcstop
3104 pp.sudogcache = pp.sudogbuf[:0]
3105 pp.deferpool = pp.deferpoolbuf[:0]
3106 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3107 }
3108 if pp.mcache == nil {
3109 if old == 0 && i == 0 {
3110 if getg().m.mcache == nil {
3111 throw("missing mcache?")
3112 }
3113 pp.mcache = getg().m.mcache // bootstrap
3114 } else {
3115 pp.mcache = allocmcache()
3116 }
3117 }
3118 }
3119
3120 // free unused P's
3121 for i := nprocs; i < old; i++ {
3122 p := allp[i]
3123 if trace.enabled {
3124 if p == getg().m.p.ptr() {
3125 // moving to p[0], pretend that we were descheduled
3126 // and then scheduled again to keep the trace sane.
3127 traceGoSched()
3128 traceProcStop(p)
3129 }
3130 }
3131 // move all runnable goroutines to the global queue
3132 for p.runqhead != p.runqtail {
3133 // pop from tail of local queue
3134 p.runqtail--
3135 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3136 // push onto head of global queue
3137 globrunqputhead(gp)
3138 }
3139 if p.runnext != 0 {
3140 globrunqputhead(p.runnext.ptr())
3141 p.runnext = 0
3142 }
3143 // if there's a background worker, make it runnable and put
3144 // it on the global queue so it can clean itself up
3145 if gp := p.gcBgMarkWorker.ptr(); gp != nil {
3146 casgstatus(gp, _Gwaiting, _Grunnable)
3147 if trace.enabled {
3148 traceGoUnpark(gp, 0)
3149 }
3150 globrunqput(gp)
3151 // This assignment doesn't race because the
3152 // world is stopped.
3153 p.gcBgMarkWorker.set(nil)
3154 }
3155 for i := range p.sudogbuf {
3156 p.sudogbuf[i] = nil
3157 }
3158 p.sudogcache = p.sudogbuf[:0]
3159 for i := range p.deferpoolbuf {
3160 p.deferpoolbuf[i] = nil
3161 }
3162 p.deferpool = p.deferpoolbuf[:0]
3163 freemcache(p.mcache)
3164 p.mcache = nil
3165 gfpurge(p)
3166 traceProcFree(p)
3167 p.status = _Pdead
3168 // can't free P itself because it can be referenced by an M in syscall
3169 }
3170
3171 _g_ := getg()
3172 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3173 // continue to use the current P
3174 _g_.m.p.ptr().status = _Prunning
3175 } else {
3176 // release the current P and acquire allp[0]
3177 if _g_.m.p != 0 {
3178 _g_.m.p.ptr().m = 0
3179 }
3180 _g_.m.p = 0
3181 _g_.m.mcache = nil
3182 p := allp[0]
3183 p.m = 0
3184 p.status = _Pidle
3185 acquirep(p)
3186 if trace.enabled {
3187 traceGoStart()
3188 }
3189 }
3190 var runnablePs *p
3191 for i := nprocs - 1; i >= 0; i-- {
3192 p := allp[i]
3193 if _g_.m.p.ptr() == p {
3194 continue
3195 }
3196 p.status = _Pidle
3197 if runqempty(p) {
3198 pidleput(p)
3199 } else {
3200 p.m.set(mget())
3201 p.link.set(runnablePs)
3202 runnablePs = p
3203 }
3204 }
3205 stealOrder.reset(uint32(nprocs))
3206 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
3207 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
3208 return runnablePs
3209 }
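
// A hedged usage example (not part of the runtime): procresize runs when
// the number of Ps changes, which user code requests through
// runtime.GOMAXPROCS (the full call path also stops the world first):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		old := runtime.GOMAXPROCS(2) // ask for two Ps; returns the previous setting
//		fmt.Println("previous GOMAXPROCS:", old)
//		fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0)) // 0 only queries
//	}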
3210
3211 // Associate p and the current m.
3212 //
3213 // This function is allowed to have write barriers even if the caller
3214 // isn't because it immediately acquires _p_.
3215 //
3216 //go:yeswritebarrierrec
3217 func acquirep(_p_ *p) {
3218 // Do the part that isn't allowed to have write barriers.
3219 acquirep1(_p_)
3220
3221 // have p; write barriers now allowed
3222 _g_ := getg()
3223 _g_.m.mcache = _p_.mcache
3224
3225 if trace.enabled {
3226 traceProcStart()
3227 }
3228 }
3229
3230 // acquirep1 is the first step of acquirep, which actually acquires
3231 // _p_. This is broken out so we can disallow write barriers for this
3232 // part, since we don't yet have a P.
3233 //
3234 //go:nowritebarrierrec
3235 func acquirep1(_p_ *p) {
3236 _g_ := getg()
3237
3238 if _g_.m.p != 0 || _g_.m.mcache != nil {
3239 throw("acquirep: already in go")
3240 }
3241 if _p_.m != 0 || _p_.status != _Pidle {
3242 id := int32(0)
3243 if _p_.m != 0 {
3244 id = _p_.m.ptr().id
3245 }
3246 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
3247 throw("acquirep: invalid p state")
3248 }
3249 _g_.m.p.set(_p_)
3250 _p_.m.set(_g_.m)
3251 _p_.status = _Prunning
3252 }
3253
3254 // Disassociate p and the current m.
3255 func releasep() *p {
3256 _g_ := getg()
3257
3258 if _g_.m.p == 0 || _g_.m.mcache == nil {
3259 throw("releasep: invalid arg")
3260 }
3261 _p_ := _g_.m.p.ptr()
3262 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3263 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3264 throw("releasep: invalid p state")
3265 }
3266 if trace.enabled {
3267 traceProcStop(_g_.m.p.ptr())
3268 }
3269 _g_.m.p = 0
3270 _g_.m.mcache = nil
3271 _p_.m = 0
3272 _p_.status = _Pidle
3273 return _p_
3274 }
3275
3276 func incidlelocked(v int32) {
3277 lock(&sched.lock)
3278 sched.nmidlelocked += v
3279 if v > 0 {
3280 checkdead()
3281 }
3282 unlock(&sched.lock)
3283 }
3284
3285 // Check for deadlock situation.
3286 // The check is based on the number of running M's; if it is 0 -> deadlock.
3287 func checkdead() {
3288 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
3289 // there are no running goroutines. The calling program is
3290 // assumed to be running.
3291 if islibrary || isarchive {
3292 return
3293 }
3294
3295 // If we are dying because of a signal caught on an already idle thread,
3296 // freezetheworld will cause all running threads to block.
3297 // And runtime will essentially enter into deadlock state,
3298 // except that there is a thread that will call exit soon.
3299 if panicking > 0 {
3300 return
3301 }
3302
3303 // -1 for sysmon
3304 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
3305 if run > 0 {
3306 return
3307 }
3308 if run < 0 {
3309 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
3310 throw("checkdead: inconsistent counts")
3311 }
3312
3313 grunning := 0
3314 lock(&allglock)
3315 for i := 0; i < len(allgs); i++ {
3316 gp := allgs[i]
3317 if isSystemGoroutine(gp) {
3318 continue
3319 }
3320 s := readgstatus(gp)
3321 switch s &^ _Gscan {
3322 case _Gwaiting:
3323 grunning++
3324 case _Grunnable,
3325 _Grunning,
3326 _Gsyscall:
3327 unlock(&allglock)
3328 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
3329 throw("checkdead: runnable g")
3330 }
3331 }
3332 unlock(&allglock)
3333 if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3334 throw("no goroutines (main called runtime.Goexit) - deadlock!")
3335 }
3336
3337 // Maybe jump time forward for playground.
3338 gp := timejump()
3339 if gp != nil {
3340 casgstatus(gp, _Gwaiting, _Grunnable)
3341 globrunqput(gp)
3342 _p_ := pidleget()
3343 if _p_ == nil {
3344 throw("checkdead: no p for timer")
3345 }
3346 mp := mget()
3347 if mp == nil {
3348 // There should always be a free M since
3349 // nothing is running.
3350 throw("checkdead: no m for timer")
3351 }
3352 mp.nextp.set(_p_)
3353 notewakeup(&mp.park)
3354 return
3355 }
3356
3357 getg().m.throwing = -1 // do not dump full stacks
3358 throw("all goroutines are asleep - deadlock!")
3359 }
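
// A hedged example (not part of the runtime): the simplest way to reach
// the final throw in checkdead is a program whose only goroutine blocks
// forever, for example receiving on a channel that nobody can send to:
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // no sender exists; the runtime reports
//		     // "all goroutines are asleep - deadlock!"
//	}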
3360
3361 // forcegcperiod is the maximum time in nanoseconds between garbage
3362 // collections. If we go this long without a garbage collection, one
3363 // is forced to run.
3364 //
3365 // This is a variable for testing purposes. It normally doesn't change.
3366 var forcegcperiod int64 = 2 * 60 * 1e9
3367
3368 // Always runs without a P, so write barriers are not allowed.
3369 //
3370 //go:nowritebarrierrec
3371 func sysmon() {
3372 // If a heap span goes unused for 5 minutes after a garbage collection,
3373 // we hand it back to the operating system.
3374 scavengelimit := int64(5 * 60 * 1e9)
3375
3376 if debug.scavenge > 0 {
3377 // Scavenge-a-lot for testing.
3378 forcegcperiod = 10 * 1e6
3379 scavengelimit = 20 * 1e6
3380 }
3381
3382 lastscavenge := nanotime()
3383 nscavenge := 0
3384
3385 lasttrace := int64(0)
3386 idle := 0 // how many cycles in succession we have not woken anybody up
3387 delay := uint32(0)
3388 for {
3389 if idle == 0 { // start with 20us sleep...
3390 delay = 20
3391 } else if idle > 50 { // start doubling the sleep after 1ms...
3392 delay *= 2
3393 }
3394 if delay > 10*1000 { // up to 10ms
3395 delay = 10 * 1000
3396 }
3397 usleep(delay)
3398 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
3399 lock(&sched.lock)
3400 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3401 atomic.Store(&sched.sysmonwait, 1)
3402 unlock(&sched.lock)
3403 // Make wake-up period small enough
3404 // for the sampling to be correct.
3405 maxsleep := forcegcperiod / 2
3406 if scavengelimit < forcegcperiod {
3407 maxsleep = scavengelimit / 2
3408 }
3409 notetsleep(&sched.sysmonnote, maxsleep)
3410 lock(&sched.lock)
3411 atomic.Store(&sched.sysmonwait, 0)
3412 noteclear(&sched.sysmonnote)
3413 idle = 0
3414 delay = 20
3415 }
3416 unlock(&sched.lock)
3417 }
3418 // poll network if not polled for more than 10ms
3419 lastpoll := int64(atomic.Load64(&sched.lastpoll))
3420 now := nanotime()
3421 unixnow := unixnanotime()
3422 if lastpoll != 0 && lastpoll+10*1000*1000 < now {
3423 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3424 gp := netpoll(false) // non-blocking - returns list of goroutines
3425 if gp != nil {
3426 // Need to decrement number of idle locked M's
3427 // (pretending that one more is running) before injectglist.
3428 // Otherwise it can lead to the following situation:
3429 // injectglist grabs all P's but before it starts M's to run the P's,
3430 // another M returns from syscall, finishes running its G,
3431 // observes that there is no work to do and no other running M's
3432 // and reports deadlock.
3433 incidlelocked(-1)
3434 injectglist(gp)
3435 incidlelocked(1)
3436 }
3437 }
3438 // retake P's blocked in syscalls
3439 // and preempt long running G's
3440 if retake(now) != 0 {
3441 idle = 0
3442 } else {
3443 idle++
3444 }
3445 // check if we need to force a GC
3446 lastgc := int64(atomic.Load64(&memstats.last_gc))
3447 if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
3448 lock(&forcegc.lock)
3449 forcegc.idle = 0
3450 forcegc.g.schedlink = 0
3451 injectglist(forcegc.g)
3452 unlock(&forcegc.lock)
3453 }
3454 // scavenge heap once in a while
3455 if lastscavenge+scavengelimit/2 < now {
3456 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
3457 lastscavenge = now
3458 nscavenge++
3459 }
3460 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
3461 lasttrace = now
3462 schedtrace(debug.scheddetail > 0)
3463 }
3464 }
3465 }
3466
3467 var pdesc [_MaxGomaxprocs]struct {
3468 schedtick uint32
3469 schedwhen int64
3470 syscalltick uint32
3471 syscallwhen int64
3472 }
3473
3474 // forcePreemptNS is the time slice given to a G before it is
3475 // preempted.
3476 const forcePreemptNS = 10 * 1000 * 1000 // 10ms
3477
3478 func retake(now int64) uint32 {
3479 n := 0
3480 for i := int32(0); i < gomaxprocs; i++ {
3481 _p_ := allp[i]
3482 if _p_ == nil {
3483 continue
3484 }
3485 pd := &pdesc[i]
3486 s := _p_.status
3487 if s == _Psyscall {
3488 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
3489 t := int64(_p_.syscalltick)
3490 if int64(pd.syscalltick) != t {
3491 pd.syscalltick = uint32(t)
3492 pd.syscallwhen = now
3493 continue
3494 }
3495 // On the one hand we don't want to retake Ps if there is no other work to do,
3496 // but on the other hand we want to retake them eventually
3497 // because they can prevent the sysmon thread from deep sleep.
3498 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
3499 continue
3500 }
3501 // Need to decrement number of idle locked M's
3502 // (pretending that one more is running) before the CAS.
3503 // Otherwise the M from which we retake can exit the syscall,
3504 // increment nmidle and report deadlock.
3505 incidlelocked(-1)
3506 if atomic.Cas(&_p_.status, s, _Pidle) {
3507 if trace.enabled {
3508 traceGoSysBlock(_p_)
3509 traceProcStop(_p_)
3510 }
3511 n++
3512 _p_.syscalltick++
3513 handoffp(_p_)
3514 }
3515 incidlelocked(1)
3516 } else if s == _Prunning {
3517 // Preempt G if it's running for too long.
3518 t := int64(_p_.schedtick)
3519 if int64(pd.schedtick) != t {
3520 pd.schedtick = uint32(t)
3521 pd.schedwhen = now
3522 continue
3523 }
3524 if pd.schedwhen+forcePreemptNS > now {
3525 continue
3526 }
3527 preemptone(_p_)
3528 }
3529 }
3530 return uint32(n)
3531 }
3532
3533 // Tell all goroutines that they have been preempted and they should stop.
3534 // This function is purely best-effort. It can fail to inform a goroutine if a
3535 // processor just started running it.
3536 // No locks need to be held.
3537 // Returns true if preemption request was issued to at least one goroutine.
3538 func preemptall() bool {
3539 res := false
3540 for i := int32(0); i < gomaxprocs; i++ {
3541 _p_ := allp[i]
3542 if _p_ == nil || _p_.status != _Prunning {
3543 continue
3544 }
3545 if preemptone(_p_) {
3546 res = true
3547 }
3548 }
3549 return res
3550 }
3551
3552 // Tell the goroutine running on processor P to stop.
3553 // This function is purely best-effort. It can incorrectly fail to inform the
3554 // goroutine. It can inform the wrong goroutine. Even if it informs the
3555 // correct goroutine, that goroutine might ignore the request if it is
3556 // simultaneously executing newstack.
3557 // No lock needs to be held.
3558 // Returns true if preemption request was issued.
3559 // The actual preemption will happen at some point in the future
3560 // and will be indicated by the gp->status no longer being
3561 // Grunning.
3562 func preemptone(_p_ *p) bool {
3563 mp := _p_.m.ptr()
3564 if mp == nil || mp == getg().m {
3565 return false
3566 }
3567 gp := mp.curg
3568 if gp == nil || gp == mp.g0 {
3569 return false
3570 }
3571
3572 gp.preempt = true
3573
3574 // At this point the gc implementation sets gp.stackguard0 to
3575 // a value that causes the goroutine to suspend itself.
3576 // gccgo has no support for this, and it's hard to support.
3577 // The split stack code reads a value from its TCB.
3578 // We have no way to set a value in the TCB of a different thread.
3579 // And, of course, not all systems support split stack anyhow.
3580 // Checking the field in the g is expensive, since it requires
3581 // loading the g from TLS. The best mechanism is likely to be
3582 // setting a global variable and figuring out a way to efficiently
3583 // check that global variable.
3584 //
3585 // For now we check gp.preempt in schedule and mallocgc,
3586 // which is at least better than doing nothing at all.
3587
3588 return true
3589 }
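
// A hedged example (not part of the runtime): because gccgo, as described
// above, only notices gp.preempt in schedule and mallocgc, a tight loop
// that neither allocates nor yields can hold its P for a long time. An
// explicit runtime.Gosched (or any call that reaches the scheduler) gives
// the preemption request a place to take effect:
//
//	package main
//
//	import "runtime"
//
//	func spin() {
//		for i := 0; ; i++ {
//			if i%1024 == 0 {
//				runtime.Gosched() // cooperative yield point
//			}
//			// ... pure computation, no allocation ...
//		}
//	}
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		go spin()
//		runtime.Gosched() // without the yield above, main could starve here
//	}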
3590
3591 var starttime int64
3592
3593 func schedtrace(detailed bool) {
3594 now := nanotime()
3595 if starttime == 0 {
3596 starttime = now
3597 }
3598
3599 lock(&sched.lock)
3600 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
3601 if detailed {
3602 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
3603 }
3604 // We must be careful while reading data from P's, M's and G's.
3605 // Even if we hold schedlock, most data can be changed concurrently.
3606 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3607 for i := int32(0); i < gomaxprocs; i++ {
3608 _p_ := allp[i]
3609 if _p_ == nil {
3610 continue
3611 }
3612 mp := _p_.m.ptr()
3613 h := atomic.Load(&_p_.runqhead)
3614 t := atomic.Load(&_p_.runqtail)
3615 if detailed {
3616 id := int32(-1)
3617 if mp != nil {
3618 id = mp.id
3619 }
3620 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
3621 } else {
3622 // In non-detailed mode format lengths of per-P run queues as:
3623 // [len1 len2 len3 len4]
3624 print(" ")
3625 if i == 0 {
3626 print("[")
3627 }
3628 print(t - h)
3629 if i == gomaxprocs-1 {
3630 print("]\n")
3631 }
3632 }
3633 }
3634
3635 if !detailed {
3636 unlock(&sched.lock)
3637 return
3638 }
3639
3640 for mp := allm; mp != nil; mp = mp.alllink {
3641 _p_ := mp.p.ptr()
3642 gp := mp.curg
3643 lockedg := mp.lockedg
3644 id1 := int32(-1)
3645 if _p_ != nil {
3646 id1 = _p_.id
3647 }
3648 id2 := int64(-1)
3649 if gp != nil {
3650 id2 = gp.goid
3651 }
3652 id3 := int64(-1)
3653 if lockedg != nil {
3654 id3 = lockedg.goid
3655 }
3656 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
3657 }
3658
3659 lock(&allglock)
3660 for gi := 0; gi < len(allgs); gi++ {
3661 gp := allgs[gi]
3662 mp := gp.m
3663 lockedm := gp.lockedm
3664 id1 := int32(-1)
3665 if mp != nil {
3666 id1 = mp.id
3667 }
3668 id2 := int32(-1)
3669 if lockedm != nil {
3670 id2 = lockedm.id
3671 }
3672 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
3673 }
3674 unlock(&allglock)
3675 unlock(&sched.lock)
3676 }
3677
3678 // Put mp on midle list.
3679 // Sched must be locked.
3680 // May run during STW, so write barriers are not allowed.
3681 //go:nowritebarrierrec
3682 func mput(mp *m) {
3683 mp.schedlink = sched.midle
3684 sched.midle.set(mp)
3685 sched.nmidle++
3686 checkdead()
3687 }
3688
3689 // Try to get an m from midle list.
3690 // Sched must be locked.
3691 // May run during STW, so write barriers are not allowed.
3692 //go:nowritebarrierrec
3693 func mget() *m {
3694 mp := sched.midle.ptr()
3695 if mp != nil {
3696 sched.midle = mp.schedlink
3697 sched.nmidle--
3698 }
3699 return mp
3700 }
3701
3702 // Put gp on the global runnable queue.
3703 // Sched must be locked.
3704 // May run during STW, so write barriers are not allowed.
3705 //go:nowritebarrierrec
3706 func globrunqput(gp *g) {
3707 gp.schedlink = 0
3708 if sched.runqtail != 0 {
3709 sched.runqtail.ptr().schedlink.set(gp)
3710 } else {
3711 sched.runqhead.set(gp)
3712 }
3713 sched.runqtail.set(gp)
3714 sched.runqsize++
3715 }
3716
3717 // Put gp at the head of the global runnable queue.
3718 // Sched must be locked.
3719 // May run during STW, so write barriers are not allowed.
3720 //go:nowritebarrierrec
3721 func globrunqputhead(gp *g) {
3722 gp.schedlink = sched.runqhead
3723 sched.runqhead.set(gp)
3724 if sched.runqtail == 0 {
3725 sched.runqtail.set(gp)
3726 }
3727 sched.runqsize++
3728 }
3729
3730 // Put a batch of runnable goroutines on the global runnable queue.
3731 // Sched must be locked.
3732 func globrunqputbatch(ghead *g, gtail *g, n int32) {
3733 gtail.schedlink = 0
3734 if sched.runqtail != 0 {
3735 sched.runqtail.ptr().schedlink.set(ghead)
3736 } else {
3737 sched.runqhead.set(ghead)
3738 }
3739 sched.runqtail.set(gtail)
3740 sched.runqsize += n
3741 }
3742
3743 // Try to get a batch of G's from the global runnable queue.
3744 // Sched must be locked.
3745 func globrunqget(_p_ *p, max int32) *g {
3746 if sched.runqsize == 0 {
3747 return nil
3748 }
3749
3750 n := sched.runqsize/gomaxprocs + 1
3751 if n > sched.runqsize {
3752 n = sched.runqsize
3753 }
3754 if max > 0 && n > max {
3755 n = max
3756 }
3757 if n > int32(len(_p_.runq))/2 {
3758 n = int32(len(_p_.runq)) / 2
3759 }
3760
3761 sched.runqsize -= n
3762 if sched.runqsize == 0 {
3763 sched.runqtail = 0
3764 }
3765
3766 gp := sched.runqhead.ptr()
3767 sched.runqhead = gp.schedlink
3768 n--
3769 for ; n > 0; n-- {
3770 gp1 := sched.runqhead.ptr()
3771 sched.runqhead = gp1.schedlink
3772 runqput(_p_, gp1, false)
3773 }
3774 return gp
3775 }
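
// Illustrative sketch (not part of the runtime): the share of the global
// queue that globrunqget claims for one P under the capping rules above.
// globrunqgetShare is a hypothetical helper added only for illustration.
// For example, with runqsize=10, gomaxprocs=4, max=0 and a 256-entry local
// run queue it returns 10/4 + 1 = 3.
func globrunqgetShare(runqsize, gomaxprocs, max, localCap int32) int32 {
	n := runqsize/gomaxprocs + 1
	if n > runqsize {
		n = runqsize
	}
	if max > 0 && n > max {
		n = max
	}
	if n > localCap/2 {
		n = localCap / 2
	}
	return n
}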
3776
3777 // Put p on _Pidle list.
3778 // Sched must be locked.
3779 // May run during STW, so write barriers are not allowed.
3780 //go:nowritebarrierrec
3781 func pidleput(_p_ *p) {
3782 if !runqempty(_p_) {
3783 throw("pidleput: P has non-empty run queue")
3784 }
3785 _p_.link = sched.pidle
3786 sched.pidle.set(_p_)
3787 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
3788 }
3789
3790 // Try to get a p from _Pidle list.
3791 // Sched must be locked.
3792 // May run during STW, so write barriers are not allowed.
3793 //go:nowritebarrierrec
3794 func pidleget() *p {
3795 _p_ := sched.pidle.ptr()
3796 if _p_ != nil {
3797 sched.pidle = _p_.link
3798 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
3799 }
3800 return _p_
3801 }
3802
3803 // runqempty returns true if _p_ has no Gs on its local run queue.
3804 // It never returns true spuriously.
3805 func runqempty(_p_ *p) bool {
3806 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
3807 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
3808 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
3809 // does not mean the queue is empty.
3810 for {
3811 head := atomic.Load(&_p_.runqhead)
3812 tail := atomic.Load(&_p_.runqtail)
3813 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
3814 if tail == atomic.Load(&_p_.runqtail) {
3815 return head == tail && runnext == 0
3816 }
3817 }
3818 }
3819
3820 // To shake out latent assumptions about scheduling order,
3821 // we introduce some randomness into scheduling decisions
3822 // when running with the race detector.
3823 // The need for this was made obvious by changing the
3824 // (deterministic) scheduling order in Go 1.5 and breaking
3825 // many poorly-written tests.
3826 // With the randomness here, as long as the tests pass
3827 // consistently with -race, they shouldn't have latent scheduling
3828 // assumptions.
3829 const randomizeScheduler = raceenabled
3830
3831 // runqput tries to put g on the local runnable queue.
3832 // If next is false, runqput adds g to the tail of the runnable queue.
3833 // If next is true, runqput puts g in the _p_.runnext slot.
3834 // If the run queue is full, runqput puts g on the global queue.
3835 // Executed only by the owner P.
3836 func runqput(_p_ *p, gp *g, next bool) {
3837 if randomizeScheduler && next && fastrand()%2 == 0 {
3838 next = false
3839 }
3840
3841 if next {
3842 retryNext:
3843 oldnext := _p_.runnext
3844 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
3845 goto retryNext
3846 }
3847 if oldnext == 0 {
3848 return
3849 }
3850 // Kick the old runnext out to the regular run queue.
3851 gp = oldnext.ptr()
3852 }
3853
3854 retry:
3855 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
3856 t := _p_.runqtail
3857 if t-h < uint32(len(_p_.runq)) {
3858 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
3859 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
3860 return
3861 }
3862 if runqputslow(_p_, gp, h, t) {
3863 return
3864 }
3865 // The queue is not full, so now the put above must succeed.
3866 goto retry
3867 }
3868
3869 // Put g and a batch of work from local runnable queue on global queue.
3870 // Executed only by the owner P.
3871 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
3872 var batch [len(_p_.runq)/2 + 1]*g
3873
3874 // First, grab a batch from local queue.
3875 n := t - h
3876 n = n / 2
3877 if n != uint32(len(_p_.runq)/2) {
3878 throw("runqputslow: queue is not full")
3879 }
3880 for i := uint32(0); i < n; i++ {
3881 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
3882 }
3883 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
3884 return false
3885 }
3886 batch[n] = gp
3887
3888 if randomizeScheduler {
3889 for i := uint32(1); i <= n; i++ {
3890 j := fastrand() % (i + 1)
3891 batch[i], batch[j] = batch[j], batch[i]
3892 }
3893 }
3894
3895 // Link the goroutines.
3896 for i := uint32(0); i < n; i++ {
3897 batch[i].schedlink.set(batch[i+1])
3898 }
3899
3900 // Now put the batch on global queue.
3901 lock(&sched.lock)
3902 globrunqputbatch(batch[0], batch[n], int32(n+1))
3903 unlock(&sched.lock)
3904 return true
3905 }
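
// Illustrative sketch (not part of the runtime): the accounting of the
// overflow path above. runqOverflowCounts is a hypothetical helper; with
// the 256-entry local queue, runqputslow moves half of it (128 goroutines)
// plus the incoming g to the global queue, leaving 128 entries locally.
func runqOverflowCounts(localCap uint32) (toGlobal, leftLocal uint32) {
	half := localCap / 2             // drained from the local queue
	return half + 1, localCap - half // +1 for the g being enqueued
}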
3906
3907 // Get g from local runnable queue.
3908 // If inheritTime is true, gp should inherit the remaining time in the
3909 // current time slice. Otherwise, it should start a new time slice.
3910 // Executed only by the owner P.
3911 func runqget(_p_ *p) (gp *g, inheritTime bool) {
3912 // If there's a runnext, it's the next G to run.
3913 for {
3914 next := _p_.runnext
3915 if next == 0 {
3916 break
3917 }
3918 if _p_.runnext.cas(next, 0) {
3919 return next.ptr(), true
3920 }
3921 }
3922
3923 for {
3924 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
3925 t := _p_.runqtail
3926 if t == h {
3927 return nil, false
3928 }
3929 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
3930 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
3931 return gp, false
3932 }
3933 }
3934 }
3935
3936 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
3937 // Batch is a ring buffer starting at batchHead.
3938 // Returns number of grabbed goroutines.
3939 // Can be executed by any P.
3940 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
3941 for {
3942 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
3943 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
3944 n := t - h
3945 n = n - n/2
3946 if n == 0 {
3947 if stealRunNextG {
3948 // Try to steal from _p_.runnext.
3949 if next := _p_.runnext; next != 0 {
3950 // Sleep to ensure that _p_ isn't about to run the g we
3951 // are about to steal.
3952 // The important use case here is when the g running on _p_
3953 // ready()s another g and then almost immediately blocks.
3954 // Instead of stealing runnext in this window, back off
3955 // to give _p_ a chance to schedule runnext. This will avoid
3956 // thrashing gs between different Ps.
3957 // A sync chan send/recv takes ~50ns as of time of writing,
3958 // so 3us gives ~50x overshoot.
3959 if GOOS != "windows" {
3960 usleep(3)
3961 } else {
3962 // On windows, system timer granularity is 1-15ms,
3963 // which is way too much for this optimization.
3964 // So just yield.
3965 osyield()
3966 }
3967 if !_p_.runnext.cas(next, 0) {
3968 continue
3969 }
3970 batch[batchHead%uint32(len(batch))] = next
3971 return 1
3972 }
3973 }
3974 return 0
3975 }
3976 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
3977 continue
3978 }
3979 for i := uint32(0); i < n; i++ {
3980 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
3981 batch[(batchHead+i)%uint32(len(batch))] = g
3982 }
3983 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
3984 return n
3985 }
3986 }
3987 }
3988
3989 // Steal half of the elements from the local runnable queue of p2
3990 // and put them onto the local runnable queue of _p_.
3991 // Returns one of the stolen elements (or nil if the steal failed).
3992 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
3993 t := _p_.runqtail
3994 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
3995 if n == 0 {
3996 return nil
3997 }
3998 n--
3999 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4000 if n == 0 {
4001 return gp
4002 }
4003 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4004 if t-h+n >= uint32(len(_p_.runq)) {
4005 throw("runqsteal: runq overflow")
4006 }
4007 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4008 return gp
4009 }
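
// Illustrative sketch (not part of the runtime): how many goroutines
// runqgrab takes from a victim queue holding qlen runnable Gs. stealCount
// is a hypothetical helper; the steal is the "upper half", i.e.
// qlen - qlen/2: 8 -> 4, 7 -> 4, 1 -> 1, and 0 -> 0 (the 0 case is where
// the runnext steal above may apply instead).
func stealCount(qlen uint32) uint32 {
	return qlen - qlen/2
}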
4010
4011 //go:linkname setMaxThreads runtime_debug.setMaxThreads
4012 func setMaxThreads(in int) (out int) {
4013 lock(&sched.lock)
4014 out = int(sched.maxmcount)
4015 if in > 0x7fffffff { // MaxInt32
4016 sched.maxmcount = 0x7fffffff
4017 } else {
4018 sched.maxmcount = int32(in)
4019 }
4020 checkmcount()
4021 unlock(&sched.lock)
4022 return
4023 }
4024
4025 //go:nosplit
4026 func procPin() int {
4027 _g_ := getg()
4028 mp := _g_.m
4029
4030 mp.locks++
4031 return int(mp.p.ptr().id)
4032 }
4033
4034 //go:nosplit
4035 func procUnpin() {
4036 _g_ := getg()
4037 _g_.m.locks--
4038 }
4039
4040 //go:linkname sync_runtime_procPin sync.runtime_procPin
4041 //go:nosplit
4042 func sync_runtime_procPin() int {
4043 return procPin()
4044 }
4045
4046 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4047 //go:nosplit
4048 func sync_runtime_procUnpin() {
4049 procUnpin()
4050 }
4051
4052 //go:linkname sync_atomic_runtime_procPin sync_atomic.runtime_procPin
4053 //go:nosplit
4054 func sync_atomic_runtime_procPin() int {
4055 return procPin()
4056 }
4057
4058 //go:linkname sync_atomic_runtime_procUnpin sync_atomic.runtime_procUnpin
4059 //go:nosplit
4060 func sync_atomic_runtime_procUnpin() {
4061 procUnpin()
4062 }
4063
4064 // Active spinning for sync.Mutex.
4065 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4066 //go:nosplit
4067 func sync_runtime_canSpin(i int) bool {
4068 // sync.Mutex is cooperative, so we are conservative with spinning.
4069 // Spin only a few times and only if running on a multicore machine,
4070 // GOMAXPROCS>1, there is at least one other running P, and the local runq is empty.
4071 // As opposed to runtime mutex we don't do passive spinning here,
4072 // because there can be work on the global runq or on other Ps.
4073 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4074 return false
4075 }
4076 if p := getg().m.p.ptr(); !runqempty(p) {
4077 return false
4078 }
4079 return true
4080 }
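
// Illustrative sketch (not part of the runtime): the shape of the decision
// in sync_runtime_canSpin above, with the runtime state passed in
// explicitly. canSpinSketch is a hypothetical helper; activeSpin stands in
// for the active_spin constant, and the other parameters mirror ncpu,
// gomaxprocs, sched.npidle, sched.nmspinning and local run queue emptiness.
func canSpinSketch(i, activeSpin int, ncpu, gomaxprocs, npidle, nmspinning int32, localRunqEmpty bool) bool {
	if i >= activeSpin || ncpu <= 1 || gomaxprocs <= npidle+nmspinning+1 {
		return false
	}
	return localRunqEmpty
}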
4081
4082 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4083 //go:nosplit
4084 func sync_runtime_doSpin() {
4085 procyield(active_spin_cnt)
4086 }
4087
4088 var stealOrder randomOrder
4089
4090 // randomOrder/randomEnum are helper types for randomized work stealing.
4091 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4092 // The algorithm is based on the fact that if X and GOMAXPROCS
4093 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
4094 type randomOrder struct {
4095 count uint32
4096 coprimes []uint32
4097 }
4098
4099 type randomEnum struct {
4100 i uint32
4101 count uint32
4102 pos uint32
4103 inc uint32
4104 }
4105
4106 func (ord *randomOrder) reset(count uint32) {
4107 ord.count = count
4108 ord.coprimes = ord.coprimes[:0]
4109 for i := uint32(1); i <= count; i++ {
4110 if gcd(i, count) == 1 {
4111 ord.coprimes = append(ord.coprimes, i)
4112 }
4113 }
4114 }
4115
4116 func (ord *randomOrder) start(i uint32) randomEnum {
4117 return randomEnum{
4118 count: ord.count,
4119 pos: i % ord.count,
4120 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4121 }
4122 }
4123
4124 func (enum *randomEnum) done() bool {
4125 return enum.i == enum.count
4126 }
4127
4128 func (enum *randomEnum) next() {
4129 enum.i++
4130 enum.pos = (enum.pos + enum.inc) % enum.count
4131 }
4132
4133 func (enum *randomEnum) position() uint32 {
4134 return enum.pos
4135 }
4136
4137 func gcd(a, b uint32) uint32 {
4138 for b != 0 {
4139 a, b = b, a%b
4140 }
4141 return a
4142 }
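
// Illustrative sketch (not part of the runtime): what the coprime stride in
// randomOrder/randomEnum buys us. enumerateWithStride is a hypothetical
// helper; with count=6 the coprimes are {1, 5}, and starting at pos=2 with
// inc=5 it visits 2, 1, 0, 5, 4, 3 - every P exactly once, in a shuffled order.
func enumerateWithStride(count, start, inc uint32) []uint32 {
	order := make([]uint32, 0, count)
	pos := start % count
	for i := uint32(0); i < count; i++ {
		order = append(order, pos)
		pos = (pos + inc) % count
	}
	return order
}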