runtime: Copy runtime_printf from other Go library.
[gcc.git] / libgo / runtime / proc.c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 #include <limits.h>
6 #include <stdlib.h>
7 #include <pthread.h>
8 #include <unistd.h>
9
10 #include "config.h"
11 #include "runtime.h"
12 #include "arch.h"
13 #include "defs.h"
14 #include "malloc.h"
15 #include "go-defer.h"
16
17 #ifdef USING_SPLIT_STACK
18
19 /* FIXME: These are not declared anywhere. */
20
21 extern void __splitstack_getcontext(void *context[10]);
22
23 extern void __splitstack_setcontext(void *context[10]);
24
25 extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);
26
27 extern void * __splitstack_resetcontext(void *context[10], size_t *);
28
29 extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
30 void **);
31
32 extern void __splitstack_block_signals (int *, int *);
33
34 extern void __splitstack_block_signals_context (void *context[10], int *,
35 int *);
36
37 #endif
38
39 #if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
40 # ifdef PTHREAD_STACK_MIN
41 # define StackMin PTHREAD_STACK_MIN
42 # else
43 # define StackMin 8192
44 # endif
45 #else
46 # define StackMin (2 * 1024 * 1024)
47 #endif
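// When split stacks are in use, the initial stack segment can stay small
// because __splitstack_makecontext grows the stack on demand; without
// split-stack support each goroutine gets a single fixed-size allocation
// from runtime_malg, so StackMin has to cover the entire stack up front.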
48
49 uintptr runtime_stacks_sys;
50
51 static void schedule(G*);
52
53 static void gtraceback(G*);
54
55 typedef struct Sched Sched;
56
57 M runtime_m0;
58 G runtime_g0; // idle goroutine for m0
59
60 #ifdef __rtems__
61 #define __thread
62 #endif
63
64 static __thread G *g;
65 static __thread M *m;
66
67 #ifndef SETCONTEXT_CLOBBERS_TLS
68
69 static inline void
70 initcontext(void)
71 {
72 }
73
74 static inline void
75 fixcontext(ucontext_t *c __attribute__ ((unused)))
76 {
77 }
78
79 # else
80
81 # if defined(__x86_64__) && defined(__sun__)
82
83 // x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
84 // register to that of the thread which called getcontext. The effect
85 // is that the address of all __thread variables changes. This bug
86 // also affects pthread_self() and pthread_getspecific. We work
87 // around it by clobbering the context field directly to keep %fs the
88 // same.
89
90 static __thread greg_t fs;
91
92 static inline void
93 initcontext(void)
94 {
95 ucontext_t c;
96
97 getcontext(&c);
98 fs = c.uc_mcontext.gregs[REG_FSBASE];
99 }
100
101 static inline void
102 fixcontext(ucontext_t* c)
103 {
104 c->uc_mcontext.gregs[REG_FSBASE] = fs;
105 }
106
107 # else
108
109 # error unknown case for SETCONTEXT_CLOBBERS_TLS
110
111 # endif
112
113 #endif
114
115 // We can not always refer to the TLS variables directly. The
116 // compiler will call tls_get_addr to get the address of the variable,
117 // and it may hold it in a register across a call to schedule. When
118 // we get back from the call we may be running in a different thread,
119 // in which case the register now points to the TLS variable for a
120 // different thread. We use non-inlinable functions to avoid this
121 // when necessary.
122
123 G* runtime_g(void) __attribute__ ((noinline, no_split_stack));
124
125 G*
126 runtime_g(void)
127 {
128 return g;
129 }
130
131 M* runtime_m(void) __attribute__ ((noinline, no_split_stack));
132
133 M*
134 runtime_m(void)
135 {
136 return m;
137 }
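// Purely illustrative sketch of the failure mode described above (the
// names cached and use_g are hypothetical): if the address of the TLS
// variable were kept in a register or local across a scheduling point,
//
//	G **cached = &g;      // address of this thread's TLS slot
//	runtime_gosched();    // may resume on a different OS thread
//	use_g(*cached);       // would read the previous thread's g
//
// the read after the call could see another thread's variable. Forcing
// every access through the noinline runtime_g()/runtime_m() accessors
// makes the compiler recompute the TLS address after each call.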
138
139 int32 runtime_gcwaiting;
140
141 // Go scheduler
142 //
143 // The Go scheduler's job is to match ready-to-run goroutines (`g's)
144 // with waiting-for-work schedulers (`m's). If there are ready g's
145 // and no waiting m's, ready() will start a new m running in a new
146 // OS thread, so that all ready g's can run simultaneously, up to a limit.
147 // For now, m's never go away.
148 //
149 // By default, Go keeps only one kernel thread (m) running user code
150 // at a time; other threads may be blocked in the operating system.
151 // Setting the environment variable $GOMAXPROCS or calling
152 // runtime.GOMAXPROCS() will change the number of user threads
153 // allowed to execute simultaneously. $GOMAXPROCS is thus an
154 // approximation of the maximum number of cores to use.
155 //
156 // Even a program that can run without deadlock in a single process
157 // might use more m's if given the chance. For example, the prime
158 // sieve will use as many m's as there are primes (up to runtime_sched.mmax),
159 // allowing different stages of the pipeline to execute in parallel.
160 // We could revisit this choice, only kicking off new m's for blocking
161 // system calls, but that would limit the amount of parallel computation
162 // that Go would try to do.
163 //
164 // In general, one could imagine all sorts of refinements to the
165 // scheduler, but the goal now is just to get something working on
166 // Linux and OS X.
167
168 struct Sched {
169 Lock;
170
171 G *gfree; // available g's (status == Gdead)
172 int32 goidgen;
173
174 G *ghead; // g's waiting to run
175 G *gtail;
176 int32 gwait; // number of g's waiting to run
177 int32 gcount; // number of g's that are alive
178 int32 grunning; // number of g's running on cpu or in syscall
179
180 M *mhead; // m's waiting for work
181 int32 mwait; // number of m's waiting for work
182 int32 mcount; // number of m's that have been created
183
184 volatile uint32 atomic; // atomic scheduling word (see below)
185
186 int32 profilehz; // cpu profiling rate
187
188 bool init; // running initialization
189 bool lockmain; // init called runtime.LockOSThread
190
191 Note stopped; // one g can set waitstop and wait here for m's to stop
192 };
193
194 // The atomic word in sched is an atomic uint32 that
195 // holds these fields.
196 //
197 // [15 bits] mcpu number of m's executing on cpu
198 // [15 bits] mcpumax max number of m's allowed on cpu
199 // [1 bit] waitstop some g is waiting on stopped
200 // [1 bit] gwaiting gwait != 0
201 //
202 // These fields are the information needed by entersyscall
203 // and exitsyscall to decide whether to coordinate with the
204 // scheduler. Packing them into a single machine word lets
205 // them use a fast path with a single atomic read/write and
206 // no lock/unlock. This greatly reduces contention in
207 // syscall- or cgo-heavy multithreaded programs.
208 //
209 // Except for entersyscall and exitsyscall, the manipulations
210 // to these fields only happen while holding the schedlock,
211 // so the routines holding schedlock only need to worry about
212 // what entersyscall and exitsyscall do, not the other routines
213 // (which also use the schedlock).
214 //
215 // In particular, entersyscall and exitsyscall only read mcpumax,
216 // waitstop, and gwaiting. They never write them. Thus, writes to those
217 // fields can be done (holding schedlock) without fear of write conflicts.
218 // There may still be logic conflicts: for example, the set of waitstop must
219 // be conditioned on mcpu >= mcpumax or else the wait may be a
220 // spurious sleep. The Promela model in proc.p verifies these accesses.
221 enum {
222 mcpuWidth = 15,
223 mcpuMask = (1<<mcpuWidth) - 1,
224 mcpuShift = 0,
225 mcpumaxShift = mcpuShift + mcpuWidth,
226 waitstopShift = mcpumaxShift + mcpuWidth,
227 gwaitingShift = waitstopShift+1,
228
229 // The max value of GOMAXPROCS is constrained
230 // by the max value we can store in the bit fields
231 // of the atomic word. Reserve a few high values
232 // so that we can detect accidental decrement
233 // beyond zero.
234 maxgomaxprocs = mcpuMask - 10,
235 };
236
237 #define atomic_mcpu(v) (((v)>>mcpuShift)&mcpuMask)
238 #define atomic_mcpumax(v) (((v)>>mcpumaxShift)&mcpuMask)
239 #define atomic_waitstop(v) (((v)>>waitstopShift)&1)
240 #define atomic_gwaiting(v) (((v)>>gwaitingShift)&1)
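// Worked example (illustrative): with GOMAXPROCS = 4 and two m's running
// Go code, and nothing stopping or waiting, the word would be
//
//	atomic = (2 << mcpuShift) | (4 << mcpumaxShift)
//
// so atomic_mcpu(atomic) == 2, atomic_mcpumax(atomic) == 4, and the
// waitstop and gwaiting bits are both clear.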
241
242 Sched runtime_sched;
243 int32 runtime_gomaxprocs;
244 bool runtime_singleproc;
245
246 static bool canaddmcpu(void);
247
248 // An m that is waiting for notewakeup(&m->havenextg). This may
249 // only be accessed while the scheduler lock is held. This is used to
250 // minimize the number of times we call notewakeup while the scheduler
251 // lock is held, since the m will normally move quickly to lock the
252 // scheduler itself, producing lock contention.
253 static M* mwakeup;
254
255 // Scheduling helpers. Sched must be locked.
256 static void gput(G*); // put/get on ghead/gtail
257 static G* gget(void);
258 static void mput(M*); // put/get on mhead
259 static M* mget(G*);
260 static void gfput(G*); // put/get on gfree
261 static G* gfget(void);
262 static void matchmg(void); // match m's to g's
263 static void readylocked(G*); // ready, but sched is locked
264 static void mnextg(M*, G*);
265 static void mcommoninit(M*);
266
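// Set the mcpumax field of the atomic scheduling word to n, leaving the
// other packed fields untouched. The CAS loop retries if another field
// changed concurrently.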
267 void
268 setmcpumax(uint32 n)
269 {
270 uint32 v, w;
271
272 for(;;) {
273 v = runtime_sched.atomic;
274 w = v;
275 w &= ~(mcpuMask<<mcpumaxShift);
276 w |= n<<mcpumaxShift;
277 if(runtime_cas(&runtime_sched.atomic, v, w))
278 break;
279 }
280 }
281
282 // First function run by a new goroutine. This replaces gogocall.
283 static void
284 kickoff(void)
285 {
286 void (*fn)(void*);
287
288 fn = (void (*)(void*))(g->entry);
289 fn(g->param);
290 runtime_goexit();
291 }
292
293 // Switch context to a different goroutine. This is like longjmp.
294 static void runtime_gogo(G*) __attribute__ ((noinline));
295 static void
296 runtime_gogo(G* newg)
297 {
298 #ifdef USING_SPLIT_STACK
299 __splitstack_setcontext(&newg->stack_context[0]);
300 #endif
301 g = newg;
302 newg->fromgogo = true;
303 fixcontext(&newg->context);
304 setcontext(&newg->context);
305 runtime_throw("gogo setcontext returned");
306 }
307
308 // Save context and call fn passing g as a parameter. This is like
309 // setjmp. Because getcontext always returns 0, unlike setjmp, we use
310 // g->fromgogo as a flag. It will be true if we got here via
311 // setcontext. g == nil the first time this is called in a new m.
312 static void runtime_mcall(void (*)(G*)) __attribute__ ((noinline));
313 static void
314 runtime_mcall(void (*pfn)(G*))
315 {
316 M *mp;
317 G *gp;
318 #ifndef USING_SPLIT_STACK
319 int i;
320 #endif
321
322 // Ensure that all registers are on the stack for the garbage
323 // collector.
324 __builtin_unwind_init();
325
326 mp = m;
327 gp = g;
328 if(gp == mp->g0)
329 runtime_throw("runtime: mcall called on m->g0 stack");
330
331 if(gp != nil) {
332
333 #ifdef USING_SPLIT_STACK
334 __splitstack_getcontext(&g->stack_context[0]);
335 #else
336 gp->gcnext_sp = &i;
337 #endif
338 gp->fromgogo = false;
339 getcontext(&gp->context);
340
341 // When we return from getcontext, we may be running
342 // in a new thread. That means that m and g may have
343 // changed. They are global variables so we will
344 // reload them, but the addresses of m and g may be
345 // cached in our local stack frame, and those
346 // addresses may be wrong. Call functions to reload
347 // the values for this thread.
348 mp = runtime_m();
349 gp = runtime_g();
350
351 if(gp->dotraceback != nil)
352 gtraceback(gp);
353 }
354 if (gp == nil || !gp->fromgogo) {
355 #ifdef USING_SPLIT_STACK
356 __splitstack_setcontext(&mp->g0->stack_context[0]);
357 #endif
358 mp->g0->entry = (byte*)pfn;
359 mp->g0->param = gp;
360
361 // It's OK to set g directly here because this case
362 // can not occur if we got here via a setcontext to
363 // the getcontext call just above.
364 g = mp->g0;
365
366 fixcontext(&mp->g0->context);
367 setcontext(&mp->g0->context);
368 runtime_throw("runtime: mcall function returned");
369 }
370 }
371
372 // Keep track of the scavenger's goroutine for deadlock detection.
373 static G *scvg;
374
375 // The bootstrap sequence is:
376 //
377 // call osinit
378 // call schedinit
379 // make & queue new G
380 // call runtime_mstart
381 //
382 // The new G calls runtime_main.
383 void
384 runtime_schedinit(void)
385 {
386 int32 n;
387 const byte *p;
388
389 m = &runtime_m0;
390 g = &runtime_g0;
391 m->g0 = g;
392 m->curg = g;
393 g->m = m;
394
395 initcontext();
396
397 m->nomemprof++;
398 runtime_mallocinit();
399 mcommoninit(m);
400
401 runtime_goargs();
402 runtime_goenvs();
403
404 // For debugging:
405 // Allocate internal symbol table representation now,
406 // so that we don't need to call malloc when we crash.
407 // runtime_findfunc(0);
408
409 runtime_gomaxprocs = 1;
410 p = runtime_getenv("GOMAXPROCS");
411 if(p != nil && (n = runtime_atoi(p)) != 0) {
412 if(n > maxgomaxprocs)
413 n = maxgomaxprocs;
414 runtime_gomaxprocs = n;
415 }
416 // wait for the main goroutine to start before taking
417 // GOMAXPROCS into account.
418 setmcpumax(1);
419 runtime_singleproc = runtime_gomaxprocs == 1;
420
421 canaddmcpu(); // mcpu++ to account for bootstrap m
422 m->helpgc = 1; // flag to tell schedule() to mcpu--
423 runtime_sched.grunning++;
424
425 // Can not enable GC until all roots are registered.
426 // mstats.enablegc = 1;
427 m->nomemprof--;
428 }
429
430 extern void main_init(void) __asm__ ("__go_init_main");
431 extern void main_main(void) __asm__ ("main.main");
432
433 // The main goroutine.
434 void
435 runtime_main(void)
436 {
437 // Lock the main goroutine onto this, the main OS thread,
438 // during initialization. Most programs won't care, but a few
439 // do require certain calls to be made by the main thread.
440 // Those can arrange for main.main to run in the main thread
441 // by calling runtime.LockOSThread during initialization
442 // to preserve the lock.
443 runtime_LockOSThread();
444 // From now on, new goroutines may use non-main threads.
445 setmcpumax(runtime_gomaxprocs);
446 runtime_sched.init = true;
447 scvg = __go_go(runtime_MHeap_Scavenger, nil);
448 main_init();
449 runtime_sched.init = false;
450 if(!runtime_sched.lockmain)
451 runtime_UnlockOSThread();
452
453 // For gccgo we have to wait until after main is initialized
454 // to enable GC, because initializing main registers the GC
455 // roots.
456 mstats.enablegc = 1;
457
458 // The deadlock detection has false negatives.
459 // Let scvg start up, to eliminate the false negative
460 // for the trivial program func main() { select{} }.
461 runtime_gosched();
462
463 main_main();
464 runtime_exit(0);
465 for(;;)
466 *(int32*)0 = 0;
467 }
468
469 // Lock the scheduler.
470 static void
471 schedlock(void)
472 {
473 runtime_lock(&runtime_sched);
474 }
475
476 // Unlock the scheduler.
477 static void
478 schedunlock(void)
479 {
480 M *m;
481
482 m = mwakeup;
483 mwakeup = nil;
484 runtime_unlock(&runtime_sched);
485 if(m != nil)
486 runtime_notewakeup(&m->havenextg);
487 }
488
489 void
490 runtime_goexit(void)
491 {
492 g->status = Gmoribund;
493 runtime_gosched();
494 }
495
496 void
497 runtime_goroutineheader(G *g)
498 {
499 const char *status;
500
501 switch(g->status) {
502 case Gidle:
503 status = "idle";
504 break;
505 case Grunnable:
506 status = "runnable";
507 break;
508 case Grunning:
509 status = "running";
510 break;
511 case Gsyscall:
512 status = "syscall";
513 break;
514 case Gwaiting:
515 if(g->waitreason)
516 status = g->waitreason;
517 else
518 status = "waiting";
519 break;
520 case Gmoribund:
521 status = "moribund";
522 break;
523 default:
524 status = "???";
525 break;
526 }
527 runtime_printf("goroutine %d [%s]:\n", g->goid, status);
528 }
529
530 void
531 runtime_goroutinetrailer(G *g)
532 {
533 if(g != nil && g->gopc != 0 && g->goid != 1) {
534 struct __go_string fn;
535 struct __go_string file;
536 int line;
537
538 if(__go_file_line(g->gopc - 1, &fn, &file, &line)) {
539 runtime_printf("created by %s\n", fn.__data);
540 runtime_printf("\t%s:%d\n", file.__data, line);
541 }
542 }
543 }
544
545 void
546 runtime_tracebackothers(G * volatile me)
547 {
548 G * volatile g;
549
550 for(g = runtime_allg; g != nil; g = g->alllink) {
551 if(g == me || g->status == Gdead)
552 continue;
553 runtime_printf("\n");
554 runtime_goroutineheader(g);
555
556 // Our only mechanism for doing a stack trace is
557 // _Unwind_Backtrace. And that only works for the
558 // current thread, not for other random goroutines.
559 // So we need to switch context to the goroutine, get
560 // the backtrace, and then switch back.
561
562 // This means that if g is running or in a syscall, we
563 // can't reliably print a stack trace. FIXME.
564 if(g->status == Gsyscall || g->status == Grunning) {
565 runtime_printf("no stack trace available\n");
566 runtime_goroutinetrailer(g);
567 continue;
568 }
569
570 g->dotraceback = me;
571
572 #ifdef USING_SPLIT_STACK
573 __splitstack_getcontext(&me->stack_context[0]);
574 #endif
575 getcontext(&me->context);
576
577 if(g->dotraceback) {
578 runtime_gogo(g);
579 }
580 }
581 }
582
583 // Do a stack trace of gp, and then restore the context to
584 // gp->dotraceback.
585
586 static void
587 gtraceback(G* gp)
588 {
589 G* ret;
590
591 runtime_traceback(nil);
592 runtime_goroutinetrailer(gp);
593 ret = gp->dotraceback;
594 gp->dotraceback = nil;
595 runtime_gogo(ret);
596 }
597
598 // Mark this g as m's idle goroutine.
599 // This functionality might be used in environments where programs
600 // are limited to a single thread, to simulate a select-driven
601 // network server. It is not exposed via the standard runtime API.
602 void
603 runtime_idlegoroutine(void)
604 {
605 if(g->idlem != nil)
606 runtime_throw("g is already an idle goroutine");
607 g->idlem = m;
608 }
609
610 static void
611 mcommoninit(M *m)
612 {
613 m->id = runtime_sched.mcount++;
614 m->fastrand = 0x49f6428aUL + m->id + runtime_cputicks();
615
616 if(m->mcache == nil)
617 m->mcache = runtime_allocmcache();
618
619 runtime_callers(1, m->createstack, nelem(m->createstack));
620
621 // Add to runtime_allm so garbage collector doesn't free m
622 // when it is just in a register or thread-local storage.
623 m->alllink = runtime_allm;
624 // runtime_NumCgoCall() iterates over allm w/o schedlock,
625 // so we need to publish it safely.
626 runtime_atomicstorep(&runtime_allm, m);
627 }
628
629 // Try to increment mcpu. Report whether succeeded.
630 static bool
631 canaddmcpu(void)
632 {
633 uint32 v;
634
635 for(;;) {
636 v = runtime_sched.atomic;
637 if(atomic_mcpu(v) >= atomic_mcpumax(v))
638 return 0;
639 if(runtime_cas(&runtime_sched.atomic, v, v+(1<<mcpuShift)))
640 return 1;
641 }
642 }
643
644 // Put on `g' queue. Sched must be locked.
645 static void
646 gput(G *g)
647 {
648 M *m;
649
650 // If g is wired, hand it off directly.
651 if((m = g->lockedm) != nil && canaddmcpu()) {
652 mnextg(m, g);
653 return;
654 }
655
656 // If g is the idle goroutine for an m, hand it off.
657 if(g->idlem != nil) {
658 if(g->idlem->idleg != nil) {
659 runtime_printf("m%d idle out of sync: g%d g%d\n",
660 g->idlem->id,
661 g->idlem->idleg->goid, g->goid);
662 runtime_throw("runtime: double idle");
663 }
664 g->idlem->idleg = g;
665 return;
666 }
667
668 g->schedlink = nil;
669 if(runtime_sched.ghead == nil)
670 runtime_sched.ghead = g;
671 else
672 runtime_sched.gtail->schedlink = g;
673 runtime_sched.gtail = g;
674
675 // increment gwait.
676 // if it transitions to nonzero, set atomic gwaiting bit.
677 if(runtime_sched.gwait++ == 0)
678 runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift);
679 }
680
681 // Report whether gget would return something.
682 static bool
683 haveg(void)
684 {
685 return runtime_sched.ghead != nil || m->idleg != nil;
686 }
687
688 // Get from `g' queue. Sched must be locked.
689 static G*
690 gget(void)
691 {
692 G *g;
693
694 g = runtime_sched.ghead;
695 if(g){
696 runtime_sched.ghead = g->schedlink;
697 if(runtime_sched.ghead == nil)
698 runtime_sched.gtail = nil;
699 // decrement gwait.
700 // if it transitions to zero, clear atomic gwaiting bit.
701 if(--runtime_sched.gwait == 0)
702 runtime_xadd(&runtime_sched.atomic, -1<<gwaitingShift);
703 } else if(m->idleg != nil) {
704 g = m->idleg;
705 m->idleg = nil;
706 }
707 return g;
708 }
709
710 // Put on `m' list. Sched must be locked.
711 static void
712 mput(M *m)
713 {
714 m->schedlink = runtime_sched.mhead;
715 runtime_sched.mhead = m;
716 runtime_sched.mwait++;
717 }
718
719 // Get an `m' to run `g'. Sched must be locked.
720 static M*
721 mget(G *g)
722 {
723 M *m;
724
725 // if g has its own m, use it.
726 if(g && (m = g->lockedm) != nil)
727 return m;
728
729 // otherwise use general m pool.
730 if((m = runtime_sched.mhead) != nil){
731 runtime_sched.mhead = m->schedlink;
732 runtime_sched.mwait--;
733 }
734 return m;
735 }
736
737 // Mark g ready to run.
738 void
739 runtime_ready(G *g)
740 {
741 schedlock();
742 readylocked(g);
743 schedunlock();
744 }
745
746 // Mark g ready to run. Sched is already locked.
747 // G might be running already and about to stop.
748 // The sched lock protects g->status from changing underfoot.
749 static void
750 readylocked(G *g)
751 {
752 if(g->m){
753 // Running on another machine.
754 // Ready it when it stops.
755 g->readyonstop = 1;
756 return;
757 }
758
759 // Mark runnable.
760 if(g->status == Grunnable || g->status == Grunning) {
761 runtime_printf("goroutine %d has status %d\n", g->goid, g->status);
762 runtime_throw("bad g->status in ready");
763 }
764 g->status = Grunnable;
765
766 gput(g);
767 matchmg();
768 }
769
770 // Same as readylocked but a different symbol so that
771 // debuggers can set a breakpoint here and catch all
772 // new goroutines.
773 static void
774 newprocreadylocked(G *g)
775 {
776 readylocked(g);
777 }
778
779 // Pass g to m for running.
780 // Caller has already incremented mcpu.
781 static void
782 mnextg(M *m, G *g)
783 {
784 runtime_sched.grunning++;
785 m->nextg = g;
786 if(m->waitnextg) {
787 m->waitnextg = 0;
788 if(mwakeup != nil)
789 runtime_notewakeup(&mwakeup->havenextg);
790 mwakeup = m;
791 }
792 }
793
794 // Get the next goroutine that m should run.
795 // Sched must be locked on entry, is unlocked on exit.
796 // Makes sure that at most $GOMAXPROCS g's are
797 // running on cpus (not in system calls) at any given time.
798 static G*
799 nextgandunlock(void)
800 {
801 G *gp;
802 uint32 v;
803
804 top:
805 if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs)
806 runtime_throw("negative mcpu");
807
808 // If there is a g waiting as m->nextg, the mcpu++
809 // happened before it was passed to mnextg.
810 if(m->nextg != nil) {
811 gp = m->nextg;
812 m->nextg = nil;
813 schedunlock();
814 return gp;
815 }
816
817 if(m->lockedg != nil) {
818 // We can only run one g, and it's not available.
819 // Make sure some other cpu is running to handle
820 // the ordinary run queue.
821 if(runtime_sched.gwait != 0) {
822 matchmg();
823 // m->lockedg might have been on the queue.
824 if(m->nextg != nil) {
825 gp = m->nextg;
826 m->nextg = nil;
827 schedunlock();
828 return gp;
829 }
830 }
831 } else {
832 // Look for work on global queue.
833 while(haveg() && canaddmcpu()) {
834 gp = gget();
835 if(gp == nil)
836 runtime_throw("gget inconsistency");
837
838 if(gp->lockedm) {
839 mnextg(gp->lockedm, gp);
840 continue;
841 }
842 runtime_sched.grunning++;
843 schedunlock();
844 return gp;
845 }
846
847 // The while loop ended either because the g queue is empty
848 // or because we have maxed out our m procs running Go
849 // code (mcpu >= mcpumax). We need to check that
850 // concurrent actions by entersyscall/exitsyscall cannot
851 // invalidate the decision to end the loop.
852 //
853 // We hold the sched lock, so no one else is manipulating the
854 // g queue or changing mcpumax. Entersyscall can decrement
855 // mcpu, but if it does so when there is something on the g queue,
856 // the gwait bit will be set, so entersyscall will take the slow path
857 // and use the sched lock. So it cannot invalidate our decision.
858 //
859 // Wait on global m queue.
860 mput(m);
861 }
862
863 // Look for a deadlock situation.
864 // There is a race with the scavenger that causes false negatives:
865 // if the scavenger is just starting, then we have
866 // scvg != nil && grunning == 0 && gwait == 0
867 // and we do not detect a deadlock. It is possible that we should
868 // add that case to the if statement here, but it is too close to Go 1
869 // to make such a subtle change. Instead, we work around the
870 // false negative in trivial programs by calling runtime.gosched
871 // from the main goroutine just before main.main.
872 // See runtime_main above.
873 //
874 // On a related note, it is also possible that the scvg == nil case is
875 // wrong and should include gwait, but that does not happen in
876 // standard Go programs, which all start the scavenger.
877 //
878 if((scvg == nil && runtime_sched.grunning == 0) ||
879 (scvg != nil && runtime_sched.grunning == 1 && runtime_sched.gwait == 0 &&
880 (scvg->status == Grunning || scvg->status == Gsyscall))) {
881 runtime_throw("all goroutines are asleep - deadlock!");
882 }
883
884 m->nextg = nil;
885 m->waitnextg = 1;
886 runtime_noteclear(&m->havenextg);
887
888 // Stoptheworld is waiting for everything but its own cpu to stop.
889 // Entersyscall might have decremented mcpu too, but if so
890 // it will see the waitstop and take the slow path.
891 // Exitsyscall never increments mcpu beyond mcpumax.
892 v = runtime_atomicload(&runtime_sched.atomic);
893 if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
894 // set waitstop = 0 (known to be 1)
895 runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
896 runtime_notewakeup(&runtime_sched.stopped);
897 }
898 schedunlock();
899
900 runtime_notesleep(&m->havenextg);
901 if(m->helpgc) {
902 runtime_gchelper();
903 m->helpgc = 0;
904 runtime_lock(&runtime_sched);
905 goto top;
906 }
907 if((gp = m->nextg) == nil)
908 runtime_throw("bad m->nextg in nextgoroutine");
909 m->nextg = nil;
910 return gp;
911 }
912
913 int32
914 runtime_helpgc(bool *extra)
915 {
916 M *mp;
917 int32 n, max;
918
919 // Figure out how many CPUs to use.
920 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
921 max = runtime_gomaxprocs;
922 if(max > runtime_ncpu)
923 max = runtime_ncpu > 0 ? runtime_ncpu : 1;
924 if(max > MaxGcproc)
925 max = MaxGcproc;
926
927 // We're going to use one CPU no matter what.
928 // Figure out the max number of additional CPUs.
929 max--;
930
931 runtime_lock(&runtime_sched);
932 n = 0;
933 while(n < max && (mp = mget(nil)) != nil) {
934 n++;
935 mp->helpgc = 1;
936 mp->waitnextg = 0;
937 runtime_notewakeup(&mp->havenextg);
938 }
939 runtime_unlock(&runtime_sched);
940 if(extra)
941 *extra = n != max;
942 return n;
943 }
944
945 void
946 runtime_stoptheworld(void)
947 {
948 uint32 v;
949
950 schedlock();
951 runtime_gcwaiting = 1;
952
953 setmcpumax(1);
954
955 // while mcpu > 1
956 for(;;) {
957 v = runtime_sched.atomic;
958 if(atomic_mcpu(v) <= 1)
959 break;
960
961 // It would be unsafe for multiple threads to be using
962 // the stopped note at once, but there is only
963 // ever one thread doing garbage collection.
964 runtime_noteclear(&runtime_sched.stopped);
965 if(atomic_waitstop(v))
966 runtime_throw("invalid waitstop");
967
968 // atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
969 // still being true.
970 if(!runtime_cas(&runtime_sched.atomic, v, v+(1<<waitstopShift)))
971 continue;
972
973 schedunlock();
974 runtime_notesleep(&runtime_sched.stopped);
975 schedlock();
976 }
977 runtime_singleproc = runtime_gomaxprocs == 1;
978 schedunlock();
979 }
980
981 void
982 runtime_starttheworld(bool extra)
983 {
984 M *m;
985
986 schedlock();
987 runtime_gcwaiting = 0;
988 setmcpumax(runtime_gomaxprocs);
989 matchmg();
990 if(extra && canaddmcpu()) {
991 // Start a new m that will (we hope) be idle
992 // and so available to help when the next
993 // garbage collection happens.
994 // canaddmcpu above did mcpu++
995 // (necessary, because m will be doing various
996 // initialization work so is definitely running),
997 // but m is not running a specific goroutine,
998 // so set the helpgc flag as a signal to m's
999 // first schedule(nil) to mcpu-- and grunning--.
1000 m = runtime_newm();
1001 m->helpgc = 1;
1002 runtime_sched.grunning++;
1003 }
1004 schedunlock();
1005 }
1006
1007 // Called to start an M.
1008 void*
1009 runtime_mstart(void* mp)
1010 {
1011 m = (M*)mp;
1012 g = m->g0;
1013
1014 initcontext();
1015
1016 g->entry = nil;
1017 g->param = nil;
1018
1019 // Record top of stack for use by mcall.
1020 // Once we call schedule we're never coming back,
1021 // so other calls can reuse this stack space.
1022 #ifdef USING_SPLIT_STACK
1023 __splitstack_getcontext(&g->stack_context[0]);
1024 #else
1025 g->gcinitial_sp = &mp;
1026 // Setting gcstack_size to 0 is a marker meaning that gcinitial_sp
1027 // is the top of the stack, not the bottom.
1028 g->gcstack_size = 0;
1029 g->gcnext_sp = &mp;
1030 #endif
1031 getcontext(&g->context);
1032
1033 if(g->entry != nil) {
1034 // Got here from mcall.
1035 void (*pfn)(G*) = (void (*)(G*))g->entry;
1036 G* gp = (G*)g->param;
1037 pfn(gp);
1038 *(int*)0x21 = 0x21;
1039 }
1040 runtime_minit();
1041
1042 #ifdef USING_SPLIT_STACK
1043 {
1044 int dont_block_signals = 0;
1045 __splitstack_block_signals(&dont_block_signals, nil);
1046 }
1047 #endif
1048
1049 // Install signal handlers; after minit so that minit can
1050 // prepare the thread to be able to handle the signals.
1051 if(m == &runtime_m0)
1052 runtime_initsig();
1053
1054 schedule(nil);
1055 return nil;
1056 }
1057
1058 typedef struct CgoThreadStart CgoThreadStart;
1059 struct CgoThreadStart
1060 {
1061 M *m;
1062 G *g;
1063 void (*fn)(void);
1064 };
1065
1066 // Kick off new m's as needed (up to mcpumax).
1067 // Sched is locked.
1068 static void
1069 matchmg(void)
1070 {
1071 G *gp;
1072 M *mp;
1073
1074 if(m->mallocing || m->gcing)
1075 return;
1076
1077 while(haveg() && canaddmcpu()) {
1078 gp = gget();
1079 if(gp == nil)
1080 runtime_throw("gget inconsistency");
1081
1082 // Find the m that will run gp.
1083 if((mp = mget(gp)) == nil)
1084 mp = runtime_newm();
1085 mnextg(mp, gp);
1086 }
1087 }
1088
1089 // Create a new m. It will start off with a call to runtime_mstart.
1090 M*
1091 runtime_newm(void)
1092 {
1093 M *m;
1094 pthread_attr_t attr;
1095 pthread_t tid;
1096
1097 m = runtime_malloc(sizeof(M));
1098 mcommoninit(m);
1099 m->g0 = runtime_malg(-1, nil, nil);
1100
1101 if(pthread_attr_init(&attr) != 0)
1102 runtime_throw("pthread_attr_init");
1103 if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
1104 runtime_throw("pthread_attr_setdetachstate");
1105
1106 #ifndef PTHREAD_STACK_MIN
1107 #define PTHREAD_STACK_MIN 8192
1108 #endif
1109 if(pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
1110 runtime_throw("pthread_attr_setstacksize");
1111
1112 if(pthread_create(&tid, &attr, runtime_mstart, m) != 0)
1113 runtime_throw("pthread_create");
1114
1115 return m;
1116 }
1117
1118 // One round of scheduler: find a goroutine and run it.
1119 // The argument is the goroutine that was running before
1120 // schedule was called, or nil if this is the first call.
1121 // Never returns.
1122 static void
1123 schedule(G *gp)
1124 {
1125 int32 hz;
1126 uint32 v;
1127
1128 schedlock();
1129 if(gp != nil) {
1130 // Just finished running gp.
1131 gp->m = nil;
1132 runtime_sched.grunning--;
1133
1134 // atomic { mcpu-- }
1135 v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
1136 if(atomic_mcpu(v) > maxgomaxprocs)
1137 runtime_throw("negative mcpu in scheduler");
1138
1139 switch(gp->status){
1140 case Grunnable:
1141 case Gdead:
1142 // Shouldn't have been running!
1143 runtime_throw("bad gp->status in sched");
1144 case Grunning:
1145 gp->status = Grunnable;
1146 gput(gp);
1147 break;
1148 case Gmoribund:
1149 gp->status = Gdead;
1150 if(gp->lockedm) {
1151 gp->lockedm = nil;
1152 m->lockedg = nil;
1153 }
1154 gp->idlem = nil;
1155 runtime_memclr(&gp->context, sizeof gp->context);
1156 gfput(gp);
1157 if(--runtime_sched.gcount == 0)
1158 runtime_exit(0);
1159 break;
1160 }
1161 if(gp->readyonstop){
1162 gp->readyonstop = 0;
1163 readylocked(gp);
1164 }
1165 } else if(m->helpgc) {
1166 // Bootstrap m or new m started by starttheworld.
1167 // atomic { mcpu-- }
1168 v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
1169 if(atomic_mcpu(v) > maxgomaxprocs)
1170 runtime_throw("negative mcpu in scheduler");
1171 // Compensate for increment in starttheworld().
1172 runtime_sched.grunning--;
1173 m->helpgc = 0;
1174 } else if(m->nextg != nil) {
1175 // New m started by matchmg.
1176 } else {
1177 runtime_throw("invalid m state in scheduler");
1178 }
1179
1180 // Find (or wait for) g to run. Unlocks runtime_sched.
1181 gp = nextgandunlock();
1182 gp->readyonstop = 0;
1183 gp->status = Grunning;
1184 m->curg = gp;
1185 gp->m = m;
1186
1187 // Check whether the profiler needs to be turned on or off.
1188 hz = runtime_sched.profilehz;
1189 if(m->profilehz != hz)
1190 runtime_resetcpuprofiler(hz);
1191
1192 runtime_gogo(gp);
1193 }
1194
1195 // Enter scheduler. If g->status is Grunning,
1196 // re-queues g and runs everyone else who is waiting
1197 // before running g again. If g->status is Gmoribund,
1198 // kills off g.
1199 void
1200 runtime_gosched(void)
1201 {
1202 if(m->locks != 0)
1203 runtime_throw("gosched holding locks");
1204 if(g == m->g0)
1205 runtime_throw("gosched of g0");
1206 runtime_mcall(schedule);
1207 }
1208
1209 // The goroutine g is about to enter a system call.
1210 // Record that it's not using the cpu anymore.
1211 // This is called only from the go syscall library and cgocall,
1212 // not from the low-level system calls used by the runtime.
1213 //
1214 // Entersyscall cannot split the stack: the stack pointer saved for
1215 // the garbage collector must refer to the caller's stack segment,
1216 // because entersyscall is going to return immediately after.
1217 // It's okay to call matchmg and notewakeup even after
1218 // decrementing mcpu, because we haven't released the
1219 // sched lock yet, so the garbage collector cannot be running.
1220
1221 void runtime_entersyscall(void) __attribute__ ((no_split_stack));
1222
1223 void
1224 runtime_entersyscall(void)
1225 {
1226 uint32 v;
1227
1228 if(m->profilehz > 0)
1229 runtime_setprof(false);
1230
1231 // Leave SP around for gc and traceback.
1232 #ifdef USING_SPLIT_STACK
1233 g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
1234 &g->gcnext_segment, &g->gcnext_sp,
1235 &g->gcinitial_sp);
1236 #else
1237 g->gcnext_sp = (byte *) &v;
1238 #endif
1239
1240 // Save the registers in the g structure so that any pointers
1241 // held in registers will be seen by the garbage collector.
1242 getcontext(&g->gcregs);
1243
1244 g->status = Gsyscall;
1245
1246 // Fast path.
1247 // The slow path inside the schedlock/schedunlock will get
1248 // through without stopping if it does:
1249 // mcpu--
1250 // gwait not true
1251 // waitstop && mcpu <= mcpumax not true
1252 // If we can do the same with a single atomic add,
1253 // then we can skip the locks.
1254 v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift);
1255 if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v)))
1256 return;
1257
1258 schedlock();
1259 v = runtime_atomicload(&runtime_sched.atomic);
1260 if(atomic_gwaiting(v)) {
1261 matchmg();
1262 v = runtime_atomicload(&runtime_sched.atomic);
1263 }
1264 if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
1265 runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift);
1266 runtime_notewakeup(&runtime_sched.stopped);
1267 }
1268
1269 schedunlock();
1270 }
1271
1272 // The goroutine g exited its system call.
1273 // Arrange for it to run on a cpu again.
1274 // This is called only from the go syscall library, not
1275 // from the low-level system calls used by the runtime.
1276 void
1277 runtime_exitsyscall(void)
1278 {
1279 G *gp;
1280 uint32 v;
1281
1282 // Fast path.
1283 // If we can do the mcpu++ bookkeeping and
1284 // find that we still have mcpu <= mcpumax, then we can
1285 // start executing Go code immediately, without having to
1286 // schedlock/schedunlock.
1287 // Also do fast return if any locks are held, so that
1288 // panic code can use syscalls to open a file.
1289 gp = g;
1290 v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
1291 if((m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) || m->locks > 0) {
1292 // There's a cpu for us, so we can run.
1293 gp->status = Grunning;
1294 // Garbage collector isn't running (since we are),
1295 // so okay to clear gcstack.
1296 #ifdef USING_SPLIT_STACK
1297 gp->gcstack = nil;
1298 #endif
1299 gp->gcnext_sp = nil;
1300 runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
1301
1302 if(m->profilehz > 0)
1303 runtime_setprof(true);
1304 return;
1305 }
1306
1307 // Tell scheduler to put g back on the run queue:
1308 // mostly equivalent to g->status = Grunning,
1309 // but keeps the garbage collector from thinking
1310 // that g is running right now, which it's not.
1311 gp->readyonstop = 1;
1312
1313 // All the cpus are taken.
1314 // The scheduler will ready g and put this m to sleep.
1315 // When the scheduler takes g away from m,
1316 // it will undo the mcpu++ above.
1317 runtime_gosched();
1318
1319 // Gosched returned, so we're allowed to run now.
1320 // Delete the gcstack information that we left for
1321 // the garbage collector during the system call.
1322 // Must wait until now because until gosched returns
1323 // we don't know for sure that the garbage collector
1324 // is not running.
1325 #ifdef USING_SPLIT_STACK
1326 gp->gcstack = nil;
1327 #endif
1328 gp->gcnext_sp = nil;
1329 runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
1330 }
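// Illustrative pairing (hypothetical call site; read() stands in for any
// blocking call made by the syscall package):
//
//	runtime_entersyscall();
//	r = read(fd, buf, n);
//	runtime_exitsyscall();
//
// Between the two calls the scheduler is free to hand the cpu to another
// goroutine, since this one is assumed to be blocked in the kernel.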
1331
1332 // Allocate a new g, with a stack big enough for stacksize bytes.
1333 G*
1334 runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
1335 {
1336 G *newg;
1337
1338 newg = runtime_malloc(sizeof(G));
1339 if(stacksize >= 0) {
1340 #if USING_SPLIT_STACK
1341 int dont_block_signals = 0;
1342
1343 *ret_stack = __splitstack_makecontext(stacksize,
1344 &newg->stack_context[0],
1345 ret_stacksize);
1346 __splitstack_block_signals_context(&newg->stack_context[0],
1347 &dont_block_signals, nil);
1348 #else
1349 *ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0);
1350 *ret_stacksize = stacksize;
1351 newg->gcinitial_sp = *ret_stack;
1352 newg->gcstack_size = stacksize;
1353 runtime_xadd(&runtime_stacks_sys, stacksize);
1354 #endif
1355 }
1356 return newg;
1357 }
1358
1359 /* For runtime package testing. */
1360
1361 void runtime_testing_entersyscall(void)
1362 __asm__("runtime.entersyscall");
1363
1364 void
1365 runtime_testing_entersyscall()
1366 {
1367 runtime_entersyscall();
1368 }
1369
1370 void runtime_testing_exitsyscall(void)
1371 __asm__("runtime.exitsyscall");
1372
1373 void
1374 runtime_testing_exitsyscall()
1375 {
1376 runtime_exitsyscall();
1377 }
1378
1379 G*
1380 __go_go(void (*fn)(void*), void* arg)
1381 {
1382 byte *sp;
1383 size_t spsize;
1384 G *newg;
1385
1386 schedlock();
1387
1388 if((newg = gfget()) != nil){
1389 #ifdef USING_SPLIT_STACK
1390 int dont_block_signals = 0;
1391
1392 sp = __splitstack_resetcontext(&newg->stack_context[0],
1393 &spsize);
1394 __splitstack_block_signals_context(&newg->stack_context[0],
1395 &dont_block_signals, nil);
1396 #else
1397 sp = newg->gcinitial_sp;
1398 spsize = newg->gcstack_size;
1399 if(spsize == 0)
1400 runtime_throw("bad spsize in __go_go");
1401 newg->gcnext_sp = sp;
1402 #endif
1403 } else {
1404 newg = runtime_malg(StackMin, &sp, &spsize);
1405 if(runtime_lastg == nil)
1406 runtime_allg = newg;
1407 else
1408 runtime_lastg->alllink = newg;
1409 runtime_lastg = newg;
1410 }
1411 newg->status = Gwaiting;
1412 newg->waitreason = "new goroutine";
1413
1414 newg->entry = (byte*)fn;
1415 newg->param = arg;
1416 newg->gopc = (uintptr)__builtin_return_address(0);
1417
1418 runtime_sched.gcount++;
1419 runtime_sched.goidgen++;
1420 newg->goid = runtime_sched.goidgen;
1421
1422 if(sp == nil)
1423 runtime_throw("nil g->stack0");
1424
1425 {
1426 // Avoid warnings about variables clobbered by
1427 // longjmp.
1428 byte * volatile vsp = sp;
1429 size_t volatile vspsize = spsize;
1430 G * volatile vnewg = newg;
1431
1432 getcontext(&vnewg->context);
1433 vnewg->context.uc_stack.ss_sp = vsp;
1434 #ifdef MAKECONTEXT_STACK_TOP
1435 vnewg->context.uc_stack.ss_sp += vspsize;
1436 #endif
1437 vnewg->context.uc_stack.ss_size = vspsize;
1438 makecontext(&vnewg->context, kickoff, 0);
1439
1440 newprocreadylocked(vnewg);
1441 schedunlock();
1442
1443 return vnewg;
1444 }
1445 }
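// A `go f(x)' statement is compiled into a call of roughly this shape
// (sketch only; the thunk and argument block are compiler-generated and
// their names here are made up):
//
//	__go_go(thunk, argblock);
//
// The new G starts in kickoff, which simply calls g->entry(g->param).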
1446
1447 // Put on gfree list. Sched must be locked.
1448 static void
1449 gfput(G *g)
1450 {
1451 g->schedlink = runtime_sched.gfree;
1452 runtime_sched.gfree = g;
1453 }
1454
1455 // Get from gfree list. Sched must be locked.
1456 static G*
1457 gfget(void)
1458 {
1459 G *g;
1460
1461 g = runtime_sched.gfree;
1462 if(g)
1463 runtime_sched.gfree = g->schedlink;
1464 return g;
1465 }
1466
1467 // Run all deferred functions for the current goroutine.
1468 static void
1469 rundefer(void)
1470 {
1471 Defer *d;
1472
1473 while((d = g->defer) != nil) {
1474 void (*pfn)(void*);
1475
1476 pfn = d->__pfn;
1477 d->__pfn = nil;
1478 if (pfn != nil)
1479 (*pfn)(d->__arg);
1480 g->defer = d->__next;
1481 runtime_free(d);
1482 }
1483 }
1484
1485 void runtime_Goexit (void) asm ("runtime.Goexit");
1486
1487 void
1488 runtime_Goexit(void)
1489 {
1490 rundefer();
1491 runtime_goexit();
1492 }
1493
1494 void runtime_Gosched (void) asm ("runtime.Gosched");
1495
1496 void
1497 runtime_Gosched(void)
1498 {
1499 runtime_gosched();
1500 }
1501
1502 // Implementation of runtime.GOMAXPROCS.
1503 // delete when scheduler is stronger
1504 int32
1505 runtime_gomaxprocsfunc(int32 n)
1506 {
1507 int32 ret;
1508 uint32 v;
1509
1510 schedlock();
1511 ret = runtime_gomaxprocs;
1512 if(n <= 0)
1513 n = ret;
1514 if(n > maxgomaxprocs)
1515 n = maxgomaxprocs;
1516 runtime_gomaxprocs = n;
1517 if(runtime_gomaxprocs > 1)
1518 runtime_singleproc = false;
1519 if(runtime_gcwaiting != 0) {
1520 if(atomic_mcpumax(runtime_sched.atomic) != 1)
1521 runtime_throw("invalid mcpumax during gc");
1522 schedunlock();
1523 return ret;
1524 }
1525
1526 setmcpumax(n);
1527
1528 // If there are now fewer allowed procs
1529 // than procs running, stop.
1530 v = runtime_atomicload(&runtime_sched.atomic);
1531 if((int32)atomic_mcpu(v) > n) {
1532 schedunlock();
1533 runtime_gosched();
1534 return ret;
1535 }
1536 // handle more procs
1537 matchmg();
1538 schedunlock();
1539 return ret;
1540 }
1541
1542 void
1543 runtime_LockOSThread(void)
1544 {
1545 if(m == &runtime_m0 && runtime_sched.init) {
1546 runtime_sched.lockmain = true;
1547 return;
1548 }
1549 m->lockedg = g;
1550 g->lockedm = m;
1551 }
1552
1553 void
1554 runtime_UnlockOSThread(void)
1555 {
1556 if(m == &runtime_m0 && runtime_sched.init) {
1557 runtime_sched.lockmain = false;
1558 return;
1559 }
1560 m->lockedg = nil;
1561 g->lockedm = nil;
1562 }
1563
1564 bool
1565 runtime_lockedOSThread(void)
1566 {
1567 return g->lockedm != nil && m->lockedg != nil;
1568 }
1569
1570 // for testing of callbacks
1571
1572 _Bool runtime_golockedOSThread(void)
1573 asm("runtime.golockedOSThread");
1574
1575 _Bool
1576 runtime_golockedOSThread(void)
1577 {
1578 return runtime_lockedOSThread();
1579 }
1580
1581 // for testing of wire, unwire
1582 uint32
1583 runtime_mid()
1584 {
1585 return m->id;
1586 }
1587
1588 int32 runtime_NumGoroutine (void)
1589 __asm__ ("runtime.NumGoroutine");
1590
1591 int32
1592 runtime_NumGoroutine()
1593 {
1594 return runtime_sched.gcount;
1595 }
1596
1597 int32
1598 runtime_gcount(void)
1599 {
1600 return runtime_sched.gcount;
1601 }
1602
1603 int32
1604 runtime_mcount(void)
1605 {
1606 return runtime_sched.mcount;
1607 }
1608
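// CPU profiling state. runtime_setcpuprofilerate stores fn and hz while
// holding the embedded Lock; runtime_sigprof rechecks fn and fills pcbuf
// under the same lock.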
1609 static struct {
1610 Lock;
1611 void (*fn)(uintptr*, int32);
1612 int32 hz;
1613 uintptr pcbuf[100];
1614 } prof;
1615
1616 // Called if we receive a SIGPROF signal.
1617 void
1618 runtime_sigprof(uint8 *pc __attribute__ ((unused)),
1619 uint8 *sp __attribute__ ((unused)),
1620 uint8 *lr __attribute__ ((unused)),
1621 G *gp __attribute__ ((unused)))
1622 {
1623 int32 n;
1624
1625 if(prof.fn == nil || prof.hz == 0)
1626 return;
1627
1628 runtime_lock(&prof);
1629 if(prof.fn == nil) {
1630 runtime_unlock(&prof);
1631 return;
1632 }
1633 n = runtime_callers(0, prof.pcbuf, nelem(prof.pcbuf));
1634 if(n > 0)
1635 prof.fn(prof.pcbuf, n);
1636 runtime_unlock(&prof);
1637 }
1638
1639 // Arrange to call fn with a traceback hz times a second.
1640 void
1641 runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
1642 {
1643 // Force sane arguments.
1644 if(hz < 0)
1645 hz = 0;
1646 if(hz == 0)
1647 fn = nil;
1648 if(fn == nil)
1649 hz = 0;
1650
1651 // Stop profiler on this cpu so that it is safe to lock prof.
1652 // If a profiling signal came in while we had prof locked,
1653 // it would deadlock.
1654 runtime_resetcpuprofiler(0);
1655
1656 runtime_lock(&prof);
1657 prof.fn = fn;
1658 prof.hz = hz;
1659 runtime_unlock(&prof);
1660 runtime_lock(&runtime_sched);
1661 runtime_sched.profilehz = hz;
1662 runtime_unlock(&runtime_sched);
1663
1664 if(hz != 0)
1665 runtime_resetcpuprofiler(hz);
1666 }