+ runtime_newosproc(mp);
+}
+
+// Stops execution of the current m until new work is available.
+// Returns with acquired P.
+static void
+stopm(void)
+{
+ if(m->locks)
+ runtime_throw("stopm holding locks");
+ if(m->p)
+ runtime_throw("stopm holding p");
+ if(m->spinning) {
+ m->spinning = false;
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ }
+
+retry:
+ runtime_lock(&runtime_sched);
+ mput(m);
+ runtime_unlock(&runtime_sched);
+ runtime_notesleep(&m->park);
+ runtime_noteclear(&m->park);
+ if(m->helpgc) {
+ runtime_gchelper();
+ m->helpgc = 0;
+ m->mcache = nil;
+ goto retry;
+ }
+ acquirep(m->nextp);
+ m->nextp = nil;
+}
+
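+ // Start-up hook passed to newm() by startm(): marks the freshly created M
+ // as spinning before it begins looking for work.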
+static void
+mspinning(void)
+{
+ m->spinning = true;
+}
+
+ // Schedules some M to run the p (creates an M if necessary).
+ // If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
+static void
+startm(P *p, bool spinning)
+{
+ M *mp;
+ void (*fn)(void);
+
+ runtime_lock(&runtime_sched);
+ if(p == nil) {
+ p = pidleget();
+ if(p == nil) {
+ runtime_unlock(&runtime_sched);
+ if(spinning)
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ return;
+ }
+ }
+ mp = mget();
+ runtime_unlock(&runtime_sched);
+ if(mp == nil) {
+ fn = nil;
+ if(spinning)
+ fn = mspinning;
+ newm(fn, p);
+ return;
+ }
+ if(mp->spinning)
+ runtime_throw("startm: m is spinning");
+ if(mp->nextp)
+ runtime_throw("startm: m has p");
+ mp->spinning = spinning;
+ mp->nextp = p;
+ runtime_notewakeup(&mp->park);
+}
+
+// Hands off P from syscall or locked M.
+static void
+handoffp(P *p)
+{
+ // if it has local work, start it straight away
+ if(p->runqhead != p->runqtail || runtime_sched.runqsize) {
+ startm(p, false);
+ return;
+ }
+ // no local work, check that there are no spinning/idle M's,
+ // otherwise our help is not required
+ if(runtime_atomicload(&runtime_sched.nmspinning) + runtime_atomicload(&runtime_sched.npidle) == 0 && // TODO: fast atomic
+ runtime_cas(&runtime_sched.nmspinning, 0, 1)) {
+ startm(p, true);
+ return;
+ }
+ runtime_lock(&runtime_sched);
+ if(runtime_sched.gcwaiting) {
+ p->status = Pgcstop;
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+ runtime_unlock(&runtime_sched);
+ return;
+ }
+ if(runtime_sched.runqsize) {
+ runtime_unlock(&runtime_sched);
+ startm(p, false);
+ return;
+ }
+ // If this is the last running P and nobody is polling the network,
+ // we need to wake up another M to poll the network.
+ if(runtime_sched.npidle == (uint32)runtime_gomaxprocs-1 && runtime_atomicload64(&runtime_sched.lastpoll) != 0) {
+ runtime_unlock(&runtime_sched);
+ startm(p, false);
+ return;
+ }
+ pidleput(p);
+ runtime_unlock(&runtime_sched);
+}
+
+// Tries to add one more P to execute G's.
+// Called when a G is made runnable (newproc, ready).
+static void
+wakep(void)
+{
+ // be conservative about spinning threads
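+ // The CAS from 0 to 1 both checks that no M is currently spinning and
+ // reserves the spinning slot, so at most one extra M is started per wakeup.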
+ if(!runtime_cas(&runtime_sched.nmspinning, 0, 1))
+ return;
+ startm(nil, true);
+}
+
+// Stops execution of the current m that is locked to a g until the g is runnable again.
+// Returns with acquired P.
+static void
+stoplockedm(void)
+{
+ P *p;
+
+ if(m->lockedg == nil || m->lockedg->lockedm != m)
+ runtime_throw("stoplockedm: inconsistent locking");
+ if(m->p) {
+ // Schedule another M to run this p.
+ p = releasep();
+ handoffp(p);
+ }
+ incidlelocked(1);
+ // Wait until another thread schedules lockedg again.
+ runtime_notesleep(&m->park);
+ runtime_noteclear(&m->park);
+ if(m->lockedg->status != Grunnable)
+ runtime_throw("stoplockedm: not runnable");
+ acquirep(m->nextp);
+ m->nextp = nil;
+}
+
+// Schedules the locked m to run the locked gp.
+static void
+startlockedm(G *gp)
+{
+ M *mp;
+ P *p;
+
+ mp = gp->lockedm;
+ if(mp == m)
+ runtime_throw("startlockedm: locked to me");
+ if(mp->nextp)
+ runtime_throw("startlockedm: m has p");
+ // directly handoff current P to the locked m
+ incidlelocked(-1);
+ p = releasep();
+ mp->nextp = p;
+ runtime_notewakeup(&mp->park);
+ stopm();
+}
+
+// Stops the current m for stoptheworld.
+// Returns when the world is restarted.
+static void
+gcstopm(void)
+{
+ P *p;
+
+ if(!runtime_sched.gcwaiting)
+ runtime_throw("gcstopm: not waiting for gc");
+ if(m->spinning) {
+ m->spinning = false;
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ }
+ p = releasep();
+ runtime_lock(&runtime_sched);
+ p->status = Pgcstop;
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+ runtime_unlock(&runtime_sched);
+ stopm();
+}
+
+// Schedules gp to run on the current M.
+// Never returns.
+static void
+execute(G *gp)
+{
+ int32 hz;
+
+ if(gp->status != Grunnable) {
+ runtime_printf("execute: bad g status %d\n", gp->status);
+ runtime_throw("execute: bad g status");
+ }
+ gp->status = Grunning;
+ gp->waitsince = 0;
+ m->p->schedtick++;
+ m->curg = gp;
+ gp->m = m;
+
+ // Check whether the profiler needs to be turned on or off.
+ hz = runtime_sched.profilehz;
+ if(m->profilehz != hz)
+ runtime_resetcpuprofiler(hz);
+
+ runtime_gogo(gp);
+}
+
+// Finds a runnable goroutine to execute.
+// Tries to steal from other P's, get g from global queue, poll network.
+static G*
+findrunnable(void)
+{
+ G *gp;
+ P *p;
+ int32 i;
+
+top:
+ if(runtime_sched.gcwaiting) {
+ gcstopm();
+ goto top;
+ }
+ if(runtime_fingwait && runtime_fingwake && (gp = runtime_wakefing()) != nil)
+ runtime_ready(gp);
+ // local runq
+ gp = runqget(m->p);
+ if(gp)
+ return gp;
+ // global runq
+ if(runtime_sched.runqsize) {
+ runtime_lock(&runtime_sched);
+ gp = globrunqget(m->p, 0);
+ runtime_unlock(&runtime_sched);
+ if(gp)
+ return gp;
+ }
+ // poll network
+ gp = runtime_netpoll(false); // non-blocking
+ if(gp) {
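+ // runtime_netpoll returns a list of G's linked through schedlink:
+ // run the first one here and inject the rest into the global queue.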
+ injectglist(gp->schedlink);
+ gp->status = Grunnable;
+ return gp;
+ }
+ // If number of spinning M's >= number of busy P's, block.
+ // This is necessary to prevent excessive CPU consumption
+ // when GOMAXPROCS>>1 but the program parallelism is low.
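+ // (The check below blocks once nmspinning reaches half the number of
+ // busy P's, approximated here as gomaxprocs - npidle.)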
+ if(!m->spinning && 2 * runtime_atomicload(&runtime_sched.nmspinning) >= runtime_gomaxprocs - runtime_atomicload(&runtime_sched.npidle)) // TODO: fast atomic
+ goto stop;
+ if(!m->spinning) {
+ m->spinning = true;
+ runtime_xadd(&runtime_sched.nmspinning, 1);
+ }
+ // random steal from other P's
+ for(i = 0; i < 2*runtime_gomaxprocs; i++) {
+ if(runtime_sched.gcwaiting)
+ goto top;
+ p = runtime_allp[runtime_fastrand1()%runtime_gomaxprocs];
+ if(p == m->p)
+ gp = runqget(p);
+ else
+ gp = runqsteal(m->p, p);
+ if(gp)
+ return gp;
+ }
+stop:
+ // return P and block
+ runtime_lock(&runtime_sched);
+ if(runtime_sched.gcwaiting) {
+ runtime_unlock(&runtime_sched);
+ goto top;
+ }
+ if(runtime_sched.runqsize) {
+ gp = globrunqget(m->p, 0);
+ runtime_unlock(&runtime_sched);
+ return gp;
+ }
+ p = releasep();
+ pidleput(p);
+ runtime_unlock(&runtime_sched);
+ if(m->spinning) {
+ m->spinning = false;
+ runtime_xadd(&runtime_sched.nmspinning, -1);
+ }
+ // check all runqueues once again
+ for(i = 0; i < runtime_gomaxprocs; i++) {
+ p = runtime_allp[i];
+ if(p && p->runqhead != p->runqtail) {
+ runtime_lock(&runtime_sched);
+ p = pidleget();
+ runtime_unlock(&runtime_sched);
+ if(p) {
+ acquirep(p);
+ goto top;
+ }
+ break;
+ }
+ }
+ // poll network
+ if(runtime_xchg64(&runtime_sched.lastpoll, 0) != 0) {
+ if(m->p)
+ runtime_throw("findrunnable: netpoll with p");
+ if(m->spinning)
+ runtime_throw("findrunnable: netpoll with spinning");
+ gp = runtime_netpoll(true); // block until new work is available
+ runtime_atomicstore64(&runtime_sched.lastpoll, runtime_nanotime());
+ if(gp) {
+ runtime_lock(&runtime_sched);
+ p = pidleget();
+ runtime_unlock(&runtime_sched);
+ if(p) {
+ acquirep(p);
+ injectglist(gp->schedlink);
+ gp->status = Grunnable;
+ return gp;
+ }
+ injectglist(gp);
+ }
+ }
+ stopm();
+ goto top;
+}
+
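+ // Clears the spinning state of the current M, if set, and wakes another M
+ // via wakep() when no M's are spinning but idle P's remain.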
+static void
+resetspinning(void)
+{
+ int32 nmspinning;
+
+ if(m->spinning) {
+ m->spinning = false;
+ nmspinning = runtime_xadd(&runtime_sched.nmspinning, -1);
+ if(nmspinning < 0)
+ runtime_throw("findrunnable: negative nmspinning");
+ } else
+ nmspinning = runtime_atomicload(&runtime_sched.nmspinning);
+
+ // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
+ // so check whether we need to wake up another P here.
+ if (nmspinning == 0 && runtime_atomicload(&runtime_sched.npidle) > 0)
+ wakep();
+}
+
+// Injects the list of runnable G's into the scheduler.
+// Can run concurrently with GC.
+static void
+injectglist(G *glist)
+{
+ int32 n;
+ G *gp;
+
+ if(glist == nil)
+ return;
+ runtime_lock(&runtime_sched);
+ for(n = 0; glist; n++) {
+ gp = glist;
+ glist = gp->schedlink;
+ gp->status = Grunnable;
+ globrunqput(gp);
+ }
+ runtime_unlock(&runtime_sched);
+
+ for(; n && runtime_sched.npidle; n--)
+ startm(nil, false);
+}
+
+// One round of scheduler: find a runnable goroutine and execute it.
+// Never returns.
+static void
+schedule(void)
+{
+ G *gp;
+ uint32 tick;
+
+ if(m->locks)
+ runtime_throw("schedule: holding locks");
+
+top:
+ if(runtime_sched.gcwaiting) {
+ gcstopm();
+ goto top;
+ }
+
+ gp = nil;
+ // Check the global runnable queue once in a while to ensure fairness.
+ // Otherwise two goroutines can completely occupy the local runqueue
+ // by constantly respawning each other.
+ tick = m->p->schedtick;
+ // This is a fancy way to say tick%61==0;
+ // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
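+ // (0x4325c53f is 2^36/61 rounded up, so ((uint64)tick*0x4325c53f)>>36
+ // computes tick/61, and the subtraction leaves tick%61.)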
+ if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime_sched.runqsize > 0) {
+ runtime_lock(&runtime_sched);
+ gp = globrunqget(m->p, 1);
+ runtime_unlock(&runtime_sched);
+ if(gp)
+ resetspinning();
+ }
+ if(gp == nil) {
+ gp = runqget(m->p);
+ if(gp && m->spinning)
+ runtime_throw("schedule: spinning with local work");
+ }
+ if(gp == nil) {
+ gp = findrunnable(); // blocks until work is available
+ resetspinning();
+ }
+
+ if(gp->lockedm) {
+ // Hands off own p to the locked m,
+ // then blocks waiting for a new p.
+ startlockedm(gp);
+ goto top;
+ }
+
+ execute(gp);
+}
+
+// Puts the current goroutine into a waiting state and calls unlockf.
+// If unlockf returns false, the goroutine is resumed.
+void
+runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
+{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
+ m->waitlock = lock;
+ m->waitunlockf = unlockf;
+ g->waitreason = reason;
+ runtime_mcall(park0);
+}
+
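+ // unlockf callback used by runtime_parkunlock: releases the Lock that was
+ // passed to runtime_park as the wait lock.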
+static bool
+parkunlock(G *gp, void *lock)
+{
+ USED(gp);
+ runtime_unlock(lock);
+ return true;
+}
+
+// Puts the current goroutine into a waiting state and unlocks the lock.
+// The goroutine can be made runnable again by calling runtime_ready(gp).
+void
+runtime_parkunlock(Lock *lock, const char *reason)
+{
+ runtime_park(parkunlock, lock, reason);
+}
+
+// runtime_park continuation on g0.
+static void
+park0(G *gp)
+{
+ bool ok;
+
+ gp->status = Gwaiting;
+ gp->m = nil;
+ m->curg = nil;
+ if(m->waitunlockf) {
+ ok = m->waitunlockf(gp, m->waitlock);
+ m->waitunlockf = nil;
+ m->waitlock = nil;
+ if(!ok) {
+ gp->status = Grunnable;
+ execute(gp); // Schedule it back, never returns.
+ }
+ }
+ if(m->lockedg) {
+ stoplockedm();
+ execute(gp); // Never returns.
+ }
+ schedule();
+}
+
+// Scheduler yield.
+void
+runtime_gosched(void)
+{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
+ runtime_mcall(runtime_gosched0);
+}
+
+// runtime_gosched continuation on g0.
+void
+runtime_gosched0(G *gp)
+{
+ gp->status = Grunnable;
+ gp->m = nil;
+ m->curg = nil;
+ runtime_lock(&runtime_sched);
+ globrunqput(gp);
+ runtime_unlock(&runtime_sched);
+ if(m->lockedg) {
+ stoplockedm();
+ execute(gp); // Never returns.
+ }
+ schedule();
+}
+
+ // Finishes execution of the current goroutine.
+ // It needs to be marked nosplit because it runs with sp > stackbase (as runtime_lessstack does).
+ // Since it does not return, that normally does not matter; but if it were preempted
+ // at the split-stack check, the GC would complain about an inconsistent sp.
+void runtime_goexit(void) __attribute__ ((noinline));
+void
+runtime_goexit(void)
+{
+ if(g->status != Grunning)
+ runtime_throw("bad g status");
+ runtime_mcall(goexit0);
+}
+
+// runtime_goexit continuation on g0.
+static void
+goexit0(G *gp)
+{
+ gp->status = Gdead;
+ gp->entry = nil;
+ gp->m = nil;
+ gp->lockedm = nil;
+ gp->paniconfault = 0;
+ gp->defer = nil; // should be nil already, but just in case.
+ gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
+ gp->writenbuf = 0;
+ gp->writebuf = nil;
+ gp->waitreason = nil;
+ gp->param = nil;
+ m->curg = nil;
+ m->lockedg = nil;
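+ // Only the external LockOSThread bit may still be set at this point;
+ // any internal lock bits indicate unbalanced lockOSThread calls.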
+ if(m->locked & ~LockExternal) {
+ runtime_printf("invalid m->locked = %d\n", m->locked);
+ runtime_throw("internal lockOSThread error");
+ }
+ m->locked = 0;
+ gfput(m->p, gp);
+ schedule();
+}
+
+// The goroutine g is about to enter a system call.
+// Record that it's not using the cpu anymore.
+// This is called only from the go syscall library and cgocall,
+// not from the low-level system calls used by the runtime.
+//
+ // Entersyscall cannot split the stack: the saved context must still
+ // refer to the caller's stack segment, because entersyscall is going
+ // to return immediately after.
+
+void runtime_entersyscall(void) __attribute__ ((no_split_stack));
+static void doentersyscall(void) __attribute__ ((no_split_stack, noinline));
+
+void
+runtime_entersyscall()
+{
+ // Save the registers in the g structure so that any pointers
+ // held in registers will be seen by the garbage collector.
+ getcontext(&g->gcregs);
+
+ // Do the work in a separate function, so that this function
+ // doesn't save any registers on its own stack. If this
+ // function does save any registers, we might store the wrong
+ // value in the call to getcontext.
+ //
+ // FIXME: This assumes that we do not need to save any
+ // callee-saved registers to access the TLS variable g. We
+ // don't want to put the ucontext_t on the stack because it is
+ // large and we can not split the stack here.
+ doentersyscall();
+}
+
+static void
+doentersyscall()
+{
+ // Disable preemption: during this function g is in Gsyscall status,
+ // but can have an inconsistent g->sched, and we must not let the GC observe it.
+ m->locks++;
+
+ // Leave SP around for GC and traceback.
+#ifdef USING_SPLIT_STACK
+ g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
+ &g->gcnext_segment, &g->gcnext_sp,
+ &g->gcinitial_sp);
+#else
+ {
+ void *v;
+
+ g->gcnext_sp = (byte *) &v;
+ }
+#endif
+
+ g->status = Gsyscall;
+
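+ // If sysmon is sleeping, wake it up so it can retake this P in case the
+ // syscall blocks (the flag is re-checked under the scheduler lock).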
+ if(runtime_atomicload(&runtime_sched.sysmonwait)) { // TODO: fast atomic
+ runtime_lock(&runtime_sched);
+ if(runtime_atomicload(&runtime_sched.sysmonwait)) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+ runtime_unlock(&runtime_sched);
+ }
+
+ m->mcache = nil;
+ m->p->m = nil;
+ runtime_atomicstore(&m->p->status, Psyscall);
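+ // If a stop-the-world is in progress, hand the P over to it directly
+ // rather than leaving it in Psyscall.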
+ if(runtime_sched.gcwaiting) {
+ runtime_lock(&runtime_sched);
+ if (runtime_sched.stopwait > 0 && runtime_cas(&m->p->status, Psyscall, Pgcstop)) {
+ if(--runtime_sched.stopwait == 0)
+ runtime_notewakeup(&runtime_sched.stopnote);
+ }
+ runtime_unlock(&runtime_sched);
+ }
+
+ m->locks--;
+}
+
+// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
+void
+runtime_entersyscallblock(void)
+{
+ P *p;
+
+ m->locks++; // see comment in entersyscall
+
+ // Leave SP around for GC and traceback.
+#ifdef USING_SPLIT_STACK
+ g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
+ &g->gcnext_segment, &g->gcnext_sp,
+ &g->gcinitial_sp);
+#else
+ g->gcnext_sp = (byte *) &p;
+#endif
+
+ // Save the registers in the g structure so that any pointers
+ // held in registers will be seen by the garbage collector.
+ getcontext(&g->gcregs);
+
+ g->status = Gsyscall;
+
+ p = releasep();
+ handoffp(p);
+ if(g->isbackground) // do not consider blocked scavenger for deadlock detection
+ incidlelocked(1);
+
+ m->locks--;
+}
+
+// The goroutine g exited its system call.
+// Arrange for it to run on a cpu again.
+// This is called only from the go syscall library, not
+// from the low-level system calls used by the runtime.
+void
+runtime_exitsyscall(void)
+{
+ G *gp;
+
+ m->locks++; // see comment in entersyscall
+
+ gp = g;
+ if(gp->isbackground) // do not consider blocked scavenger for deadlock detection
+ incidlelocked(-1);
+
+ g->waitsince = 0;
+ if(exitsyscallfast()) {
+ // There's a cpu for us, so we can run.
+ m->p->syscalltick++;
+ gp->status = Grunning;
+ // Garbage collector isn't running (since we are),
+ // so okay to clear gcstack and gcsp.
+#ifdef USING_SPLIT_STACK
+ gp->gcstack = nil;
+#endif
+ gp->gcnext_sp = nil;
+ runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
+ m->locks--;
+ return;
+ }
+
+ m->locks--;
+
+ // Call the scheduler.
+ runtime_mcall(exitsyscall0);
+
+ // Scheduler returned, so we're allowed to run now.
+ // Delete the gcstack information that we left for
+ // the garbage collector during the system call.
+ // Must wait until now because until gosched returns
+ // we don't know for sure that the garbage collector
+ // is not running.
+#ifdef USING_SPLIT_STACK
+ gp->gcstack = nil;
+#endif
+ gp->gcnext_sp = nil;
+ runtime_memclr(&gp->gcregs, sizeof gp->gcregs);
+
+ // Don't refer to m again; we might be running on a different
+ // thread after returning from runtime_mcall.
+ runtime_m()->p->syscalltick++;
+}
+
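+ // Fast path of runtime_exitsyscall: tries to re-acquire the last P, or grab
+ // an idle one, without calling into the scheduler.
+ // Returns true if a P was acquired.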
+static bool
+exitsyscallfast(void)
+{
+ P *p;
+
+ // Freezetheworld sets stopwait but does not retake P's.
+ if(runtime_sched.stopwait) {
+ m->p = nil;
+ return false;
+ }
+
+ // Try to re-acquire the last P.
+ if(m->p && m->p->status == Psyscall && runtime_cas(&m->p->status, Psyscall, Prunning)) {
+ // There's a cpu for us, so we can run.
+ m->mcache = m->p->mcache;
+ m->p->m = m;
+ return true;
+ }
+ // Try to get any other idle P.
+ m->p = nil;
+ if(runtime_sched.pidle) {
+ runtime_lock(&runtime_sched);
+ p = pidleget();
+ if(p && runtime_atomicload(&runtime_sched.sysmonwait)) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+ runtime_unlock(&runtime_sched);
+ if(p) {
+ acquirep(p);
+ return true;
+ }
+ }
+ return false;
+}
+
+// runtime_exitsyscall slow path on g0.
+// Failed to acquire P, enqueue gp as runnable.
+static void
+exitsyscall0(G *gp)
+{
+ P *p;
+
+ gp->status = Grunnable;
+ gp->m = nil;
+ m->curg = nil;
+ runtime_lock(&runtime_sched);
+ p = pidleget();
+ if(p == nil)
+ globrunqput(gp);
+ else if(runtime_atomicload(&runtime_sched.sysmonwait)) {
+ runtime_atomicstore(&runtime_sched.sysmonwait, 0);
+ runtime_notewakeup(&runtime_sched.sysmonnote);
+ }
+ runtime_unlock(&runtime_sched);
+ if(p) {
+ acquirep(p);
+ execute(gp); // Never returns.
+ }
+ if(m->lockedg) {
+ // Wait until another thread schedules gp (and therefore this m) again.
+ stoplockedm();
+ execute(gp); // Never returns.
+ }
+ stopm();
+ schedule(); // Never returns.
+}
+
+// Called from syscall package before fork.
+void syscall_runtime_BeforeFork(void)
+ __asm__(GOSYM_PREFIX "syscall.runtime_BeforeFork");
+void
+syscall_runtime_BeforeFork(void)
+{
+ // Fork can hang if preempted with signals frequently enough (see issue 5517).
+ // Ensure that we stay on the same M where we disable profiling.
+ runtime_m()->locks++;
+ if(runtime_m()->profilehz != 0)
+ runtime_resetcpuprofiler(0);
+}
+
+// Called from syscall package after fork in parent.
+void syscall_runtime_AfterFork(void)
+ __asm__(GOSYM_PREFIX "syscall.runtime_AfterFork");
+void
+syscall_runtime_AfterFork(void)
+{
+ int32 hz;
+
+ hz = runtime_sched.profilehz;
+ if(hz != 0)
+ runtime_resetcpuprofiler(hz);
+ runtime_m()->locks--;
+}
+
+// Allocate a new g, with a stack big enough for stacksize bytes.
+G*
+runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize)
+{
+ G *newg;
+
+ newg = allocg();
+ if(stacksize >= 0) {
+#if USING_SPLIT_STACK
+ int dont_block_signals = 0;
+
+ *ret_stack = __splitstack_makecontext(stacksize,
+ &newg->stack_context[0],
+ ret_stacksize);
+ __splitstack_block_signals_context(&newg->stack_context[0],
+ &dont_block_signals, nil);
+#else
+ *ret_stack = runtime_mallocgc(stacksize, 0, FlagNoProfiling|FlagNoGC);
+ *ret_stacksize = stacksize;
+ newg->gcinitial_sp = *ret_stack;
+ newg->gcstack_size = stacksize;
+ runtime_xadd(&runtime_stacks_sys, stacksize);
+#endif
+ }
+ return newg;
+}
+
+/* For runtime package testing. */
+
+
+void runtime_testing_entersyscall(void)
+ __asm__ (GOSYM_PREFIX "runtime.entersyscall");
+void
+runtime_testing_entersyscall()
+{
+ runtime_entersyscall();
+}
+
+void runtime_testing_exitsyscall(void)
+ __asm__ (GOSYM_PREFIX "runtime.exitsyscall");
+
+void
+runtime_testing_exitsyscall()
+{
+ runtime_exitsyscall();
+}
+
+ // Create a new g running fn with siz bytes of arguments.
+ // Put it on the queue of g's waiting to run.
+ // The compiler turns a go statement into a call to this.
+ // Cannot split the stack because it assumes that the arguments
+ // are available sequentially after &fn; they would not be
+ // copied if a stack split occurred. It's OK for this to call
+ // functions that split the stack.
+G*
+__go_go(void (*fn)(void*), void* arg)
+{
+ byte *sp;
+ size_t spsize;
+ G *newg;
+ P *p;
+
+//runtime_printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
+ if(fn == nil) {
+ m->throwing = -1; // do not dump full stacks
+ runtime_throw("go of nil func value");
+ }
+ m->locks++; // disable preemption because it can be holding p in a local var
+
+ p = m->p;
+ if((newg = gfget(p)) != nil) {