From: Ian Lance Taylor
Date: Thu, 29 Sep 2016 00:56:44 +0000 (+0000)
Subject: runtime: copy runtime.go and runtime1.go from Go 1.7
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=6748787813f8080f853766877f6933bc0823c19d;p=gcc.git

runtime: copy runtime.go and runtime1.go from Go 1.7

Also copy over cputicks.go, env_posix.go, vdso_none.go, stubs2.go, and
a part of os_linux.go.  Remove the corresponding functions from the C
code in libgo/runtime.  Add some transitional support functions to
stubs.go.

This converts several minor functions from C to Go.

Reviewed-on: https://go-review.googlesource.com/29962

From-SVN: r240609
---

diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 6ab7e422a5a..6e5d3c46752 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-c79a35411c1065c71add196fdeca6e5207a79248
+e51657a576367c7a498c94baf985b79066fc082a
 
 The first line of this file holds the git revision number of the
 last merge done from the gofrontend repository.
diff --git a/libgo/go/runtime/cputicks.go b/libgo/go/runtime/cputicks.go
new file mode 100644
index 00000000000..ee15aca24ef
--- /dev/null
+++ b/libgo/go/runtime/cputicks.go
@@ -0,0 +1,9 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// careful: cputicks is not guaranteed to be monotonic! In particular, we have
+// noticed drift between cpus on certain os/arch combinations. See issue 8976.
+func cputicks() int64
diff --git a/libgo/go/runtime/env_posix.go b/libgo/go/runtime/env_posix.go
new file mode 100644
index 00000000000..e076edb85d0
--- /dev/null
+++ b/libgo/go/runtime/env_posix.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
+
+package runtime
+
+func gogetenv(key string) string {
+	env := environ()
+	if env == nil {
+		throw("getenv before env init")
+	}
+	for _, s := range environ() {
+		if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
+			return s[len(key)+1:]
+		}
+	}
+	return ""
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 2b1a9b72211..b13e3829423 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -6,10 +6,6 @@
 
 package runtime
 
-import (
-	"unsafe"
-)
-
 //var Fadd64 = fadd64
 //var Fsub64 = fsub64
 //var Fmul64 = fmul64
@@ -103,20 +99,6 @@ var HashLoad = &hashLoad
 
 //type Uintreg uintreg
 
-//extern __go_open
-func open(path *byte, mode int32, perm int32) int32
-
-func Open(path *byte, mode int32, perm int32) int32 {
-	return open(path, mode, perm)
-}
-
-//extern close
-func close(int32) int32
-
-func Close(fd int32) int32 {
-	return close(fd)
-}
-
 /*
 func RunSchedLocalQueueTest() {
 	_p_ := new(p)
@@ -224,25 +206,13 @@ var IfaceHash = ifaceHash
 var MemclrBytes = memclrBytes
 */
 
-//extern read
-func read(fd int32, buf unsafe.Pointer, size int32) int32
+var Open = open
+var Close = closefd
+var Read = read
+var Write = write
 
-func Read(fd int32, buf unsafe.Pointer, size int32) int32 {
-	return read(fd, buf, size)
-}
-
-//extern write
-func write(fd int32, buf unsafe.Pointer, size int32) int32
-
-func Write(fd uintptr, buf unsafe.Pointer, size int32) int32 {
-	return write(int32(fd), buf, size)
-}
-
-func envs() []string
-func setenvs([]string)
-
-var Envs = envs
-var SetEnvs = setenvs
+func Envs() []string     { return envs }
+func SetEnvs(e []string) { envs = e }
 
 //var BigEndian = sys.BigEndian
 
@@ -287,7 +257,10 @@ var ForceGCPeriod = &forcegcperiod
 // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
 // the "environment" traceback level, so later calls to
 // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
-func SetTracebackEnv(level string)
+func SetTracebackEnv(level string) {
+	setTraceback(level)
+	traceback_env = traceback_cache
+}
 
 /*
 var ReadUnaligned32 = readUnaligned32
diff --git a/libgo/go/runtime/os_linux.go b/libgo/go/runtime/os_linux.go
new file mode 100644
index 00000000000..c44e4e8287e
--- /dev/null
+++ b/libgo/go/runtime/os_linux.go
@@ -0,0 +1,56 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+const (
+	_AT_NULL   = 0  // End of vector
+	_AT_PAGESZ = 6  // System physical page size
+	_AT_RANDOM = 25 // introduced in 2.6.29
+)
+
+func sysargs(argc int32, argv **byte) {
+	n := argc + 1
+
+	// skip over argv, envp to get to auxv
+	for argv_index(argv, n) != nil {
+		n++
+	}
+
+	// skip NULL separator
+	n++
+
+	// now argv+n is auxv
+	auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
+	for i := 0; auxv[i] != _AT_NULL; i += 2 {
+		tag, val := auxv[i], auxv[i+1]
+		switch tag {
+		case _AT_RANDOM:
+			// The kernel provides a pointer to 16-bytes
+			// worth of random data.
+			startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]
+
+		case _AT_PAGESZ:
+			// Check that the true physical page size is
+			// compatible with the runtime's assumed
+			// physical page size.
+			if sys.PhysPageSize < val {
+				print("runtime: kernel page size (", val, ") is larger than runtime page size (", sys.PhysPageSize, ")\n")
+				exit(1)
+			}
+			if sys.PhysPageSize%val != 0 {
+				print("runtime: runtime page size (", sys.PhysPageSize, ") is not a multiple of kernel page size (", val, ")\n")
+				exit(1)
+			}
+		}
+
+		// Commented out for gccgo for now.
+		// archauxv(tag, val)
+	}
+}
diff --git a/libgo/go/runtime/runtime.go b/libgo/go/runtime/runtime.go
new file mode 100644
index 00000000000..23601e1fc66
--- /dev/null
+++ b/libgo/go/runtime/runtime.go
@@ -0,0 +1,69 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	_ "unsafe" // for go:linkname
+)
+
+//go:generate go run wincallback.go
+//go:generate go run mkduff.go
+//go:generate go run mkfastlog2table.go
+
+// For gccgo, while we still have C runtime code, use go:linkname to
+// rename some functions to themselves, so that the compiler will
+// export them.
+//
+//go:linkname tickspersecond runtime.tickspersecond
+
+var ticks struct {
+	lock mutex
+	pad  uint32 // ensure 8-byte alignment of val on 386
+	val  uint64
+}
+
+// Note: Called by runtime/pprof in addition to runtime code.
+func tickspersecond() int64 {
+	r := int64(atomic.Load64(&ticks.val))
+	if r != 0 {
+		return r
+	}
+	lock(&ticks.lock)
+	r = int64(ticks.val)
+	if r == 0 {
+		t0 := nanotime()
+		c0 := cputicks()
+		usleep(100 * 1000)
+		t1 := nanotime()
+		c1 := cputicks()
+		if t1 == t0 {
+			t1++
+		}
+		r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
+		if r == 0 {
+			r++
+		}
+		atomic.Store64(&ticks.val, uint64(r))
+	}
+	unlock(&ticks.lock)
+	return r
+}
+
+var envs []string
+var argslice []string
+
+//go:linkname syscall_runtime_envs syscall.runtime_envs
+func syscall_runtime_envs() []string { return append([]string{}, envs...) }
+
+//go:linkname os_runtime_args os.runtime_args
+func os_runtime_args() []string { return append([]string{}, argslice...) }
+
+// Temporary, for the gccgo runtime code written in C.
+//go:linkname get_envs runtime_get_envs
+func get_envs() []string { return envs }
+
+//go:linkname get_args runtime_get_args
+func get_args() []string { return argslice }
diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go
new file mode 100644
index 00000000000..dea19da997f
--- /dev/null
+++ b/libgo/go/runtime/runtime1.go
@@ -0,0 +1,509 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+	"runtime/internal/atomic"
+	"runtime/internal/sys"
+	"unsafe"
+)
+
+// For gccgo, while we still have C runtime code, use go:linkname to
+// rename some functions to themselves, so that the compiler will
+// export them.
+//
+//go:linkname gotraceback runtime.gotraceback
+//go:linkname args runtime.args
+//go:linkname goargs runtime.goargs
+//go:linkname check runtime.check
+//go:linkname goenvs_unix runtime.goenvs_unix
+//go:linkname parsedebugvars runtime.parsedebugvars
+//go:linkname timediv runtime.timediv
+
+// Keep a cached value to make gotraceback fast,
+// since we call it on every call to gentraceback.
+// The cached value is a uint32 in which the low bits
+// are the "crash" and "all" settings and the remaining
+// bits are the traceback value (0 off, 1 on, 2 include system).
+const (
+	tracebackCrash = 1 << iota
+	tracebackAll
+	tracebackShift = iota
+)
+
+var traceback_cache uint32 = 2 << tracebackShift
+var traceback_env uint32
+
+// gotraceback returns the current traceback settings.
+//
+// If level is 0, suppress all tracebacks.
+// If level is 1, show tracebacks, but exclude runtime frames.
+// If level is 2, show tracebacks including runtime frames.
+// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
+// If crash is set, crash (core dump, etc) after tracebacking.
+//
+//go:nosplit
+func gotraceback() (level int32, all, crash bool) {
+	_g_ := getg()
+	all = _g_.m.throwing > 0
+	if _g_.m.traceback != 0 {
+		level = int32(_g_.m.traceback)
+		return
+	}
+	t := atomic.Load(&traceback_cache)
+	crash = t&tracebackCrash != 0
+	all = all || t&tracebackAll != 0
+	level = int32(t >> tracebackShift)
+	return
+}
+
+var (
+	argc int32
+	argv **byte
+)
+
+// nosplit for use in linux startup sysargs
+//go:nosplit
+func argv_index(argv **byte, i int32) *byte {
+	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
+}
+
+func args(c int32, v **byte) {
+	argc = c
+	argv = v
+	sysargs(c, v)
+}
+
+func goargs() {
+	if GOOS == "windows" {
+		return
+	}
+
+	argslice = make([]string, argc)
+	for i := int32(0); i < argc; i++ {
+		argslice[i] = gostringnocopy(argv_index(argv, i))
+	}
+}
+
+func goenvs_unix() {
+	// TODO(austin): ppc64 in dynamic linking mode doesn't
+	// guarantee env[] will immediately follow argv. Might cause
+	// problems.
+	n := int32(0)
+	for argv_index(argv, argc+1+n) != nil {
+		n++
+	}
+
+	envs = make([]string, n)
+	for i := int32(0); i < n; i++ {
+		envs[i] = gostring(argv_index(argv, argc+1+i))
+	}
+}
+
+func environ() []string {
+	return envs
+}
+
+// TODO: These should be locals in testAtomic64, but we don't 8-byte
+// align stack variables on 386.
+var test_z64, test_x64 uint64
+
+func testAtomic64() {
+	test_z64 = 42
+	test_x64 = 0
+	// prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
+	// prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
+	// prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
+	// prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
+	if atomic.Cas64(&test_z64, test_x64, 1) {
+		throw("cas64 failed")
+	}
+	if test_x64 != 0 {
+		throw("cas64 failed")
+	}
+	test_x64 = 42
+	if !atomic.Cas64(&test_z64, test_x64, 1) {
+		throw("cas64 failed")
+	}
+	if test_x64 != 42 || test_z64 != 1 {
+		throw("cas64 failed")
+	}
+	if atomic.Load64(&test_z64) != 1 {
+		throw("load64 failed")
+	}
+	atomic.Store64(&test_z64, (1<<40)+1)
+	if atomic.Load64(&test_z64) != (1<<40)+1 {
+		throw("store64 failed")
+	}
+	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
+		throw("xadd64 failed")
+	}
+	if atomic.Load64(&test_z64) != (2<<40)+2 {
+		throw("xadd64 failed")
+	}
+	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
+		throw("xchg64 failed")
+	}
+	if atomic.Load64(&test_z64) != (3<<40)+3 {
+		throw("xchg64 failed")
+	}
+}
+
+func check() {
+
+	// This doesn't currently work for gccgo. Because escape
+	// analysis is not turned on by default, the code below that
+	// takes the address of local variables causes memory
+	// allocation, but this function is called before the memory
+	// allocator has been initialized.
+	return
+
+	var (
+		a     int8
+		b     uint8
+		c     int16
+		d     uint16
+		e     int32
+		f     uint32
+		g     int64
+		h     uint64
+		i, i1 float32
+		j, j1 float64
+		k, k1 unsafe.Pointer
+		l     *uint16
+		m     [4]byte
+	)
+	type x1t struct {
+		x uint8
+	}
+	type y1t struct {
+		x1 x1t
+		y  uint8
+	}
+	var x1 x1t
+	var y1 y1t
+
+	if unsafe.Sizeof(a) != 1 {
+		throw("bad a")
+	}
+	if unsafe.Sizeof(b) != 1 {
+		throw("bad b")
+	}
+	if unsafe.Sizeof(c) != 2 {
+		throw("bad c")
+	}
+	if unsafe.Sizeof(d) != 2 {
+		throw("bad d")
+	}
+	if unsafe.Sizeof(e) != 4 {
+		throw("bad e")
+	}
+	if unsafe.Sizeof(f) != 4 {
+		throw("bad f")
+	}
+	if unsafe.Sizeof(g) != 8 {
+		throw("bad g")
+	}
+	if unsafe.Sizeof(h) != 8 {
+		throw("bad h")
+	}
+	if unsafe.Sizeof(i) != 4 {
+		throw("bad i")
+	}
+	if unsafe.Sizeof(j) != 8 {
+		throw("bad j")
+	}
+	if unsafe.Sizeof(k) != sys.PtrSize {
+		throw("bad k")
+	}
+	if unsafe.Sizeof(l) != sys.PtrSize {
+		throw("bad l")
+	}
+	if unsafe.Sizeof(x1) != 1 {
+		throw("bad unsafe.Sizeof x1")
+	}
+	if unsafe.Offsetof(y1.y) != 1 {
+		throw("bad offsetof y1.y")
+	}
+	if unsafe.Sizeof(y1) != 2 {
+		throw("bad unsafe.Sizeof y1")
+	}
+
+	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
+		throw("bad timediv")
+	}
+
+	var z uint32
+	z = 1
+	if !atomic.Cas(&z, 1, 2) {
+		throw("cas1")
+	}
+	if z != 2 {
+		throw("cas2")
+	}
+
+	z = 4
+	if atomic.Cas(&z, 5, 6) {
+		throw("cas3")
+	}
+	if z != 4 {
+		throw("cas4")
+	}
+
+	z = 0xffffffff
+	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
+		throw("cas5")
+	}
+	if z != 0xfffffffe {
+		throw("cas6")
+	}
+
+	k = unsafe.Pointer(uintptr(0xfedcb123))
+	if sys.PtrSize == 8 {
+		k = unsafe.Pointer(uintptr(k) << 10)
+	}
+	if casp(&k, nil, nil) {
+		throw("casp1")
+	}
+	k1 = add(k, 1)
+	if !casp(&k, k, k1) {
+		throw("casp2")
+	}
+	if k != k1 {
+		throw("casp3")
+	}
+
+	m = [4]byte{1, 1, 1, 1}
+	atomic.Or8(&m[1], 0xf0)
+	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
+		throw("atomicor8")
+	}
+
+	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
+	if j == j {
+		throw("float64nan")
+	}
+	if !(j != j) {
+		throw("float64nan1")
+	}
+
+	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
+	if j == j1 {
+		throw("float64nan2")
+	}
+	if !(j != j1) {
+		throw("float64nan3")
+	}
+
+	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
+	if i == i {
+		throw("float32nan")
+	}
+	if i == i {
+		throw("float32nan1")
+	}
+
+	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
+	if i == i1 {
+		throw("float32nan2")
+	}
+	if i == i1 {
+		throw("float32nan3")
+	}
+
+	testAtomic64()
+
+	// if _FixedStack != round2(_FixedStack) {
+	// 	throw("FixedStack is not power-of-2")
+	// }
+
+	if !checkASM() {
+		throw("assembly checks failed")
+	}
+}
+
+type dbgVar struct {
+	name  string
+	value *int32
+}
+
+// Holds variables parsed from GODEBUG env var,
+// except for "memprofilerate" since there is an
+// existing int var for that value, which may
+// already have an initial value.
+
+// For gccgo we use a named type so that the C code can see the
+// definition.
+type debugVars struct {
+	allocfreetrace    int32
+	cgocheck          int32
+	efence            int32
+	gccheckmark       int32
+	gcpacertrace      int32
+	gcshrinkstackoff  int32
+	gcstackbarrieroff int32
+	gcstackbarrierall int32
+	gcstoptheworld    int32
+	gctrace           int32
+	invalidptr        int32
+	sbrk              int32
+	scavenge          int32
+	scheddetail       int32
+	schedtrace        int32
+	wbshadow          int32
+}
+
+var debug debugVars
+
+// For gccgo's C code.
+//extern runtime_setdebug
+func runtime_setdebug(*debugVars)
+
+var dbgvars = []dbgVar{
+	{"allocfreetrace", &debug.allocfreetrace},
+	{"cgocheck", &debug.cgocheck},
+	{"efence", &debug.efence},
+	{"gccheckmark", &debug.gccheckmark},
+	{"gcpacertrace", &debug.gcpacertrace},
+	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
+	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
+	{"gcstackbarrierall", &debug.gcstackbarrierall},
+	{"gcstoptheworld", &debug.gcstoptheworld},
+	{"gctrace", &debug.gctrace},
+	{"invalidptr", &debug.invalidptr},
+	{"sbrk", &debug.sbrk},
+	{"scavenge", &debug.scavenge},
+	{"scheddetail", &debug.scheddetail},
+	{"schedtrace", &debug.schedtrace},
+	{"wbshadow", &debug.wbshadow},
+}
+
+func parsedebugvars() {
+	// defaults
+	debug.cgocheck = 1
+	debug.invalidptr = 1
+
+	for p := gogetenv("GODEBUG"); p != ""; {
+		field := ""
+		i := index(p, ",")
+		if i < 0 {
+			field, p = p, ""
+		} else {
+			field, p = p[:i], p[i+1:]
+		}
+		i = index(field, "=")
+		if i < 0 {
+			continue
+		}
+		key, value := field[:i], field[i+1:]
+
+		// Update MemProfileRate directly here since it
+		// is int, not int32, and should only be updated
+		// if specified in GODEBUG.
+		if key == "memprofilerate" {
+			MemProfileRate = atoi(value)
+		} else {
+			for _, v := range dbgvars {
+				if v.name == key {
+					*v.value = int32(atoi(value))
+				}
+			}
+		}
+	}
+
+	setTraceback(gogetenv("GOTRACEBACK"))
+	traceback_env = traceback_cache
+
+	// if debug.gcstackbarrierall > 0 {
+	// 	firstStackBarrierOffset = 0
+	// }
+
+	// For cgocheck > 1, we turn on the write barrier at all times
+	// and check all pointer writes.
+	if debug.cgocheck > 1 {
+		writeBarrier.cgo = true
+		writeBarrier.enabled = true
+	}
+
+	// Tell the C code what the value is.
+	runtime_setdebug(&debug)
+}
+
+//go:linkname setTraceback runtime_debug.SetTraceback
+func setTraceback(level string) {
+	var t uint32
+	switch level {
+	case "none":
+		t = 0
+	case "single", "":
+		t = 1 << tracebackShift
+	case "all":
+		t = 1<<tracebackShift | tracebackAll
+	case "system":
+		t = 2<<tracebackShift | tracebackAll
+	case "crash":
+		t = 2<<tracebackShift | tracebackAll | tracebackCrash
+	default:
+		t = uint32(atoi(level))<<tracebackShift | tracebackAll
+	}
+	// when C owns the process, simply exit'ing the process on fatal errors
+	// and panics is everyone's expectation and the right thing to do.
+	if islibrary || isarchive {
+		t |= tracebackCrash
+	}
+	t |= traceback_env
+
+	atomic.Store(&traceback_cache, t)
+}
+
+// Poor mans 64-bit division.
+// This is a very special function, do not use it if you are not sure what you are doing.
+// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
+// Handles overflow in a time-specific manner.
+//go:nosplit
+func timediv(v int64, div int32, rem *int32) int32 {
+	res := int32(0)
+	for bit := 30; bit >= 0; bit-- {
+		if v >= int64(div)<<uint(bit) {
+			v = v - (int64(div) << uint(bit))
+			res += 1 << uint(bit)
+		}
+	}
+	if v >= int64(div) {
+		if rem != nil {
+			*rem = 0
+		}
+		return 0x7fffffff
+	}
+	if rem != nil {
+		*rem = int32(v)
+	}
+	return res
+}
+
+// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
+
+//go:nosplit
+func acquirem() *m {
+	_g_ := getg()
+	_g_.m.locks++
+	return _g_.m
+}
+
+//go:nosplit
+func releasem(mp *m) {
+	// _g_ := getg()
+	mp.locks--
+	// if mp.locks == 0 && _g_.preempt {
+	// 	// restore the preemption request in case we've cleared it in newstack
+	// 	_g_.stackguard0 = stackPreempt
+	// }
+}
+
+//go:nosplit
+func gomcache() *mcache {
+	return getg().m.mcache
+}
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 468d11e8e83..4fba428d7af 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -678,11 +678,11 @@ type forcegcstate struct {
 	idle uint32
 }
 
-/*
 // startup_random_data holds random bytes initialized at startup. These come from
 // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
 var startupRandomData []byte
 
+/*
 // extendRandom extends the random numbers in r[:n] to the whole slice r.
 // Treats n<0 as n==0.
 func extendRandom(r []byte, n int) {
@@ -797,8 +797,8 @@ var (
 
 // Set by the linker so the runtime can determine the buildmode.
 var (
-// islibrary bool // -buildmode=c-shared
-// isarchive bool // -buildmode=c-archive
+	islibrary bool // -buildmode=c-shared
+	isarchive bool // -buildmode=c-archive
 )
 
 // Types that are only used by gccgo.
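
The timediv function added above replaces the C runtime_timediv removed from libgo/runtime/runtime.c later in this patch: it divides a 64-bit value by a 32-bit divisor using only shifts and subtractions, because int64 division on 386 lowers to a call that cannot appear in nosplit functions. The following standalone sketch (a hypothetical file, not part of the patch) exercises the same algorithm with the values the runtime's own self-test in check() uses:

// timediv_sketch.go: standalone check of the shift-and-subtract
// division ported into runtime1.go above. Not part of the patch.
package main

import "fmt"

// timediv mirrors the runtime version: it divides a 64-bit dividend by
// a 32-bit divisor, returning a quotient saturated at 0x7fffffff and an
// optional remainder.
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		// The quotient needs more than 31 bits: saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

func main() {
	var rem int32
	// Same values as the self-test in check(): 12345 seconds plus
	// 54321 nanoseconds divided by 1e9 gives 12345 remainder 54321.
	fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem)
}
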
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 48abbfa889f..3ff3aef2216 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
 )
@@ -209,10 +210,10 @@ func round(n, a uintptr) uintptr {
 	return (n + a - 1) &^ (a - 1)
 }
 
-/*
 // checkASM returns whether assembly runtime checks have passed.
-func checkASM() bool
-*/
+func checkASM() bool {
+	return true
+}
 
 // throw crashes the program.
 // For gccgo unless and until we port panic.go.
@@ -251,3 +252,119 @@ type stringStruct struct {
 func stringStructOf(sp *string) *stringStruct {
 	return (*stringStruct)(unsafe.Pointer(sp))
 }
+
+// Here for gccgo unless and until we port slice.go.
+type slice struct {
+	array unsafe.Pointer
+	len   int
+	cap   int
+}
+
+// Here for gccgo until we port malloc.go.
+const (
+	_64bit = 1 << (^uintptr(0) >> 63) / 2
+	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
+	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
+)
+
+// Here for gccgo until we port malloc.go.
+//extern runtime_mallocgc
+func c_mallocgc(size uintptr, typ uintptr, flag uint32) unsafe.Pointer
+func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	flag := uint32(0)
+	if !needzero {
+		flag = 1 << 3
+	}
+	return c_mallocgc(size, uintptr(unsafe.Pointer(typ)), flag)
+}
+
+// Here for gccgo unless and until we port string.go.
+func rawstring(size int) (p unsafe.Pointer, s string) {
+	p = mallocgc(uintptr(size), nil, false)
+
+	(*(*stringStruct)(unsafe.Pointer(&s))).str = p
+	(*(*stringStruct)(unsafe.Pointer(&s))).len = size
+
+	return
+}
+
+// Here for gccgo unless and until we port string.go.
+func gostring(p *byte) string {
+	l := findnull(p)
+	if l == 0 {
+		return ""
+	}
+	m, s := rawstring(l)
+	memmove(m, unsafe.Pointer(p), uintptr(l))
+	return s
+}
+
+// Here for gccgo unless and until we port string.go.
+func index(s, t string) int {
+	if len(t) == 0 {
+		return 0
+	}
+	for i := 0; i < len(s); i++ {
+		if s[i] == t[0] && hasprefix(s[i:], t) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Here for gccgo unless and until we port string.go.
+func hasprefix(s, t string) bool {
+	return len(s) >= len(t) && s[:len(t)] == t
+}
+
+// Here for gccgo unless and until we port string.go.
+//go:nosplit
+func findnull(s *byte) int {
+	if s == nil {
+		return 0
+	}
+	p := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s))
+	l := 0
+	for p[l] != 0 {
+		l++
+	}
+	return l
+}
+
+// Here for gccgo unless and until we port string.go.
+//go:nosplit
+func gostringnocopy(str *byte) string {
+	ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}
+	return *(*string)(unsafe.Pointer(&ss))
+}
+
+// Here for gccgo unless and until we port string.go.
+func atoi(s string) int {
+	n := 0
+	for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+		n = n*10 + int(s[0]) - '0'
+		s = s[1:]
+	}
+	return n
+}
+
+// Here for gccgo until we port mgc.go.
+var writeBarrier struct {
+	enabled bool   // compiler emits a check of this before calling write barrier
+	needed  bool   // whether we need a write barrier for current GC phase
+	cgo     bool   // whether we need a write barrier for a cgo check
+	alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
+}
+
+// Here for gccgo until we port atomic_pointer.go and mgc.go.
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+	if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
+		return false
+	}
+	return true
+}
+
+// Here for gccgo until we port lock_*.go.
+func lock(l *mutex)
+func unlock(l *mutex)
diff --git a/libgo/go/runtime/stubs2.go b/libgo/go/runtime/stubs2.go
new file mode 100644
index 00000000000..d96022674b6
--- /dev/null
+++ b/libgo/go/runtime/stubs2.go
@@ -0,0 +1,29 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+// +build !windows
+// +build !nacl
+
+package runtime
+
+import "unsafe"
+
+func read(fd int32, p unsafe.Pointer, n int32) int32
+func closefd(fd int32) int32
+
+//extern exit
+func exit(code int32)
+func nanotime() int64
+func usleep(usec uint32)
+
+func munmap(addr unsafe.Pointer, n uintptr)
+
+//go:noescape
+func write(fd uintptr, p unsafe.Pointer, n int32) int32
+
+//go:noescape
+func open(name *byte, mode, perm int32) int32
+
+func madvise(addr unsafe.Pointer, n uintptr, flags int32)
diff --git a/libgo/go/runtime/vdso_none.go b/libgo/go/runtime/vdso_none.go
new file mode 100644
index 00000000000..efae23f6eeb
--- /dev/null
+++ b/libgo/go/runtime/vdso_none.go
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package runtime
+
+func sysargs(argc int32, argv **byte) {
+}
diff --git a/libgo/runtime/env_posix.c b/libgo/runtime/env_posix.c
index b93edd65a6b..3a606825972 100644
--- a/libgo/runtime/env_posix.c
+++ b/libgo/runtime/env_posix.c
@@ -9,7 +9,7 @@
 #include "arch.h"
 #include "malloc.h"
 
-extern Slice envs;
+extern Slice runtime_get_envs(void);
 
 String
 runtime_getenv(const char *s)
@@ -17,12 +17,14 @@ runtime_getenv(const char *s)
 	int32 i, j;
 	intgo len;
 	const byte *v, *bs;
+	Slice envs;
 	String* envv;
 	int32 envc;
 	String ret;
 
 	bs = (const byte*)s;
 	len = runtime_findnull(bs);
+	envs = runtime_get_envs();
 	envv = (String*)envs.__values;
 	envc = envs.__count;
 	for(i=0; i<envc; i++) {
diff --git a/libgo/runtime/go-caller.c b/libgo/runtime/go-caller.c
[...]
+	filename = NULL;
+	if (args.__count > 0)
+	filename = (const char*)((String*)args.__values)[0].str;
 
   /* If there is no '/' in FILENAME, it was found on PATH, and
      might not be the same as the file with the same name in the
      current directory.  */
-  if (__builtin_strchr (filename, '/') == NULL)
+  if (filename != NULL && __builtin_strchr (filename, '/') == NULL)
     filename = NULL;
 
   /* If the file is small, then it's not the real executable.
diff --git a/libgo/runtime/runtime.c b/libgo/runtime/runtime.c
index 9abd096555d..e5e88ed292f 100644
--- a/libgo/runtime/runtime.c
+++ b/libgo/runtime/runtime.c
@@ -15,118 +15,27 @@
 enum {
 	maxround = sizeof(uintptr),
 };
 
-// Keep a cached value to make gotraceback fast,
-// since we call it on every call to gentraceback.
-// The cached value is a uint32 in which the low bit
-// is the "crash" setting and the top 31 bits are the
-// gotraceback value.
-enum {
-	tracebackCrash = 1 << 0,
-	tracebackAll = 1 << 1,
-	tracebackShift = 2,
-};
-static uint32 traceback_cache = 2 << tracebackShift;
-static uint32 traceback_env;
-
 extern volatile intgo runtime_MemProfileRate
   __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
 
+struct gotraceback_ret {
+	int32 level;
+	bool crash;
+};
+
+extern struct gotraceback_ret gotraceback(void)
+	__asm__ (GOSYM_PREFIX "runtime.gotraceback");
 
-// gotraceback returns the current traceback settings.
-//
-// If level is 0, suppress all tracebacks.
-// If level is 1, show tracebacks, but exclude runtime frames.
-// If level is 2, show tracebacks including runtime frames.
-// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
-// If crash is set, crash (core dump, etc) after tracebacking.
+// runtime_gotraceback is the C interface to runtime.gotraceback.
 int32
 runtime_gotraceback(bool *crash)
 {
-	uint32 x;
+	struct gotraceback_ret r;
 
+	r = gotraceback();
 	if(crash != nil)
-		*crash = false;
-	if(runtime_m()->traceback != 0)
-		return runtime_m()->traceback;
-	x = runtime_atomicload(&traceback_cache);
-	if(crash != nil)
-		*crash = x&tracebackCrash;
-	return x>>tracebackShift;
-}
-
+		*crash = r.crash;
+	return r.level;
+}
 
-static int32 argc;
-static byte** argv;
-
-static Slice args;
-Slice envs;
-
-void (*runtime_sysargs)(int32, uint8**);
-
-void
-runtime_args(int32 c, byte **v)
-{
-	argc = c;
-	argv = v;
-	if(runtime_sysargs != nil)
-		runtime_sysargs(c, v);
-}
-
-byte*
-runtime_progname()
-{
-	return argc == 0 ? nil : argv[0];
-}
-
-void
-runtime_goargs(void)
-{
-	String *s;
-	int32 i;
-
-	// for windows implementation see "os" package
-	if(Windows)
-		return;
-
-	s = runtime_malloc(argc*sizeof s[0]);
-	for(i=0; i<argc; i++)
[...]
 	... > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
 }
 
-static Lock ticksLock;
-static int64 ticks;
-
-int64
-runtime_tickspersecond(void)
-{
-	int64 res, t0, t1, c0, c1;
-
-	res = (int64)runtime_atomicload64((uint64*)&ticks);
-	if(res != 0)
-		return ticks;
-	runtime_lock(&ticksLock);
-	res = ticks;
-	if(res == 0) {
-		t0 = runtime_nanotime();
-		c0 = runtime_cputicks();
-		runtime_usleep(100*1000);
-		t1 = runtime_nanotime();
-		c1 = runtime_cputicks();
-		if(t1 == t0)
-			t1++;
-		res = (c1-c0)*1000*1000*1000/(t1-t0);
-		if(res == 0)
-			res++;
-		runtime_atomicstore64((uint64*)&ticks, res);
-	}
-	runtime_unlock(&ticksLock);
-	return res;
-}
-
 // Called to initialize a new m (including the bootstrap m).
 // Called on the parent thread (main thread in case of bootstrap), can allocate memory.
 void
@@ -321,142 +155,58 @@ runtime_signalstack(byte *p, int32 n)
 	*(int *)0xf1 = 0xf1;
 }
 
-void setTraceback(String level)
-	__asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");
-
-void setTraceback(String level) {
-	uint32 t;
-
-	if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {
-		t = 0;
-	} else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
-		t = 1 << tracebackShift;
-	} else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
-		t = (1<<tracebackShift) | tracebackAll;
[...]
-		if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
-			// Set the MemProfileRate directly since it
-			// is an int, not int32, and should only be
-			// set here if specified by GODEBUG
-			runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
-		else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
-			*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
-	}
-	pn = (const byte *)runtime_strstr((const char *)p, ",");
-	if(pn == nil || pn - p >= len)
-		break;
-	len -= (pn - p) - 1;
-	p = pn + 1;
-	}
-
-	setTraceback(runtime_getenv("GOTRACEBACK"));
-	traceback_env = traceback_cache;
+	runtime_memclr(s.__values, s.__count);
 }
 
-// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
-// the "environment" traceback level, so later calls to
-// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
-void SetTracebackEnv(String level)
-	__asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");
+int32 go_open(char *, int32, int32)
+	__asm__ (GOSYM_PREFIX "runtime.open");
 
-void SetTracebackEnv(String level) {
-	setTraceback(level);
-	traceback_env = traceback_cache;
+int32
+go_open(char *name, int32 mode, int32 perm)
+{
+	return runtime_open(name, mode, perm);
 }
 
-// Poor mans 64-bit division.
-// This is a very special function, do not use it if you are not sure what you are doing.
-// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
-// Handles overflow in a time-specific manner.
+int32 go_read(int32, void *, int32)
+	__asm__ (GOSYM_PREFIX "runtime.read");
+
 int32
-runtime_timediv(int64 v, int32 div, int32 *rem)
+go_read(int32 fd, void *p, int32 n)
 {
-	int32 res, bit;
-
-	if(v >= (int64)div*0x7fffffffLL) {
-		if(rem != nil)
-			*rem = 0;
-		return 0x7fffffff;
-	}
-	res = 0;
-	for(bit = 30; bit >= 0; bit--) {
-		if(v >= ((int64)div<<bit)) {
[...]
-	...locks--;
-}
-
-extern Slice envs;
-
-func envs() (s Slice) {
-	s = envs;
-}
-
-func setenvs(e Slice) {
-	envs = e;
-}
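
For reference, the GODEBUG field splitting that parsedebugvars in runtime1.go above now performs in Go can be tried in isolation. Below is a minimal sketch, assuming a hard-coded sample string in place of gogetenv("GODEBUG") and reusing the index, hasprefix, and atoi helpers this patch adds to stubs.go; the file name and sample value are illustrative only:

// godebug_sketch.go: standalone illustration of the comma/equals
// splitting loop in parsedebugvars. Not part of the patch.
package main

import "fmt"

// index, hasprefix, and atoi mirror the helpers added to stubs.go.
func index(s, t string) int {
	if len(t) == 0 {
		return 0
	}
	for i := 0; i < len(s); i++ {
		if s[i] == t[0] && hasprefix(s[i:], t) {
			return i
		}
	}
	return -1
}

func hasprefix(s, t string) bool {
	return len(s) >= len(t) && s[:len(t)] == t
}

func atoi(s string) int {
	n := 0
	for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
		n = n*10 + int(s[0]) - '0'
		s = s[1:]
	}
	return n
}

func main() {
	// Same field-splitting loop as parsedebugvars, applied to a
	// sample GODEBUG value.
	for p := "gctrace=1,invalidptr=0,schedtrace=1000"; p != ""; {
		field := ""
		if i := index(p, ","); i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i := index(field, "=")
		if i < 0 {
			continue
		}
		fmt.Println(field[:i], "=", atoi(field[i+1:]))
	}
}
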