-7a62a49e62c090118fa003d9265c5f5e2090c4f9
+4a31d064fd6996f64b620104e849292af8f25e12
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
-20a838ab94178c55bc4dc23ddc332fce8545a493
+edea4a79e8d7dea2456b688f492c8af33d381dc2
The first line of this file holds the git revision number of the
last merge done from the master library sources.
// Translate to zero-length array instead.
count = 0
}
- sub := c.loadType(dt.Type, pos, key)
+ sub := c.Type(dt.Type, pos)
t.Align = sub.Align
t.Go = &ast.ArrayType{
Len: c.intExpr(count),
c.ptrs[key] = append(c.ptrs[key], t)
case *dwarf.QualType:
- t1 := c.loadType(dt.Type, pos, key)
+ t1 := c.Type(dt.Type, pos)
t.Size = t1.Size
t.Align = t1.Align
t.Go = t1.Go
}
name := c.Ident("_Ctype_" + dt.Name)
goIdent[name.Name] = name
- sub := c.loadType(dt.Type, pos, key)
+ akey := ""
+ if c.anonymousStructTypedef(dt) {
+ // Only load the type recursively for typedefs of anonymous
+ // structs; see issues 37479 and 37621.
+ akey = key
+ }
+ sub := c.loadType(dt.Type, pos, akey)
if c.badPointerTypedef(dt) {
// Treat this typedef as a uintptr.
s := *sub
return prefix
}
+// anonymousStructTypedef reports whether dt is a C typedef for an anonymous
+// struct.
+func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
+ st, ok := dt.Type.(*dwarf.StructType)
+ return ok && st.StructName == ""
+}
+
// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
// A typedef is bad if C code sometimes stores non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/modload"
+ "cmd/go/internal/str"
"cmd/go/internal/work"
)
cmd.Stderr = os.Stderr
// Run the command in the package directory.
cmd.Dir = g.dir
- cmd.Env = append(cfg.OrigEnv, g.env...)
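+ // str.StringList copies its arguments into a fresh slice, so building
+ // the environment here can never clobber the shared backing array of
+ // cfg.OrigEnv.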
+ cmd.Env = str.StringList(cfg.OrigEnv, g.env)
err := cmd.Run()
if err != nil {
g.errorf("running %q: %s", words[0], err)
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = a.Package.Dir
- cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv)
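+ // The three-index slice pins cap to len, forcing any append inside
+ // EnvForDir to allocate a new array instead of writing into cfg.OrigEnv.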
+ cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv[:len(cfg.OrigEnv):len(cfg.OrigEnv)])
cmd.Stdout = stdout
cmd.Stderr = stdout
if len(out) == 0 {
fmt.Fprintf(cmd.Stdout, "%s\n", err)
}
+ // NOTE(golang.org/issue/37555): test2json reports that a test passes
+ // unless "FAIL" is printed at the beginning of a line. The test may not
+ // actually print that if it panics, exits, or terminates abnormally,
+ // so we print it here. We can't always check whether it was printed
+ // because some tests need stdout to be a terminal (golang.org/issue/34791),
+ // not a pipe.
+ // TODO(golang.org/issue/29062): tests that exit with status 0 without
+ // printing a final result should fail.
fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t)
}
} else if cfg.BuildTrimpath && p.Module != nil {
fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
}
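+ // Mix the module's language version into the build hash so that editing
+ // the go directive in go.mod invalidates the cache (issue 37804; see the
+ // script test below).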
+ if p.Module != nil {
+ fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
+ }
fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
fmt.Fprintf(h, "import %q\n", p.ImportPath)
fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
grep 'go 1.9' go.mod
go build
+# Reverting the version should force a rebuild and error instead of using
+# the cached 1.9 build. (https://golang.org/issue/37804)
+go mod edit -go=1.8
+! go build
+stderr 'type aliases only supported as of'
+
-- go.mod --
module m
go 1.8
"internal/syscall/unix": {"L0", "syscall"},
"internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
+ "internal/syscall/execenv": {"L0", "syscall", "internal/syscall/windows", "unicode/utf16"},
"time": {
// "L0" without the "io" package:
"errors",
"internal/cfg": {"L0"},
"internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows", "internal/syscall/unix"},
"internal/testlog": {"L0"},
- "os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
+ "os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/syscall/execenv", "internal/testlog"},
"path/filepath": {"L2", "os", "syscall", "internal/syscall/windows"},
"io/ioutil": {"L2", "os", "path/filepath", "time"},
- "os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
+ "os/exec": {"L2", "os", "context", "path/filepath", "syscall", "internal/syscall/execenv"},
"os/signal": {"L2", "os", "syscall"},
// OS enables basic operating system functionality,
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package execenv
+
+import "syscall"
+
+// Default returns the default environment
+// variables based on the process attributes
+// provided.
+//
+// Defaults to syscall.Environ() on all platforms
+// other than Windows.
+func Default(sys *syscall.SysProcAttr) ([]string, error) {
+ return syscall.Environ(), nil
+}
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package execenv
+
+import (
+ "internal/syscall/windows"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+// Default returns the default environment
+// variables based on the process attributes
+// provided.
+//
+// If the process attributes contain a token, then
+// the environment variables will be sourced from
+// the defaults for that user token, otherwise they
+// will be sourced from syscall.Environ().
+func Default(sys *syscall.SysProcAttr) (env []string, err error) {
+ if sys == nil || sys.Token == 0 {
+ return syscall.Environ(), nil
+ }
+ var block *uint16
+ err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.DestroyEnvironmentBlock(block)
+ blockp := uintptr(unsafe.Pointer(block))
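+ // CreateEnvironmentBlock yields a sequence of NUL-terminated UTF-16
+ // strings, terminated by an empty string (a double NUL).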
+ for {
+
+ // find NUL terminator
+ end := unsafe.Pointer(blockp)
+ for *(*uint16)(end) != 0 {
+ end = unsafe.Pointer(uintptr(end) + 2)
+ }
+
+ n := (uintptr(end) - uintptr(unsafe.Pointer(blockp))) / 2
+ if n == 0 {
+ // environment block ends with empty string
+ break
+ }
+
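+ // Slicing through a huge array pointer is the usual idiom for viewing
+ // C memory as a []uint16 (unsafe.Slice did not exist yet).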
+ entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:n:n]
+ env = append(env, string(utf16.Decode(entry)))
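+ // Skip the entry and its trailing NUL (2 bytes per UTF-16 value).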
+ blockp += 2 * (uintptr(len(entry)) + 1)
+ }
+ return
+}
+++ /dev/null
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !windows
-
-package os
-
-import "syscall"
-
-func environForSysProcAttr(sys *syscall.SysProcAttr) ([]string, error) {
- return Environ(), nil
-}
+++ /dev/null
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import (
- "internal/syscall/windows"
- "syscall"
- "unicode/utf16"
- "unsafe"
-)
-
-func environForSysProcAttr(sys *syscall.SysProcAttr) (env []string, err error) {
- if sys == nil || sys.Token == 0 {
- return Environ(), nil
- }
- var block *uint16
- err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
- if err != nil {
- return nil, err
- }
- defer windows.DestroyEnvironmentBlock(block)
- blockp := uintptr(unsafe.Pointer(block))
- for {
-
- // find NUL terminator
- end := unsafe.Pointer(blockp)
- for *(*uint16)(end) != 0 {
- end = unsafe.Pointer(uintptr(end) + 2)
- }
-
- n := (uintptr(end) - uintptr(unsafe.Pointer(blockp))) / 2
- if n == 0 {
- // environment block ends with empty string
- break
- }
-
- entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:n:n]
- env = append(env, string(utf16.Decode(entry)))
- blockp += 2 * (uintptr(len(entry)) + 1)
- }
- return
-}
"bytes"
"context"
"errors"
+ "internal/syscall/execenv"
"io"
"os"
"path/filepath"
return a == b
}
-func (c *Cmd) envv() []string {
+func (c *Cmd) envv() ([]string, error) {
if c.Env != nil {
- return c.Env
+ return c.Env, nil
}
- return os.Environ()
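+ // On Windows the default environment can come from the user token in
+ // SysProcAttr; on other platforms execenv.Default is just syscall.Environ().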
+ return execenv.Default(c.SysProcAttr)
}
func (c *Cmd) argv() []string {
}
c.childFiles = append(c.childFiles, c.ExtraFiles...)
- var err error
+ envv, err := c.envv()
+ if err != nil {
+ return err
+ }
+
c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
Dir: c.Dir,
Files: c.childFiles,
- Env: addCriticalEnv(dedupEnv(c.envv())),
+ Env: addCriticalEnv(dedupEnv(envv)),
Sys: c.SysProcAttr,
})
if err != nil {
package os
import (
+ "internal/syscall/execenv"
"runtime"
"syscall"
)
Sys: attr.Sys,
}
if sysattr.Env == nil {
- sysattr.Env, err = environForSysProcAttr(sysattr.Sys)
+ sysattr.Env, err = execenv.Default(sysattr.Sys)
if err != nil {
return nil, err
}
// is slower but more general and is used for hashing interface types
// (called from interhash or nilinterhash, above) or for hashing in
// maps generated by reflect.MapOf (reflect_typehash, below).
+// Note: this function must match the compiler-generated
+// functions exactly. See issue 37716.
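+// For example, the compiler hashes a 4-byte regular-memory key with
+// memhash32, so typehash must pick memhash32 for such types as well.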
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
if t.tflag&tflagRegularMemory != 0 {
- return memhash(p, h, t.size)
+ // Handle ptr sizes specially, see issue 37086.
+ switch t.size {
+ case 4:
+ return memhash32(p, h)
+ case 8:
+ return memhash64(p, h)
+ default:
+ return memhash(p, h, t.size)
+ }
}
switch t.kind & kindMask {
case kindFloat32:
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
// Check that (*[n]elem)(p) is appropriately aligned.
+ // Note that we allow unaligned pointers if the types they point to contain
+ // no pointers themselves. See issue 37298.
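+ // For example, an unaligned *int64 conversion now passes (int64 holds
+ // no pointers), while an unaligned **int64 conversion still throws.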
// TODO(mdempsky): What about fieldAlign?
- if uintptr(p)&(uintptr(elem.align)-1) != 0 {
+ if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 {
throw("checkptr: unsafe pointer conversion")
}
cmd string
want string
}{
- {"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"},
+ {"CheckPtrAlignmentPtr", "fatal error: checkptr: unsafe pointer conversion\n"},
+ {"CheckPtrAlignmentNoPtr", ""},
{"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"},
{"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"},
{"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"},
if err != nil {
t.Log(err)
}
+ if tc.want == "" {
+ if len(got) > 0 {
+ t.Errorf("output:\n%s\nwant no output", got)
+ }
+ return
+ }
if !strings.HasPrefix(string(got), tc.want) {
t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want)
}
import (
"fmt"
+ "os"
"reflect"
"runtime"
"testing"
for {
}
}
+
+// Test case approximating issue #37664, where a recursive function (interpreter)
+// may do repeated recovers/re-panics until it reaches the frame where the panic
+// can actually be handled. The recurseFnPanicRec() function is testing that there
+// are no stale defer structs on the defer chain after the interpreter() sequence,
+// by writing a bunch of 0xffffffffs into several recursive stack frames, and then
+// doing a single panic-recover which would invoke any such stale defer structs.
+func TestDeferWithRepeatedRepanics(t *testing.T) {
+ interpreter(0, 6, 2)
+ recurseFnPanicRec(0, 10)
+ interpreter(0, 5, 1)
+ recurseFnPanicRec(0, 10)
+ interpreter(0, 6, 3)
+ recurseFnPanicRec(0, 10)
+}
+
+func interpreter(level int, maxlevel int, rec int) {
+ defer func() {
+ e := recover()
+ if e == nil {
+ return
+ }
+ if level != e.(int) {
+ //fmt.Fprintln(os.Stderr, "re-panicing, level", level)
+ panic(e)
+ }
+ //fmt.Fprintln(os.Stderr, "Recovered, level", level)
+ }()
+ if level+1 < maxlevel {
+ interpreter(level+1, maxlevel, rec)
+ } else {
+ //fmt.Fprintln(os.Stderr, "Initiating panic")
+ panic(rec)
+ }
+}
+
+func recurseFnPanicRec(level int, maxlevel int) {
+ defer func() {
+ recover()
+ }()
+ recurseFn(level, maxlevel)
+}
+
+func recurseFn(level int, maxlevel int) {
+ a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}
+ if level+1 < maxlevel {
+ // Need this print statement to keep a around. '_ = a[4]' doesn't do it.
+ fmt.Fprintln(os.Stderr, "recurseFn", level, a[4])
+ recurseFn(level+1, maxlevel)
+ } else {
+ panic("recurseFn panic")
+ }
+}
+
+// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is
+// mistakenly held, and that struct keeps a pointer to a stack-allocated defer
+// struct, and that stack-allocated struct gets overwritten or the stack gets
+// moved, so a memory error happens on GC.
+func TestIssue37688(t *testing.T) {
+ for j := 0; j < 10; j++ {
+ g2()
+ g3()
+ }
+}
+
+type foo struct {
+}
+
+func (f *foo) method1() {
+ fmt.Fprintln(os.Stderr, "method1")
+}
+
+func (f *foo) method2() {
+ fmt.Fprintln(os.Stderr, "method2")
+}
+
+func g2() {
+ var a foo
+ ap := &a
+ // The loop forces this defer to be heap-allocated and the remaining two
+ // to be stack-allocated.
+ for i := 0; i < 1; i++ {
+ defer ap.method1()
+ }
+ defer ap.method2()
+ defer ap.method1()
+ ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+ // Try to get the stack to be moved by growing it too large, so
+ // existing stack-allocated defer becomes invalid.
+ rec1(2000)
+}
+
+func g3() {
+ // Mix up the stack layout by adding in an extra function frame
+ g2()
+}
+
+func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) {
+ defer ap.method1()
+
+ // Make a defer that has a very large set of args, hence big size for the
+ // defer record for the open-coded frame (which means it won't use the
+ // defer pool).
+ defer func(ap *foo, a, b, c, d, e, f, g, h, i int) {
+ if v := recover(); v != nil {
+ fmt.Fprintln(os.Stderr, "did recover")
+ }
+ fmt.Fprintln(os.Stderr, "debug", ap, a, b, c, d, e, f, g, h)
+ }(ap, a, b, c, d, e, f, g, h, i)
+ panic("ff1 panic")
+}
+
+func rec1(max int) {
+ if max > 0 {
+ rec1(max - 1)
+ } else {
+ fmt.Fprintln(os.Stderr, "finished recursion", max)
+ }
+}
return atomic.Load(&root.nwait)
}
+// MapHashCheck computes the hash of the key k for the map m, twice.
+// Method 1 uses the built-in hasher for the map.
+// Method 2 uses the typehash function (the one used by reflect).
+// It returns the two hash values, which should always be equal.
+func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
+ // Unpack m.
+ mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
+ mh := (*hmap)(efaceOf(&m).data)
+
+ // Unpack k.
+ kt := efaceOf(&k)._type
+ var p unsafe.Pointer
+ if isDirectIface(kt) {
+ q := efaceOf(&k).data
+ p = unsafe.Pointer(&q)
+ } else {
+ p = efaceOf(&k).data
+ }
+
+ // Compute the hash functions.
+ x := mt.hasher(noescape(p), uintptr(mh.hash0))
+ y := typehash(kt, noescape(p), uintptr(mh.hash0))
+ return x, y
+}
+
var Pusestackmaps = &usestackmaps
"fmt"
"math"
"math/rand"
+ "reflect"
. "runtime"
"strings"
"testing"
}
}
+func TestCompilerVsRuntimeHash(t *testing.T) {
+ // Test to make sure the compiler's hash function and the runtime's hash function agree.
+ // See issue 37716.
+ for _, m := range []interface{}{
+ map[bool]int{},
+ map[int8]int{},
+ map[uint8]int{},
+ map[int16]int{},
+ map[uint16]int{},
+ map[int32]int{},
+ map[uint32]int{},
+ map[int64]int{},
+ map[uint64]int{},
+ map[int]int{},
+ map[uint]int{},
+ map[uintptr]int{},
+ map[*byte]int{},
+ map[chan int]int{},
+ map[unsafe.Pointer]int{},
+ map[float32]int{},
+ map[float64]int{},
+ map[complex64]int{},
+ map[complex128]int{},
+ map[string]int{},
+ //map[interface{}]int{},
+ //map[interface{F()}]int{},
+ map[[8]uint64]int{},
+ map[[8]string]int{},
+ map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128
+ map[struct{ a, b, _, d int32 }]int{},
+ map[struct {
+ a, b int32
+ c float32
+ d, e [8]byte
+ }]int{},
+ map[struct {
+ a int16
+ b int64
+ }]int{},
+ } {
+ k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key
+ x, y := MapHashCheck(m, k)
+ if x != y {
+ t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m)
+ }
+ }
+}
+
// Smhasher is a torture test for hash functions.
// https://code.google.com/p/smhasher/
// This code is a port of some of the Smhasher tests to Go.
goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
}
- // If we let triggerRatio go too low, then if the application
- // is allocating very rapidly we might end up in a situation
- // where we're allocating black during a nearly always-on GC.
- // The result of this is a growing heap and ultimately an
- // increase in RSS. By capping us at a point >0, we're essentially
- // saying that we're OK using more CPU during the GC to prevent
- // this growth in RSS.
- //
- // The current constant was chosen empirically: given a sufficiently
- // fast/scalable allocator with 48 Ps that could drive the trigger ratio
- // to <0.05, this constant causes applications to retain the same peak
- // RSS compared to not having this allocator.
- const minTriggerRatio = 0.6
-
// Set the trigger ratio, capped to reasonable bounds.
- if triggerRatio < minTriggerRatio {
- // This can happen if the mutator is allocating very
- // quickly or the GC is scanning very slowly.
- triggerRatio = minTriggerRatio
- } else if gcpercent >= 0 {
+ if gcpercent >= 0 {
+ scalingFactor := float64(gcpercent) / 100
// Ensure there's always a little margin so that the
// mutator assist ratio isn't infinity.
- maxTriggerRatio := 0.95 * float64(gcpercent) / 100
+ maxTriggerRatio := 0.95 * scalingFactor
if triggerRatio > maxTriggerRatio {
triggerRatio = maxTriggerRatio
}
+
+ // If we let triggerRatio go too low, then if the application
+ // is allocating very rapidly we might end up in a situation
+ // where we're allocating black during a nearly always-on GC.
+ // The result of this is a growing heap and ultimately an
+ // increase in RSS. By capping us at a point >0, we're essentially
+ // saying that we're OK using more CPU during the GC to prevent
+ // this growth in RSS.
+ //
+ // The current constant was chosen empirically: given a sufficiently
+ // fast/scalable allocator with 48 Ps that could drive the trigger ratio
+ // to <0.05, this constant causes applications to retain the same peak
+ // RSS compared to not having this allocator.
+ minTriggerRatio := 0.6 * scalingFactor
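+ // e.g. GOGC=100 keeps the old floor of 0.6; GOGC=50 lowers it to 0.3.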
+ if triggerRatio < minTriggerRatio {
+ triggerRatio = minTriggerRatio
+ }
+ } else if triggerRatio < 0 {
+ // gcpercent < 0, so just make sure we're not getting a negative
+ // triggerRatio. This case isn't expected to happen in practice,
+ // and doesn't really matter because if gcpercent < 0 then we won't
+ // ever consume triggerRatio further on in this function, but let's
+ // just be defensive here; the triggerRatio being negative is almost
+ // certainly undesirable.
+ triggerRatio = 0
}
memstats.triggerRatio = triggerRatio
// TODO: MXCSR register?
+ p("PUSHQ BP")
+ p("MOVQ SP, BP")
+ p("// Save flags before clobbering them")
+ p("PUSHFQ")
+ p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
+ p("ADJSP $%d", l.stack)
+ p("// But vet doesn't know ADJSP, so suppress vet stack checking")
+ p("NOP SP")
+
// Apparently, the signal handling code path in darwin kernel leaves
// the upper bits of Y registers in a dirty state, which causes
// many SSE operations (128-bit and narrower) become much slower.
// Clear the upper bits to get to a clean state. See issue #37174.
// It is safe here as Go code don't use the upper bits of Y registers.
p("#ifdef GOOS_darwin")
+ p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0")
+ p("JE 2(PC)")
p("VZEROUPPER")
p("#endif")
- p("PUSHQ BP")
- p("MOVQ SP, BP")
- p("// Save flags before clobbering them")
- p("PUSHFQ")
- p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
- p("ADJSP $%d", l.stack)
- p("// But vet doesn't know ADJSP, so suppress vet stack checking")
- p("NOP SP")
l.save()
p("CALL ·asyncPreempt2(SB)")
l.restore()
sub := "SUB"
r28 := "R28"
regsize := 4
+ softfloat := "GOMIPS_softfloat"
if _64bit {
mov = "MOVV"
movf = "MOVD"
sub = "SUBV"
r28 = "RSB"
regsize = 8
+ softfloat = "GOMIPS64_softfloat"
}
// Add integer registers R1-R22, R24-R25, R28
mov+" LO, R1\n"+mov+" R1, %d(R29)",
mov+" %d(R29), R1\n"+mov+" R1, LO",
regsize)
+
// Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant)
- l.addSpecial(
+ var lfp = layout{sp: "R29", stack: l.stack}
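+ // Track the floating point state in its own layout so that its
+ // save/restore code can be compiled out under softfloat below.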
+ lfp.addSpecial(
mov+" FCR31, R1\n"+mov+" R1, %d(R29)",
mov+" %d(R29), R1\n"+mov+" R1, FCR31",
regsize)
// Add floating point registers F0-F31.
for i := 0; i <= 31; i++ {
reg := fmt.Sprintf("F%d", i)
- l.add(movf, reg, regsize)
+ lfp.add(movf, reg, regsize)
}
// allocate frame, save PC of interrupted instruction (in LR)
- p(mov+" R31, -%d(R29)", l.stack)
- p(sub+" $%d, R29", l.stack)
+ p(mov+" R31, -%d(R29)", lfp.stack)
+ p(sub+" $%d, R29", lfp.stack)
l.save()
+ p("#ifndef %s", softfloat)
+ lfp.save()
+ p("#endif")
p("CALL ·asyncPreempt2(SB)")
+ p("#ifndef %s", softfloat)
+ lfp.restore()
+ p("#endif")
l.restore()
- p(mov+" %d(R29), R31", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
- p(mov + " (R29), R23") // load PC to REGTMP
- p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
+ p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
+ p(mov + " (R29), R23") // load PC to REGTMP
+ p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
p("JMP (R23)")
}
// pfn is a C function pointer.
// arg is a value to pass to pfn.
func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
+ gp := getg()
d := newdefer()
if d._panic != nil {
throw("deferproc: d.panic != nil after newdefer")
}
+ d.link = gp._defer
+ gp._defer = d
d.frame = frame
d.panicStack = getg()._panic
d.pfn = pfn
}
}
d.heap = true
- d.link = gp._defer
- gp._defer = d
return d
}
}
}
+// throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
+//
+// TODO(austin): Remove this after Go 1.15 when we remove the
+// mlockGsignal workaround.
+var throwReportQuirk func()
+
var didothers bool
var deadlock mutex
printDebugLog()
+ if throwReportQuirk != nil {
+ throwReportQuirk()
+ }
+
return docrash
}
if len(m.freeStk) < len(stk) {
m.freeStk = make([]uintptr, 1024)
}
- e.stk = m.freeStk[:len(stk)]
+ // Limit cap to prevent append from clobbering freeStk.
+ e.stk = m.freeStk[:len(stk):len(stk)]
m.freeStk = m.freeStk[len(stk):]
for j := range stk {
{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
},
}, {
- name: "recursive_inlined_funcs",
+ name: "bug38096",
+ input: []uint64{
+ 3, 0, 500, // hz = 500. Must match the period.
+ // count (data[2]) == 0 && len(stk) == 1 is an overflow
+ // entry. The "stk" entry is actually the count.
+ 4, 0, 0, 4242,
+ },
+ wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
+ wantSamples: []*profile.Sample{
+ {Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
+ },
+ }, {
+ // If a function is called recursively then it must not be
+ // inlined in the caller.
+ //
+ // N.B. We're generating an impossible profile here, with a
+ // recursive inlinedCallee call. This is simulating a non-Go
+ // function that looks like an inlined Go function other than
+ // its recursive property. See pcDeck.tryAdd.
+ name: "recursive_func_is_not_inlined",
input: []uint64{
3, 0, 500, // hz = 500. Must match the period.
5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
4, 0, 40, inlinedCalleePtr,
},
- wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
+ // inlinedCaller shows up here because
+ // runtime_expandFinalInlineFrame adds it to the stack frame.
+ wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
- {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
- {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
+ {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
+ {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
},
}, {
name: "truncated_stack_trace_later",
4, 0, 70, inlinedCalleePtr,
5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
},
- wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
- {"runtime/pprof.inlinedCallee"},
+ wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
+ wantSamples: []*profile.Sample{
+ {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
+ {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
+ },
+ }, {
+ // We can recover the inlined caller from a truncated stack.
+ name: "truncated_stack_trace_only",
+ input: []uint64{
+ 3, 0, 500, // hz = 500. Must match the period.
+ 4, 0, 70, inlinedCalleePtr,
+ },
+ wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
+ wantSamples: []*profile.Sample{
+ {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
+ },
+ }, {
+ // The same location is used for duplicated stacks.
+ name: "truncated_stack_trace_twice",
+ input: []uint64{
+ 3, 0, 500, // hz = 500. Must match the period.
+ 4, 0, 70, inlinedCalleePtr,
+ 5, 0, 80, inlinedCallerPtr, inlinedCalleePtr,
+ },
+ wantLocs: [][]string{
+ {"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"},
{"runtime/pprof.inlinedCaller"}},
wantSamples: []*profile.Sample{
{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
- {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
+ {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
},
}}
// overflow record
count = uint64(stk[0])
stk = []uint64{
- uint64(funcPC(lostProfileEvent)),
+ // gentraceback guarantees that PCs in the
+ // stack can be unconditionally decremented and
+ // still be valid, so we must do the same.
+ uint64(funcPC(lostProfileEvent) + 1),
}
}
b.m.lookup(stk, tag).count += int64(count)
// It may emit to b.pb, so there must be no message encoding in progress.
func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
b.deck.reset()
+
+ // The last frame might be truncated. Recover lost inline frames.
+ stk = runtime_expandFinalInlineFrame(stk)
+
for len(stk) > 0 {
addr := stk[0]
if l, ok := b.locs[addr]; ok {
// then, record the cached location.
locs = append(locs, l.id)
- // The stk may be truncated due to the stack depth limit
- // (e.g. See maxStack and maxCPUProfStack in runtime) or
- // bugs in runtime. Avoid the crash in either case.
- // TODO(hyangah): The correct fix may require using the exact
- // pcs as the key for b.locs cache management instead of just
- // relying on the very first pc. We are late in the go1.14 dev
- // cycle, so this is a workaround with little code change.
- if len(l.pcs) > len(stk) {
- stk = nil
- // TODO(hyangah): would be nice if we can enable
- // debug print out on demand and report the problematic
- // cached location entry and stack traces. Do we already
- // have such facility to utilize (e.g. GODEBUG)?
- } else {
- stk = stk[len(l.pcs):] // skip the matching pcs.
- }
+ // Skip the matching pcs.
+ //
+ // Even if stk was truncated due to the stack depth
+ // limit, expandFinalInlineFrame above has already
+ // fixed the truncation, ensuring it is long enough.
+ stk = stk[len(l.pcs):]
continue
}
stk = stk[1:]
continue
}
- // add failed because this addr is not inlined with
- // the existing PCs in the deck. Flush the deck and retry to
- // handle this pc.
+ // add failed because this addr is not inlined with the
+ // existing PCs in the deck. Flush the deck and retry handling
+ // this pc.
if id := b.emitLocation(); id > 0 {
locs = append(locs, id)
}
// the fake pcs and restore the inlined and entry functions. Inlined functions
// have the following properties:
// Frame's Func is nil (note: also true for non-Go functions), and
-// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
-// Frame's Name does not match its entry function frame's name.
+// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and
+// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive).
//
// As we read and process the pcs in a stack trace one by one (from leaf to the root),
// we use pcDeck to temporarily hold the observed pcs and their expanded frames
// to the deck. If it fails the caller needs to flush the deck and retry.
func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
if existing := len(d.pcs); existing > 0 {
- // 'frames' are all expanded from one 'pc' and represent all inlined functions
- // so we check only the last one.
+ // 'd.frames' are all expanded from one 'pc' and represent all
+ // inlined functions so we check only the last one.
newFrame := frames[0]
last := d.frames[existing-1]
if last.Func != nil { // the last frame can't be inlined. Flush.
}
}
}
+
+// Make sure the profiler can handle an empty stack trace.
+// See issue 37967.
+func TestEmptyStack(t *testing.T) {
+ b := []uint64{
+ 3, 0, 500, // hz = 500
+ 3, 0, 10, // 10 samples with an empty stack trace
+ }
+ _, err := translateCPUProfile(b)
+ if err != nil {
+ t.Fatalf("translating profile: %v", err)
+ }
+}
"unsafe"
)
+// runtime_expandFinalInlineFrame is defined in runtime/symtab.go.
+func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr
+
// runtime_setProfLabel is defined in runtime/proflabel.go.
func runtime_setProfLabel(labels unsafe.Pointer)
// requested, but fails. Accessed atomically.
preemptGen uint32
+ // Whether this is a pending preemption signal on this M.
+ // Accessed atomically.
+ signalPending uint32
+
dlogPerM
mOS
// Acknowledge the preemption.
atomic.Xadd(&gp.m.preemptGen, 1)
+ atomic.Store(&gp.m.signalPending, 0)
}
// gccgo-specific definition.
package runtime
+import (
+ _ "unsafe" // for go:linkname
+)
+
// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
return frame, more
}
+// pcInlineCallers is written in C.
+//go:noescape
+func pcInlineCallers(pc uintptr, locbuf *location, max int32) int32
+
+// runtime_expandFinalInlineFrame expands the final pc in stk to include all
+// "callers" if pc is inline.
+//
+//go:linkname runtime_expandFinalInlineFrame runtime..z2fpprof.runtime_expandFinalInlineFrame
+func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
+ if len(stk) == 0 {
+ return stk
+ }
+ pc := stk[len(stk)-1]
+ tracepc := pc - 1
+
+ var locbuf [_TracebackMaxFrames]location
+ n := pcInlineCallers(tracepc, &locbuf[0], int32(len(locbuf)))
+
+ // Returning the same PC several times causes Frame.Next to do
+ // the right thing.
+ for i := int32(1); i < n; i++ {
+ stk = append(stk, pc)
+ }
+
+ return stk
+}
+
// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
import "unsafe"
func init() {
- register("CheckPtrAlignment", CheckPtrAlignment)
+ register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr)
+ register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr)
register("CheckPtrArithmetic", CheckPtrArithmetic)
register("CheckPtrSize", CheckPtrSize)
register("CheckPtrSmall", CheckPtrSmall)
}
-func CheckPtrAlignment() {
+func CheckPtrAlignmentNoPtr() {
var x [2]int64
p := unsafe.Pointer(&x[0])
sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1))
}
+func CheckPtrAlignmentPtr() {
+ var x [2]int64
+ p := unsafe.Pointer(&x[0])
+ sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1))
+}
+
func CheckPtrArithmetic() {
var x int
i := uintptr(unsafe.Pointer(&x))
// timerNoStatus -> timerWaiting
// anything else -> panic: invalid value
// deltimer:
-// timerWaiting -> timerDeleted
+// timerWaiting -> timerModifying -> timerDeleted
// timerModifiedEarlier -> timerModifying -> timerDeleted
-// timerModifiedLater -> timerDeleted
+// timerModifiedLater -> timerModifying -> timerDeleted
// timerNoStatus -> do nothing
// timerDeleted -> do nothing
// timerRemoving -> do nothing
// timerRemoved -> do nothing
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
-// timerModifying -> panic: concurrent deltimer/modtimer calls
+// timerModifying -> wait until status changes
// modtimer:
// timerWaiting -> timerModifying -> timerModifiedXX
// timerModifiedXX -> timerModifying -> timerModifiedYY
-// timerNoStatus -> timerWaiting
-// timerRemoved -> timerWaiting
+// timerNoStatus -> timerModifying -> timerWaiting
+// timerRemoved -> timerModifying -> timerWaiting
+// timerDeleted -> timerModifying -> timerModifiedXX
// timerRunning -> wait until status changes
// timerMoving -> wait until status changes
// timerRemoving -> wait until status changes
-// timerDeleted -> panic: concurrent modtimer/deltimer calls
-// timerModifying -> panic: concurrent modtimer calls
-// resettimer:
-// timerNoStatus -> timerWaiting
-// timerRemoved -> timerWaiting
-// timerDeleted -> timerModifying -> timerModifiedXX
-// timerRemoving -> wait until status changes
-// timerRunning -> wait until status changes
-// timerWaiting -> panic: resettimer called on active timer
-// timerMoving -> panic: resettimer called on active timer
-// timerModifiedXX -> panic: resettimer called on active timer
-// timerModifying -> panic: resettimer called on active timer
+// timerModifying -> wait until status changes
// cleantimers (looks in P's timer heap):
// timerDeleted -> timerRemoving -> timerRemoved
// timerModifiedXX -> timerMoving -> timerWaiting
t.when = maxWhen
}
if t.status != timerNoStatus {
- badTimer()
+ throw("addtimer called with initialized timer")
}
t.status = timerWaiting
- addInitializedTimer(t)
-}
-
-// addInitializedTimer adds an initialized timer to the current P.
-func addInitializedTimer(t *timer) {
when := t.when
pp := getg().m.p.ptr()
lock(&pp.timersLock)
- ok := cleantimers(pp) && doaddtimer(pp, t)
+ cleantimers(pp)
+ doaddtimer(pp, t)
unlock(&pp.timersLock)
- if !ok {
- badTimer()
- }
wakeNetPoller(when)
}
// doaddtimer adds t to the current P's heap.
-// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
-func doaddtimer(pp *p, t *timer) bool {
+func doaddtimer(pp *p, t *timer) {
// Timers rely on the network poller, so make sure the poller
// has started.
if netpollInited == 0 {
t.pp.set(pp)
i := len(pp.timers)
pp.timers = append(pp.timers, t)
- ok := siftupTimer(pp.timers, i)
+ siftupTimer(pp.timers, i)
if t == pp.timers[0] {
atomic.Store64(&pp.timer0When, uint64(t.when))
}
atomic.Xadd(&pp.numTimers, 1)
- return ok
}
// deltimer deletes the timer t. It may be on some other P, so we can't
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting, timerModifiedLater:
- tpp := t.pp.ptr()
- if atomic.Cas(&t.status, s, timerDeleted) {
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp := acquirem()
+ if atomic.Cas(&t.status, s, timerModifying) {
+ // Must fetch t.pp before changing status,
+ // as cleantimers in another goroutine
+ // can clear t.pp of a timerDeleted timer.
+ tpp := t.pp.ptr()
+ if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
+ badTimer()
+ }
+ releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
+ } else {
+ releasem(mp)
}
case timerModifiedEarlier:
- tpp := t.pp.ptr()
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp := acquirem()
if atomic.Cas(&t.status, s, timerModifying) {
+ // Must fetch t.pp before setting status
+ // to timerDeleted.
+ tpp := t.pp.ptr()
atomic.Xadd(&tpp.adjustTimers, -1)
if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
badTimer()
}
+ releasem(mp)
atomic.Xadd(&tpp.deletedTimers, 1)
// Timer was not yet run.
return true
+ } else {
+ releasem(mp)
}
case timerDeleted, timerRemoving, timerRemoved:
// Timer was already run.
return false
case timerModifying:
// Simultaneous calls to deltimer and modtimer.
- badTimer()
+ // Wait for the other call to complete.
+ osyield()
default:
badTimer()
}
// We are locked on the P when this is called.
-// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
-func dodeltimer(pp *p, i int) bool {
+func dodeltimer(pp *p, i int) {
if t := pp.timers[i]; t.pp.ptr() != pp {
throw("dodeltimer: wrong P")
} else {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
- ok := true
if i != last {
// Moving to i may have moved the last timer to a new parent,
// so sift up to preserve the heap guarantee.
- if !siftupTimer(pp.timers, i) {
- ok = false
- }
- if !siftdownTimer(pp.timers, i) {
- ok = false
- }
+ siftupTimer(pp.timers, i)
+ siftdownTimer(pp.timers, i)
}
if i == 0 {
updateTimer0When(pp)
}
atomic.Xadd(&pp.numTimers, -1)
- return ok
}
// dodeltimer0 removes timer 0 from the current P's heap.
// We are locked on the P when this is called.
-// It reports whether it saw no problems due to races.
// The caller must have locked the timers for pp.
-func dodeltimer0(pp *p) bool {
+func dodeltimer0(pp *p) {
if t := pp.timers[0]; t.pp.ptr() != pp {
throw("dodeltimer0: wrong P")
} else {
}
pp.timers[last] = nil
pp.timers = pp.timers[:last]
- ok := true
if last > 0 {
- ok = siftdownTimer(pp.timers, 0)
+ siftdownTimer(pp.timers, 0)
}
updateTimer0When(pp)
atomic.Xadd(&pp.numTimers, -1)
- return ok
}
// modtimer modifies an existing timer.
status := uint32(timerNoStatus)
wasRemoved := false
+ var mp *m
loop:
for {
switch status = atomic.Load(&t.status); status {
case timerWaiting, timerModifiedEarlier, timerModifiedLater:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
if atomic.Cas(&t.status, status, timerModifying) {
break loop
}
+ releasem(mp)
case timerNoStatus, timerRemoved:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
+
// Timer was already run and t is no longer in a heap.
// Act like addtimer.
- if atomic.Cas(&t.status, status, timerWaiting) {
+ if atomic.Cas(&t.status, status, timerModifying) {
wasRemoved = true
break loop
}
+ releasem(mp)
+ case timerDeleted:
+ // Prevent preemption while the timer is in timerModifying.
+ // This could lead to a self-deadlock. See #38070.
+ mp = acquirem()
+ if atomic.Cas(&t.status, status, timerModifying) {
+ atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
+ break loop
+ }
+ releasem(mp)
case timerRunning, timerRemoving, timerMoving:
// The timer is being run or moved, by a different P.
// Wait for it to complete.
osyield()
- case timerDeleted:
- // Simultaneous calls to modtimer and deltimer.
- badTimer()
case timerModifying:
// Multiple simultaneous calls to modtimer.
- badTimer()
+ // Wait for the other call to complete.
+ osyield()
default:
badTimer()
}
if wasRemoved {
t.when = when
- addInitializedTimer(t)
+ pp := getg().m.p.ptr()
+ lock(&pp.timersLock)
+ doaddtimer(pp, t)
+ unlock(&pp.timersLock)
+ if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
+ badTimer()
+ }
+ releasem(mp)
+ wakeNetPoller(when)
} else {
// The timer is in some other P's heap, so we can't change
// the when field. If we did, the other P's heap would
// Update the adjustTimers field. Subtract one if we
// are removing a timerModifiedEarlier, add one if we
// are adding a timerModifiedEarlier.
- tpp := t.pp.ptr()
adjust := int32(0)
if status == timerModifiedEarlier {
adjust--
adjust++
}
if adjust != 0 {
- atomic.Xadd(&tpp.adjustTimers, adjust)
+ atomic.Xadd(&t.pp.ptr().adjustTimers, adjust)
}
// Set the new status of the timer.
if !atomic.Cas(&t.status, timerModifying, newStatus) {
badTimer()
}
+ releasem(mp)
// If the new status is earlier, wake up the poller.
if newStatus == timerModifiedEarlier {
}
}
-// resettimer resets an existing inactive timer to turn it into an active timer,
-// with a new time for when the timer should fire.
+// resettimer resets the time when a timer should fire.
+// If used for an inactive timer, the timer will become active.
// This should be called instead of addtimer if the timer value has been,
// or may have been, used previously.
func resettimer(t *timer, when int64) {
- if when < 0 {
- when = maxWhen
- }
-
- for {
- switch s := atomic.Load(&t.status); s {
- case timerNoStatus, timerRemoved:
- if atomic.Cas(&t.status, s, timerWaiting) {
- t.when = when
- addInitializedTimer(t)
- return
- }
- case timerDeleted:
- tpp := t.pp.ptr()
- if atomic.Cas(&t.status, s, timerModifying) {
- t.nextwhen = when
- newStatus := uint32(timerModifiedLater)
- if when < t.when {
- newStatus = timerModifiedEarlier
- atomic.Xadd(&t.pp.ptr().adjustTimers, 1)
- }
- if !atomic.Cas(&t.status, timerModifying, newStatus) {
- badTimer()
- }
- atomic.Xadd(&tpp.deletedTimers, -1)
- if newStatus == timerModifiedEarlier {
- wakeNetPoller(when)
- }
- return
- }
- case timerRemoving:
- // Wait for the removal to complete.
- osyield()
- case timerRunning:
- // Even though the timer should not be active,
- // we can see timerRunning if the timer function
- // permits some other goroutine to call resettimer.
- // Wait until the run is complete.
- osyield()
- case timerWaiting, timerModifying, timerModifiedEarlier, timerModifiedLater, timerMoving:
- // Called resettimer on active timer.
- badTimer()
- default:
- badTimer()
- }
- }
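+ // modtimer now handles every state, including timerDeleted and an
+ // already-active timer, so resettimer needs no state machine of its own.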
+ modtimer(t, when, t.period, t.f, t.arg, t.seq)
}
// cleantimers cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
-// slows down addtimer. Reports whether no timer problems were found.
+// slows down addtimer.
// The caller must have locked the timers for pp.
-func cleantimers(pp *p) bool {
+func cleantimers(pp *p) {
for {
if len(pp.timers) == 0 {
- return true
+ return
}
t := pp.timers[0]
if t.pp.ptr() != pp {
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
- if !dodeltimer0(pp) {
- return false
- }
+ dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
- return false
+ badTimer()
}
atomic.Xadd(&pp.deletedTimers, -1)
case timerModifiedEarlier, timerModifiedLater:
// Now we can change the when field.
t.when = t.nextwhen
// Move t to the right position.
- if !dodeltimer0(pp) {
- return false
- }
- if !doaddtimer(pp, t) {
- return false
- }
+ dodeltimer0(pp)
+ doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
- return false
+ badTimer()
}
default:
// Head of timers does not need adjustment.
- return true
+ return
}
}
}
switch s := atomic.Load(&t.status); s {
case timerWaiting:
t.pp = 0
- if !doaddtimer(pp, t) {
- badTimer()
- }
+ doaddtimer(pp, t)
break loop
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
}
t.when = t.nextwhen
t.pp = 0
- if !doaddtimer(pp, t) {
- badTimer()
- }
+ doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
switch s := atomic.Load(&t.status); s {
case timerDeleted:
if atomic.Cas(&t.status, s, timerRemoving) {
- if !dodeltimer(pp, i) {
- badTimer()
- }
+ dodeltimer(pp, i)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
// We don't add it back yet because the
// heap manipulation could cause our
// loop to skip some other timer.
- if !dodeltimer(pp, i) {
- badTimer()
- }
+ dodeltimer(pp, i)
moved = append(moved, t)
if s == timerModifiedEarlier {
if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 {
// back to the timer heap.
func addAdjustedTimers(pp *p, moved []*timer) {
for _, t := range moved {
- if !doaddtimer(pp, t) {
- badTimer()
- }
+ doaddtimer(pp, t)
if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
badTimer()
}
if !atomic.Cas(&t.status, s, timerRemoving) {
continue
}
- if !dodeltimer0(pp) {
- badTimer()
- }
+ dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
badTimer()
}
continue
}
t.when = t.nextwhen
- if !dodeltimer0(pp) {
- badTimer()
- }
- if !doaddtimer(pp, t) {
- badTimer()
- }
+ dodeltimer0(pp)
+ doaddtimer(pp, t)
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
// Leave in heap but adjust next time to fire.
delta := t.when - now
t.when += t.period * (1 + -delta/t.period)
- if !siftdownTimer(pp.timers, 0) {
- badTimer()
- }
+ siftdownTimer(pp.timers, 0)
if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
badTimer()
}
updateTimer0When(pp)
} else {
// Remove from heap.
- if !dodeltimer0(pp) {
- badTimer()
- }
+ dodeltimer0(pp)
if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
badTimer()
}
// "panic holding locks" message. Instead, we panic while not
// holding a lock.
-func siftupTimer(t []*timer, i int) bool {
+func siftupTimer(t []*timer, i int) {
if i >= len(t) {
- return false
+ badTimer()
}
when := t[i].when
tmp := t[i]
if tmp != t[i] {
t[i] = tmp
}
- return true
}
-func siftdownTimer(t []*timer, i int) bool {
+func siftdownTimer(t []*timer, i int) {
n := len(t)
if i >= n {
- return false
+ badTimer()
}
when := t[i].when
tmp := t[i]
if tmp != t[i] {
t[i] = tmp
}
- return true
}
// badTimer is called if the timer data structures have been corrupted,
// panicking due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
- panic(errorString("racy use of timers"))
+ throw("timer data corruption")
}
t.Logf("cleanup panicked with %v", r)
}
// Flush the output log up to the root before dying.
- t.mu.Lock()
- root := &t.common
- for ; root.parent != nil; root = root.parent {
+ for root := &t.common; root.parent != nil; root = root.parent {
+ root.mu.Lock()
root.duration += time.Since(root.start)
- fmt.Fprintf(root.parent.w, "--- FAIL: %s (%s)\n", root.name, fmtDuration(root.duration))
+ d := root.duration
+ root.mu.Unlock()
+ root.flushToParent("--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
}
- root.parent.mu.Lock()
- io.Copy(root.parent.w, bytes.NewReader(root.output))
}
panic(err)
}
"encoding/gob"
"encoding/json"
"fmt"
- "internal/race"
"math/big"
"math/rand"
"os"
}
// Issue 25686: hard crash on concurrent timer access.
+// Issue 37400: panic with "racy use of timers".
// This test deliberately invokes a race condition.
-// We are testing that we don't crash with "fatal error: panic holding locks".
+// We are testing that we don't crash with "fatal error: panic holding locks",
+// and that we also don't panic.
func TestConcurrentTimerReset(t *testing.T) {
- if race.Enabled {
- t.Skip("skipping test under race detector")
- }
-
- // We expect this code to panic rather than crash.
- // Don't worry if it doesn't panic.
- catch := func(i int) {
- if e := recover(); e != nil {
- t.Logf("panic in goroutine %d, as expected, with %q", i, e)
- } else {
- t.Logf("no panic in goroutine %d", i)
- }
+ const goroutines = 8
+ const tries = 1000
+ var wg sync.WaitGroup
+ wg.Add(goroutines)
+ timer := NewTimer(Hour)
+ for i := 0; i < goroutines; i++ {
+ go func(i int) {
+ defer wg.Done()
+ for j := 0; j < tries; j++ {
+ timer.Reset(Hour + Duration(i*j))
+ }
+ }(i)
}
+ wg.Wait()
+}
+// Issue 37400: panic with "racy use of timers".
+func TestConcurrentTimerResetStop(t *testing.T) {
const goroutines = 8
const tries = 1000
var wg sync.WaitGroup
- wg.Add(goroutines)
+ wg.Add(goroutines * 2)
timer := NewTimer(Hour)
for i := 0; i < goroutines; i++ {
go func(i int) {
defer wg.Done()
- defer catch(i)
for j := 0; j < tries; j++ {
timer.Reset(Hour + Duration(i*j))
}
}(i)
+ go func(i int) {
+ defer wg.Done()
+ timer.Stop()
+ }(i)
}
wg.Wait()
}
internal/race
internal/reflectlite
internal/singleflight
+internal/syscall/execenv
internal/syscall/unix
internal/testenv
internal/testlog
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ignore
+
+package main
+
+/*
+typedef struct A A;
+
+typedef struct {
+ struct A *next;
+ struct A **prev;
+} N;
+
+struct A
+{
+ N n;
+};
+
+typedef struct B
+{
+ A* a;
+} B;
+*/
+import "C"
+
+type N C.N
+
+type A C.A
+
+type B C.B
--- /dev/null
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ignore
+
+package main
+
+/*
+struct tt {
+ long long a;
+ long long b;
+};
+
+struct s {
+ struct tt ts[3];
+};
+*/
+import "C"
+
+type TT C.struct_tt
+
+type S C.struct_s
// Test that P, Q, and R all point to byte.
var v3 = Issue8478{P: (*byte)(nil), Q: (**byte)(nil), R: (***byte)(nil)}
+// Test that N, A and B are fully defined
+var v4 = N{}
+var v5 = A{}
+var v6 = B{}
+
+// Test that S is fully defined
+var v7 = S{}
+
func main() {
}
"anonunion",
"issue8478",
"fieldtypedef",
+ "issue37479",
+ "issue37621",
}
func TestGoDefs(t *testing.T) {
return data.index;
}
+/* runtime_pcInlineCallers returns the inline stack of calls for a PC.
+ This is like runtime_callers, but instead of doing a backtrace,
+ just finds the information for a single PC value. */
+
+int32 runtime_pcInlineCallers (uintptr, Location *, int32)
+ __asm__ (GOSYM_PREFIX "runtime.pcInlineCallers");
+
+int32
+runtime_pcInlineCallers (uintptr pc, Location *locbuf, int32 m)
+{
+ struct callers_data data;
+ struct backtrace_state *state;
+ int32 i;
+
+ data.locbuf = locbuf;
+ data.skip = 0;
+ data.index = 0;
+ data.max = m;
+ data.keep_thunks = false;
+ data.saw_sigtramp = 0;
+ runtime_xadd (&__go_runtime_in_callers, 1);
+ state = __go_get_backtrace_state ();
+ backtrace_pcinfo (state, pc, callback, error_callback, &data);
+ runtime_xadd (&__go_runtime_in_callers, -1);
+
+ /* Try to use backtrace_syminfo to fill in missing names. See
+ runtime_callers. */
+ for (i = 0; i < data.index; ++i)
+ {
+ if (locbuf[i].function.len == 0 && locbuf[i].pc != 0)
+ backtrace_syminfo (state, locbuf[i].pc, __go_syminfo_fnname_callback,
+ error_callback, &locbuf[i].function);
+ }
+
+ return data.index;
+}