libgo: update to almost the 1.14.2 release
author Ian Lance Taylor <iant@golang.org>
Mon, 6 Apr 2020 21:04:45 +0000 (14:04 -0700)
committer Ian Lance Taylor <iant@golang.org>
Mon, 6 Apr 2020 23:37:24 +0000 (16:37 -0700)
Update to edea4a79e8d7dea2456b688f492c8af33d381dc2, which is likely to
be approximately the 1.14.2 release.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/227377

42 files changed:
gcc/go/gofrontend/MERGE
libgo/MERGE
libgo/VERSION
libgo/go/cmd/cgo/gcc.go
libgo/go/cmd/go/internal/generate/generate.go
libgo/go/cmd/go/internal/test/test.go
libgo/go/cmd/go/internal/work/exec.go
libgo/go/cmd/go/testdata/script/mod_edit_go.txt
libgo/go/go/build/deps_test.go
libgo/go/internal/syscall/execenv/execenv_default.go [new file with mode: 0644]
libgo/go/internal/syscall/execenv/execenv_windows.go [new file with mode: 0644]
libgo/go/os/env_default.go [deleted file]
libgo/go/os/env_windows.go [deleted file]
libgo/go/os/exec/exec.go
libgo/go/os/exec_posix.go
libgo/go/runtime/alg.go
libgo/go/runtime/checkptr.go
libgo/go/runtime/checkptr_test.go
libgo/go/runtime/defer_test.go
libgo/go/runtime/export_test.go
libgo/go/runtime/hash_test.go
libgo/go/runtime/mgc.go
libgo/go/runtime/mkpreempt.go
libgo/go/runtime/panic.go
libgo/go/runtime/pprof/map.go
libgo/go/runtime/pprof/pprof_test.go
libgo/go/runtime/pprof/proto.go
libgo/go/runtime/pprof/proto_test.go
libgo/go/runtime/pprof/runtime.go
libgo/go/runtime/runtime2.go
libgo/go/runtime/signal_unix.go
libgo/go/runtime/symtab.go
libgo/go/runtime/testdata/testprog/checkptr.go
libgo/go/runtime/time.go
libgo/go/testing/testing.go
libgo/go/time/time_test.go
libgo/libgo-packages.txt
libgo/misc/cgo/testgodefs/testdata/issue37479.go [new file with mode: 0644]
libgo/misc/cgo/testgodefs/testdata/issue37621.go [new file with mode: 0644]
libgo/misc/cgo/testgodefs/testdata/main.go
libgo/misc/cgo/testgodefs/testgodefs_test.go
libgo/runtime/go-callers.c

diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 6b4c21fabf5ad172557a0deee2472a0baa983e0f..c5e8b29879eb8dc987324d9a14bdfcabbbc5029e 100644 (file)
@@ -1,4 +1,4 @@
-7a62a49e62c090118fa003d9265c5f5e2090c4f9
+4a31d064fd6996f64b620104e849292af8f25e12
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
diff --git a/libgo/MERGE b/libgo/MERGE
index 233492ada69ba877029691817b47cabb224ba24d..4398cd7dc28972870d1857490c97aa5f04b81dfa 100644 (file)
@@ -1,4 +1,4 @@
-20a838ab94178c55bc4dc23ddc332fce8545a493
+edea4a79e8d7dea2456b688f492c8af33d381dc2
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
diff --git a/libgo/VERSION b/libgo/VERSION
index f000507a7fe7649efb56eaf39972338864860451..864916ef4996134e87bb7be0070c1da86fd0dece 100644 (file)
@@ -1 +1 @@
-go1.14
+go1.14.2
diff --git a/libgo/go/cmd/cgo/gcc.go b/libgo/go/cmd/cgo/gcc.go
index 678b348c89b0727f5e4ea5a22e541f0966d675a5..310316bdda495196c02ba9fa174d336606fe4806 100644 (file)
@@ -2265,7 +2265,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
                        // Translate to zero-length array instead.
                        count = 0
                }
-               sub := c.loadType(dt.Type, pos, key)
+               sub := c.Type(dt.Type, pos)
                t.Align = sub.Align
                t.Go = &ast.ArrayType{
                        Len: c.intExpr(count),
@@ -2410,7 +2410,7 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
                c.ptrs[key] = append(c.ptrs[key], t)
 
        case *dwarf.QualType:
-               t1 := c.loadType(dt.Type, pos, key)
+               t1 := c.Type(dt.Type, pos)
                t.Size = t1.Size
                t.Align = t1.Align
                t.Go = t1.Go
@@ -2494,7 +2494,13 @@ func (c *typeConv) loadType(dtype dwarf.Type, pos token.Pos, parent string) *Typ
                }
                name := c.Ident("_Ctype_" + dt.Name)
                goIdent[name.Name] = name
-               sub := c.loadType(dt.Type, pos, key)
+               akey := ""
+               if c.anonymousStructTypedef(dt) {
+                       // only load type recursively for typedefs of anonymous
+                       // structs, see issues 37479 and 37621.
+                       akey = key
+               }
+               sub := c.loadType(dt.Type, pos, akey)
                if c.badPointerTypedef(dt) {
                        // Treat this typedef as a uintptr.
                        s := *sub
@@ -3015,6 +3021,13 @@ func fieldPrefix(fld []*ast.Field) string {
        return prefix
 }
 
+// anonymousStructTypedef reports whether dt is a C typedef for an anonymous
+// struct.
+func (c *typeConv) anonymousStructTypedef(dt *dwarf.TypedefType) bool {
+       st, ok := dt.Type.(*dwarf.StructType)
+       return ok && st.StructName == ""
+}
+
 // badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
 // A typedef is bad if C code sometimes stores non-pointers in this type.
 // TODO: Currently our best solution is to find these manually and list them as
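
A minimal sketch of the case the new anonymousStructTypedef check distinguishes (hypothetical type names): a typedef of an anonymous struct must be loaded recursively under the typedef's key so cgo records its fields, while a typedef of a named struct no longer re-enters loadType, avoiding the bad interactions reported in issues 37479 and 37621.

    package main

    /*
    // Typedef of an anonymous struct: anonymousStructTypedef reports true,
    // so cgo loads the underlying struct recursively.
    typedef struct { int x, y; } point_t;

    // Typedef of a named struct: anonymousStructTypedef reports false.
    struct named { int z; };
    typedef struct named named_t;
    */
    import "C"

    import "fmt"

    func main() {
        var p C.point_t
        p.x, p.y = 1, 2
        var n C.named_t
        n.z = 3
        fmt.Println(p.x, p.y, n.z)
    }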
diff --git a/libgo/go/cmd/go/internal/generate/generate.go b/libgo/go/cmd/go/internal/generate/generate.go
index 198ca1c1b9486703aebb1c4381b2d9af23e85f13..315db69de8a56b2fb522a39dfd408a61009192f4 100644 (file)
@@ -22,6 +22,7 @@ import (
        "cmd/go/internal/cfg"
        "cmd/go/internal/load"
        "cmd/go/internal/modload"
+       "cmd/go/internal/str"
        "cmd/go/internal/work"
 )
 
@@ -438,7 +439,7 @@ func (g *Generator) exec(words []string) {
        cmd.Stderr = os.Stderr
        // Run the command in the package directory.
        cmd.Dir = g.dir
-       cmd.Env = append(cfg.OrigEnv, g.env...)
+       cmd.Env = str.StringList(cfg.OrigEnv, g.env)
        err := cmd.Run()
        if err != nil {
                g.errorf("running %q: %s", words[0], err)
diff --git a/libgo/go/cmd/go/internal/test/test.go b/libgo/go/cmd/go/internal/test/test.go
index fb011d4c03f18f2ed50938c86372cf27789eb0f3..4ad142ccd874886d2acf0cc0272164f408dfa789 100644 (file)
@@ -1142,7 +1142,7 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {
 
        cmd := exec.Command(args[0], args[1:]...)
        cmd.Dir = a.Package.Dir
-       cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv)
+       cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv[:len(cfg.OrigEnv):len(cfg.OrigEnv)])
        cmd.Stdout = stdout
        cmd.Stderr = stdout
 
@@ -1224,6 +1224,14 @@ func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error {
                if len(out) == 0 {
                        fmt.Fprintf(cmd.Stdout, "%s\n", err)
                }
+               // NOTE(golang.org/issue/37555): test2json reports that a test passes
+               // unless "FAIL" is printed at the beginning of a line. The test may not
+               // actually print that if it panics, exits, or terminates abnormally,
+               // so we print it here. We can't always check whether it was printed
+               // because some tests need stdout to be a terminal (golang.org/issue/34791),
+               // not a pipe.
+               // TODO(golang.org/issue/29062): tests that exit with status 0 without
+               // printing a final result should fail.
                fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t)
        }
 
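The two environment fixes above address the same Go pitfall: appending to a slice that has spare capacity writes through to the shared backing array, so one child process could see another's appended variables. str.StringList copies into a freshly allocated slice, and the full slice expression cfg.OrigEnv[:len(cfg.OrigEnv):len(cfg.OrigEnv)] pins the capacity so any append must reallocate. A standalone sketch of the hazard and the fix:

    package main

    import "fmt"

    func main() {
        base := make([]string, 2, 4) // spare capacity, as cfg.OrigEnv may have
        base[0], base[1] = "A=1", "B=2"

        // Hazard: both appends fit in base's capacity, so the second
        // overwrites the element the first one wrote.
        env1 := append(base, "C=3")
        env2 := append(base, "D=4")
        fmt.Println(env1[2], env2[2]) // D=4 D=4

        // Fix: cap the slice (len == cap) so append must copy.
        env3 := append(base[:len(base):len(base)], "C=3")
        env4 := append(base[:len(base):len(base)], "D=4")
        fmt.Println(env3[2], env4[2]) // C=3 D=4
    }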
diff --git a/libgo/go/cmd/go/internal/work/exec.go b/libgo/go/cmd/go/internal/work/exec.go
index ba81543fdf7ec705e7e773fbca4f527414b2d8d0..764291b0584e8178d811ca69e3cdbfb2cf60236c 100644 (file)
@@ -213,6 +213,9 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID {
        } else if cfg.BuildTrimpath && p.Module != nil {
                fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
        }
+       if p.Module != nil {
+               fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
+       }
        fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
        fmt.Fprintf(h, "import %q\n", p.ImportPath)
        fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
diff --git a/libgo/go/cmd/go/testdata/script/mod_edit_go.txt b/libgo/go/cmd/go/testdata/script/mod_edit_go.txt
index 0f4e093573e3fd515dde3250c2cf3061a3e512e9..78b48f291c0131f1e574b8d5e218b1a7813fa2d6 100644 (file)
@@ -9,6 +9,13 @@ go mod edit -go=1.9
 grep 'go 1.9' go.mod
 go build
 
+# Reverting the version should force a rebuild and error instead of using
+# the cached 1.9 build. (https://golang.org/issue/37804)
+go mod edit -go=1.8
+! go build
+stderr 'type aliases only supported as of'
+
+
 -- go.mod --
 module m
 go 1.8
diff --git a/libgo/go/go/build/deps_test.go b/libgo/go/go/build/deps_test.go
index a64c2b32419cdd769d7eef7ff576c8a7f081aee5..8eed6260a2b4438308ec75f696c1112f424b027e 100644 (file)
@@ -153,6 +153,7 @@ var pkgDeps = map[string][]string{
        "internal/syscall/unix":             {"L0", "syscall"},
        "internal/syscall/windows":          {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
        "internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
+       "internal/syscall/execenv":          {"L0", "syscall", "internal/syscall/windows", "unicode/utf16"},
        "time": {
                // "L0" without the "io" package:
                "errors",
@@ -170,10 +171,10 @@ var pkgDeps = map[string][]string{
        "internal/cfg":     {"L0"},
        "internal/poll":    {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows", "internal/syscall/unix"},
        "internal/testlog": {"L0"},
-       "os":               {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
+       "os":               {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/syscall/execenv", "internal/testlog"},
        "path/filepath":    {"L2", "os", "syscall", "internal/syscall/windows"},
        "io/ioutil":        {"L2", "os", "path/filepath", "time"},
-       "os/exec":          {"L2", "os", "context", "path/filepath", "syscall"},
+       "os/exec":          {"L2", "os", "context", "path/filepath", "syscall", "internal/syscall/execenv"},
        "os/signal":        {"L2", "os", "syscall"},
 
        // OS enables basic operating system functionality,
diff --git a/libgo/go/internal/syscall/execenv/execenv_default.go b/libgo/go/internal/syscall/execenv/execenv_default.go
new file mode 100644 (file)
index 0000000..4bdbb55
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package execenv
+
+import "syscall"
+
+// Default will return the default environment
+// variables based on the process attributes
+// provided.
+//
+// Defaults to syscall.Environ() on all platforms
+// other than Windows.
+func Default(sys *syscall.SysProcAttr) ([]string, error) {
+       return syscall.Environ(), nil
+}
diff --git a/libgo/go/internal/syscall/execenv/execenv_windows.go b/libgo/go/internal/syscall/execenv/execenv_windows.go
new file mode 100644 (file)
index 0000000..b50029c
--- /dev/null
@@ -0,0 +1,54 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package execenv
+
+import (
+       "internal/syscall/windows"
+       "syscall"
+       "unicode/utf16"
+       "unsafe"
+)
+
+// Default will return the default environment
+// variables based on the process attributes
+// provided.
+//
+// If the process attributes contain a token, then
+// the environment variables will be sourced from
+// the defaults for that user token, otherwise they
+// will be sourced from syscall.Environ().
+func Default(sys *syscall.SysProcAttr) (env []string, err error) {
+       if sys == nil || sys.Token == 0 {
+               return syscall.Environ(), nil
+       }
+       var block *uint16
+       err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
+       if err != nil {
+               return nil, err
+       }
+       defer windows.DestroyEnvironmentBlock(block)
+       blockp := uintptr(unsafe.Pointer(block))
+       for {
+
+               // find NUL terminator
+               end := unsafe.Pointer(blockp)
+               for *(*uint16)(end) != 0 {
+                       end = unsafe.Pointer(uintptr(end) + 2)
+               }
+
+               n := (uintptr(end) - uintptr(unsafe.Pointer(blockp))) / 2
+               if n == 0 {
+                       // environment block ends with empty string
+                       break
+               }
+
+               entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:n:n]
+               env = append(env, string(utf16.Decode(entry)))
+               blockp += 2 * (uintptr(len(entry)) + 1)
+       }
+       return
+}
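The loop above walks a Windows environment block: NUL-terminated UTF-16 strings laid out back to back, with an empty string (a second NUL) marking the end. A pure-Go sketch of the same scan over a []uint16, without the unsafe pointer arithmetic:

    package main

    import (
        "fmt"
        "unicode/utf16"
    )

    func main() {
        // "A=1\x00B=2\x00\x00" as UTF-16 code units.
        block := utf16.Encode([]rune("A=1\x00B=2\x00\x00"))

        var env []string
        for len(block) > 0 {
            end := 0
            for block[end] != 0 { // find NUL terminator
                end++
            }
            if end == 0 {
                break // environment block ends with empty string
            }
            env = append(env, string(utf16.Decode(block[:end])))
            block = block[end+1:]
        }
        fmt.Println(env) // [A=1 B=2]
    }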
diff --git a/libgo/go/os/env_default.go b/libgo/go/os/env_default.go
deleted file mode 100644 (file)
index c11ccce..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !windows
-
-package os
-
-import "syscall"
-
-func environForSysProcAttr(sys *syscall.SysProcAttr) ([]string, error) {
-       return Environ(), nil
-}
diff --git a/libgo/go/os/env_windows.go b/libgo/go/os/env_windows.go
deleted file mode 100644 (file)
index b1b1ee4..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package os
-
-import (
-       "internal/syscall/windows"
-       "syscall"
-       "unicode/utf16"
-       "unsafe"
-)
-
-func environForSysProcAttr(sys *syscall.SysProcAttr) (env []string, err error) {
-       if sys == nil || sys.Token == 0 {
-               return Environ(), nil
-       }
-       var block *uint16
-       err = windows.CreateEnvironmentBlock(&block, sys.Token, false)
-       if err != nil {
-               return nil, err
-       }
-       defer windows.DestroyEnvironmentBlock(block)
-       blockp := uintptr(unsafe.Pointer(block))
-       for {
-
-               // find NUL terminator
-               end := unsafe.Pointer(blockp)
-               for *(*uint16)(end) != 0 {
-                       end = unsafe.Pointer(uintptr(end) + 2)
-               }
-
-               n := (uintptr(end) - uintptr(unsafe.Pointer(blockp))) / 2
-               if n == 0 {
-                       // environment block ends with empty string
-                       break
-               }
-
-               entry := (*[(1 << 30) - 1]uint16)(unsafe.Pointer(blockp))[:n:n]
-               env = append(env, string(utf16.Decode(entry)))
-               blockp += 2 * (uintptr(len(entry)) + 1)
-       }
-       return
-}
diff --git a/libgo/go/os/exec/exec.go b/libgo/go/os/exec/exec.go
index 3474ae0ca497e91369b32e05c7b458b81e1a3254..0c495755116f5254030d45284ea31515cadb8487 100644 (file)
@@ -24,6 +24,7 @@ import (
        "bytes"
        "context"
        "errors"
+       "internal/syscall/execenv"
        "io"
        "os"
        "path/filepath"
@@ -222,11 +223,11 @@ func interfaceEqual(a, b interface{}) bool {
        return a == b
 }
 
-func (c *Cmd) envv() []string {
+func (c *Cmd) envv() ([]string, error) {
        if c.Env != nil {
-               return c.Env
+               return c.Env, nil
        }
-       return os.Environ()
+       return execenv.Default(c.SysProcAttr)
 }
 
 func (c *Cmd) argv() []string {
@@ -413,11 +414,15 @@ func (c *Cmd) Start() error {
        }
        c.childFiles = append(c.childFiles, c.ExtraFiles...)
 
-       var err error
+       envv, err := c.envv()
+       if err != nil {
+               return err
+       }
+
        c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
                Dir:   c.Dir,
                Files: c.childFiles,
-               Env:   addCriticalEnv(dedupEnv(c.envv())),
+               Env:   addCriticalEnv(dedupEnv(envv)),
                Sys:   c.SysProcAttr,
        })
        if err != nil {
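
With Env left nil, Start now derives the child's environment from the process attributes via execenv.Default instead of unconditionally from os.Environ(). A Windows-only sketch of what that enables (here the token is just the current process's own, obtained with the real syscall.OpenCurrentProcessToken; in practice it would come from a logon for another user, and launching with a token may require extra privileges):

    // +build windows

    package main

    import (
        "log"
        "os/exec"
        "syscall"
    )

    func main() {
        tok, err := syscall.OpenCurrentProcessToken()
        if err != nil {
            log.Fatal(err)
        }
        defer tok.Close()

        cmd := exec.Command("cmd", "/c", "set")
        // cmd.Env is nil, so Start calls execenv.Default(c.SysProcAttr)
        // and the child gets the default environment for this token's user.
        cmd.SysProcAttr = &syscall.SysProcAttr{Token: tok}
        out, err := cmd.Output()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("%s", out)
    }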
diff --git a/libgo/go/os/exec_posix.go b/libgo/go/os/exec_posix.go
index a944e4e035081bcf7060ce2a802b50c5361322cf..dc9947c903eb731c34e5107c06279c9aaa45f455 100644 (file)
@@ -7,6 +7,7 @@
 package os
 
 import (
+       "internal/syscall/execenv"
        "runtime"
        "syscall"
 )
@@ -39,7 +40,7 @@ func startProcess(name string, argv []string, attr *ProcAttr) (p *Process, err e
                Sys: attr.Sys,
        }
        if sysattr.Env == nil {
-               sysattr.Env, err = environForSysProcAttr(sysattr.Sys)
+               sysattr.Env, err = execenv.Default(sysattr.Sys)
                if err != nil {
                        return nil, err
                }
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index 101402ca49cc84a861514ee951ef325ba5da3e74..95f02aa31c4425d4f705cb0f0e303b38d1412b68 100644 (file)
@@ -176,9 +176,19 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
 // is slower but more general and is used for hashing interface types
 // (called from interhash or nilinterhash, above) or for hashing in
 // maps generated by reflect.MapOf (reflect_typehash, below).
+// Note: this function must match the compiler generated
+// functions exactly. See issue 37716.
 func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
        if t.tflag&tflagRegularMemory != 0 {
-               return memhash(p, h, t.size)
+               // Handle ptr sizes specially, see issue 37086.
+               switch t.size {
+               case 4:
+                       return memhash32(p, h)
+               case 8:
+                       return memhash64(p, h)
+               default:
+                       return memhash(p, h, t.size)
+               }
        }
        switch t.kind & kindMask {
        case kindFloat32:
diff --git a/libgo/go/runtime/checkptr.go b/libgo/go/runtime/checkptr.go
index 974f0a060b76f382156aa43a7c1091665af208ac..d5f116c39278a8b00f99931ef57ddbd6f7b86219 100644 (file)
@@ -10,8 +10,10 @@ import "unsafe"
 
 func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
        // Check that (*[n]elem)(p) is appropriately aligned.
+       // Note that we allow unaligned pointers if the types they point to contain
+       // no pointers themselves. See issue 37298.
        // TODO(mdempsky): What about fieldAlign?
-       if uintptr(p)&(uintptr(elem.align)-1) != 0 {
+       if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 {
                throw("checkptr: unsafe pointer conversion")
        }
 
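In other words, under -d=checkptr (enabled by default by -race and -msan), a misaligned conversion now only traps when the target element type itself contains pointers; the testprog changes further down split the old CheckPtrAlignment test along exactly this line. A minimal sketch, run with go run -gcflags=all=-d=checkptr:

    package main

    import "unsafe"

    var sinkInt *int64
    var sinkPtrPtr **int64

    func main() {
        var x [2]int64
        p := unsafe.Pointer(&x[0])
        // Allowed now: int64 contains no pointers (elem.ptrdata == 0).
        sinkInt = (*int64)(unsafe.Pointer(uintptr(p) + 1))
        // Still fatal ("checkptr: unsafe pointer conversion"):
        // *int64 is itself a pointer, so alignment is enforced.
        sinkPtrPtr = (**int64)(unsafe.Pointer(uintptr(p) + 1))
    }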
diff --git a/libgo/go/runtime/checkptr_test.go b/libgo/go/runtime/checkptr_test.go
index ab3058f7336784606cfb30aebe739532d22f5c62..0ca7b20cfd43b6507e0f39e44174554690e4ba6e 100644 (file)
@@ -28,7 +28,8 @@ func TestCheckPtr(t *testing.T) {
                cmd  string
                want string
        }{
-               {"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"},
+               {"CheckPtrAlignmentPtr", "fatal error: checkptr: unsafe pointer conversion\n"},
+               {"CheckPtrAlignmentNoPtr", ""},
                {"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"},
                {"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"},
                {"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"},
@@ -42,6 +43,12 @@ func TestCheckPtr(t *testing.T) {
                        if err != nil {
                                t.Log(err)
                        }
+                       if tc.want == "" {
+                               if len(got) > 0 {
+                                       t.Errorf("output:\n%s\nwant no output", got)
+                               }
+                               return
+                       }
                        if !strings.HasPrefix(string(got), tc.want) {
                                t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want)
                        }
diff --git a/libgo/go/runtime/defer_test.go b/libgo/go/runtime/defer_test.go
index 3d8f81277f312dd4c62bf053433f2c191d7cd8b1..11436a1f08be1668d71adaad034c26f20dad85ae 100644 (file)
@@ -6,6 +6,7 @@ package runtime_test
 
 import (
        "fmt"
+       "os"
        "reflect"
        "runtime"
        "testing"
@@ -281,3 +282,122 @@ func TestDeferForFuncWithNoExit(t *testing.T) {
        for {
        }
 }
+
+// Test case approximating issue #37664, where a recursive function (interpreter)
+// may do repeated recovers/re-panics until it reaches the frame where the panic
+// can actually be handled. The recurseFnPanicRec() function is testing that there
+// are no stale defer structs on the defer chain after the interpreter() sequence,
+// by writing a bunch of 0xffffffffs into several recursive stack frames, and then
+// doing a single panic-recover which would invoke any such stale defer structs.
+func TestDeferWithRepeatedRepanics(t *testing.T) {
+       interpreter(0, 6, 2)
+       recurseFnPanicRec(0, 10)
+       interpreter(0, 5, 1)
+       recurseFnPanicRec(0, 10)
+       interpreter(0, 6, 3)
+       recurseFnPanicRec(0, 10)
+}
+
+func interpreter(level int, maxlevel int, rec int) {
+       defer func() {
+               e := recover()
+               if e == nil {
+                       return
+               }
+               if level != e.(int) {
+                       //fmt.Fprintln(os.Stderr, "re-panicing, level", level)
+                       panic(e)
+               }
+               //fmt.Fprintln(os.Stderr, "Recovered, level", level)
+       }()
+       if level+1 < maxlevel {
+               interpreter(level+1, maxlevel, rec)
+       } else {
+               //fmt.Fprintln(os.Stderr, "Initiating panic")
+               panic(rec)
+       }
+}
+
+func recurseFnPanicRec(level int, maxlevel int) {
+       defer func() {
+               recover()
+       }()
+       recurseFn(level, maxlevel)
+}
+
+func recurseFn(level int, maxlevel int) {
+       a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff}
+       if level+1 < maxlevel {
+               // Need this print statement to keep a around.  '_ = a[4]' doesn't do it.
+               fmt.Fprintln(os.Stderr, "recurseFn", level, a[4])
+               recurseFn(level+1, maxlevel)
+       } else {
+               panic("recurseFn panic")
+       }
+}
+
+// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is
+// mistakenly held, and that struct keeps a pointer to a stack-allocated defer
+// struct, and that stack-allocated struct gets overwritten or the stack gets
+// moved, so a memory error happens on GC.
+func TestIssue37688(t *testing.T) {
+       for j := 0; j < 10; j++ {
+               g2()
+               g3()
+       }
+}
+
+type foo struct {
+}
+
+func (f *foo) method1() {
+       fmt.Fprintln(os.Stderr, "method1")
+}
+
+func (f *foo) method2() {
+       fmt.Fprintln(os.Stderr, "method2")
+}
+
+func g2() {
+       var a foo
+       ap := &a
+       // The loop forces this defer to be heap-allocated and the remaining two
+       // to be stack-allocated.
+       for i := 0; i < 1; i++ {
+               defer ap.method1()
+       }
+       defer ap.method2()
+       defer ap.method1()
+       ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9)
+       // Try to get the stack to be moved by growing it too large, so
+       // existing stack-allocated defer becomes invalid.
+       rec1(2000)
+}
+
+func g3() {
+       // Mix up the stack layout by adding in an extra function frame
+       g2()
+}
+
+func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) {
+       defer ap.method1()
+
+       // Make a defer that has a very large set of args, hence big size for the
+       // defer record for the open-coded frame (which means it won't use the
+       // defer pool)
+       defer func(ap *foo, a, b, c, d, e, f, g, h, i int) {
+               if v := recover(); v != nil {
+                       fmt.Fprintln(os.Stderr, "did recover")
+               }
+               fmt.Fprintln(os.Stderr, "debug", ap, a, b, c, d, e, f, g, h)
+       }(ap, a, b, c, d, e, f, g, h, i)
+       panic("ff1 panic")
+}
+
+func rec1(max int) {
+       if max > 0 {
+               rec1(max - 1)
+       } else {
+               fmt.Fprintln(os.Stderr, "finished recursion", max)
+       }
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index b60c19bbcec8b49c64ff3f8d5884a4de466fe773..6595fafe3689de14c81819300e3409635e037195 100644 (file)
@@ -945,4 +945,29 @@ func SemNwait(addr *uint32) uint32 {
        return atomic.Load(&root.nwait)
 }
 
+// MapHashCheck computes the hash of the key k for the map m, twice.
+// Method 1 uses the built-in hasher for the map.
+// Method 2 uses the typehash function (the one used by reflect).
+// Returns the two hash values, which should always be equal.
+func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) {
+       // Unpack m.
+       mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type))
+       mh := (*hmap)(efaceOf(&m).data)
+
+       // Unpack k.
+       kt := efaceOf(&k)._type
+       var p unsafe.Pointer
+       if isDirectIface(kt) {
+               q := efaceOf(&k).data
+               p = unsafe.Pointer(&q)
+       } else {
+               p = efaceOf(&k).data
+       }
+
+       // Compute the hash functions.
+       x := mt.hasher(noescape(p), uintptr(mh.hash0))
+       y := typehash(kt, noescape(p), uintptr(mh.hash0))
+       return x, y
+}
+
 var Pusestackmaps = &usestackmaps
diff --git a/libgo/go/runtime/hash_test.go b/libgo/go/runtime/hash_test.go
index d57be4c8f7313a41780467e8fcd24d44679e0c00..522b7febf9b9a2243232c7bb5006d3c758fa6fe5 100644 (file)
@@ -8,6 +8,7 @@ import (
        "fmt"
        "math"
        "math/rand"
+       "reflect"
        . "runtime"
        "strings"
        "testing"
@@ -48,6 +49,54 @@ func TestMemHash64Equality(t *testing.T) {
        }
 }
 
+func TestCompilerVsRuntimeHash(t *testing.T) {
+       // Test to make sure the compiler's hash function and the runtime's hash function agree.
+       // See issue 37716.
+       for _, m := range []interface{}{
+               map[bool]int{},
+               map[int8]int{},
+               map[uint8]int{},
+               map[int16]int{},
+               map[uint16]int{},
+               map[int32]int{},
+               map[uint32]int{},
+               map[int64]int{},
+               map[uint64]int{},
+               map[int]int{},
+               map[uint]int{},
+               map[uintptr]int{},
+               map[*byte]int{},
+               map[chan int]int{},
+               map[unsafe.Pointer]int{},
+               map[float32]int{},
+               map[float64]int{},
+               map[complex64]int{},
+               map[complex128]int{},
+               map[string]int{},
+               //map[interface{}]int{},
+               //map[interface{F()}]int{},
+               map[[8]uint64]int{},
+               map[[8]string]int{},
+               map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128
+               map[struct{ a, b, _, d int32 }]int{},
+               map[struct {
+                       a, b int32
+                       c    float32
+                       d, e [8]byte
+               }]int{},
+               map[struct {
+                       a int16
+                       b int64
+               }]int{},
+       } {
+               k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key
+               x, y := MapHashCheck(m, k)
+               if x != y {
+                       t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m)
+               }
+       }
+}
+
 // Smhasher is a torture test for hash functions.
 // https://code.google.com/p/smhasher/
 // This code is a port of some of the Smhasher tests to Go.
diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go
index 8ded306ae6b9f1960f34e9a9d411557aaf46dbe8..24043cfb2e20aa64a4137409d8664650fcd83d26 100644 (file)
@@ -771,32 +771,40 @@ func gcSetTriggerRatio(triggerRatio float64) {
                goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
        }
 
-       // If we let triggerRatio go too low, then if the application
-       // is allocating very rapidly we might end up in a situation
-       // where we're allocating black during a nearly always-on GC.
-       // The result of this is a growing heap and ultimately an
-       // increase in RSS. By capping us at a point >0, we're essentially
-       // saying that we're OK using more CPU during the GC to prevent
-       // this growth in RSS.
-       //
-       // The current constant was chosen empirically: given a sufficiently
-       // fast/scalable allocator with 48 Ps that could drive the trigger ratio
-       // to <0.05, this constant causes applications to retain the same peak
-       // RSS compared to not having this allocator.
-       const minTriggerRatio = 0.6
-
        // Set the trigger ratio, capped to reasonable bounds.
-       if triggerRatio < minTriggerRatio {
-               // This can happen if the mutator is allocating very
-               // quickly or the GC is scanning very slowly.
-               triggerRatio = minTriggerRatio
-       } else if gcpercent >= 0 {
+       if gcpercent >= 0 {
+               scalingFactor := float64(gcpercent) / 100
                // Ensure there's always a little margin so that the
                // mutator assist ratio isn't infinity.
-               maxTriggerRatio := 0.95 * float64(gcpercent) / 100
+               maxTriggerRatio := 0.95 * scalingFactor
                if triggerRatio > maxTriggerRatio {
                        triggerRatio = maxTriggerRatio
                }
+
+               // If we let triggerRatio go too low, then if the application
+               // is allocating very rapidly we might end up in a situation
+               // where we're allocating black during a nearly always-on GC.
+               // The result of this is a growing heap and ultimately an
+               // increase in RSS. By capping us at a point >0, we're essentially
+               // saying that we're OK using more CPU during the GC to prevent
+               // this growth in RSS.
+               //
+               // The current constant was chosen empirically: given a sufficiently
+               // fast/scalable allocator with 48 Ps that could drive the trigger ratio
+               // to <0.05, this constant causes applications to retain the same peak
+               // RSS compared to not having this allocator.
+               minTriggerRatio := 0.6 * scalingFactor
+               if triggerRatio < minTriggerRatio {
+                       triggerRatio = minTriggerRatio
+               }
+       } else if triggerRatio < 0 {
+               // gcpercent < 0, so just make sure we're not getting a negative
+               // triggerRatio. This case isn't expected to happen in practice,
+               // and doesn't really matter because if gcpercent < 0 then we won't
+               // ever consume triggerRatio further on in this function, but let's
+               // just be defensive here; the triggerRatio being negative is almost
+               // certainly undesirable.
+               triggerRatio = 0
        }
        memstats.triggerRatio = triggerRatio
 
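The effect is that both bounds now scale with GOGC rather than the floor being a hard 0.6: with GOGC=100 the trigger ratio is clamped to [0.6, 0.95], while with GOGC=20 it is clamped to roughly [0.12, 0.19]. A sketch of the clamping logic in isolation:

    package main

    import "fmt"

    // clampTriggerRatio mirrors the bounds gcSetTriggerRatio applies above.
    func clampTriggerRatio(triggerRatio float64, gcpercent int) float64 {
        if gcpercent >= 0 {
            scalingFactor := float64(gcpercent) / 100
            if max := 0.95 * scalingFactor; triggerRatio > max {
                triggerRatio = max
            }
            if min := 0.6 * scalingFactor; triggerRatio < min {
                triggerRatio = min
            }
        } else if triggerRatio < 0 {
            triggerRatio = 0
        }
        return triggerRatio
    }

    func main() {
        fmt.Println(clampTriggerRatio(0.05, 100)) // 0.6
        fmt.Println(clampTriggerRatio(0.05, 20))  // ~0.12
        fmt.Println(clampTriggerRatio(0.50, 20))  // ~0.19
    }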
diff --git a/libgo/go/runtime/mkpreempt.go b/libgo/go/runtime/mkpreempt.go
index 31b6f5cbac3ad1b9319dcb4fe1d3840dfcf12445..35ed42871f11dfef02ce02d951cbe85ce7740ccb 100644 (file)
@@ -244,23 +244,26 @@ func genAMD64() {
 
        // TODO: MXCSR register?
 
+       p("PUSHQ BP")
+       p("MOVQ SP, BP")
+       p("// Save flags before clobbering them")
+       p("PUSHFQ")
+       p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
+       p("ADJSP $%d", l.stack)
+       p("// But vet doesn't know ADJSP, so suppress vet stack checking")
+       p("NOP SP")
+
        // Apparently, the signal handling code path in darwin kernel leaves
        // the upper bits of Y registers in a dirty state, which causes
        // many SSE operations (128-bit and narrower) become much slower.
        // Clear the upper bits to get to a clean state. See issue #37174.
        // It is safe here as Go code don't use the upper bits of Y registers.
        p("#ifdef GOOS_darwin")
+       p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0")
+       p("JE 2(PC)")
        p("VZEROUPPER")
        p("#endif")
 
-       p("PUSHQ BP")
-       p("MOVQ SP, BP")
-       p("// Save flags before clobbering them")
-       p("PUSHFQ")
-       p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP")
-       p("ADJSP $%d", l.stack)
-       p("// But vet doesn't know ADJSP, so suppress vet stack checking")
-       p("NOP SP")
        l.save()
        p("CALL Â·asyncPreempt2(SB)")
        l.restore()
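
The reorder matters because the new CMPB clobbers EFLAGS, which PUSHFQ must capture first, and the JE guard skips VZEROUPPER (an AVX instruction) on CPUs without AVX. Assembled from the p(...) calls above, the emitted amd64 prologue now reads roughly:

    PUSHQ BP
    MOVQ SP, BP
    // Save flags before clobbering them
    PUSHFQ
    ADJSP $<l.stack>
    NOP SP
    #ifdef GOOS_darwin
    CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0
    JE 2(PC)
    VZEROUPPER
    #endif
    <register saves>
    CALL ·asyncPreempt2(SB)
    <register restores>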
@@ -379,6 +382,7 @@ func genMIPS(_64bit bool) {
        sub := "SUB"
        r28 := "R28"
        regsize := 4
+       softfloat := "GOMIPS_softfloat"
        if _64bit {
                mov = "MOVV"
                movf = "MOVD"
@@ -386,6 +390,7 @@ func genMIPS(_64bit bool) {
                sub = "SUBV"
                r28 = "RSB"
                regsize = 8
+               softfloat = "GOMIPS64_softfloat"
        }
 
        // Add integer registers R1-R22, R24-R25, R28
@@ -408,28 +413,36 @@ func genMIPS(_64bit bool) {
                mov+" LO, R1\n"+mov+" R1, %d(R29)",
                mov+" %d(R29), R1\n"+mov+" R1, LO",
                regsize)
+
        // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant)
-       l.addSpecial(
+       var lfp = layout{sp: "R29", stack: l.stack}
+       lfp.addSpecial(
                mov+" FCR31, R1\n"+mov+" R1, %d(R29)",
                mov+" %d(R29), R1\n"+mov+" R1, FCR31",
                regsize)
        // Add floating point registers F0-F31.
        for i := 0; i <= 31; i++ {
                reg := fmt.Sprintf("F%d", i)
-               l.add(movf, reg, regsize)
+               lfp.add(movf, reg, regsize)
        }
 
        // allocate frame, save PC of interrupted instruction (in LR)
-       p(mov+" R31, -%d(R29)", l.stack)
-       p(sub+" $%d, R29", l.stack)
+       p(mov+" R31, -%d(R29)", lfp.stack)
+       p(sub+" $%d, R29", lfp.stack)
 
        l.save()
+       p("#ifndef %s", softfloat)
+       lfp.save()
+       p("#endif")
        p("CALL Â·asyncPreempt2(SB)")
+       p("#ifndef %s", softfloat)
+       lfp.restore()
+       p("#endif")
        l.restore()
 
-       p(mov+" %d(R29), R31", l.stack)     // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
-       p(mov + " (R29), R23")              // load PC to REGTMP
-       p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
+       p(mov+" %d(R29), R31", lfp.stack)     // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
+       p(mov + " (R29), R23")                // load PC to REGTMP
+       p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall)
        p("JMP (R23)")
 }
 
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 88c598ee980250ae0537e2159c0888e9f7bf88c9..f6747f5c87919deb97da064c980cf3bdb9d9a23f 100644 (file)
@@ -218,10 +218,13 @@ func panicmem() {
 // pfn is a C function pointer.
 // arg is a value to pass to pfn.
 func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
+       gp := getg()
        d := newdefer()
        if d._panic != nil {
                throw("deferproc: d.panic != nil after newdefer")
        }
+       d.link = gp._defer
+       gp._defer = d
        d.frame = frame
        d.panicStack = getg()._panic
        d.pfn = pfn
@@ -300,8 +303,6 @@ func newdefer() *_defer {
                }
        }
        d.heap = true
-       d.link = gp._defer
-       gp._defer = d
        return d
 }
 
@@ -1175,6 +1176,12 @@ func startpanic_m() bool {
        }
 }
 
+// throwReportQuirk, if non-nil, is called by throw after dumping the stacks.
+//
+// TODO(austin): Remove this after Go 1.15 when we remove the
+// mlockGsignal workaround.
+var throwReportQuirk func()
+
 var didothers bool
 var deadlock mutex
 
@@ -1221,6 +1228,10 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
 
        printDebugLog()
 
+       if throwReportQuirk != nil {
+               throwReportQuirk()
+       }
+
        return docrash
 }
 
diff --git a/libgo/go/runtime/pprof/map.go b/libgo/go/runtime/pprof/map.go
index a271ad022e743f8e78a6fe8fec1ae22f9fed784f..7c75872351383012fc506d8c40596f04290dccdf 100644 (file)
@@ -68,7 +68,8 @@ Search:
        if len(m.freeStk) < len(stk) {
                m.freeStk = make([]uintptr, 1024)
        }
-       e.stk = m.freeStk[:len(stk)]
+       // Limit cap to prevent append from clobbering freeStk.
+       e.stk = m.freeStk[:len(stk):len(stk)]
        m.freeStk = m.freeStk[len(stk):]
 
        for j := range stk {
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index bba9ee36c6ac9518c89ed02f57b163bd8243e470..239466fecc6f75c91594fec29f5103b9200f9c3e 100644 (file)
@@ -1192,16 +1192,37 @@ func TestTryAdd(t *testing.T) {
                        {Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
                },
        }, {
-               name: "recursive_inlined_funcs",
+               name: "bug38096",
+               input: []uint64{
+                       3, 0, 500, // hz = 500. Must match the period.
+                       // count (data[2]) == 0 && len(stk) == 1 is an overflow
+                       // entry. The "stk" entry is actually the count.
+                       4, 0, 0, 4242,
+               },
+               wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
+               wantSamples: []*profile.Sample{
+                       {Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
+               },
+       }, {
+               // If a function is called recursively then it must not be
+               // inlined in the caller.
+               //
+               // N.B. We're generating an impossible profile here, with a
+               // recursive inlineCallee call. This is simulating a non-Go
+               // function that looks like an inlined Go function other than
+               // its recursive property. See pcDeck.tryAdd.
+               name: "recursive_func_is_not_inlined",
                input: []uint64{
                        3, 0, 500, // hz = 500. Must match the period.
                        5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
                        4, 0, 40, inlinedCalleePtr,
                },
-               wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
+               // inlinedCaller shows up here because
+               // runtime_expandFinalInlineFrame adds it to the stack frame.
+               wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}},
                wantSamples: []*profile.Sample{
-                       {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
-                       {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
+                       {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
+                       {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
                },
        }, {
                name: "truncated_stack_trace_later",
@@ -1222,12 +1243,36 @@ func TestTryAdd(t *testing.T) {
                        4, 0, 70, inlinedCalleePtr,
                        5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
                },
-               wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
-                       {"runtime/pprof.inlinedCallee"},
+               wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
+               wantSamples: []*profile.Sample{
+                       {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
+                       {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
+               },
+       }, {
+               // We can recover the inlined caller from a truncated stack.
+               name: "truncated_stack_trace_only",
+               input: []uint64{
+                       3, 0, 500, // hz = 500. Must match the period.
+                       4, 0, 70, inlinedCalleePtr,
+               },
+               wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
+               wantSamples: []*profile.Sample{
+                       {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
+               },
+       }, {
+               // The same location is used for duplicated stacks.
+               name: "truncated_stack_trace_twice",
+               input: []uint64{
+                       3, 0, 500, // hz = 500. Must match the period.
+                       4, 0, 70, inlinedCalleePtr,
+                       5, 0, 80, inlinedCallerPtr, inlinedCalleePtr,
+               },
+               wantLocs: [][]string{
+                       {"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"},
                        {"runtime/pprof.inlinedCaller"}},
                wantSamples: []*profile.Sample{
                        {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
-                       {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
+                       {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
                },
        }}
 
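For reference when reading these inputs: each record is length-prefixed, [N, x, count, pc...], where N counts the words in the record itself and the second word is zero in all of these vectors. So 3, 0, 500 is the three-word header that sets hz = 500, and 4, 0, 0, 4242 has count == 0 with a single "pc", which addCPUData treats as an overflow record whose real count is 4242. A sketch of a decoder for these vectors (the 0x1234 PCs are placeholders):

    package main

    import "fmt"

    func main() {
        data := []uint64{
            3, 0, 500, // header: hz = 500
            4, 0, 0, 4242, // count == 0, one "pc": overflow record, count 4242
            5, 0, 30, 0x1234, 0x1234, // count 30, two-PC stack
        }
        for len(data) > 0 {
            n := int(data[0]) // words in this record, including the length itself
            rec := data[:n]
            fmt.Println("count:", rec[2], "stack:", rec[3:])
            data = data[n:]
        }
    }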
diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go
index ba5db853b1bb3dc453913e618c68a5b7575db94f..15fa44b991efe93112450504fa46b55da7e10d38 100644 (file)
@@ -335,7 +335,10 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
                        // overflow record
                        count = uint64(stk[0])
                        stk = []uint64{
-                               uint64(funcPC(lostProfileEvent)),
+                               // gentraceback guarantees that PCs in the
+                               // stack can be unconditionally decremented and
+                               // still be valid, so we must do the same.
+                               uint64(funcPC(lostProfileEvent) + 1),
                        }
                }
                b.m.lookup(stk, tag).count += int64(count)
@@ -397,6 +400,10 @@ func (b *profileBuilder) build() {
 // It may emit to b.pb, so there must be no message encoding in progress.
 func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) {
        b.deck.reset()
+
+       // The last frame might be truncated. Recover lost inline frames.
+       stk = runtime_expandFinalInlineFrame(stk)
+
        for len(stk) > 0 {
                addr := stk[0]
                if l, ok := b.locs[addr]; ok {
@@ -408,22 +415,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
                        // then, record the cached location.
                        locs = append(locs, l.id)
 
-                       // The stk may be truncated due to the stack depth limit
-                       // (e.g. See maxStack and maxCPUProfStack in runtime) or
-                       // bugs in runtime. Avoid the crash in either case.
-                       // TODO(hyangah): The correct fix may require using the exact
-                       // pcs as the key for b.locs cache management instead of just
-                       // relying on the very first pc. We are late in the go1.14 dev
-                       // cycle, so this is a workaround with little code change.
-                       if len(l.pcs) > len(stk) {
-                               stk = nil
-                               // TODO(hyangah): would be nice if we can enable
-                               // debug print out on demand and report the problematic
-                               // cached location entry and stack traces. Do we already
-                               // have such facility to utilize (e.g. GODEBUG)?
-                       } else {
-                               stk = stk[len(l.pcs):] // skip the matching pcs.
-                       }
+                       // Skip the matching pcs.
+                       //
+                       // Even if stk was truncated due to the stack depth
+                       // limit, expandFinalInlineFrame above has already
+                       // fixed the truncation, ensuring it is long enough.
+                       stk = stk[len(l.pcs):]
                        continue
                }
 
@@ -440,9 +437,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
                        stk = stk[1:]
                        continue
                }
-               // add failed because this addr is not inlined with
-               // the existing PCs in the deck. Flush the deck and retry to
-               // handle this pc.
+               // add failed because this addr is not inlined with the
+               // existing PCs in the deck. Flush the deck and retry handling
+               // this pc.
                if id := b.emitLocation(); id > 0 {
                        locs = append(locs, id)
                }
@@ -476,8 +473,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo
 // the fake pcs and restore the inlined and entry functions. Inlined functions
 // have the following properties:
 //   Frame's Func is nil (note: also true for non-Go functions), and
-//   Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions),
-//   Frame's Name does not match its entry function frame's name.
+//   Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and
+//   Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive).
 //
 // As reading and processing the pcs in a stack trace one by one (from leaf to the root),
 // we use pcDeck to temporarily hold the observed pcs and their expanded frames
@@ -499,8 +496,8 @@ func (d *pcDeck) reset() {
 // to the deck. If it fails the caller needs to flush the deck and retry.
 func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) {
        if existing := len(d.pcs); existing > 0 {
-               // 'frames' are all expanded from one 'pc' and represent all inlined functions
-               // so we check only the last one.
+               // 'd.frames' are all expanded from one 'pc' and represent all
+               // inlined functions so we check only the last one.
                newFrame := frames[0]
                last := d.frames[existing-1]
                if last.Func != nil { // the last frame can't be inlined. Flush.
diff --git a/libgo/go/runtime/pprof/proto_test.go b/libgo/go/runtime/pprof/proto_test.go
index 67d6b5d0ddb6eed0ab9d74b5d8dbcb227a198360..81cd5591d1d8f7dff9ac3ac6106ce2ec71bd9fa1 100644 (file)
@@ -424,3 +424,16 @@ func TestFakeMapping(t *testing.T) {
                }
        }
 }
+
+// Make sure the profiler can handle an empty stack trace.
+// See issue 37967.
+func TestEmptyStack(t *testing.T) {
+       b := []uint64{
+               3, 0, 500, // hz = 500
+               3, 0, 10, // 10 samples with an empty stack trace
+       }
+       _, err := translateCPUProfile(b)
+       if err != nil {
+               t.Fatalf("translating profile: %v", err)
+       }
+}
diff --git a/libgo/go/runtime/pprof/runtime.go b/libgo/go/runtime/pprof/runtime.go
index b71bbad9a69d176f3f721c6a26367759104caf9f..dd2545b3390dbc6dcd150c3fad2502296cb30e32 100644 (file)
@@ -9,6 +9,9 @@ import (
        "unsafe"
 )
 
+// runtime_expandFinalInlineFrame is defined in runtime/symtab.go.
+func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr
+
 // runtime_setProfLabel is defined in runtime/proflabel.go.
 func runtime_setProfLabel(labels unsafe.Pointer)
 
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index f5bfc089c103e29624fae65ba5cfbadea265d2d1..75b42f713090caa1af250401ca602d5e61958f44 100644 (file)
@@ -593,6 +593,10 @@ type m struct {
        // requested, but fails. Accessed atomically.
        preemptGen uint32
 
+       // Whether this is a pending preemption signal on this M.
+       // Accessed atomically.
+       signalPending uint32
+
        dlogPerM
 
        mOS
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
index 150345f05122c644df29fe76640ed744646f7ee9..1e057f6bf245e620a5e7c612663607cf598d2627 100644 (file)
@@ -343,6 +343,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt, sigpc uintptr) {
 
        // Acknowledge the preemption.
        atomic.Xadd(&gp.m.preemptGen, 1)
+       atomic.Store(&gp.m.signalPending, 0)
 }
 
 // gccgo-specific definition.
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index 5dd38949aa35923dfe925afe2cf4c06294c0ae6a..867345740702eaf7cf31783d21d89cb5e4d0d94b 100644 (file)
@@ -4,6 +4,10 @@
 
 package runtime
 
+import (
+       _ "unsafe" // for go:linkname
+)
+
 // Frames may be used to get function/file/line information for a
 // slice of PC values returned by Callers.
 type Frames struct {
@@ -108,6 +112,33 @@ func (ci *Frames) Next() (frame Frame, more bool) {
        return frame, more
 }
 
+//go:noescape
+// pcInlineCallers is written in C.
+func pcInlineCallers(pc uintptr, locbuf *location, max int32) int32
+
+// runtime_expandFinalInlineFrame expands the final pc in stk to include all
+// "callers" if pc is inline.
+//
+//go:linkname runtime_expandFinalInlineFrame runtime..z2fpprof.runtime_expandFinalInlineFrame
+func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
+       if len(stk) == 0 {
+               return stk
+       }
+       pc := stk[len(stk)-1]
+       tracepc := pc - 1
+
+       var locbuf [_TracebackMaxFrames]location
+       n := pcInlineCallers(tracepc, &locbuf[0], int32(len(locbuf)))
+
+       // Returning the same PC several times causes Frame.Next to do
+       // the right thing.
+       for i := int32(1); i < n; i++ {
+               stk = append(stk, pc)
+       }
+
+       return stk
+}
+
 // NOTE: Func does not expose the actual unexported fields, because we return *Func
 // values to users, and we want to keep them from being able to overwrite the data
 // with (say) *f = Func{}.
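
The repeated-PC trick works because runtime.Frames is stateful: each call to Next consumes one logical frame, so a run of identical PCs yields the successive inlined callers that pcInlineCallers reported. At user level this is the same machinery that makes CallersFrames report inlined calls:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        pc := make([]uintptr, 32)
        n := runtime.Callers(1, pc)
        frames := runtime.CallersFrames(pc[:n])
        for {
            f, more := frames.Next()
            // When a PC (or an expanded run of identical PCs) corresponds to
            // inlined calls, Next yields one Frame per inlined function.
            fmt.Println(f.Function, f.Line)
            if !more {
                break
            }
        }
    }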
diff --git a/libgo/go/runtime/testdata/testprog/checkptr.go b/libgo/go/runtime/testdata/testprog/checkptr.go
index 177db38e5ab68e3103790878bb36484573ed95c5..45e6fb1aa53f6c463ad7a8196439505372a00526 100644 (file)
@@ -7,18 +7,25 @@ package main
 import "unsafe"
 
 func init() {
-       register("CheckPtrAlignment", CheckPtrAlignment)
+       register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr)
+       register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr)
        register("CheckPtrArithmetic", CheckPtrArithmetic)
        register("CheckPtrSize", CheckPtrSize)
        register("CheckPtrSmall", CheckPtrSmall)
 }
 
-func CheckPtrAlignment() {
+func CheckPtrAlignmentNoPtr() {
        var x [2]int64
        p := unsafe.Pointer(&x[0])
        sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1))
 }
 
+func CheckPtrAlignmentPtr() {
+       var x [2]int64
+       p := unsafe.Pointer(&x[0])
+       sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1))
+}
+
 func CheckPtrArithmetic() {
        var x int
        i := uintptr(unsafe.Pointer(&x))
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go
index d0dd3a49e4112f1aa8c382f8fa457f1ec770cab8..27d88d43105ecd750db5c70c46c6e5a328b82852 100644 (file)
@@ -73,36 +73,26 @@ type timer struct {
 //   timerNoStatus   -> timerWaiting
 //   anything else   -> panic: invalid value
 // deltimer:
-//   timerWaiting         -> timerDeleted
+//   timerWaiting         -> timerModifying -> timerDeleted
 //   timerModifiedEarlier -> timerModifying -> timerDeleted
-//   timerModifiedLater   -> timerDeleted
+//   timerModifiedLater   -> timerModifying -> timerDeleted
 //   timerNoStatus        -> do nothing
 //   timerDeleted         -> do nothing
 //   timerRemoving        -> do nothing
 //   timerRemoved         -> do nothing
 //   timerRunning         -> wait until status changes
 //   timerMoving          -> wait until status changes
-//   timerModifying  -> panic: concurrent deltimer/modtimer calls
+//   timerModifying       -> wait until status changes
 // modtimer:
 //   timerWaiting    -> timerModifying -> timerModifiedXX
 //   timerModifiedXX -> timerModifying -> timerModifiedYY
-//   timerNoStatus   -> timerWaiting
-//   timerRemoved    -> timerWaiting
+//   timerNoStatus   -> timerModifying -> timerWaiting
+//   timerRemoved    -> timerModifying -> timerWaiting
+//   timerDeleted    -> timerModifying -> timerModifiedXX
 //   timerRunning    -> wait until status changes
 //   timerMoving     -> wait until status changes
 //   timerRemoving   -> wait until status changes
-//   timerDeleted    -> panic: concurrent modtimer/deltimer calls
-//   timerModifying  -> panic: concurrent modtimer calls
-// resettimer:
-//   timerNoStatus   -> timerWaiting
-//   timerRemoved    -> timerWaiting
-//   timerDeleted    -> timerModifying -> timerModifiedXX
-//   timerRemoving   -> wait until status changes
-//   timerRunning    -> wait until status changes
-//   timerWaiting    -> panic: resettimer called on active timer
-//   timerMoving     -> panic: resettimer called on active timer
-//   timerModifiedXX -> panic: resettimer called on active timer
-//   timerModifying  -> panic: resettimer called on active timer
+//   timerModifying  -> wait until status changes
 // cleantimers (looks in P's timer heap):
 //   timerDeleted    -> timerRemoving -> timerRemoved
 //   timerModifiedXX -> timerMoving -> timerWaiting
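
The practical consequence of replacing the panic transitions with "wait until status changes" is that racy deltimer/modtimer interleavings now spin (osyield) instead of crashing with badTimer, and the separate resettimer table above is dropped. A user-level sketch of the kind of contention the new transitions must tolerate (concurrent Reset/Stop on one timer is not a pattern the time package encourages, but it must not crash the runtime; see issue 38070 for the preemption self-deadlock the acquirem calls below guard against):

    package main

    import (
        "sync"
        "time"
    )

    func main() {
        t := time.NewTimer(time.Millisecond)
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for j := 0; j < 1000; j++ {
                    t.Reset(time.Microsecond) // modtimer
                    t.Stop()                  // deltimer
                }
            }()
        }
        wg.Wait()
    }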
@@ -250,32 +240,24 @@ func addtimer(t *timer) {
                t.when = maxWhen
        }
        if t.status != timerNoStatus {
-               badTimer()
+               throw("addtimer called with initialized timer")
        }
        t.status = timerWaiting
 
-       addInitializedTimer(t)
-}
-
-// addInitializedTimer adds an initialized timer to the current P.
-func addInitializedTimer(t *timer) {
        when := t.when
 
        pp := getg().m.p.ptr()
        lock(&pp.timersLock)
-       ok := cleantimers(pp) && doaddtimer(pp, t)
+       cleantimers(pp)
+       doaddtimer(pp, t)
        unlock(&pp.timersLock)
-       if !ok {
-               badTimer()
-       }
 
        wakeNetPoller(when)
 }
 
 // doaddtimer adds t to the current P's heap.
-// It reports whether it saw no problems due to races.
 // The caller must have locked the timers for pp.
-func doaddtimer(pp *p, t *timer) bool {
+func doaddtimer(pp *p, t *timer) {
        // Timers rely on the network poller, so make sure the poller
        // has started.
        if netpollInited == 0 {
@@ -288,12 +270,11 @@ func doaddtimer(pp *p, t *timer) bool {
        t.pp.set(pp)
        i := len(pp.timers)
        pp.timers = append(pp.timers, t)
-       ok := siftupTimer(pp.timers, i)
+       siftupTimer(pp.timers, i)
        if t == pp.timers[0] {
                atomic.Store64(&pp.timer0When, uint64(t.when))
        }
        atomic.Xadd(&pp.numTimers, 1)
-       return ok
 }
 
 // deltimer deletes the timer t. It may be on some other P, so we can't
@@ -304,22 +285,42 @@ func deltimer(t *timer) bool {
        for {
                switch s := atomic.Load(&t.status); s {
                case timerWaiting, timerModifiedLater:
-                       tpp := t.pp.ptr()
-                       if atomic.Cas(&t.status, s, timerDeleted) {
+                       // Prevent preemption while the timer is in timerModifying.
+                       // This could lead to a self-deadlock. See #38070.
+                       mp := acquirem()
+                       if atomic.Cas(&t.status, s, timerModifying) {
+                               // Must fetch t.pp before changing status,
+                               // as cleantimers in another goroutine
+                               // can clear t.pp of a timerDeleted timer.
+                               tpp := t.pp.ptr()
+                               if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
+                                       badTimer()
+                               }
+                               releasem(mp)
                                atomic.Xadd(&tpp.deletedTimers, 1)
                                // Timer was not yet run.
                                return true
+                       } else {
+                               releasem(mp)
                        }
                case timerModifiedEarlier:
-                       tpp := t.pp.ptr()
+                       // Prevent preemption while the timer is in timerModifying.
+                       // This could lead to a self-deadlock. See #38070.
+                       mp := acquirem()
                        if atomic.Cas(&t.status, s, timerModifying) {
+                               // Must fetch t.pp before setting status
+                               // to timerDeleted.
+                               tpp := t.pp.ptr()
                                atomic.Xadd(&tpp.adjustTimers, -1)
                                if !atomic.Cas(&t.status, timerModifying, timerDeleted) {
                                        badTimer()
                                }
+                               releasem(mp)
                                atomic.Xadd(&tpp.deletedTimers, 1)
                                // Timer was not yet run.
                                return true
+                       } else {
+                               releasem(mp)
                        }
                case timerDeleted, timerRemoving, timerRemoved:
                        // Timer was already run.
@@ -334,7 +335,8 @@ func deltimer(t *timer) bool {
                        return false
                case timerModifying:
                        // Simultaneous calls to deltimer and modtimer.
-                       badTimer()
+                       // Wait for the other call to complete.
+                       osyield()
                default:
                        badTimer()
                }
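
Both cases above follow the same discipline: a successful CAS from the observed status into timerModifying acts as a tiny lock, and acquirem pins the goroutine to its M so it cannot be preempted while a competing deltimer or modtimer spins in osyield waiting for the status to clear; such a preemption is exactly the self-deadlock of issue #38070. A minimal userland sketch of the CAS-and-yield part, assuming nothing beyond the standard library (record, modify, and the state constants are illustrative names, not runtime APIs, and runtime.Gosched stands in for osyield):

    package main

    import (
        "runtime"
        "sync/atomic"
    )

    const (
        stateIdle uint32 = iota
        stateModifying
    )

    // record plays the role of a timer; state plays the role of t.status.
    type record struct {
        state uint32
        value int64
    }

    // modify wins the record by CAS-ing idle -> modifying, mutates it with
    // exclusive access, then releases it; losers yield and retry, as
    // deltimer and modtimer now do when they observe timerModifying.
    func modify(r *record, v int64) {
        for {
            if atomic.CompareAndSwapUint32(&r.state, stateIdle, stateModifying) {
                r.value = v
                atomic.StoreUint32(&r.state, stateIdle)
                return
            }
            runtime.Gosched() // userland stand-in for osyield
        }
    }

    func main() {
        done := make(chan struct{})
        r := &record{}
        for i := 0; i < 4; i++ {
            go func(v int64) { modify(r, v); done <- struct{}{} }(int64(i))
        }
        for i := 0; i < 4; i++ {
            <-done
        }
    }

The invariant is the same as in the sketch: only the goroutine that won the CAS may move the status out of timerModifying, so a failed exit CAS really does mean corruption and justifies badTimer.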
@@ -345,7 +347,7 @@ func deltimer(t *timer) bool {
 // We are locked on the P when this is called.
-// It reports whether it saw no problems due to races.
 // The caller must have locked the timers for pp.
-func dodeltimer(pp *p, i int) bool {
+func dodeltimer(pp *p, i int) {
        if t := pp.timers[i]; t.pp.ptr() != pp {
                throw("dodeltimer: wrong P")
        } else {
@@ -357,29 +359,23 @@ func dodeltimer(pp *p, i int) bool {
        }
        pp.timers[last] = nil
        pp.timers = pp.timers[:last]
-       ok := true
        if i != last {
                // Moving to i may have moved the last timer to a new parent,
                // so sift up to preserve the heap guarantee.
-               if !siftupTimer(pp.timers, i) {
-                       ok = false
-               }
-               if !siftdownTimer(pp.timers, i) {
-                       ok = false
-               }
+               siftupTimer(pp.timers, i)
+               siftdownTimer(pp.timers, i)
        }
        if i == 0 {
                updateTimer0When(pp)
        }
        atomic.Xadd(&pp.numTimers, -1)
-       return ok
 }
 
 // dodeltimer0 removes timer 0 from the current P's heap.
 // We are locked on the P when this is called.
-// It reports whether it saw no problems due to races.
 // The caller must have locked the timers for pp.
-func dodeltimer0(pp *p) bool {
+func dodeltimer0(pp *p) {
        if t := pp.timers[0]; t.pp.ptr() != pp {
                throw("dodeltimer0: wrong P")
        } else {
@@ -391,13 +387,11 @@ func dodeltimer0(pp *p) bool {
        }
        pp.timers[last] = nil
        pp.timers = pp.timers[:last]
-       ok := true
        if last > 0 {
-               ok = siftdownTimer(pp.timers, 0)
+               siftdownTimer(pp.timers, 0)
        }
        updateTimer0When(pp)
        atomic.Xadd(&pp.numTimers, -1)
-       return ok
 }
 
 // modtimer modifies an existing timer.
@@ -409,30 +403,47 @@ func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg in
 
        status := uint32(timerNoStatus)
        wasRemoved := false
+       var mp *m
 loop:
        for {
                switch status = atomic.Load(&t.status); status {
                case timerWaiting, timerModifiedEarlier, timerModifiedLater:
+                       // Prevent preemption while the timer is in timerModifying.
+                       // This could lead to a self-deadlock. See #38070.
+                       mp = acquirem()
                        if atomic.Cas(&t.status, status, timerModifying) {
                                break loop
                        }
+                       releasem(mp)
                case timerNoStatus, timerRemoved:
+                       // Prevent preemption while the timer is in timerModifying.
+                       // This could lead to a self-deadlock. See #38070.
+                       mp = acquirem()
+
                        // Timer was already run and t is no longer in a heap.
                        // Act like addtimer.
-                       if atomic.Cas(&t.status, status, timerWaiting) {
+                       if atomic.Cas(&t.status, status, timerModifying) {
                                wasRemoved = true
                                break loop
                        }
+                       releasem(mp)
+               case timerDeleted:
+                       // Prevent preemption while the timer is in timerModifying.
+                       // This could lead to a self-deadlock. See #38070.
+                       mp = acquirem()
+                       if atomic.Cas(&t.status, status, timerModifying) {
+                               atomic.Xadd(&t.pp.ptr().deletedTimers, -1)
+                               break loop
+                       }
+                       releasem(mp)
                case timerRunning, timerRemoving, timerMoving:
                        // The timer is being run or moved, by a different P.
                        // Wait for it to complete.
                        osyield()
-               case timerDeleted:
-                       // Simultaneous calls to modtimer and deltimer.
-                       badTimer()
                case timerModifying:
                        // Multiple simultaneous calls to modtimer.
-                       badTimer()
+                       // Wait for the other call to complete.
+                       osyield()
                default:
                        badTimer()
                }
@@ -445,7 +456,15 @@ loop:
 
        if wasRemoved {
                t.when = when
-               addInitializedTimer(t)
+               pp := getg().m.p.ptr()
+               lock(&pp.timersLock)
+               doaddtimer(pp, t)
+               unlock(&pp.timersLock)
+               if !atomic.Cas(&t.status, timerModifying, timerWaiting) {
+                       badTimer()
+               }
+               releasem(mp)
+               wakeNetPoller(when)
        } else {
                // The timer is in some other P's heap, so we can't change
                // the when field. If we did, the other P's heap would
@@ -462,7 +481,6 @@ loop:
                // Update the adjustTimers field.  Subtract one if we
                // are removing a timerModifiedEarlier, add one if we
                // are adding a timerModifiedEarlier.
-               tpp := t.pp.ptr()
                adjust := int32(0)
                if status == timerModifiedEarlier {
                        adjust--
@@ -471,13 +489,14 @@ loop:
                        adjust++
                }
                if adjust != 0 {
-                       atomic.Xadd(&tpp.adjustTimers, adjust)
+                       atomic.Xadd(&t.pp.ptr().adjustTimers, adjust)
                }
 
                // Set the new status of the timer.
                if !atomic.Cas(&t.status, timerModifying, newStatus) {
                        badTimer()
                }
+               releasem(mp)
 
                // If the new status is earlier, wake up the poller.
                if newStatus == timerModifiedEarlier {
@@ -486,67 +505,22 @@ loop:
        }
 }
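
The wasRemoved branch above is what lets Timer.Reset revive a timer that has already fired: modtimer finds it in timerNoStatus or timerRemoved, re-adds it to the current P's heap under timersLock, and wakes the netpoller. At the time package level that corresponds to the familiar pattern (a sketch with illustrative durations):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t := time.NewTimer(10 * time.Millisecond)
        <-t.C // fired: the timer has left the heap (timerNoStatus/timerRemoved)

        // Reset reaches modtimer; the wasRemoved path re-adds the timer
        // under the P's timersLock and wakes the netpoller.
        t.Reset(10 * time.Millisecond)
        fmt.Println(<-t.C)
    }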
 
-// resettimer resets an existing inactive timer to turn it into an active timer,
-// with a new time for when the timer should fire.
+// resettimer resets the time when a timer should fire.
+// If used for an inactive timer, the timer will become active.
 // This should be called instead of addtimer if the timer value has been,
 // or may have been, used previously.
 func resettimer(t *timer, when int64) {
-       if when < 0 {
-               when = maxWhen
-       }
-
-       for {
-               switch s := atomic.Load(&t.status); s {
-               case timerNoStatus, timerRemoved:
-                       if atomic.Cas(&t.status, s, timerWaiting) {
-                               t.when = when
-                               addInitializedTimer(t)
-                               return
-                       }
-               case timerDeleted:
-                       tpp := t.pp.ptr()
-                       if atomic.Cas(&t.status, s, timerModifying) {
-                               t.nextwhen = when
-                               newStatus := uint32(timerModifiedLater)
-                               if when < t.when {
-                                       newStatus = timerModifiedEarlier
-                                       atomic.Xadd(&t.pp.ptr().adjustTimers, 1)
-                               }
-                               if !atomic.Cas(&t.status, timerModifying, newStatus) {
-                                       badTimer()
-                               }
-                               atomic.Xadd(&tpp.deletedTimers, -1)
-                               if newStatus == timerModifiedEarlier {
-                                       wakeNetPoller(when)
-                               }
-                               return
-                       }
-               case timerRemoving:
-                       // Wait for the removal to complete.
-                       osyield()
-               case timerRunning:
-                       // Even though the timer should not be active,
-                       // we can see timerRunning if the timer function
-                       // permits some other goroutine to call resettimer.
-                       // Wait until the run is complete.
-                       osyield()
-               case timerWaiting, timerModifying, timerModifiedEarlier, timerModifiedLater, timerMoving:
-                       // Called resettimer on active timer.
-                       badTimer()
-               default:
-                       badTimer()
-               }
-       }
+       modtimer(t, when, t.period, t.f, t.arg, t.seq)
 }
 
 // cleantimers cleans up the head of the timer queue. This speeds up
 // programs that create and delete timers; leaving them in the heap
-// slows down addtimer. Reports whether no timer problems were found.
+// slows down addtimer.
 // The caller must have locked the timers for pp.
-func cleantimers(pp *p) bool {
+func cleantimers(pp *p) {
        for {
                if len(pp.timers) == 0 {
-                       return true
+                       return
                }
                t := pp.timers[0]
                if t.pp.ptr() != pp {
@@ -557,11 +531,9 @@ func cleantimers(pp *p) bool {
                        if !atomic.Cas(&t.status, s, timerRemoving) {
                                continue
                        }
-                       if !dodeltimer0(pp) {
-                               return false
-                       }
+                       dodeltimer0(pp)
                        if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
-                               return false
+                               badTimer()
                        }
                        atomic.Xadd(&pp.deletedTimers, -1)
                case timerModifiedEarlier, timerModifiedLater:
@@ -571,21 +543,17 @@ func cleantimers(pp *p) bool {
                        // Now we can change the when field.
                        t.when = t.nextwhen
                        // Move t to the right position.
-                       if !dodeltimer0(pp) {
-                               return false
-                       }
-                       if !doaddtimer(pp, t) {
-                               return false
-                       }
+                       dodeltimer0(pp)
+                       doaddtimer(pp, t)
                        if s == timerModifiedEarlier {
                                atomic.Xadd(&pp.adjustTimers, -1)
                        }
                        if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
-                               return false
+                               badTimer()
                        }
                default:
                        // Head of timers does not need adjustment.
-                       return true
+                       return
                }
        }
 }
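
cleantimers implements lazy deletion: deltimer only flips a timer's status to timerDeleted, and the timer stays in the heap until it surfaces at the head, where cleantimers (or runtimer) pops it. A compact sketch of that strategy using container/heap (lazyHeap and its helpers are illustrative, not runtime code):

    package main

    import (
        "container/heap"
        "fmt"
    )

    type item struct {
        when    int64
        deleted bool // set by "deltimer"; the item stays in the heap
    }

    type lazyHeap []*item

    func (h lazyHeap) Len() int            { return len(h) }
    func (h lazyHeap) Less(i, j int) bool  { return h[i].when < h[j].when }
    func (h lazyHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
    func (h *lazyHeap) Push(x interface{}) { *h = append(*h, x.(*item)) }
    func (h *lazyHeap) Pop() interface{} {
        old := *h
        it := old[len(old)-1]
        *h = old[:len(old)-1]
        return it
    }

    // cleanHead discards deleted items once they reach the head, which
    // is all cleantimers does for the timerDeleted case.
    func cleanHead(h *lazyHeap) {
        for h.Len() > 0 && (*h)[0].deleted {
            heap.Pop(h)
        }
    }

    func main() {
        h := &lazyHeap{{when: 1, deleted: true}, {when: 2}, {when: 3}}
        heap.Init(h)
        cleanHead(h)
        fmt.Println((*h)[0].when) // 2
    }

This keeps deletion O(1) amortized and concentrates the heap work in the code paths that already hold timersLock.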
@@ -601,9 +569,7 @@ func moveTimers(pp *p, timers []*timer) {
                        switch s := atomic.Load(&t.status); s {
                        case timerWaiting:
                                t.pp = 0
-                               if !doaddtimer(pp, t) {
-                                       badTimer()
-                               }
+                               doaddtimer(pp, t)
                                break loop
                        case timerModifiedEarlier, timerModifiedLater:
                                if !atomic.Cas(&t.status, s, timerMoving) {
@@ -611,9 +577,7 @@ func moveTimers(pp *p, timers []*timer) {
                                }
                                t.when = t.nextwhen
                                t.pp = 0
-                               if !doaddtimer(pp, t) {
-                                       badTimer()
-                               }
+                               doaddtimer(pp, t)
                                if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
                                        badTimer()
                                }
@@ -667,9 +631,7 @@ loop:
                switch s := atomic.Load(&t.status); s {
                case timerDeleted:
                        if atomic.Cas(&t.status, s, timerRemoving) {
-                               if !dodeltimer(pp, i) {
-                                       badTimer()
-                               }
+                               dodeltimer(pp, i)
                                if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
                                        badTimer()
                                }
@@ -685,9 +647,7 @@ loop:
                                // We don't add it back yet because the
                                // heap manipulation could cause our
                                // loop to skip some other timer.
-                               if !dodeltimer(pp, i) {
-                                       badTimer()
-                               }
+                               dodeltimer(pp, i)
                                moved = append(moved, t)
                                if s == timerModifiedEarlier {
                                        if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 {
@@ -723,9 +683,7 @@ loop:
 // back to the timer heap.
 func addAdjustedTimers(pp *p, moved []*timer) {
        for _, t := range moved {
-               if !doaddtimer(pp, t) {
-                       badTimer()
-               }
+               doaddtimer(pp, t)
                if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
                        badTimer()
                }
@@ -779,9 +737,7 @@ func runtimer(pp *p, now int64) int64 {
                        if !atomic.Cas(&t.status, s, timerRemoving) {
                                continue
                        }
-                       if !dodeltimer0(pp) {
-                               badTimer()
-                       }
+                       dodeltimer0(pp)
                        if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
                                badTimer()
                        }
@@ -795,12 +751,8 @@ func runtimer(pp *p, now int64) int64 {
                                continue
                        }
                        t.when = t.nextwhen
-                       if !dodeltimer0(pp) {
-                               badTimer()
-                       }
-                       if !doaddtimer(pp, t) {
-                               badTimer()
-                       }
+                       dodeltimer0(pp)
+                       doaddtimer(pp, t)
                        if s == timerModifiedEarlier {
                                atomic.Xadd(&pp.adjustTimers, -1)
                        }
@@ -838,18 +790,14 @@ func runOneTimer(pp *p, t *timer, now int64) {
                // Leave in heap but adjust next time to fire.
                delta := t.when - now
                t.when += t.period * (1 + -delta/t.period)
-               if !siftdownTimer(pp.timers, 0) {
-                       badTimer()
-               }
+               siftdownTimer(pp.timers, 0)
                if !atomic.Cas(&t.status, timerRunning, timerWaiting) {
                        badTimer()
                }
                updateTimer0When(pp)
        } else {
                // Remove from heap.
-               if !dodeltimer0(pp) {
-                       badTimer()
-               }
+               dodeltimer0(pp)
                if !atomic.Cas(&t.status, timerRunning, timerNoStatus) {
                        badTimer()
                }
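
For periodic timers, the line t.when += t.period * (1 + -delta/t.period) above advances when by however many whole periods are needed to land strictly after now, even when the timer fired late. A worked example (next is simply that arithmetic pulled out into a function):

    package main

    import "fmt"

    // next reproduces runOneTimer's rescheduling arithmetic for a
    // periodic timer: delta = when - now is <= 0 once the timer fires,
    // and when advances by whole periods to land strictly after now.
    func next(when, period, now int64) int64 {
        delta := when - now
        return when + period*(1+-delta/period)
    }

    func main() {
        fmt.Println(next(100, 50, 100)) // 150: fired exactly on time
        fmt.Println(next(100, 50, 260)) // 300: a late fire skips the missed ticks
    }

Because the division is integer division, the new when lands after now by at most one period, so a timer that fell behind skips missed ticks rather than firing a burst.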
@@ -1053,9 +1001,9 @@ func timeSleepUntil() (int64, *p) {
 // "panic holding locks" message. Instead, we panic while not
 // holding a lock.
 
-func siftupTimer(t []*timer, i int) bool {
+func siftupTimer(t []*timer, i int) {
        if i >= len(t) {
-               return false
+               badTimer()
        }
        when := t[i].when
        tmp := t[i]
@@ -1070,13 +1018,12 @@ func siftupTimer(t []*timer, i int) bool {
        if tmp != t[i] {
                t[i] = tmp
        }
-       return true
 }
 
-func siftdownTimer(t []*timer, i int) bool {
+func siftdownTimer(t []*timer, i int) {
        n := len(t)
        if i >= n {
-               return false
+               badTimer()
        }
        when := t[i].when
        tmp := t[i]
@@ -1111,7 +1058,6 @@ func siftdownTimer(t []*timer, i int) bool {
        if tmp != t[i] {
                t[i] = tmp
        }
-       return true
 }
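
siftupTimer and siftdownTimer are ordinary heap sift operations over pp.timers, keyed on each timer's when field; the runtime's timer heap is four-ary rather than binary, but the logic is the same as in this binary sketch:

    package main

    import "fmt"

    // siftup and siftdown restore the min-heap property around index i.
    func siftup(a []int64, i int) {
        tmp := a[i]
        for i > 0 {
            p := (i - 1) / 2 // parent in a binary heap
            if tmp >= a[p] {
                break
            }
            a[i] = a[p]
            i = p
        }
        a[i] = tmp
    }

    func siftdown(a []int64, i int) {
        n := len(a)
        tmp := a[i]
        for {
            c := 2*i + 1 // left child
            if c >= n {
                break
            }
            if c+1 < n && a[c+1] < a[c] {
                c++ // pick the smaller child
            }
            if a[c] >= tmp {
                break
            }
            a[i] = a[c]
            i = c
        }
        a[i] = tmp
    }

    func main() {
        a := []int64{1, 5, 3, 9, 7} // a valid min-heap
        a[0] = 8                    // head rescheduled, as in runtimer
        siftdown(a, 0)
        fmt.Println(a) // [3 5 8 9 7]
    }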
 
 // badTimer is called if the timer data structures have been corrupted,
@@ -1119,5 +1065,5 @@ func siftdownTimer(t []*timer, i int) bool {
 // panicking due to invalid slice access while holding locks.
 // See issue #25686.
 func badTimer() {
-       panic(errorString("racy use of timers"))
+       throw("timer data corruption")
 }
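
After this change the division of labor is clean: genuine data corruption throws via badTimer, while mere races spin in osyield until the conflicting operation completes. For reference, these are the timer statuses this file cycles through (the real constants live in runtime2.go; the values and ordering below are illustrative only):

    // Illustrative only; see runtime2.go for the real declarations.
    const (
        timerNoStatus = iota // not in a heap; new or fully stopped
        timerWaiting         // in some P's heap, waiting to fire
        timerRunning         // runOneTimer is executing its function
        timerDeleted         // logically deleted, still in the heap
        timerRemoving        // being taken out of the heap
        timerRemoved         // out of the heap, stopped
        timerModifying       // transient lock held by deltimer/modtimer
        timerModifiedEarlier // rescheduled to an earlier when
        timerModifiedLater   // rescheduled to a later when
        timerMoving          // being moved to its new heap position
    )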
index 0891142d2c48ea7fe3d8b2e4aa044f714ab5f2f0..758af7487c7665eeb0f063be80d7ac8957aea864 100644 (file)
@@ -963,16 +963,15 @@ func tRunner(t *T, fn func(t *T)) {
                                t.Logf("cleanup panicked with %v", r)
                        }
                        // Flush the output log up to the root before dying.
-                       t.mu.Lock()
-                       root := &t.common
-                       for ; root.parent != nil; root = root.parent {
+                       for root := &t.common; root.parent != nil; root = root.parent {
+                               root.mu.Lock()
                                root.duration += time.Since(root.start)
-                               fmt.Fprintf(root.parent.w, "--- FAIL: %s (%s)\n", root.name, fmtDuration(root.duration))
+                               d := root.duration
+                               root.mu.Unlock()
+                               root.flushToParent("--- FAIL: %s (%s)\n", root.name, fmtDuration(d))
                                if r := root.parent.runCleanup(recoverAndReturnPanic); r != nil {
                                        fmt.Fprintf(root.parent.w, "cleanup panicked with %v", r)
                                }
-                               root.parent.mu.Lock()
-                               io.Copy(root.parent.w, bytes.NewReader(root.output))
                        }
                        panic(err)
                }
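
The rewritten loop fixes the lock scope: instead of holding one lock across the whole walk and copying output by hand with io.Copy, each node is locked just long enough to update and snapshot its duration, and flushToParent does the writing with no lock held. The shape of the fix as a self-contained sketch (node and report are illustrative names, not the testing package's types):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type node struct {
        mu       sync.Mutex
        name     string
        start    time.Time
        duration time.Duration
        parent   *node
    }

    // report walks toward the root the way tRunner now does on a panic:
    // lock each node only to update and snapshot its duration, then
    // write the FAIL line with no locks held.
    func report(n *node) {
        for root := n; root.parent != nil; root = root.parent {
            root.mu.Lock()
            root.duration += time.Since(root.start)
            d := root.duration
            root.mu.Unlock()
            fmt.Printf("--- FAIL: %s (%v)\n", root.name, d)
        }
    }

    func main() {
        parent := &node{name: "TestParent", start: time.Now()}
        child := &node{name: "TestParent/sub", start: time.Now(), parent: parent}
        report(child)
    }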
index 95998c362f09f97ef0c34e0c3808cebedefb53a9..2fc23c4feebc0ad4f6c6dd1028c79bb13effa9d0 100644 (file)
@@ -9,7 +9,6 @@ import (
        "encoding/gob"
        "encoding/json"
        "fmt"
-       "internal/race"
        "math/big"
        "math/rand"
        "os"
@@ -1393,36 +1392,45 @@ func TestReadFileLimit(t *testing.T) {
 }
 
 // Issue 25686: hard crash on concurrent timer access.
+// Issue 37400: panic with "racy use of timers".
 // This test deliberately invokes a race condition.
-// We are testing that we don't crash with "fatal error: panic holding locks".
+// We are testing that we don't crash with "fatal error: panic holding locks",
+// and that we also don't panic.
 func TestConcurrentTimerReset(t *testing.T) {
-       if race.Enabled {
-               t.Skip("skipping test under race detector")
-       }
-
-       // We expect this code to panic rather than crash.
-       // Don't worry if it doesn't panic.
-       catch := func(i int) {
-               if e := recover(); e != nil {
-                       t.Logf("panic in goroutine %d, as expected, with %q", i, e)
-               } else {
-                       t.Logf("no panic in goroutine %d", i)
-               }
+       const goroutines = 8
+       const tries = 1000
+       var wg sync.WaitGroup
+       wg.Add(goroutines)
+       timer := NewTimer(Hour)
+       for i := 0; i < goroutines; i++ {
+               go func(i int) {
+                       defer wg.Done()
+                       for j := 0; j < tries; j++ {
+                               timer.Reset(Hour + Duration(i*j))
+                       }
+               }(i)
        }
+       wg.Wait()
+}
 
+// Issue 37400: panic with "racy use of timers".
+func TestConcurrentTimerResetStop(t *testing.T) {
        const goroutines = 8
        const tries = 1000
        var wg sync.WaitGroup
-       wg.Add(goroutines)
+       wg.Add(goroutines * 2)
        timer := NewTimer(Hour)
        for i := 0; i < goroutines; i++ {
                go func(i int) {
                        defer wg.Done()
-                       defer catch(i)
                        for j := 0; j < tries; j++ {
                                timer.Reset(Hour + Duration(i*j))
                        }
                }(i)
+               go func(i int) {
+                       defer wg.Done()
+                       timer.Stop()
+               }(i)
        }
        wg.Wait()
 }
index 2b5fba806c6d49bdc8dedf96b87fa63e4d2599ee..a3bdb9484996d58ec5cd4dff7e235c43a22e7196 100644 (file)
@@ -123,6 +123,7 @@ internal/poll
 internal/race
 internal/reflectlite
 internal/singleflight
+internal/syscall/execenv
 internal/syscall/unix
 internal/testenv
 internal/testlog
diff --git a/libgo/misc/cgo/testgodefs/testdata/issue37479.go b/libgo/misc/cgo/testgodefs/testdata/issue37479.go
new file mode 100644 (file)
index 0000000..a210eb5
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ignore
+
+package main
+
+/*
+typedef struct A A;
+
+typedef struct {
+       struct A *next;
+       struct A **prev;
+} N;
+
+struct A
+{
+       N n;
+};
+
+typedef struct B
+{
+       A* a;
+} B;
+*/
+import "C"
+
+type N C.N
+
+type A C.A
+
+type B C.B
diff --git a/libgo/misc/cgo/testgodefs/testdata/issue37621.go b/libgo/misc/cgo/testgodefs/testdata/issue37621.go
new file mode 100644 (file)
index 0000000..d5ace3f
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// +build ignore
+
+package main
+
+/*
+struct tt {
+       long long a;
+       long long b;
+};
+
+struct s {
+       struct tt ts[3];
+};
+*/
+import "C"
+
+type TT C.struct_tt
+
+type S C.struct_s
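
These inputs feed go tool cgo -godefs (driven by the test below), which translates the C definitions into plain Go types. For issue37621, the expected shape of the output is roughly as follows (a sketch, not the checked-in output; godefs capitalizes field names and maps long long to int64):

    // Sketch of what cgo -godefs would plausibly emit for issue37621.
    type TT struct {
        A int64
        B int64
    }

    type S struct {
        Ts [3]TT
    }

The bug being tested was that an array-of-struct field like ts lost its proper element type during translation; the gcc.go change above (using the cached type for array elements) keeps it intact.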
index 1ce0fd0d1e196472ca4c32ada4810e5283de2968..ef45b95e6553e34bc51385fb68c5574993885a10 100644 (file)
@@ -11,5 +11,13 @@ var v2 = v1.L
 // Test that P, Q, and R all point to byte.
 var v3 = Issue8478{P: (*byte)(nil), Q: (**byte)(nil), R: (***byte)(nil)}
 
+// Test that N, A and B are fully defined
+var v4 = N{}
+var v5 = A{}
+var v6 = B{}
+
+// Test that S is fully defined
+var v7 = S{}
+
 func main() {
 }
index c02c3ff0acd95ce5d7ddb74c2ce9fa4c1b647248..438d23d65c068f0d44d5af88495cc380483f8b8e 100644 (file)
@@ -21,6 +21,8 @@ var filePrefixes = []string{
        "anonunion",
        "issue8478",
        "fieldtypedef",
+       "issue37479",
+       "issue37621",
 }
 
 func TestGoDefs(t *testing.T) {
index 33956ca4dfce15e5483d0183ecf2aeef2e5a5359..31783696a08231226d550be32e72d61224d27cfd 100644 (file)
@@ -364,3 +364,39 @@ runtime_callersRaw (uintptr *pcbuf, int32 m)
   return data.index;
 }
 
+/* runtime_pcInlineCallers returns the inline stack of calls for a PC.
+   This is like runtime_callers, but instead of doing a backtrace,
+   just finds the information for a single PC value.  */
+
+int32 runtime_pcInlineCallers (uintptr, Location *, int32)
+  __asm__ (GOSYM_PREFIX "runtime.pcInlineCallers");
+
+int32
+runtime_pcInlineCallers (uintptr pc, Location *locbuf, int32 m)
+{
+  struct callers_data data;
+  struct backtrace_state *state;
+  int32 i;
+
+  data.locbuf = locbuf;
+  data.skip = 0;
+  data.index = 0;
+  data.max = m;
+  data.keep_thunks = false;
+  data.saw_sigtramp = 0;
+  runtime_xadd (&__go_runtime_in_callers, 1);
+  state = __go_get_backtrace_state ();
+  backtrace_pcinfo (state, pc, callback, error_callback, &data);
+  runtime_xadd (&__go_runtime_in_callers, -1);
+
+  /* Try to use backtrace_syminfo to fill in missing names.  See
+     runtime_callers.  */
+  for (i = 0; i < data.index; ++i)
+    {
+      if (locbuf[i].function.len == 0 && locbuf[i].pc != 0)
+       backtrace_syminfo (state, locbuf[i].pc, __go_syminfo_fnname_callback,
+                          error_callback, &locbuf[i].function);
+    }
+
+  return data.index;
+}
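
On the Go side this machinery is consumed through the standard runtime API: runtime.CallersFrames may expand one raw PC into several Frames when calls were inlined, which is exactly the per-PC information runtime_pcInlineCallers gathers from libbacktrace under gccgo. A standard usage example:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        pcs := make([]uintptr, 16)
        n := runtime.Callers(1, pcs)
        frames := runtime.CallersFrames(pcs[:n])
        for {
            f, more := frames.Next()
            // One pc may yield several frames if calls were inlined.
            fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
            if !more {
                break
            }
        }
    }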