`,
+ // html is disallowed in a pipeline that is in an unquoted attribute context,
+ // even if it is the last command in the pipeline.
+ `predefined escaper "html" disallowed in template`,
+ },
+ {
+ `Hello, {{. | urlquery | html}}!`,
+ // html is allowed since it is the last command in the pipeline, but urlquery is not.
+ `predefined escaper "urlquery" disallowed in template`,
+ },
}
-
for _, test := range tests {
buf := new(bytes.Buffer)
tmpl, err := New("z").Parse(test.input)
@@ -1396,6 +1455,16 @@ func TestEscapeText(t *testing.T) {
`", "mime: expected token after slash"},
- {"bogus/bogus", "mime: unexpected content after media subtype"},
+ {"bogus ;=========", "bogus", "mime: invalid media parameter"},
+ // The following example is from a real email delivered by Gmail (the error is a missing semicolon)
+ // and is here to check the behavior described in #19498.
+ {"application/pdf; x-mac-type=\"3F3F3F3F\"; x-mac-creator=\"3F3F3F3F\" name=\"a.pdf\";",
+ "application/pdf", "mime: invalid media parameter"},
+ {"bogus/", "", "mime: expected token after slash"},
+ {"bogus/bogus", "", "mime: unexpected content after media subtype"},
}
func TestParseMediaTypeBogus(t *testing.T) {
@@ -275,8 +280,11 @@ func TestParseMediaTypeBogus(t *testing.T) {
if params != nil {
t.Errorf("ParseMediaType(%q): got non-nil params on error", tt.in)
}
- if mt != "" {
- t.Errorf("ParseMediaType(%q): got non-empty media type string on error", tt.in)
+ if err != ErrInvalidMediaParameter && mt != "" {
+ t.Errorf("ParseMediaType(%q): got unexpected non-empty media type string", tt.in)
+ }
+ if err == ErrInvalidMediaParameter && mt != tt.mt {
+ t.Errorf("ParseMediaType(%q): in case of invalid parameters: expected type %q, got %q", tt.in, tt.mt, mt)
}
}
}
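A quick illustration of the behavior the test above pins down: on ErrInvalidMediaParameter, ParseMediaType now reports the media type even though the parameters are unusable. This is a hedged, hypothetical caller, not part of the patch:

    package main

    import (
    	"fmt"
    	"mime"
    )

    func main() {
    	// A parameter is missing its separating semicolon, as in the
    	// Gmail-generated header from issue #19498.
    	mt, params, err := mime.ParseMediaType(`application/pdf; x-mac-type="3F3F3F3F" name="a.pdf"`)
    	if err == mime.ErrInvalidMediaParameter {
    		fmt.Println(mt) // "application/pdf": the type itself survives
    	}
    	_ = params // nil whenever err != nil
    }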
diff --git a/libgo/go/mime/multipart/formdata.go b/libgo/go/mime/multipart/formdata.go
index c9e3188c33a..832d0ad6936 100644
--- a/libgo/go/mime/multipart/formdata.go
+++ b/libgo/go/mime/multipart/formdata.go
@@ -13,13 +13,20 @@ import (
"os"
)
+// ErrMessageTooLarge is returned by ReadForm if the message form
+// data is too large to be processed.
+var ErrMessageTooLarge = errors.New("multipart: message too large")
+
// TODO(adg,bradfitz): find a way to unify the DoS-prevention strategy here
// with that of the http package's ParseForm.
// ReadForm parses an entire multipart message whose parts have
// a Content-Disposition of "form-data".
-// It stores up to maxMemory bytes of the file parts in memory
-// and the remainder on disk in temporary files.
+// It stores up to maxMemory bytes + 10MB (reserved for non-file parts)
+// in memory. File parts which can't be stored in memory will be stored on
+// disk in temporary files.
+// It returns ErrMessageTooLarge if the non-file parts can't all be
+// stored in memory.
func (r *Reader) ReadForm(maxMemory int64) (*Form, error) {
return r.readForm(maxMemory)
}
@@ -32,7 +39,8 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
}
}()
- maxValueBytes := int64(10 << 20) // 10 MB is a lot of text.
+ // Reserve an additional 10 MB for non-file parts.
+ maxValueBytes := maxMemory + int64(10<<20)
for {
p, err := r.NextPart()
if err == io.EOF {
@@ -52,13 +60,13 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
if filename == "" {
// value, store as string in memory
- n, err := io.CopyN(&b, p, maxValueBytes)
+ n, err := io.CopyN(&b, p, maxValueBytes+1)
if err != nil && err != io.EOF {
return nil, err
}
maxValueBytes -= n
- if maxValueBytes == 0 {
- return nil, errors.New("multipart: message too large")
+ if maxValueBytes < 0 {
+ return nil, ErrMessageTooLarge
}
form.Value[name] = append(form.Value[name], b.String())
continue
@@ -79,7 +87,7 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
if err != nil {
return nil, err
}
- _, err = io.Copy(file, io.MultiReader(&b, p))
+ size, err := io.Copy(file, io.MultiReader(&b, p))
if cerr := file.Close(); err == nil {
err = cerr
}
@@ -88,9 +96,12 @@ func (r *Reader) readForm(maxMemory int64) (_ *Form, err error) {
return nil, err
}
fh.tmpfile = file.Name()
+ fh.Size = size
} else {
fh.content = b.Bytes()
+ fh.Size = int64(len(fh.content))
maxMemory -= n
+ maxValueBytes -= n
}
form.File[name] = append(form.File[name], fh)
}
@@ -128,6 +139,7 @@ func (f *Form) RemoveAll() error {
type FileHeader struct {
Filename string
Header textproto.MIMEHeader
+ Size int64
content []byte
tmpfile string
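A hedged usage sketch of the semantics the hunks above establish: ReadForm(maxMemory) keeps up to maxMemory bytes of file parts in memory, grants non-file values maxMemory plus a 10 MB reserve, returns ErrMessageTooLarge when the values overrun that reserve, and records each part's length in the new FileHeader.Size field. The handleForm helper is illustrative, not part of the patch:

    package main

    import (
    	"fmt"
    	"mime/multipart"
    )

    func handleForm(r *multipart.Reader) error {
    	form, err := r.ReadForm(32 << 20) // keep up to 32 MB of file parts in memory
    	if err == multipart.ErrMessageTooLarge {
    		// Non-file values overran maxMemory plus the 10 MB reserve.
    		return err
    	}
    	if err != nil {
    		return err
    	}
    	defer form.RemoveAll() // removes any temporary files left on disk
    	for name, fhs := range form.File {
    		for _, fh := range fhs {
    			// Size is filled in whether the part stayed in memory
    			// or spilled to a temporary file.
    			fmt.Printf("%s: %s (%d bytes)\n", name, fh.Filename, fh.Size)
    		}
    	}
    	return nil
    }

    func main() {}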
diff --git a/libgo/go/mime/multipart/formdata_test.go b/libgo/go/mime/multipart/formdata_test.go
index 1deca0b94df..979ae5c4e10 100644
--- a/libgo/go/mime/multipart/formdata_test.go
+++ b/libgo/go/mime/multipart/formdata_test.go
@@ -8,14 +8,12 @@ import (
"bytes"
"io"
"os"
- "regexp"
"strings"
"testing"
)
func TestReadForm(t *testing.T) {
- testBody := regexp.MustCompile("\n").ReplaceAllString(message, "\r\n")
- b := strings.NewReader(testBody)
+ b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1))
r := NewReader(b, boundary)
f, err := r.ReadForm(25)
if err != nil {
@@ -44,6 +42,9 @@ func testFile(t *testing.T, fh *FileHeader, efn, econtent string) File {
if fh.Filename != efn {
t.Errorf("filename = %q, want %q", fh.Filename, efn)
}
+ if fh.Size != int64(len(econtent)) {
+ t.Errorf("size = %d, want %d", fh.Size, len(econtent))
+ }
f, err := fh.Open()
if err != nil {
t.Fatal("opening file:", err)
@@ -124,3 +125,44 @@ func (r *failOnReadAfterErrorReader) Read(p []byte) (n int, err error) {
r.sawErr = err
return
}
+
+// TestReadForm_NonFileMaxMemory asserts that the ReadForm maxMemory limit is applied
+// while processing non-file form data as well as file form data.
+func TestReadForm_NonFileMaxMemory(t *testing.T) {
+ largeTextValue := strings.Repeat("1", (10<<20)+25)
+ message := `--MyBoundary
+Content-Disposition: form-data; name="largetext"
+
+` + largeTextValue + `
+--MyBoundary--
+`
+
+ testBody := strings.Replace(message, "\n", "\r\n", -1)
+ testCases := []struct {
+ name string
+ maxMemory int64
+ err error
+ }{
+ {"smaller", 50, nil},
+ {"exact-fit", 25, nil},
+ {"too-large", 0, ErrMessageTooLarge},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ b := strings.NewReader(testBody)
+ r := NewReader(b, boundary)
+ f, err := r.ReadForm(tc.maxMemory)
+ if err == nil {
+ defer f.RemoveAll()
+ }
+ if tc.err != err {
+ t.Fatalf("ReadForm error - got: %v; expected: %v", tc.err, err)
+ }
+ if err == nil {
+ if g := f.Value["largetext"][0]; g != largeTextValue {
+ t.Errorf("largetext mismatch: got size: %v, expected size: %v", len(g), len(largeTextValue))
+ }
+ }
+ })
+ }
+}
diff --git a/libgo/go/mime/multipart/writer.go b/libgo/go/mime/multipart/writer.go
index f82756d5518..3dd0c8fb136 100644
--- a/libgo/go/mime/multipart/writer.go
+++ b/libgo/go/mime/multipart/writer.go
@@ -41,22 +41,27 @@ func (w *Writer) Boundary() string {
//
// SetBoundary must be called before any parts are created, may only
// contain certain ASCII characters, and must be non-empty and
-// at most 69 bytes long.
+// at most 70 bytes long.
func (w *Writer) SetBoundary(boundary string) error {
if w.lastpart != nil {
return errors.New("mime: SetBoundary called after write")
}
// rfc2046#section-5.1.1
- if len(boundary) < 1 || len(boundary) > 69 {
+ if len(boundary) < 1 || len(boundary) > 70 {
return errors.New("mime: invalid boundary length")
}
- for _, b := range boundary {
+ end := len(boundary) - 1
+ for i, b := range boundary {
if 'A' <= b && b <= 'Z' || 'a' <= b && b <= 'z' || '0' <= b && b <= '9' {
continue
}
switch b {
case '\'', '(', ')', '+', '_', ',', '-', '.', '/', ':', '=', '?':
continue
+ case ' ':
+ if i != end {
+ continue
+ }
}
return errors.New("mime: invalid boundary character")
}
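To make the new boundary rules concrete — RFC 2046 allows up to 70 characters, and a space anywhere except the final position — here is a small sketch against the patched Writer; the boundary strings are arbitrary examples:

    package main

    import (
    	"bytes"
    	"fmt"
    	"mime/multipart"
    	"strings"
    )

    func main() {
    	var buf bytes.Buffer
    	w := multipart.NewWriter(&buf)
    	for _, b := range []string{
    		strings.Repeat("x", 70), // ok: exactly the new 70-byte maximum
    		"simple boundary",       // ok: interior spaces are allowed
    		"trailing ",             // error: a space may not be the last byte
    		strings.Repeat("x", 71), // error: one byte over the limit
    	} {
    		fmt.Printf("%q: %v\n", b, w.SetBoundary(b))
    	}
    }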
diff --git a/libgo/go/mime/multipart/writer_test.go b/libgo/go/mime/multipart/writer_test.go
index 9670c660a4a..8b1bcd68d87 100644
--- a/libgo/go/mime/multipart/writer_test.go
+++ b/libgo/go/mime/multipart/writer_test.go
@@ -80,8 +80,6 @@ func TestWriter(t *testing.T) {
}
func TestWriterSetBoundary(t *testing.T) {
- var b bytes.Buffer
- w := NewWriter(&b)
tests := []struct {
b string
ok bool
@@ -90,12 +88,16 @@ func TestWriterSetBoundary(t *testing.T) {
{"", false},
{"ungültig", false},
{"!", false},
- {strings.Repeat("x", 69), true},
- {strings.Repeat("x", 70), false},
+ {strings.Repeat("x", 70), true},
+ {strings.Repeat("x", 71), false},
{"bad!ascii!", false},
{"my-separator", true},
+ {"with space", true},
+ {"badspace ", false},
}
for i, tt := range tests {
+ var b bytes.Buffer
+ w := NewWriter(&b)
err := w.SetBoundary(tt.b)
got := err == nil
if got != tt.ok {
@@ -105,12 +107,13 @@ func TestWriterSetBoundary(t *testing.T) {
if got != tt.b {
t.Errorf("boundary = %q; want %q", got, tt.b)
}
+ w.Close()
+ wantSub := "\r\n--" + tt.b + "--\r\n"
+ if got := b.String(); !strings.Contains(got, wantSub) {
+ t.Errorf("expected %q in output. got: %q", wantSub, got)
+ }
}
}
- w.Close()
- if got := b.String(); !strings.Contains(got, "\r\n--my-separator--\r\n") {
- t.Errorf("expected my-separator in output. got: %q", got)
- }
}
func TestWriterBoundaryGoroutines(t *testing.T) {
diff --git a/libgo/go/mime/type.go b/libgo/go/mime/type.go
index d369259d8b1..78fc6b6714e 100644
--- a/libgo/go/mime/type.go
+++ b/libgo/go/mime/type.go
@@ -12,24 +12,48 @@ import (
)
var (
- mimeLock sync.RWMutex // guards following 3 maps
- mimeTypes map[string]string // ".Z" => "application/x-compress"
- mimeTypesLower map[string]string // ".z" => "application/x-compress"
+ mimeTypes sync.Map // map[string]string; ".Z" => "application/x-compress"
+ mimeTypesLower sync.Map // map[string]string; ".z" => "application/x-compress"
// extensions maps from MIME type to list of lowercase file
// extensions: "image/jpeg" => [".jpg", ".jpeg"]
- extensions map[string][]string
+ extensionsMu sync.Mutex // Guards stores (but not loads) on extensions.
+ extensions sync.Map // map[string][]string; slice values are append-only.
)
+func clearSyncMap(m *sync.Map) {
+ m.Range(func(k, _ interface{}) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
// setMimeTypes is used by initMime's non-test path, and by tests.
-// The two maps must not be the same, or nil.
func setMimeTypes(lowerExt, mixExt map[string]string) {
- if lowerExt == nil || mixExt == nil {
- panic("nil map")
+ clearSyncMap(&mimeTypes)
+ clearSyncMap(&mimeTypesLower)
+ clearSyncMap(&extensions)
+
+ for k, v := range lowerExt {
+ mimeTypesLower.Store(k, v)
+ }
+ for k, v := range mixExt {
+ mimeTypes.Store(k, v)
+ }
+
+ extensionsMu.Lock()
+ defer extensionsMu.Unlock()
+ for k, v := range lowerExt {
+ justType, _, err := ParseMediaType(v)
+ if err != nil {
+ panic(err)
+ }
+ var exts []string
+ if ei, ok := extensions.Load(justType); ok {
+ exts = ei.([]string)
+ }
+ extensions.Store(justType, append(exts, k))
}
- mimeTypesLower = lowerExt
- mimeTypes = mixExt
- extensions = invert(lowerExt)
}
var builtinTypesLower = map[string]string{
@@ -45,29 +69,6 @@ var builtinTypesLower = map[string]string{
".xml": "text/xml; charset=utf-8",
}
-func clone(m map[string]string) map[string]string {
- m2 := make(map[string]string, len(m))
- for k, v := range m {
- m2[k] = v
- if strings.ToLower(k) != k {
- panic("keys in builtinTypesLower must be lowercase")
- }
- }
- return m2
-}
-
-func invert(m map[string]string) map[string][]string {
- m2 := make(map[string][]string, len(m))
- for k, v := range m {
- justType, _, err := ParseMediaType(v)
- if err != nil {
- panic(err)
- }
- m2[justType] = append(m2[justType], k)
- }
- return m2
-}
-
var once sync.Once // guards initMime
var testInitMime, osInitMime func()
@@ -76,7 +77,7 @@ func initMime() {
if fn := testInitMime; fn != nil {
fn()
} else {
- setMimeTypes(builtinTypesLower, clone(builtinTypesLower))
+ setMimeTypes(builtinTypesLower, builtinTypesLower)
osInitMime()
}
}
@@ -100,12 +101,10 @@ func initMime() {
// Text types have the charset parameter set to "utf-8" by default.
func TypeByExtension(ext string) string {
once.Do(initMime)
- mimeLock.RLock()
- defer mimeLock.RUnlock()
// Case-sensitive lookup.
- if v := mimeTypes[ext]; v != "" {
- return v
+ if v, ok := mimeTypes.Load(ext); ok {
+ return v.(string)
}
// Case-insensitive lookup.
@@ -118,7 +117,9 @@ func TypeByExtension(ext string) string {
c := ext[i]
if c >= utf8RuneSelf {
// Slow path.
- return mimeTypesLower[strings.ToLower(ext)]
+ si, _ := mimeTypesLower.Load(strings.ToLower(ext))
+ s, _ := si.(string)
+ return s
}
if 'A' <= c && c <= 'Z' {
lower = append(lower, c+('a'-'A'))
@@ -126,9 +127,9 @@ func TypeByExtension(ext string) string {
lower = append(lower, c)
}
}
- // The conversion from []byte to string doesn't allocate in
- // a map lookup.
- return mimeTypesLower[string(lower)]
+ si, _ := mimeTypesLower.Load(string(lower))
+ s, _ := si.(string)
+ return s
}
// ExtensionsByType returns the extensions known to be associated with the MIME
@@ -142,13 +143,11 @@ func ExtensionsByType(typ string) ([]string, error) {
}
once.Do(initMime)
- mimeLock.RLock()
- defer mimeLock.RUnlock()
- s, ok := extensions[justType]
+ s, ok := extensions.Load(justType)
if !ok {
return nil, nil
}
- return append([]string{}, s...), nil
+ return append([]string{}, s.([]string)...), nil
}
// AddExtensionType sets the MIME type associated with
@@ -173,15 +172,20 @@ func setExtensionType(extension, mimeType string) error {
}
extLower := strings.ToLower(extension)
- mimeLock.Lock()
- defer mimeLock.Unlock()
- mimeTypes[extension] = mimeType
- mimeTypesLower[extLower] = mimeType
- for _, v := range extensions[justType] {
+ mimeTypes.Store(extension, mimeType)
+ mimeTypesLower.Store(extLower, mimeType)
+
+ extensionsMu.Lock()
+ defer extensionsMu.Unlock()
+ var exts []string
+ if ei, ok := extensions.Load(justType); ok {
+ exts = ei.([]string)
+ }
+ for _, v := range exts {
if v == extLower {
return nil
}
}
- extensions[justType] = append(extensions[justType], extLower)
+ extensions.Store(justType, append(exts, extLower))
return nil
}
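The switch from an RWMutex-guarded map to sync.Map makes the common read paths (TypeByExtension, ExtensionsByType) lock-free; only writers take extensionsMu. A minimal sketch of the access pattern the change optimizes for — rare registrations, hot concurrent lookups:

    package main

    import (
    	"fmt"
    	"mime"
    	"sync"
    )

    func main() {
    	// Writers are rare: register a type once up front.
    	if err := mime.AddExtensionType(".md", "text/markdown; charset=utf-8"); err != nil {
    		panic(err)
    	}
    	// Readers are hot and now contend on no lock at all.
    	var wg sync.WaitGroup
    	for i := 0; i < 8; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			for j := 0; j < 1000; j++ {
    				_ = mime.TypeByExtension(".md")
    			}
    		}()
    	}
    	wg.Wait()
    	fmt.Println(mime.TypeByExtension(".md"))
    }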
diff --git a/libgo/go/mime/type_test.go b/libgo/go/mime/type_test.go
index 48735ef4470..de5c700faaf 100644
--- a/libgo/go/mime/type_test.go
+++ b/libgo/go/mime/type_test.go
@@ -149,3 +149,43 @@ func TestLookupMallocs(t *testing.T) {
t.Errorf("allocs = %v; want 0", n)
}
}
+
+func BenchmarkTypeByExtension(b *testing.B) {
+ initMime()
+ b.ResetTimer()
+
+ for _, ext := range []string{
+ ".html",
+ ".HTML",
+ ".unused",
+ } {
+ b.Run(ext, func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ TypeByExtension(ext)
+ }
+ })
+ })
+ }
+}
+
+func BenchmarkExtensionsByType(b *testing.B) {
+ initMime()
+ b.ResetTimer()
+
+ for _, typ := range []string{
+ "text/html",
+ "text/html; charset=utf-8",
+ "application/octet-stream",
+ } {
+ b.Run(typ, func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if _, err := ExtensionsByType(typ); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ })
+ }
+}
diff --git a/libgo/go/net/cgo_unix.go b/libgo/go/net/cgo_unix.go
index 09cfb2a71ae..0de3ff8bebd 100644
--- a/libgo/go/net/cgo_unix.go
+++ b/libgo/go/net/cgo_unix.go
@@ -220,7 +220,7 @@ func cgoLookupIPCNAME(name string) (addrs []IPAddr, cname string, err error) {
addrs = append(addrs, addr)
case syscall.AF_INET6:
sa := (*syscall.RawSockaddrInet6)(unsafe.Pointer(r.Ai_addr))
- addr := IPAddr{IP: copyIP(sa.Addr[:]), Zone: zoneToString(int(sa.Scope_id))}
+ addr := IPAddr{IP: copyIP(sa.Addr[:]), Zone: zoneCache.name(int(sa.Scope_id))}
addrs = append(addrs, addr)
}
}
@@ -345,7 +345,7 @@ func cgoSockaddr(ip IP, zone string) (*syscall.RawSockaddr, syscall.Socklen_t) {
return cgoSockaddrInet4(ip4), syscall.Socklen_t(syscall.SizeofSockaddrInet4)
}
if ip6 := ip.To16(); ip6 != nil {
- return cgoSockaddrInet6(ip6, zoneToInt(zone)), syscall.Socklen_t(syscall.SizeofSockaddrInet6)
+ return cgoSockaddrInet6(ip6, zoneCache.index(zone)), syscall.Socklen_t(syscall.SizeofSockaddrInet6)
}
return nil, 0
}
diff --git a/libgo/go/net/dial.go b/libgo/go/net/dial.go
index 50bba5a49e4..f8b4aa22742 100644
--- a/libgo/go/net/dial.go
+++ b/libgo/go/net/dial.go
@@ -7,6 +7,7 @@ package net
import (
"context"
"internal/nettrace"
+ "internal/poll"
"time"
)
@@ -22,8 +23,8 @@ type Dialer struct {
//
// The default is no timeout.
//
- // When dialing a name with multiple IP addresses, the timeout
- // may be divided between them.
+ // When using TCP and dialing a host name with multiple IP
+ // addresses, the timeout may be divided between them.
//
// With or without a timeout, the operating system may impose
// its own earlier timeout. For instance, TCP timeouts are
@@ -42,10 +43,11 @@ type Dialer struct {
// If nil, a local address is automatically chosen.
LocalAddr Addr
- // DualStack enables RFC 6555-compliant "Happy Eyeballs" dialing
- // when the network is "tcp" and the destination is a host name
- // with both IPv4 and IPv6 addresses. This allows a client to
- // tolerate networks where one address family is silently broken.
+ // DualStack enables RFC 6555-compliant "Happy Eyeballs"
+ // dialing when the network is "tcp" and the host in the
+ // address parameter resolves to both IPv4 and IPv6 addresses.
+ // This allows a client to tolerate networks where one address
+ // family is silently broken.
DualStack bool
// FallbackDelay specifies the length of time to wait before
@@ -110,7 +112,7 @@ func partialDeadline(now, deadline time.Time, addrsRemaining int) (time.Time, er
}
timeRemaining := deadline.Sub(now)
if timeRemaining <= 0 {
- return time.Time{}, errTimeout
+ return time.Time{}, poll.ErrTimeout
}
// Tentatively allocate equal time to each remaining address.
timeout := timeRemaining / time.Duration(addrsRemaining)
@@ -134,23 +136,26 @@ func (d *Dialer) fallbackDelay() time.Duration {
}
}
-func parseNetwork(ctx context.Context, net string) (afnet string, proto int, err error) {
- i := last(net, ':')
+func parseNetwork(ctx context.Context, network string, needsProto bool) (afnet string, proto int, err error) {
+ i := last(network, ':')
if i < 0 { // no colon
- switch net {
+ switch network {
case "tcp", "tcp4", "tcp6":
case "udp", "udp4", "udp6":
case "ip", "ip4", "ip6":
+ if needsProto {
+ return "", 0, UnknownNetworkError(network)
+ }
case "unix", "unixgram", "unixpacket":
default:
- return "", 0, UnknownNetworkError(net)
+ return "", 0, UnknownNetworkError(network)
}
- return net, 0, nil
+ return network, 0, nil
}
- afnet = net[:i]
+ afnet = network[:i]
switch afnet {
case "ip", "ip4", "ip6":
- protostr := net[i+1:]
+ protostr := network[i+1:]
proto, i, ok := dtoi(protostr)
if !ok || i != len(protostr) {
proto, err = lookupProtocol(ctx, protostr)
@@ -160,14 +165,14 @@ func parseNetwork(ctx context.Context, net string) (afnet string, proto int, err
}
return afnet, proto, nil
}
- return "", 0, UnknownNetworkError(net)
+ return "", 0, UnknownNetworkError(network)
}
// resolveAddrList resolves addr using hint and returns a list of
// addresses. The result contains at least one address when error is
// nil.
func (r *Resolver) resolveAddrList(ctx context.Context, op, network, addr string, hint Addr) (addrList, error) {
- afnet, _, err := parseNetwork(ctx, network)
+ afnet, _, err := parseNetwork(ctx, network, true)
if err != nil {
return nil, err
}
@@ -242,39 +247,60 @@ func (r *Resolver) resolveAddrList(ctx context.Context, op, network, addr string
// (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and
// "unixpacket".
//
-// For TCP and UDP networks, addresses have the form host:port.
-// If host is a literal IPv6 address it must be enclosed
-// in square brackets as in "[::1]:80" or "[ipv6-host%zone]:80".
-// The functions JoinHostPort and SplitHostPort manipulate addresses
-// in this form.
-// If the host is empty, as in ":80", the local system is assumed.
+// For TCP and UDP networks, the address has the form "host:port".
+// The host must be a literal IP address, or a host name that can be
+// resolved to IP addresses.
+// The port must be a literal port number or a service name.
+// If the host is a literal IPv6 address it must be enclosed in square
+// brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80".
+// The zone specifies the scope of the literal IPv6 address as defined
+// in RFC 4007.
+// The functions JoinHostPort and SplitHostPort manipulate a pair of
+// host and port in this form.
+// When using TCP, and the host resolves to multiple IP addresses,
+// Dial will try each IP address in order until one succeeds.
//
// Examples:
-// Dial("tcp", "192.0.2.1:80")
// Dial("tcp", "golang.org:http")
-// Dial("tcp", "[2001:db8::1]:http")
-// Dial("tcp", "[fe80::1%lo0]:80")
+// Dial("tcp", "192.0.2.1:http")
+// Dial("tcp", "198.51.100.1:80")
+// Dial("udp", "[2001:db8::1]:domain")
+// Dial("udp", "[fe80::1%lo0]:53")
// Dial("tcp", ":80")
//
// For IP networks, the network must be "ip", "ip4" or "ip6" followed
-// by a colon and a protocol number or name and the addr must be a
-// literal IP address.
+// by a colon and a literal protocol number or a protocol name, and
+// the address has the form "host". The host must be a literal IP
+// address or a literal IPv6 address with zone.
+// It depends on each operating system how the operating system
+// behaves with a non-well known protocol number such as "0" or "255".
//
// Examples:
// Dial("ip4:1", "192.0.2.1")
// Dial("ip6:ipv6-icmp", "2001:db8::1")
+// Dial("ip6:58", "fe80::1%lo0")
//
-// For Unix networks, the address must be a file system path.
+// For TCP, UDP and IP networks, if the host is empty or a literal
+// unspecified IP address, as in ":80", "0.0.0.0:80" or "[::]:80" for
+// TCP and UDP, "", "0.0.0.0" or "::" for IP, the local system is
+// assumed.
//
-// If the host is resolved to multiple addresses,
-// Dial will try each address in order until one succeeds.
+// For Unix networks, the address must be a file system path.
func Dial(network, address string) (Conn, error) {
var d Dialer
return d.Dial(network, address)
}
// DialTimeout acts like Dial but takes a timeout.
+//
// The timeout includes name resolution, if required.
+// When using TCP, and the host in the address parameter resolves to
+// multiple IP addresses, the timeout is spread over each consecutive
+// dial, such that each is given an appropriate fraction of the time
+// to connect.
+//
+// See func Dial for a description of the network and address
+// parameters.
func DialTimeout(network, address string, timeout time.Duration) (Conn, error) {
d := Dialer{Timeout: timeout}
return d.Dial(network, address)
@@ -537,29 +563,37 @@ func dialSingle(ctx context.Context, dp *dialParam, ra Addr) (c Conn, err error)
return c, nil
}
-// Listen announces on the local network address laddr.
-// The network net must be a stream-oriented network: "tcp", "tcp4",
-// "tcp6", "unix" or "unixpacket".
-// For TCP and UDP, the syntax of laddr is "host:port", like "127.0.0.1:8080".
-// If host is omitted, as in ":8080", Listen listens on all available interfaces
-// instead of just the interface with the given host address.
-// See Dial for more details about address syntax.
+// Listen announces on the local network address.
+//
+// The network must be "tcp", "tcp4", "tcp6", "unix" or "unixpacket".
//
-// Listening on a hostname is not recommended because this creates a socket
-// for at most one of its IP addresses.
-func Listen(net, laddr string) (Listener, error) {
- addrs, err := DefaultResolver.resolveAddrList(context.Background(), "listen", net, laddr, nil)
+// For TCP networks, if the host in the address parameter is empty or
+// a literal unspecified IP address, Listen listens on all available
+// unicast and anycast IP addresses of the local system.
+// To only use IPv4, use network "tcp4".
+// The address can use a host name, but this is not recommended,
+// because it will create a listener for at most one of the host's IP
+// addresses.
+// If the port in the address parameter is empty or "0", as in
+// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
+// The Addr method of Listener can be used to discover the chosen
+// port.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func Listen(network, address string) (Listener, error) {
+ addrs, err := DefaultResolver.resolveAddrList(context.Background(), "listen", network, address, nil)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: nil, Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
}
var l Listener
switch la := addrs.first(isIPv4).(type) {
case *TCPAddr:
- l, err = ListenTCP(net, la)
+ l, err = ListenTCP(network, la)
case *UnixAddr:
- l, err = ListenUnix(net, la)
+ l, err = ListenUnix(network, la)
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: laddr}}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
}
if err != nil {
return nil, err // l is non-nil interface containing nil pointer
@@ -567,31 +601,43 @@ func Listen(net, laddr string) (Listener, error) {
return l, nil
}
-// ListenPacket announces on the local network address laddr.
-// The network net must be a packet-oriented network: "udp", "udp4",
-// "udp6", "ip", "ip4", "ip6" or "unixgram".
-// For TCP and UDP, the syntax of laddr is "host:port", like "127.0.0.1:8080".
-// If host is omitted, as in ":8080", ListenPacket listens on all available interfaces
-// instead of just the interface with the given host address.
-// See Dial for the syntax of laddr.
+// ListenPacket announces on the local network address.
+//
+// The network must be "udp", "udp4", "udp6", "unixgram", or an IP
+// transport. The IP transports are "ip", "ip4", or "ip6" followed by
+// a colon and a literal protocol number or a protocol name, as in
+// "ip:1" or "ip:icmp".
//
-// Listening on a hostname is not recommended because this creates a socket
-// for at most one of its IP addresses.
-func ListenPacket(net, laddr string) (PacketConn, error) {
- addrs, err := DefaultResolver.resolveAddrList(context.Background(), "listen", net, laddr, nil)
+// For UDP and IP networks, if the host in the address parameter is
+// empty or a literal unspecified IP address, ListenPacket listens on
+// all available IP addresses of the local system except multicast IP
+// addresses.
+// To only use IPv4, use network "udp4" or "ip4:proto".
+// The address can use a host name, but this is not recommended,
+// because it will create a listener for at most one of the host's IP
+// addresses.
+// If the port in the address parameter is empty or "0", as in
+// "127.0.0.1:" or "[::1]:0", a port number is automatically chosen.
+// The LocalAddr method of PacketConn can be used to discover the
+// chosen port.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ListenPacket(network, address string) (PacketConn, error) {
+ addrs, err := DefaultResolver.resolveAddrList(context.Background(), "listen", network, address, nil)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: nil, Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: err}
}
var l PacketConn
switch la := addrs.first(isIPv4).(type) {
case *UDPAddr:
- l, err = ListenUDP(net, la)
+ l, err = ListenUDP(network, la)
case *IPAddr:
- l, err = ListenIP(net, la)
+ l, err = ListenIP(network, la)
case *UnixAddr:
- l, err = ListenUnixgram(net, la)
+ l, err = ListenUnixgram(network, la)
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: laddr}}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: la, Err: &AddrError{Err: "unexpected address type", Addr: address}}
}
if err != nil {
return nil, err // l is non-nil interface containing nil pointer
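The rewritten Dial and Listen documentation above is easiest to read against concrete calls. A hedged sketch (golang.org is a placeholder destination; the calls need network access to succeed):

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// "host:port" with a name: Dial resolves golang.org and, for TCP,
    	// tries each returned address in order until one connects.
    	if c, err := net.Dial("tcp", "golang.org:http"); err == nil {
    		c.Close()
    	}

    	// Empty host and port "0": listen on every local address and let
    	// the system pick a free port, recoverable through Addr.
    	ln, err := net.Listen("tcp", ":0")
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	defer ln.Close()
    	fmt.Println("listening on", ln.Addr())
    }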
diff --git a/libgo/go/net/dial_test.go b/libgo/go/net/dial_test.go
index 9919d72ce3b..a892bf1e140 100644
--- a/libgo/go/net/dial_test.go
+++ b/libgo/go/net/dial_test.go
@@ -7,9 +7,9 @@ package net
import (
"bufio"
"context"
+ "internal/poll"
"internal/testenv"
"io"
- "net/internal/socktest"
"runtime"
"sync"
"testing"
@@ -31,7 +31,7 @@ func TestProhibitionaryDialArg(t *testing.T) {
case "plan9":
t.Skipf("not supported on %s", runtime.GOOS)
}
- if !supportsIPv4map {
+ if !supportsIPv4map() {
t.Skip("mapping ipv4 address inside ipv6 address not supported")
}
@@ -72,70 +72,6 @@ func TestDialLocal(t *testing.T) {
c.Close()
}
-func TestDialTimeoutFDLeak(t *testing.T) {
- switch runtime.GOOS {
- case "plan9":
- t.Skipf("%s does not have full support of socktest", runtime.GOOS)
- case "openbsd":
- testenv.SkipFlaky(t, 15157)
- }
-
- const T = 100 * time.Millisecond
-
- switch runtime.GOOS {
- case "plan9", "windows":
- origTestHookDialChannel := testHookDialChannel
- testHookDialChannel = func() { time.Sleep(2 * T) }
- defer func() { testHookDialChannel = origTestHookDialChannel }()
- if runtime.GOOS == "plan9" {
- break
- }
- fallthrough
- default:
- sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) {
- time.Sleep(2 * T)
- return nil, errTimeout
- })
- defer sw.Set(socktest.FilterConnect, nil)
- }
-
- // Avoid tracking open-close jitterbugs between netFD and
- // socket that leads to confusion of information inside
- // socktest.Switch.
- // It may happen when the Dial call bumps against TCP
- // simultaneous open. See selfConnect in tcpsock_posix.go.
- defer func() { sw.Set(socktest.FilterClose, nil) }()
- var mu sync.Mutex
- var attempts int
- sw.Set(socktest.FilterClose, func(so *socktest.Status) (socktest.AfterFilter, error) {
- mu.Lock()
- attempts++
- mu.Unlock()
- return nil, nil
- })
-
- const N = 100
- var wg sync.WaitGroup
- wg.Add(N)
- for i := 0; i < N; i++ {
- go func() {
- defer wg.Done()
- // This dial never starts to send any SYN
- // segment because of above socket filter and
- // test hook.
- c, err := DialTimeout("tcp", "127.0.0.1:0", T)
- if err == nil {
- t.Errorf("unexpectedly established: tcp:%s->%s", c.LocalAddr(), c.RemoteAddr())
- c.Close()
- }
- }()
- }
- wg.Wait()
- if attempts < N {
- t.Errorf("got %d; want >= %d", attempts, N)
- }
-}
-
func TestDialerDualStackFDLeak(t *testing.T) {
switch runtime.GOOS {
case "plan9":
@@ -145,7 +81,7 @@ func TestDialerDualStackFDLeak(t *testing.T) {
case "openbsd":
testenv.SkipFlaky(t, 15157)
}
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -254,7 +190,7 @@ func dialClosedPort() (actual, expected time.Duration) {
func TestDialParallel(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -425,7 +361,7 @@ func lookupSlowFast(ctx context.Context, fn func(context.Context, string) ([]IPA
func TestDialerFallbackDelay(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -491,7 +427,7 @@ func TestDialerFallbackDelay(t *testing.T) {
}
func TestDialParallelSpuriousConnection(t *testing.T) {
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -585,22 +521,22 @@ func TestDialerPartialDeadline(t *testing.T) {
{now, noDeadline, 1, noDeadline, nil},
// Step the clock forward and cross the deadline.
{now.Add(-1 * time.Millisecond), now, 1, now, nil},
- {now.Add(0 * time.Millisecond), now, 1, noDeadline, errTimeout},
- {now.Add(1 * time.Millisecond), now, 1, noDeadline, errTimeout},
+ {now.Add(0 * time.Millisecond), now, 1, noDeadline, poll.ErrTimeout},
+ {now.Add(1 * time.Millisecond), now, 1, noDeadline, poll.ErrTimeout},
}
for i, tt := range testCases {
deadline, err := partialDeadline(tt.now, tt.deadline, tt.addrs)
if err != tt.expectErr {
t.Errorf("#%d: got %v; want %v", i, err, tt.expectErr)
}
- if deadline != tt.expectDeadline {
+ if !deadline.Equal(tt.expectDeadline) {
t.Errorf("#%d: got %v; want %v", i, deadline, tt.expectDeadline)
}
}
}
func TestDialerLocalAddr(t *testing.T) {
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -654,7 +590,7 @@ func TestDialerLocalAddr(t *testing.T) {
{"tcp", "::1", &UnixAddr{}, &AddrError{Err: "some error"}},
}
- if supportsIPv4map {
+ if supportsIPv4map() {
tests = append(tests, test{
"tcp", "127.0.0.1", &TCPAddr{IP: ParseIP("::")}, nil,
})
@@ -714,12 +650,9 @@ func TestDialerLocalAddr(t *testing.T) {
}
func TestDialerDualStack(t *testing.T) {
- // This test is known to be flaky. Don't frighten regular
- // users about it; only fail on the build dashboard.
- if testenv.Builder() == "" {
- testenv.SkipFlaky(t, 13324)
- }
- if !supportsIPv4 || !supportsIPv6 {
+ testenv.SkipFlaky(t, 13324)
+
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -822,7 +755,7 @@ func TestDialCancel(t *testing.T) {
}
blackholeIPPort := JoinHostPort(slowDst4, "1234")
- if !supportsIPv4 {
+ if !supportsIPv4() {
blackholeIPPort = JoinHostPort(slowDst6, "1234")
}
@@ -954,3 +887,24 @@ func TestCancelAfterDial(t *testing.T) {
try()
}
}
+
+// Issue 18806: it should always be possible to net.Dial a
+// net.Listener().Addr().String when the listen address was ":n", even
+// if the machine has halfway configured IPv6 such that it can bind on
+// "::" not connect back to that same address.
+func TestDialListenerAddr(t *testing.T) {
+ if testenv.Builder() == "" {
+ testenv.MustHaveExternalNetwork(t)
+ }
+ ln, err := Listen("tcp", ":0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+ addr := ln.Addr().String()
+ c, err := Dial("tcp", addr)
+ if err != nil {
+ t.Fatalf("for addr %q, dial error: %v", addr, err)
+ }
+ c.Close()
+}
diff --git a/libgo/go/net/dnsclient_unix.go b/libgo/go/net/dnsclient_unix.go
index 0647b9c3052..ff6a4f69dcd 100644
--- a/libgo/go/net/dnsclient_unix.go
+++ b/libgo/go/net/dnsclient_unix.go
@@ -25,13 +25,6 @@ import (
"time"
)
-// A dnsDialer provides dialing suitable for DNS queries.
-type dnsDialer interface {
- dialDNS(ctx context.Context, network, addr string) (dnsConn, error)
-}
-
-var testHookDNSDialer = func() dnsDialer { return &Dialer{} }
-
// A dnsConn represents a DNS transport endpoint.
type dnsConn interface {
io.Closer
@@ -43,14 +36,14 @@ type dnsConn interface {
dnsRoundTrip(query *dnsMsg) (*dnsMsg, error)
}
-func (c *UDPConn) dnsRoundTrip(query *dnsMsg) (*dnsMsg, error) {
- return dnsRoundTripUDP(c, query)
+// dnsPacketConn implements the dnsConn interface for RFC 1035's
+// "UDP usage" transport mechanism. Conn is a packet-oriented connection,
+// such as a *UDPConn.
+type dnsPacketConn struct {
+ Conn
}
-// dnsRoundTripUDP implements the dnsRoundTrip interface for RFC 1035's
-// "UDP usage" transport mechanism. c should be a packet-oriented connection,
-// such as a *UDPConn.
-func dnsRoundTripUDP(c io.ReadWriter, query *dnsMsg) (*dnsMsg, error) {
+func (c *dnsPacketConn) dnsRoundTrip(query *dnsMsg) (*dnsMsg, error) {
b, ok := query.Pack()
if !ok {
return nil, errors.New("cannot marshal DNS message")
@@ -76,14 +69,14 @@ func dnsRoundTripUDP(c io.ReadWriter, query *dnsMsg) (*dnsMsg, error) {
}
}
-func (c *TCPConn) dnsRoundTrip(out *dnsMsg) (*dnsMsg, error) {
- return dnsRoundTripTCP(c, out)
+// dnsStreamConn implements the dnsConn interface for RFC 1035's
+// "TCP usage" transport mechanism. Conn is a stream-oriented connection,
+// such as a *TCPConn.
+type dnsStreamConn struct {
+ Conn
}
-// dnsRoundTripTCP implements the dnsRoundTrip interface for RFC 1035's
-// "TCP usage" transport mechanism. c should be a stream-oriented connection,
-// such as a *TCPConn.
-func dnsRoundTripTCP(c io.ReadWriter, query *dnsMsg) (*dnsMsg, error) {
+func (c *dnsStreamConn) dnsRoundTrip(query *dnsMsg) (*dnsMsg, error) {
b, ok := query.Pack()
if !ok {
return nil, errors.New("cannot marshal DNS message")
@@ -116,33 +109,8 @@ func dnsRoundTripTCP(c io.ReadWriter, query *dnsMsg) (*dnsMsg, error) {
return resp, nil
}
-func (d *Dialer) dialDNS(ctx context.Context, network, server string) (dnsConn, error) {
- switch network {
- case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6":
- default:
- return nil, UnknownNetworkError(network)
- }
- // Calling Dial here is scary -- we have to be sure not to
- // dial a name that will require a DNS lookup, or Dial will
- // call back here to translate it. The DNS config parser has
- // already checked that all the cfg.servers are IP
- // addresses, which Dial will use without a DNS lookup.
- c, err := d.DialContext(ctx, network, server)
- if err != nil {
- return nil, mapErr(err)
- }
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.(*TCPConn), nil
- case "udp", "udp4", "udp6":
- return c.(*UDPConn), nil
- }
- panic("unreachable")
-}
-
// exchange sends a query on the connection and hopes for a response.
-func exchange(ctx context.Context, server, name string, qtype uint16, timeout time.Duration) (*dnsMsg, error) {
- d := testHookDNSDialer()
+func (r *Resolver) exchange(ctx context.Context, server, name string, qtype uint16, timeout time.Duration) (*dnsMsg, error) {
out := dnsMsg{
dnsMsgHdr: dnsMsgHdr{
recursion_desired: true,
@@ -158,7 +126,7 @@ func exchange(ctx context.Context, server, name string, qtype uint16, timeout ti
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(timeout))
defer cancel()
- c, err := d.dialDNS(ctx, network, server)
+ c, err := r.dial(ctx, network, server)
if err != nil {
return nil, err
}
@@ -181,7 +149,7 @@ func exchange(ctx context.Context, server, name string, qtype uint16, timeout ti
// Do a lookup for a single name, which must be rooted
// (otherwise answer will not find the answers).
-func tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16) (string, []dnsRR, error) {
+func (r *Resolver) tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16) (string, []dnsRR, error) {
var lastErr error
serverOffset := cfg.serverOffset()
sLen := uint32(len(cfg.servers))
@@ -190,7 +158,7 @@ func tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16)
for j := uint32(0); j < sLen; j++ {
server := cfg.servers[(serverOffset+j)%sLen]
- msg, err := exchange(ctx, server, name, qtype, cfg.timeout)
+ msg, err := r.exchange(ctx, server, name, qtype, cfg.timeout)
if err != nil {
lastErr = &DNSError{
Err: err.Error(),
@@ -200,6 +168,11 @@ func tryOneName(ctx context.Context, cfg *dnsConfig, name string, qtype uint16)
if nerr, ok := err.(Error); ok && nerr.Timeout() {
lastErr.(*DNSError).IsTimeout = true
}
+ // Set IsTemporary for socket-level errors. Note that this flag
+ // may also be used to indicate a SERVFAIL response.
+ if _, ok := err.(*OpError); ok {
+ lastErr.(*DNSError).IsTemporary = true
+ }
continue
}
// libresolv continues to the next server when it receives
@@ -314,7 +287,7 @@ func (conf *resolverConfig) releaseSema() {
<-conf.ch
}
-func lookup(ctx context.Context, name string, qtype uint16) (cname string, rrs []dnsRR, err error) {
+func (r *Resolver) lookup(ctx context.Context, name string, qtype uint16) (cname string, rrs []dnsRR, err error) {
if !isDomainName(name) {
// We used to use "invalid domain name" as the error,
// but that is a detail of the specific lookup mechanism.
@@ -328,10 +301,15 @@ func lookup(ctx context.Context, name string, qtype uint16) (cname string, rrs [
conf := resolvConf.dnsConfig
resolvConf.mu.RUnlock()
for _, fqdn := range conf.nameList(name) {
- cname, rrs, err = tryOneName(ctx, conf, fqdn, qtype)
+ cname, rrs, err = r.tryOneName(ctx, conf, fqdn, qtype)
if err == nil {
break
}
+ if nerr, ok := err.(Error); ok && nerr.Temporary() && r.StrictErrors {
+ // If we hit a temporary error with StrictErrors enabled,
+ // stop immediately instead of trying more names.
+ break
+ }
}
if err, ok := err.(*DNSError); ok {
// Show original name passed to lookup, not suffixed one.
@@ -432,11 +410,11 @@ func (o hostLookupOrder) String() string {
// Normally we let cgo use the C library resolver instead of
// depending on our lookup code, so that Go and C get the same
// answers.
-func goLookupHost(ctx context.Context, name string) (addrs []string, err error) {
- return goLookupHostOrder(ctx, name, hostLookupFilesDNS)
+func (r *Resolver) goLookupHost(ctx context.Context, name string) (addrs []string, err error) {
+ return r.goLookupHostOrder(ctx, name, hostLookupFilesDNS)
}
-func goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []string, err error) {
+func (r *Resolver) goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []string, err error) {
if order == hostLookupFilesDNS || order == hostLookupFiles {
// Use entries from /etc/hosts if they match.
addrs = lookupStaticHost(name)
@@ -444,7 +422,7 @@ func goLookupHostOrder(ctx context.Context, name string, order hostLookupOrder)
return
}
}
- ips, _, err := goLookupIPCNAMEOrder(ctx, name, order)
+ ips, _, err := r.goLookupIPCNAMEOrder(ctx, name, order)
if err != nil {
return
}
@@ -470,13 +448,13 @@ func goLookupIPFiles(name string) (addrs []IPAddr) {
// goLookupIP is the native Go implementation of LookupIP.
// The libc versions are in cgo_*.go.
-func goLookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) {
+func (r *Resolver) goLookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) {
order := systemConf().hostLookupOrder(host)
- addrs, _, err = goLookupIPCNAMEOrder(ctx, host, order)
+ addrs, _, err = r.goLookupIPCNAMEOrder(ctx, host, order)
return
}
-func goLookupIPCNAMEOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []IPAddr, cname string, err error) {
+func (r *Resolver) goLookupIPCNAMEOrder(ctx context.Context, name string, order hostLookupOrder) (addrs []IPAddr, cname string, err error) {
if order == hostLookupFilesDNS || order == hostLookupFiles {
addrs = goLookupIPFiles(name)
if len(addrs) > 0 || order == hostLookupFiles {
@@ -502,15 +480,20 @@ func goLookupIPCNAMEOrder(ctx context.Context, name string, order hostLookupOrde
for _, fqdn := range conf.nameList(name) {
for _, qtype := range qtypes {
go func(qtype uint16) {
- cname, rrs, err := tryOneName(ctx, conf, fqdn, qtype)
+ cname, rrs, err := r.tryOneName(ctx, conf, fqdn, qtype)
lane <- racer{cname, rrs, err}
}(qtype)
}
+ hitStrictError := false
for range qtypes {
racer := <-lane
if racer.error != nil {
- // Prefer error for original name.
- if lastErr == nil || fqdn == name+"." {
+ if nerr, ok := racer.error.(Error); ok && nerr.Temporary() && r.StrictErrors {
+ // This error will abort the nameList loop.
+ hitStrictError = true
+ lastErr = racer.error
+ } else if lastErr == nil || fqdn == name+"." {
+ // Prefer error for original name.
lastErr = racer.error
}
continue
@@ -520,6 +503,13 @@ func goLookupIPCNAMEOrder(ctx context.Context, name string, order hostLookupOrde
cname = racer.cname
}
}
+ if hitStrictError {
+ // If either family hit an error with StrictErrors enabled,
+ // discard all addresses. This ensures that network flakiness
+ // cannot turn a dualstack hostname into an IPv4- or IPv6-only one.
+ addrs = nil
+ break
+ }
if len(addrs) > 0 {
break
}
@@ -543,9 +533,9 @@ func goLookupIPCNAMEOrder(ctx context.Context, name string, order hostLookupOrde
}
// goLookupCNAME is the native Go (non-cgo) implementation of LookupCNAME.
-func goLookupCNAME(ctx context.Context, host string) (cname string, err error) {
+func (r *Resolver) goLookupCNAME(ctx context.Context, host string) (cname string, err error) {
order := systemConf().hostLookupOrder(host)
- _, cname, err = goLookupIPCNAMEOrder(ctx, host, order)
+ _, cname, err = r.goLookupIPCNAMEOrder(ctx, host, order)
return
}
@@ -554,7 +544,7 @@ func goLookupCNAME(ctx context.Context, host string) (cname string, err error) {
// only if cgoLookupPTR is the stub in cgo_stub.go).
// Normally we let cgo use the C library resolver instead of depending
// on our lookup code, so that Go and C get the same answers.
-func goLookupPTR(ctx context.Context, addr string) ([]string, error) {
+func (r *Resolver) goLookupPTR(ctx context.Context, addr string) ([]string, error) {
names := lookupStaticAddr(addr)
if len(names) > 0 {
return names, nil
@@ -563,7 +553,7 @@ func goLookupPTR(ctx context.Context, addr string) ([]string, error) {
if err != nil {
return nil, err
}
- _, rrs, err := lookup(ctx, arpa, dnsTypePTR)
+ _, rrs, err := r.lookup(ctx, arpa, dnsTypePTR)
if err != nil {
return nil, err
}
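Moving these lookup functions onto *Resolver is what lets the tests below stub the transport: a Resolver with PreferGo forces the pure-Go resolver, StrictErrors makes temporary failures abort the whole lookup instead of degrading it, and Dial substitutes the connection used for DNS queries. A hedged sketch of wiring one up (192.0.2.53 is a documentation address, not a real server):

    package main

    import (
    	"context"
    	"fmt"
    	"net"
    	"time"
    )

    func main() {
    	r := &net.Resolver{
    		PreferGo:     true,
    		StrictErrors: true, // temporary errors fail the lookup outright
    		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
    			// Route every DNS query to one fixed server.
    			d := net.Dialer{Timeout: 2 * time.Second}
    			return d.DialContext(ctx, network, "192.0.2.53:53")
    		},
    	}
    	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    	defer cancel()
    	addrs, err := r.LookupIPAddr(ctx, "www.golang.org")
    	fmt.Println(addrs, err)
    }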
diff --git a/libgo/go/net/dnsclient_unix_test.go b/libgo/go/net/dnsclient_unix_test.go
index c66d2d196b2..94811c96a6b 100644
--- a/libgo/go/net/dnsclient_unix_test.go
+++ b/libgo/go/net/dnsclient_unix_test.go
@@ -8,8 +8,9 @@ package net
import (
"context"
+ "errors"
"fmt"
- "internal/testenv"
+ "internal/poll"
"io/ioutil"
"os"
"path"
@@ -20,9 +21,14 @@ import (
"time"
)
+var goResolver = Resolver{PreferGo: true}
+
// Test address from 192.0.2.0/24 block, reserved by RFC 5737 for documentation.
const TestAddr uint32 = 0xc0000201
+// Test address from 2001:db8::/32 block, reserved by RFC 3849 for documentation.
+var TestAddr6 = [16]byte{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
+
var dnsTransportFallbackTests = []struct {
server string
name string
@@ -37,18 +43,33 @@ var dnsTransportFallbackTests = []struct {
}
func TestDNSTransportFallback(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
-
+ fake := fakeDNSServer{
+ rh: func(n, _ string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ rcode: dnsRcodeSuccess,
+ },
+ question: q.question,
+ }
+ if n == "udp" {
+ r.truncated = true
+ }
+ return r, nil
+ },
+ }
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
for _, tt := range dnsTransportFallbackTests {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- msg, err := exchange(ctx, tt.server, tt.name, tt.qtype, time.Second)
+ msg, err := r.exchange(ctx, tt.server, tt.name, tt.qtype, time.Second)
if err != nil {
t.Error(err)
continue
}
switch msg.rcode {
- case tt.rcode, dnsRcodeServerFailure:
+ case tt.rcode:
default:
t.Errorf("got %v from %v; want %v", msg.rcode, tt.server, tt.rcode)
continue
@@ -78,13 +99,30 @@ var specialDomainNameTests = []struct {
}
func TestSpecialDomainName(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
+ fake := fakeDNSServer{func(_, _ string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ },
+ question: q.question,
+ }
+
+ switch q.question[0].Name {
+ case "example.com.":
+ r.rcode = dnsRcodeSuccess
+ default:
+ r.rcode = dnsRcodeNameError
+ }
+ return r, nil
+ }}
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
server := "8.8.8.8:53"
for _, tt := range specialDomainNameTests {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- msg, err := exchange(ctx, server, tt.name, tt.qtype, 3*time.Second)
+ msg, err := r.exchange(ctx, server, tt.name, tt.qtype, 3*time.Second)
if err != nil {
t.Error(err)
continue
@@ -139,15 +177,40 @@ func TestAvoidDNSName(t *testing.T) {
}
}
+var fakeDNSServerSuccessful = fakeDNSServer{func(_, _ string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ },
+ question: q.question,
+ }
+ if len(q.question) == 1 && q.question[0].Qtype == dnsTypeA {
+ r.answer = []dnsRR{
+ &dnsRR_A{
+ Hdr: dnsRR_Header{
+ Name: q.question[0].Name,
+ Rrtype: dnsTypeA,
+ Class: dnsClassINET,
+ Rdlength: 4,
+ },
+ A: TestAddr,
+ },
+ }
+ }
+ return r, nil
+}}
+
// Issue 13705: don't try to resolve onion addresses, etc
func TestLookupTorOnion(t *testing.T) {
- addrs, err := goLookupIP(context.Background(), "foo.onion")
- if len(addrs) > 0 {
- t.Errorf("unexpected addresses: %v", addrs)
- }
+ r := Resolver{PreferGo: true, Dial: fakeDNSServerSuccessful.DialContext}
+ addrs, err := r.LookupIPAddr(context.Background(), "foo.onion")
if err != nil {
t.Fatalf("lookup = %v; want nil", err)
}
+ if len(addrs) > 0 {
+ t.Errorf("unexpected addresses: %v", addrs)
+ }
}
type resolvConfTest struct {
@@ -237,7 +300,7 @@ var updateResolvConfTests = []struct {
}
func TestUpdateResolvConf(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
+ r := Resolver{PreferGo: true, Dial: fakeDNSServerSuccessful.DialContext}
conf, err := newResolvConfTest()
if err != nil {
@@ -257,7 +320,7 @@ func TestUpdateResolvConf(t *testing.T) {
for j := 0; j < N; j++ {
go func(name string) {
defer wg.Done()
- ips, err := goLookupIP(context.Background(), name)
+ ips, err := r.LookupIPAddr(context.Background(), name)
if err != nil {
t.Error(err)
return
@@ -392,7 +455,60 @@ var goLookupIPWithResolverConfigTests = []struct {
}
func TestGoLookupIPWithResolverConfig(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
+ fake := fakeDNSServer{func(n, s string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ switch s {
+ case "[2001:4860:4860::8888]:53", "8.8.8.8:53":
+ break
+ default:
+ time.Sleep(10 * time.Millisecond)
+ return nil, poll.ErrTimeout
+ }
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ },
+ question: q.question,
+ }
+ for _, question := range q.question {
+ switch question.Qtype {
+ case dnsTypeA:
+ switch question.Name {
+ case "hostname.as112.net.":
+ break
+ case "ipv4.google.com.":
+ r.answer = append(r.answer, &dnsRR_A{
+ Hdr: dnsRR_Header{
+ Name: q.question[0].Name,
+ Rrtype: dnsTypeA,
+ Class: dnsClassINET,
+ Rdlength: 4,
+ },
+ A: TestAddr,
+ })
+ default:
+
+ }
+ case dnsTypeAAAA:
+ switch question.Name {
+ case "hostname.as112.net.":
+ break
+ case "ipv6.google.com.":
+ r.answer = append(r.answer, &dnsRR_AAAA{
+ Hdr: dnsRR_Header{
+ Name: q.question[0].Name,
+ Rrtype: dnsTypeAAAA,
+ Class: dnsClassINET,
+ Rdlength: 16,
+ },
+ AAAA: TestAddr6,
+ })
+ }
+ }
+ }
+ return r, nil
+ }}
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
conf, err := newResolvConfTest()
if err != nil {
@@ -405,14 +521,8 @@ func TestGoLookupIPWithResolverConfig(t *testing.T) {
t.Error(err)
continue
}
- addrs, err := goLookupIP(context.Background(), tt.name)
+ addrs, err := r.LookupIPAddr(context.Background(), tt.name)
if err != nil {
- // This test uses external network connectivity.
- // We need to take care with errors on both
- // DNS message exchange layer and DNS
- // transport layer because goLookupIP may fail
- // when the IP connectivity on node under test
- // gets lost during its run.
if err, ok := err.(*DNSError); !ok || tt.error != nil && (err.Name != tt.error.(*DNSError).Name || err.Server != tt.error.(*DNSError).Server || err.IsTimeout != tt.error.(*DNSError).IsTimeout) {
t.Errorf("got %v; want %v", err, tt.error)
}
@@ -437,7 +547,17 @@ func TestGoLookupIPWithResolverConfig(t *testing.T) {
// Test that goLookupIPOrder falls back to the host file when no DNS servers are available.
func TestGoLookupIPOrderFallbackToFile(t *testing.T) {
- testenv.MustHaveExternalNetwork(t)
+ fake := fakeDNSServer{func(n, s string, q *dnsMsg, tm time.Time) (*dnsMsg, error) {
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ },
+ question: q.question,
+ }
+ return r, nil
+ }}
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
// Add a config that simulates no dns servers being available.
conf, err := newResolvConfTest()
@@ -455,14 +575,14 @@ func TestGoLookupIPOrderFallbackToFile(t *testing.T) {
name := fmt.Sprintf("order %v", order)
// First ensure that we get an error when contacting a non-existent host.
- _, _, err := goLookupIPCNAMEOrder(context.Background(), "notarealhost", order)
+ _, _, err := r.goLookupIPCNAMEOrder(context.Background(), "notarealhost", order)
if err == nil {
t.Errorf("%s: expected error while looking up name not in hosts file", name)
continue
}
// Now check that we get an address when the name appears in the hosts file.
- addrs, _, err := goLookupIPCNAMEOrder(context.Background(), "thor", order) // entry is in "testdata/hosts"
+ addrs, _, err := r.goLookupIPCNAMEOrder(context.Background(), "thor", order) // entry is in "testdata/hosts"
if err != nil {
t.Errorf("%s: expected to successfully lookup host entry", name)
continue
@@ -485,9 +605,6 @@ func TestGoLookupIPOrderFallbackToFile(t *testing.T) {
func TestErrorForOriginalNameWhenSearching(t *testing.T) {
const fqdn = "doesnotexist.domain"
- origTestHookDNSDialer := testHookDNSDialer
- defer func() { testHookDNSDialer = origTestHookDNSDialer }()
-
conf, err := newResolvConfTest()
if err != nil {
t.Fatal(err)
@@ -498,14 +615,13 @@ func TestErrorForOriginalNameWhenSearching(t *testing.T) {
t.Fatal(err)
}
- d := &fakeDNSDialer{}
- testHookDNSDialer = func() dnsDialer { return d }
-
- d.rh = func(s string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ fake := fakeDNSServer{func(_, _ string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
r := &dnsMsg{
dnsMsgHdr: dnsMsgHdr{
- id: q.id,
+ id: q.id,
+ response: true,
},
+ question: q.question,
}
switch q.question[0].Name {
@@ -516,24 +632,31 @@ func TestErrorForOriginalNameWhenSearching(t *testing.T) {
}
return r, nil
- }
+ }}
- _, err = goLookupIP(context.Background(), fqdn)
- if err == nil {
- t.Fatal("expected an error")
+ cases := []struct {
+ strictErrors bool
+ wantErr *DNSError
+ }{
+ {true, &DNSError{Name: fqdn, Err: "server misbehaving", IsTemporary: true}},
+ {false, &DNSError{Name: fqdn, Err: errNoSuchHost.Error()}},
}
+ for _, tt := range cases {
+ r := Resolver{PreferGo: true, StrictErrors: tt.strictErrors, Dial: fake.DialContext}
+ _, err = r.LookupIPAddr(context.Background(), fqdn)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
- want := &DNSError{Name: fqdn, Err: errNoSuchHost.Error()}
- if err, ok := err.(*DNSError); !ok || err.Name != want.Name || err.Err != want.Err {
- t.Errorf("got %v; want %v", err, want)
+ want := tt.wantErr
+ if err, ok := err.(*DNSError); !ok || err.Name != want.Name || err.Err != want.Err || err.IsTemporary != want.IsTemporary {
+ t.Errorf("got %v; want %v", err, want)
+ }
}
}
// Issue 15434. If a name server gives a lame referral, continue to the next.
func TestIgnoreLameReferrals(t *testing.T) {
- origTestHookDNSDialer := testHookDNSDialer
- defer func() { testHookDNSDialer = origTestHookDNSDialer }()
-
conf, err := newResolvConfTest()
if err != nil {
t.Fatal(err)
@@ -545,10 +668,7 @@ func TestIgnoreLameReferrals(t *testing.T) {
t.Fatal(err)
}
- d := &fakeDNSDialer{}
- testHookDNSDialer = func() dnsDialer { return d }
-
- d.rh = func(s string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ fake := fakeDNSServer{func(_, s string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
t.Log(s, q)
r := &dnsMsg{
dnsMsgHdr: dnsMsgHdr{
@@ -576,9 +696,10 @@ func TestIgnoreLameReferrals(t *testing.T) {
}
return r, nil
- }
+ }}
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
- addrs, err := goLookupIP(context.Background(), "www.golang.org")
+ addrs, err := r.LookupIPAddr(context.Background(), "www.golang.org")
if err != nil {
t.Fatal(err)
}
@@ -597,7 +718,7 @@ func BenchmarkGoLookupIP(b *testing.B) {
ctx := context.Background()
for i := 0; i < b.N; i++ {
- goLookupIP(ctx, "www.example.com")
+ goResolver.LookupIPAddr(ctx, "www.example.com")
}
}
@@ -606,7 +727,7 @@ func BenchmarkGoLookupIPNoSuchHost(b *testing.B) {
ctx := context.Background()
for i := 0; i < b.N; i++ {
- goLookupIP(ctx, "some.nonexistent")
+ goResolver.LookupIPAddr(ctx, "some.nonexistent")
}
}
@@ -629,38 +750,70 @@ func BenchmarkGoLookupIPWithBrokenNameServer(b *testing.B) {
ctx := context.Background()
for i := 0; i < b.N; i++ {
- goLookupIP(ctx, "www.example.com")
+ goResolver.LookupIPAddr(ctx, "www.example.com")
}
}
-type fakeDNSDialer struct {
- // reply handler
- rh func(s string, q *dnsMsg, t time.Time) (*dnsMsg, error)
+type fakeDNSServer struct {
+ rh func(n, s string, q *dnsMsg, t time.Time) (*dnsMsg, error)
}
-func (f *fakeDNSDialer) dialDNS(_ context.Context, n, s string) (dnsConn, error) {
- return &fakeDNSConn{f.rh, s, time.Time{}}, nil
+func (server *fakeDNSServer) DialContext(_ context.Context, n, s string) (Conn, error) {
+ return &fakeDNSConn{nil, server, n, s, nil, time.Time{}}, nil
}
type fakeDNSConn struct {
- rh func(s string, q *dnsMsg, t time.Time) (*dnsMsg, error)
- s string
- t time.Time
+ Conn
+ server *fakeDNSServer
+ n string
+ s string
+ q *dnsMsg
+ t time.Time
}
func (f *fakeDNSConn) Close() error {
return nil
}
+func (f *fakeDNSConn) Read(b []byte) (int, error) {
+ resp, err := f.server.rh(f.n, f.s, f.q, f.t)
+ if err != nil {
+ return 0, err
+ }
+
+ bb, ok := resp.Pack()
+ if !ok {
+ return 0, errors.New("cannot marshal DNS message")
+ }
+ if len(b) < len(bb) {
+ return 0, errors.New("read would fragment DNS message")
+ }
+
+ copy(b, bb)
+ return len(bb), nil
+}
+
+func (f *fakeDNSConn) ReadFrom(b []byte) (int, Addr, error) {
+ return 0, nil, nil
+}
+
+func (f *fakeDNSConn) Write(b []byte) (int, error) {
+ f.q = new(dnsMsg)
+ if !f.q.Unpack(b) {
+ return 0, errors.New("cannot unmarshal DNS message")
+ }
+ return len(b), nil
+}
+
+func (f *fakeDNSConn) WriteTo(b []byte, addr Addr) (int, error) {
+ return 0, nil
+}
+
func (f *fakeDNSConn) SetDeadline(t time.Time) error {
f.t = t
return nil
}
-func (f *fakeDNSConn) dnsRoundTrip(q *dnsMsg) (*dnsMsg, error) {
- return f.rh(f.s, q, f.t)
-}
-
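
The fakeDNSServer/fakeDNSConn pair above replaces the old testHookDNSDialer hook: tests now feed a fake transport through the exported Resolver.Dial field instead of a package-private dial function. A minimal sketch of the same injection pattern using only the public net API (failingConn and its always-failing behavior are illustrative; the in-tree fake packs real dnsMsg responses instead), assuming a platform where the pure-Go resolver is available:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "net"
        "time"
    )

    // failingConn is a stand-in net.Conn whose reads always fail,
    // simulating a DNS server that never answers.
    type failingConn struct{}

    func (failingConn) Read([]byte) (int, error)         { return 0, errors.New("no answer") }
    func (failingConn) Write(b []byte) (int, error)      { return len(b), nil }
    func (failingConn) Close() error                     { return nil }
    func (failingConn) LocalAddr() net.Addr              { return nil }
    func (failingConn) RemoteAddr() net.Addr             { return nil }
    func (failingConn) SetDeadline(time.Time) error      { return nil }
    func (failingConn) SetReadDeadline(time.Time) error  { return nil }
    func (failingConn) SetWriteDeadline(time.Time) error { return nil }

    func main() {
        r := &net.Resolver{
            PreferGo: true, // force the pure-Go resolver so Dial is honored
            Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
                return failingConn{}, nil
            },
        }
        _, err := r.LookupHost(context.Background(), "example.com")
        fmt.Println(err) // fails deterministically; no real server is contacted
    }
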
// UDP round-tripper algorithm should ignore invalid DNS responses (issue 13281).
func TestIgnoreDNSForgeries(t *testing.T) {
c, s := Pipe()
@@ -723,7 +876,8 @@ func TestIgnoreDNSForgeries(t *testing.T) {
},
}
- resp, err := dnsRoundTripUDP(c, msg)
+ dc := &dnsPacketConn{c}
+ resp, err := dc.dnsRoundTrip(msg)
if err != nil {
t.Fatalf("dnsRoundTripUDP failed: %v", err)
}
@@ -735,9 +889,6 @@ func TestIgnoreDNSForgeries(t *testing.T) {
// Issue 16865. If a name server times out, continue to the next.
func TestRetryTimeout(t *testing.T) {
- origTestHookDNSDialer := testHookDNSDialer
- defer func() { testHookDNSDialer = origTestHookDNSDialer }()
-
conf, err := newResolvConfTest()
if err != nil {
t.Fatal(err)
@@ -752,12 +903,9 @@ func TestRetryTimeout(t *testing.T) {
t.Fatal(err)
}
- d := &fakeDNSDialer{}
- testHookDNSDialer = func() dnsDialer { return d }
-
var deadline0 time.Time
- d.rh = func(s string, q *dnsMsg, deadline time.Time) (*dnsMsg, error) {
+ fake := fakeDNSServer{func(_, s string, q *dnsMsg, deadline time.Time) (*dnsMsg, error) {
t.Log(s, q, deadline)
if deadline.IsZero() {
@@ -767,17 +915,18 @@ func TestRetryTimeout(t *testing.T) {
if s == "192.0.2.1:53" {
deadline0 = deadline
time.Sleep(10 * time.Millisecond)
- return nil, errTimeout
+ return nil, poll.ErrTimeout
}
- if deadline == deadline0 {
+ if deadline.Equal(deadline0) {
t.Error("deadline didn't change")
}
return mockTXTResponse(q), nil
- }
+ }}
+ r := &Resolver{PreferGo: true, Dial: fake.DialContext}
- _, err = LookupTXT("www.golang.org")
+ _, err = r.LookupTXT(context.Background(), "www.golang.org")
if err != nil {
t.Fatal(err)
}
@@ -796,9 +945,6 @@ func TestRotate(t *testing.T) {
}
func testRotate(t *testing.T, rotate bool, nameservers, wantServers []string) {
- origTestHookDNSDialer := testHookDNSDialer
- defer func() { testHookDNSDialer = origTestHookDNSDialer }()
-
conf, err := newResolvConfTest()
if err != nil {
t.Fatal(err)
@@ -817,18 +963,16 @@ func testRotate(t *testing.T, rotate bool, nameservers, wantServers []string) {
t.Fatal(err)
}
- d := &fakeDNSDialer{}
- testHookDNSDialer = func() dnsDialer { return d }
-
var usedServers []string
- d.rh = func(s string, q *dnsMsg, _ time.Time) (*dnsMsg, error) {
+ fake := fakeDNSServer{func(_, s string, q *dnsMsg, deadline time.Time) (*dnsMsg, error) {
usedServers = append(usedServers, s)
return mockTXTResponse(q), nil
- }
+ }}
+ r := Resolver{PreferGo: true, Dial: fake.DialContext}
// len(nameservers) + 1 to allow rotation to get back to start
for i := 0; i < len(nameservers)+1; i++ {
- if _, err := LookupTXT("www.golang.org"); err != nil {
+ if _, err := r.LookupTXT(context.Background(), "www.golang.org"); err != nil {
t.Fatal(err)
}
}
@@ -860,3 +1004,311 @@ func mockTXTResponse(q *dnsMsg) *dnsMsg {
return r
}
+
+// Issue 17448. With StrictErrors enabled, temporary errors should make
+// LookupIP fail rather than return a partial result.
+func TestStrictErrorsLookupIP(t *testing.T) {
+ conf, err := newResolvConfTest()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conf.teardown()
+
+ confData := []string{
+ "nameserver 192.0.2.53",
+ "search x.golang.org y.golang.org",
+ }
+ if err := conf.writeAndUpdate(confData); err != nil {
+ t.Fatal(err)
+ }
+
+ const name = "test-issue19592"
+ const server = "192.0.2.53:53"
+ const searchX = "test-issue19592.x.golang.org."
+ const searchY = "test-issue19592.y.golang.org."
+ const ip4 = "192.0.2.1"
+ const ip6 = "2001:db8::1"
+
+ type resolveWhichEnum int
+ const (
+ resolveOK resolveWhichEnum = iota
+ resolveOpError
+ resolveServfail
+ resolveTimeout
+ )
+
+ makeTempError := func(err string) error {
+ return &DNSError{
+ Err: err,
+ Name: name,
+ Server: server,
+ IsTemporary: true,
+ }
+ }
+ makeTimeout := func() error {
+ return &DNSError{
+ Err: poll.ErrTimeout.Error(),
+ Name: name,
+ Server: server,
+ IsTimeout: true,
+ }
+ }
+ makeNxDomain := func() error {
+ return &DNSError{
+ Err: errNoSuchHost.Error(),
+ Name: name,
+ Server: server,
+ }
+ }
+
+ cases := []struct {
+ desc string
+ resolveWhich func(quest *dnsQuestion) resolveWhichEnum
+ wantStrictErr error
+ wantLaxErr error
+ wantIPs []string
+ }{
+ {
+ desc: "No errors",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ return resolveOK
+ },
+ wantIPs: []string{ip4, ip6},
+ },
+ {
+ desc: "searchX error fails in strict mode",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchX {
+ return resolveTimeout
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTimeout(),
+ wantIPs: []string{ip4, ip6},
+ },
+ {
+ desc: "searchX IPv4-only timeout fails in strict mode",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchX && quest.Qtype == dnsTypeA {
+ return resolveTimeout
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTimeout(),
+ wantIPs: []string{ip4, ip6},
+ },
+ {
+ desc: "searchX IPv6-only servfail fails in strict mode",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchX && quest.Qtype == dnsTypeAAAA {
+ return resolveServfail
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTempError("server misbehaving"),
+ wantIPs: []string{ip4, ip6},
+ },
+ {
+ desc: "searchY error always fails",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchY {
+ return resolveTimeout
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTimeout(),
+ wantLaxErr: makeNxDomain(), // This one reaches the "test." FQDN.
+ },
+ {
+ desc: "searchY IPv4-only socket error fails in strict mode",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchY && quest.Qtype == dnsTypeA {
+ return resolveOpError
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTempError("write: socket on fire"),
+ wantIPs: []string{ip6},
+ },
+ {
+ desc: "searchY IPv6-only timeout fails in strict mode",
+ resolveWhich: func(quest *dnsQuestion) resolveWhichEnum {
+ if quest.Name == searchY && quest.Qtype == dnsTypeAAAA {
+ return resolveTimeout
+ }
+ return resolveOK
+ },
+ wantStrictErr: makeTimeout(),
+ wantIPs: []string{ip4},
+ },
+ }
+
+ for i, tt := range cases {
+ fake := fakeDNSServer{func(_, s string, q *dnsMsg, deadline time.Time) (*dnsMsg, error) {
+ t.Log(s, q)
+
+ switch tt.resolveWhich(&q.question[0]) {
+ case resolveOK:
+ // Handle below.
+ case resolveOpError:
+ return nil, &OpError{Op: "write", Err: fmt.Errorf("socket on fire")}
+ case resolveServfail:
+ return &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ rcode: dnsRcodeServerFailure,
+ },
+ question: q.question,
+ }, nil
+ case resolveTimeout:
+ return nil, poll.ErrTimeout
+ default:
+ t.Fatal("Impossible resolveWhich")
+ }
+
+ switch q.question[0].Name {
+ case searchX, name + ".":
+ // Return NXDOMAIN to utilize the search list.
+ return &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ rcode: dnsRcodeNameError,
+ },
+ question: q.question,
+ }, nil
+ case searchY:
+ // Return records below.
+ default:
+ return nil, fmt.Errorf("Unexpected Name: %v", q.question[0].Name)
+ }
+
+ r := &dnsMsg{
+ dnsMsgHdr: dnsMsgHdr{
+ id: q.id,
+ response: true,
+ },
+ question: q.question,
+ }
+ switch q.question[0].Qtype {
+ case dnsTypeA:
+ r.answer = []dnsRR{
+ &dnsRR_A{
+ Hdr: dnsRR_Header{
+ Name: q.question[0].Name,
+ Rrtype: dnsTypeA,
+ Class: dnsClassINET,
+ Rdlength: 4,
+ },
+ A: TestAddr,
+ },
+ }
+ case dnsTypeAAAA:
+ r.answer = []dnsRR{
+ &dnsRR_AAAA{
+ Hdr: dnsRR_Header{
+ Name: q.question[0].Name,
+ Rrtype: dnsTypeAAAA,
+ Class: dnsClassINET,
+ Rdlength: 16,
+ },
+ AAAA: TestAddr6,
+ },
+ }
+ default:
+ return nil, fmt.Errorf("Unexpected Qtype: %v", q.question[0].Qtype)
+ }
+ return r, nil
+ }}
+
+ for _, strict := range []bool{true, false} {
+ r := Resolver{PreferGo: true, StrictErrors: strict, Dial: fake.DialContext}
+ ips, err := r.LookupIPAddr(context.Background(), name)
+
+ var wantErr error
+ if strict {
+ wantErr = tt.wantStrictErr
+ } else {
+ wantErr = tt.wantLaxErr
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Errorf("#%d (%s) strict=%v: got err %#v; want %#v", i, tt.desc, strict, err, wantErr)
+ }
+
+ gotIPs := map[string]struct{}{}
+ for _, ip := range ips {
+ gotIPs[ip.String()] = struct{}{}
+ }
+ wantIPs := map[string]struct{}{}
+ if wantErr == nil {
+ for _, ip := range tt.wantIPs {
+ wantIPs[ip] = struct{}{}
+ }
+ }
+ if !reflect.DeepEqual(gotIPs, wantIPs) {
+ t.Errorf("#%d (%s) strict=%v: got ips %v; want %v", i, tt.desc, strict, gotIPs, wantIPs)
+ }
+ }
+ }
+}
+
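
Across the cases above, the strict/lax difference reduces to how a temporary failure during the search-list walk is treated: lax mode skips the suffix as if it had returned NXDOMAIN (risking the partial or wrong answers the table checks for), while strict mode aborts. A compact sketch of that assumed rule, not the resolver's actual code:

    package dnssketch

    import "net"

    // walkSearch queries each candidate FQDN in order, aborting on
    // temporary failures only when strict is set.
    func walkSearch(names []string, query func(string) ([]string, error), strict bool) ([]string, error) {
        var lastErr error
        for _, name := range names {
            addrs, err := query(name)
            if err == nil {
                return addrs, nil
            }
            lastErr = err
            if ne, ok := err.(net.Error); strict && ok && (ne.Temporary() || ne.Timeout()) {
                return nil, err // the failure may be hiding real records
            }
            // Lax mode: fall through and try the next suffix.
        }
        return nil, lastErr
    }
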
+// Issue 17448. With StrictErrors enabled, temporary errors should make
+// LookupTXT stop walking the search list.
+func TestStrictErrorsLookupTXT(t *testing.T) {
+ conf, err := newResolvConfTest()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conf.teardown()
+
+ confData := []string{
+ "nameserver 192.0.2.53",
+ "search x.golang.org y.golang.org",
+ }
+ if err := conf.writeAndUpdate(confData); err != nil {
+ t.Fatal(err)
+ }
+
+ const name = "test"
+ const server = "192.0.2.53:53"
+ const searchX = "test.x.golang.org."
+ const searchY = "test.y.golang.org."
+ const txt = "Hello World"
+
+ fake := fakeDNSServer{func(_, s string, q *dnsMsg, deadline time.Time) (*dnsMsg, error) {
+ t.Log(s, q)
+
+ switch q.question[0].Name {
+ case searchX:
+ return nil, poll.ErrTimeout
+ case searchY:
+ return mockTXTResponse(q), nil
+ default:
+ return nil, fmt.Errorf("Unexpected Name: %v", q.question[0].Name)
+ }
+ }}
+
+ for _, strict := range []bool{true, false} {
+ r := Resolver{StrictErrors: strict, Dial: fake.DialContext}
+ _, rrs, err := r.lookup(context.Background(), name, dnsTypeTXT)
+ var wantErr error
+ var wantRRs int
+ if strict {
+ wantErr = &DNSError{
+ Err: poll.ErrTimeout.Error(),
+ Name: name,
+ Server: server,
+ IsTimeout: true,
+ }
+ } else {
+ wantRRs = 1
+ }
+ if !reflect.DeepEqual(err, wantErr) {
+ t.Errorf("strict=%v: got err %#v; want %#v", strict, err, wantErr)
+ }
+ if len(rrs) != wantRRs {
+ t.Errorf("strict=%v: got %v; want %v", strict, len(rrs), wantRRs)
+ }
+ }
+}
diff --git a/libgo/go/net/error_posix.go b/libgo/go/net/error_posix.go
new file mode 100644
index 00000000000..dd9754c841a
--- /dev/null
+++ b/libgo/go/net/error_posix.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
+
+package net
+
+import (
+ "os"
+ "syscall"
+)
+
+// wrapSyscallError takes an error and a syscall name. If the error is
+// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
+func wrapSyscallError(name string, err error) error {
+ if _, ok := err.(syscall.Errno); ok {
+ err = os.NewSyscallError(name, err)
+ }
+ return err
+}
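
A standalone illustration of how the new helper composes with raw syscalls; the second call shows that non-Errno errors, including nil, pass through unchanged (the wrapper body is copied from above so the snippet runs on its own):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func wrapSyscallError(name string, err error) error {
        if _, ok := err.(syscall.Errno); ok {
            err = os.NewSyscallError(name, err)
        }
        return err
    }

    func main() {
        fmt.Println(wrapSyscallError("shutdown", syscall.ENOTCONN))
        // "shutdown: socket is not connected" (exact text is platform-dependent)
        fmt.Println(wrapSyscallError("shutdown", nil))
        // <nil>: not a syscall.Errno, so it is returned as-is
    }
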
diff --git a/libgo/go/net/error_test.go b/libgo/go/net/error_test.go
index c23da49fad9..9791e6fe4de 100644
--- a/libgo/go/net/error_test.go
+++ b/libgo/go/net/error_test.go
@@ -7,11 +7,13 @@ package net
import (
"context"
"fmt"
+ "internal/poll"
"io"
"io/ioutil"
"net/internal/socktest"
"os"
"runtime"
+ "strings"
"testing"
"time"
)
@@ -87,7 +89,7 @@ second:
return nil
}
switch err := nestedErr.(type) {
- case *AddrError, addrinfoErrno, *DNSError, InvalidAddrError, *ParseError, *timeoutError, UnknownNetworkError:
+ case *AddrError, addrinfoErrno, *DNSError, InvalidAddrError, *ParseError, *poll.TimeoutError, UnknownNetworkError:
return nil
case *os.SyscallError:
nestedErr = err.Err
@@ -97,7 +99,7 @@ second:
goto third
}
switch nestedErr {
- case errCanceled, errClosing, errMissingAddress, errNoSuitableAddress,
+ case errCanceled, poll.ErrNetClosing, errMissingAddress, errNoSuitableAddress,
context.DeadlineExceeded, context.Canceled:
return nil
}
@@ -213,7 +215,7 @@ func TestDialAddrError(t *testing.T) {
case "nacl", "plan9":
t.Skipf("not supported on %s", runtime.GOOS)
}
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -432,7 +434,7 @@ second:
goto third
}
switch nestedErr {
- case errClosing, errTimeout:
+ case poll.ErrNetClosing, poll.ErrTimeout:
return nil
}
return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr)
@@ -467,14 +469,14 @@ second:
return nil
}
switch err := nestedErr.(type) {
- case *AddrError, addrinfoErrno, *DNSError, InvalidAddrError, *ParseError, *timeoutError, UnknownNetworkError:
+ case *AddrError, addrinfoErrno, *DNSError, InvalidAddrError, *ParseError, *poll.TimeoutError, UnknownNetworkError:
return nil
case *os.SyscallError:
nestedErr = err.Err
goto third
}
switch nestedErr {
- case errCanceled, errClosing, errMissingAddress, errTimeout, ErrWriteToConnected, io.ErrUnexpectedEOF:
+ case errCanceled, poll.ErrNetClosing, errMissingAddress, poll.ErrTimeout, ErrWriteToConnected, io.ErrUnexpectedEOF:
return nil
}
return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr)
@@ -489,11 +491,21 @@ third:
// parseCloseError parses nestedErr and reports whether it is a valid
// error value from Close functions.
// It returns nil when nestedErr is valid.
-func parseCloseError(nestedErr error) error {
+func parseCloseError(nestedErr error, isShutdown bool) error {
if nestedErr == nil {
return nil
}
+ // Because historically we have not exported the error that we
+ // return for an operation on a closed network connection,
+ // there are programs that test for the exact error string.
+ // Verify that string here so that we don't break those
+ // programs unexpectedly. See issues #4373 and #19252.
+ want := "use of closed network connection"
+ if !isShutdown && !strings.Contains(nestedErr.Error(), want) {
+ return fmt.Errorf("error string %q does not contain expected string %q", nestedErr, want)
+ }
+
switch err := nestedErr.(type) {
case *OpError:
if err := err.isValid(); err != nil {
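
The string check added above encodes a de-facto compatibility promise: the sentinel error is unexported, so existing programs grep the error text. A runnable demonstration of the pattern those programs rely on:

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    func main() {
        ln, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            panic(err)
        }
        ln.Close()
        _, err = ln.Accept() // guaranteed to fail: listener already closed
        fmt.Println(strings.Contains(err.Error(), "use of closed network connection"))
        // true, per issues #4373 and #19252
    }
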
@@ -517,7 +529,7 @@ second:
goto third
}
switch nestedErr {
- case errClosing:
+ case poll.ErrNetClosing:
return nil
}
return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr)
@@ -547,23 +559,23 @@ func TestCloseError(t *testing.T) {
for i := 0; i < 3; i++ {
err = c.(*TCPConn).CloseRead()
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, true); perr != nil {
t.Errorf("#%d: %v", i, perr)
}
}
for i := 0; i < 3; i++ {
err = c.(*TCPConn).CloseWrite()
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, true); perr != nil {
t.Errorf("#%d: %v", i, perr)
}
}
for i := 0; i < 3; i++ {
err = c.Close()
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Errorf("#%d: %v", i, perr)
}
err = ln.Close()
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Errorf("#%d: %v", i, perr)
}
}
@@ -576,7 +588,7 @@ func TestCloseError(t *testing.T) {
for i := 0; i < 3; i++ {
err = pc.Close()
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Errorf("#%d: %v", i, perr)
}
}
@@ -613,7 +625,7 @@ second:
goto third
}
switch nestedErr {
- case errClosing, errTimeout:
+ case poll.ErrNetClosing, poll.ErrTimeout:
return nil
}
return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr)
@@ -692,7 +704,7 @@ second:
goto third
}
switch nestedErr {
- case errClosing:
+ case poll.ErrNetClosing:
return nil
}
return fmt.Errorf("unexpected type on 2nd nested level: %T", nestedErr)
diff --git a/libgo/go/net/external_test.go b/libgo/go/net/external_test.go
index e18b547cac9..38788efc3d3 100644
--- a/libgo/go/net/external_test.go
+++ b/libgo/go/net/external_test.go
@@ -15,7 +15,7 @@ import (
func TestResolveGoogle(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv4 || !supportsIPv6 || !*testIPv4 || !*testIPv6 {
+ if !supportsIPv4() || !supportsIPv6() || !*testIPv4 || !*testIPv6 {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -62,7 +62,7 @@ var dialGoogleTests = []struct {
func TestDialGoogle(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv4 || !supportsIPv6 || !*testIPv4 || !*testIPv6 {
+ if !supportsIPv4() || !supportsIPv6() || !*testIPv4 || !*testIPv6 {
t.Skip("both IPv4 and IPv6 are required")
}
diff --git a/libgo/go/net/fd_io_plan9.go b/libgo/go/net/fd_io_plan9.go
deleted file mode 100644
index 76da0c546cf..00000000000
--- a/libgo/go/net/fd_io_plan9.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "os"
- "runtime"
- "sync"
- "syscall"
-)
-
-// asyncIO implements asynchronous cancelable I/O.
-// An asyncIO represents a single asynchronous Read or Write
-// operation. The result is returned on the result channel.
-// The undergoing I/O system call can either complete or be
-// interrupted by a note.
-type asyncIO struct {
- res chan result
-
- // mu guards the pid field.
- mu sync.Mutex
-
- // pid holds the process id of
- // the process running the IO operation.
- pid int
-}
-
-// result is the return value of a Read or Write operation.
-type result struct {
- n int
- err error
-}
-
-// newAsyncIO returns a new asyncIO that performs an I/O
-// operation by calling fn, which must do one and only one
-// interruptible system call.
-func newAsyncIO(fn func([]byte) (int, error), b []byte) *asyncIO {
- aio := &asyncIO{
- res: make(chan result, 0),
- }
- aio.mu.Lock()
- go func() {
- // Lock the current goroutine to its process
- // and store the pid in io so that Cancel can
- // interrupt it. We ignore the "hangup" signal,
- // so the signal does not take down the entire
- // Go runtime.
- runtime.LockOSThread()
- runtime_ignoreHangup()
- aio.pid = os.Getpid()
- aio.mu.Unlock()
-
- n, err := fn(b)
-
- aio.mu.Lock()
- aio.pid = -1
- runtime_unignoreHangup()
- aio.mu.Unlock()
-
- aio.res <- result{n, err}
- }()
- return aio
-}
-
-var hangupNote os.Signal = syscall.Note("hangup")
-
-// Cancel interrupts the I/O operation, causing
-// the Wait function to return.
-func (aio *asyncIO) Cancel() {
- aio.mu.Lock()
- defer aio.mu.Unlock()
- if aio.pid == -1 {
- return
- }
- proc, err := os.FindProcess(aio.pid)
- if err != nil {
- return
- }
- proc.Signal(hangupNote)
-}
-
-// Wait for the I/O operation to complete.
-func (aio *asyncIO) Wait() (int, error) {
- res := <-aio.res
- return res.n, res.err
-}
-
-// The following functions, provided by the runtime, are used to
-// ignore and unignore the "hangup" signal received by the process.
-func runtime_ignoreHangup()
-func runtime_unignoreHangup()
diff --git a/libgo/go/net/fd_mutex.go b/libgo/go/net/fd_mutex.go
deleted file mode 100644
index 4591fd1cac8..00000000000
--- a/libgo/go/net/fd_mutex.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import "sync/atomic"
-
-// fdMutex is a specialized synchronization primitive that manages
-// lifetime of an fd and serializes access to Read, Write and Close
-// methods on netFD.
-type fdMutex struct {
- state uint64
- rsema uint32
- wsema uint32
-}
-
-// fdMutex.state is organized as follows:
-// 1 bit - whether netFD is closed, if set all subsequent lock operations will fail.
-// 1 bit - lock for read operations.
-// 1 bit - lock for write operations.
-// 20 bits - total number of references (read+write+misc).
-// 20 bits - number of outstanding read waiters.
-// 20 bits - number of outstanding write waiters.
-const (
- mutexClosed = 1 << 0
- mutexRLock = 1 << 1
- mutexWLock = 1 << 2
- mutexRef = 1 << 3
- mutexRefMask = (1<<20 - 1) << 3
- mutexRWait = 1 << 23
- mutexRMask = (1<<20 - 1) << 23
- mutexWWait = 1 << 43
- mutexWMask = (1<<20 - 1) << 43
-)
-
-// Read operations must do rwlock(true)/rwunlock(true).
-//
-// Write operations must do rwlock(false)/rwunlock(false).
-//
-// Misc operations must do incref/decref.
-// Misc operations include functions like setsockopt and setDeadline.
-// They need to use incref/decref to ensure that they operate on the
-// correct fd in presence of a concurrent close call (otherwise fd can
-// be closed under their feet).
-//
-// Close operations must do increfAndClose/decref.
-
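
The same bit layout survives the move into internal/poll, so a worked decode is still useful. Constants are copied from the block above; the state value is fabricated for illustration:

    package main

    import "fmt"

    const (
        mutexClosed  = 1 << 0
        mutexRLock   = 1 << 1
        mutexRef     = 1 << 3
        mutexRefMask = (1<<20 - 1) << 3
        mutexRWait   = 1 << 23
        mutexRMask   = (1<<20 - 1) << 23
    )

    func main() {
        // One reader holding the lock, three references, two blocked readers.
        state := uint64(mutexRLock + 3*mutexRef + 2*mutexRWait)
        fmt.Println("closed:", state&mutexClosed != 0)       // false
        fmt.Println("refs:", (state&mutexRefMask)>>3)        // 3
        fmt.Println("read waiters:", (state&mutexRMask)>>23) // 2
    }
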
-// incref adds a reference to mu.
-// It reports whether mu is available for reading or writing.
-func (mu *fdMutex) incref() bool {
- for {
- old := atomic.LoadUint64(&mu.state)
- if old&mutexClosed != 0 {
- return false
- }
- new := old + mutexRef
- if new&mutexRefMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- if atomic.CompareAndSwapUint64(&mu.state, old, new) {
- return true
- }
- }
-}
-
-// increfAndClose sets the state of mu to closed.
-// It reports whether there is no remaining reference.
-func (mu *fdMutex) increfAndClose() bool {
- for {
- old := atomic.LoadUint64(&mu.state)
- if old&mutexClosed != 0 {
- return false
- }
- // Mark as closed and acquire a reference.
- new := (old | mutexClosed) + mutexRef
- if new&mutexRefMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- // Remove all read and write waiters.
- new &^= mutexRMask | mutexWMask
- if atomic.CompareAndSwapUint64(&mu.state, old, new) {
- // Wake all read and write waiters,
- // they will observe closed flag after wakeup.
- for old&mutexRMask != 0 {
- old -= mutexRWait
- runtime_Semrelease(&mu.rsema)
- }
- for old&mutexWMask != 0 {
- old -= mutexWWait
- runtime_Semrelease(&mu.wsema)
- }
- return true
- }
- }
-}
-
-// decref removes a reference from mu.
-// It reports whether there is no remaining reference.
-func (mu *fdMutex) decref() bool {
- for {
- old := atomic.LoadUint64(&mu.state)
- if old&mutexRefMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- new := old - mutexRef
- if atomic.CompareAndSwapUint64(&mu.state, old, new) {
- return new&(mutexClosed|mutexRefMask) == mutexClosed
- }
- }
-}
-
-// lock adds a reference to mu and locks mu.
-// It reports whether mu is available for reading or writing.
-func (mu *fdMutex) rwlock(read bool) bool {
- var mutexBit, mutexWait, mutexMask uint64
- var mutexSema *uint32
- if read {
- mutexBit = mutexRLock
- mutexWait = mutexRWait
- mutexMask = mutexRMask
- mutexSema = &mu.rsema
- } else {
- mutexBit = mutexWLock
- mutexWait = mutexWWait
- mutexMask = mutexWMask
- mutexSema = &mu.wsema
- }
- for {
- old := atomic.LoadUint64(&mu.state)
- if old&mutexClosed != 0 {
- return false
- }
- var new uint64
- if old&mutexBit == 0 {
- // Lock is free, acquire it.
- new = (old | mutexBit) + mutexRef
- if new&mutexRefMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- } else {
- // Wait for lock.
- new = old + mutexWait
- if new&mutexMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- }
- if atomic.CompareAndSwapUint64(&mu.state, old, new) {
- if old&mutexBit == 0 {
- return true
- }
- runtime_Semacquire(mutexSema)
- // The signaller has subtracted mutexWait.
- }
- }
-}
-
-// unlock removes a reference from mu and unlocks mu.
-// It reports whether there is no remaining reference.
-func (mu *fdMutex) rwunlock(read bool) bool {
- var mutexBit, mutexWait, mutexMask uint64
- var mutexSema *uint32
- if read {
- mutexBit = mutexRLock
- mutexWait = mutexRWait
- mutexMask = mutexRMask
- mutexSema = &mu.rsema
- } else {
- mutexBit = mutexWLock
- mutexWait = mutexWWait
- mutexMask = mutexWMask
- mutexSema = &mu.wsema
- }
- for {
- old := atomic.LoadUint64(&mu.state)
- if old&mutexBit == 0 || old&mutexRefMask == 0 {
- panic("net: inconsistent fdMutex")
- }
- // Drop lock, drop reference and wake read waiter if present.
- new := (old &^ mutexBit) - mutexRef
- if old&mutexMask != 0 {
- new -= mutexWait
- }
- if atomic.CompareAndSwapUint64(&mu.state, old, new) {
- if old&mutexMask != 0 {
- runtime_Semrelease(mutexSema)
- }
- return new&(mutexClosed|mutexRefMask) == mutexClosed
- }
- }
-}
-
-// Implemented in runtime package.
-func runtime_Semacquire(sema *uint32)
-func runtime_Semrelease(sema *uint32)
-
-// incref adds a reference to fd.
-// It returns an error when fd cannot be used.
-func (fd *netFD) incref() error {
- if !fd.fdmu.incref() {
- return errClosing
- }
- return nil
-}
-
-// decref removes a reference from fd.
-// It also closes fd when the state of fd is set to closed and there
-// is no remaining reference.
-func (fd *netFD) decref() {
- if fd.fdmu.decref() {
- fd.destroy()
- }
-}
-
-// readLock adds a reference to fd and locks fd for reading.
-// It returns an error when fd cannot be used for reading.
-func (fd *netFD) readLock() error {
- if !fd.fdmu.rwlock(true) {
- return errClosing
- }
- return nil
-}
-
-// readUnlock removes a reference from fd and unlocks fd for reading.
-// It also closes fd when the state of fd is set to closed and there
-// is no remaining reference.
-func (fd *netFD) readUnlock() {
- if fd.fdmu.rwunlock(true) {
- fd.destroy()
- }
-}
-
-// writeLock adds a reference to fd and locks fd for writing.
-// It returns an error when fd cannot be used for writing.
-func (fd *netFD) writeLock() error {
- if !fd.fdmu.rwlock(false) {
- return errClosing
- }
- return nil
-}
-
-// writeUnlock removes a reference from fd and unlocks fd for writing.
-// It also closes fd when the state of fd is set to closed and there
-// is no remaining reference.
-func (fd *netFD) writeUnlock() {
- if fd.fdmu.rwunlock(false) {
- fd.destroy()
- }
-}
diff --git a/libgo/go/net/fd_mutex_test.go b/libgo/go/net/fd_mutex_test.go
deleted file mode 100644
index 3542c70f9d1..00000000000
--- a/libgo/go/net/fd_mutex_test.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "math/rand"
- "runtime"
- "testing"
- "time"
-)
-
-func TestMutexLock(t *testing.T) {
- var mu fdMutex
-
- if !mu.incref() {
- t.Fatal("broken")
- }
- if mu.decref() {
- t.Fatal("broken")
- }
-
- if !mu.rwlock(true) {
- t.Fatal("broken")
- }
- if mu.rwunlock(true) {
- t.Fatal("broken")
- }
-
- if !mu.rwlock(false) {
- t.Fatal("broken")
- }
- if mu.rwunlock(false) {
- t.Fatal("broken")
- }
-}
-
-func TestMutexClose(t *testing.T) {
- var mu fdMutex
- if !mu.increfAndClose() {
- t.Fatal("broken")
- }
-
- if mu.incref() {
- t.Fatal("broken")
- }
- if mu.rwlock(true) {
- t.Fatal("broken")
- }
- if mu.rwlock(false) {
- t.Fatal("broken")
- }
- if mu.increfAndClose() {
- t.Fatal("broken")
- }
-}
-
-func TestMutexCloseUnblock(t *testing.T) {
- c := make(chan bool)
- var mu fdMutex
- mu.rwlock(true)
- for i := 0; i < 4; i++ {
- go func() {
- if mu.rwlock(true) {
- t.Error("broken")
- return
- }
- c <- true
- }()
- }
- // Concurrent goroutines must not be able to read lock the mutex.
- time.Sleep(time.Millisecond)
- select {
- case <-c:
- t.Fatal("broken")
- default:
- }
- mu.increfAndClose() // Must unblock the readers.
- for i := 0; i < 4; i++ {
- select {
- case <-c:
- case <-time.After(10 * time.Second):
- t.Fatal("broken")
- }
- }
- if mu.decref() {
- t.Fatal("broken")
- }
- if !mu.rwunlock(true) {
- t.Fatal("broken")
- }
-}
-
-func TestMutexPanic(t *testing.T) {
- ensurePanics := func(f func()) {
- defer func() {
- if recover() == nil {
- t.Fatal("does not panic")
- }
- }()
- f()
- }
-
- var mu fdMutex
- ensurePanics(func() { mu.decref() })
- ensurePanics(func() { mu.rwunlock(true) })
- ensurePanics(func() { mu.rwunlock(false) })
-
- ensurePanics(func() { mu.incref(); mu.decref(); mu.decref() })
- ensurePanics(func() { mu.rwlock(true); mu.rwunlock(true); mu.rwunlock(true) })
- ensurePanics(func() { mu.rwlock(false); mu.rwunlock(false); mu.rwunlock(false) })
-
- // ensure that it's still not broken
- mu.incref()
- mu.decref()
- mu.rwlock(true)
- mu.rwunlock(true)
- mu.rwlock(false)
- mu.rwunlock(false)
-}
-
-func TestMutexStress(t *testing.T) {
- P := 8
- N := int(1e6)
- if testing.Short() {
- P = 4
- N = 1e4
- }
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
- done := make(chan bool)
- var mu fdMutex
- var readState [2]uint64
- var writeState [2]uint64
- for p := 0; p < P; p++ {
- go func() {
- r := rand.New(rand.NewSource(rand.Int63()))
- for i := 0; i < N; i++ {
- switch r.Intn(3) {
- case 0:
- if !mu.incref() {
- t.Error("broken")
- return
- }
- if mu.decref() {
- t.Error("broken")
- return
- }
- case 1:
- if !mu.rwlock(true) {
- t.Error("broken")
- return
- }
- // Ensure that it provides mutual exclusion for readers.
- if readState[0] != readState[1] {
- t.Error("broken")
- return
- }
- readState[0]++
- readState[1]++
- if mu.rwunlock(true) {
- t.Error("broken")
- return
- }
- case 2:
- if !mu.rwlock(false) {
- t.Error("broken")
- return
- }
- // Ensure that it provides mutual exclusion for writers.
- if writeState[0] != writeState[1] {
- t.Error("broken")
- return
- }
- writeState[0]++
- writeState[1]++
- if mu.rwunlock(false) {
- t.Error("broken")
- return
- }
- }
- }
- done <- true
- }()
- }
- for p := 0; p < P; p++ {
- <-done
- }
- if !mu.increfAndClose() {
- t.Fatal("broken")
- }
- if !mu.decref() {
- t.Fatal("broken")
- }
-}
diff --git a/libgo/go/net/fd_plan9.go b/libgo/go/net/fd_plan9.go
index 300d8c4543e..46ee5d97400 100644
--- a/libgo/go/net/fd_plan9.go
+++ b/libgo/go/net/fd_plan9.go
@@ -5,23 +5,15 @@
package net
import (
+ "internal/poll"
"io"
"os"
- "sync/atomic"
"syscall"
- "time"
)
-type atomicBool int32
-
-func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
-func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
-func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
-
// Network file descriptor.
type netFD struct {
- // locking/lifetime of sysfd + serialize access to Read and Write methods
- fdmu fdMutex
+ pfd poll.FD
// immutable until Close
net string
@@ -30,26 +22,12 @@ type netFD struct {
listen, ctl, data *os.File
laddr, raddr Addr
isStream bool
-
- // deadlines
- raio *asyncIO
- waio *asyncIO
- rtimer *time.Timer
- wtimer *time.Timer
- rtimedout atomicBool // set true when read deadline has been reached
- wtimedout atomicBool // set true when write deadline has been reached
}
-var (
- netdir string // default network
-)
-
-func sysInit() {
- netdir = "/net"
-}
+var netdir = "/net" // default network
func newFD(net, name string, listen, ctl, data *os.File, laddr, raddr Addr) (*netFD, error) {
- return &netFD{
+ ret := &netFD{
net: net,
n: name,
dir: netdir + "/" + net + "/" + name,
@@ -57,7 +35,9 @@ func newFD(net, name string, listen, ctl, data *os.File, laddr, raddr Addr) (*ne
ctl: ctl, data: data,
laddr: laddr,
raddr: raddr,
- }, nil
+ }
+ ret.pfd.Destroy = ret.destroy
+ return ret, nil
}
func (fd *netFD) init() error {
@@ -99,28 +79,10 @@ func (fd *netFD) destroy() {
}
func (fd *netFD) Read(b []byte) (n int, err error) {
- if fd.rtimedout.isSet() {
- return 0, errTimeout
- }
if !fd.ok() || fd.data == nil {
return 0, syscall.EINVAL
}
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- if len(b) == 0 {
- return 0, nil
- }
- fd.raio = newAsyncIO(fd.data.Read, b)
- n, err = fd.raio.Wait()
- fd.raio = nil
- if isHangup(err) {
- err = io.EOF
- }
- if isInterrupted(err) {
- err = errTimeout
- }
+ n, err = fd.pfd.Read(fd.data.Read, b)
if fd.net == "udp" && err == io.EOF {
n = 0
err = nil
@@ -129,23 +91,10 @@ func (fd *netFD) Read(b []byte) (n int, err error) {
}
func (fd *netFD) Write(b []byte) (n int, err error) {
- if fd.wtimedout.isSet() {
- return 0, errTimeout
- }
if !fd.ok() || fd.data == nil {
return 0, syscall.EINVAL
}
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- fd.waio = newAsyncIO(fd.data.Write, b)
- n, err = fd.waio.Wait()
- fd.waio = nil
- if isInterrupted(err) {
- err = errTimeout
- }
- return
+ return fd.pfd.Write(fd.data.Write, b)
}
func (fd *netFD) closeRead() error {
@@ -163,8 +112,8 @@ func (fd *netFD) closeWrite() error {
}
func (fd *netFD) Close() error {
- if !fd.fdmu.increfAndClose() {
- return errClosing
+ if err := fd.pfd.Close(); err != nil {
+ return err
}
if !fd.ok() {
return syscall.EINVAL
@@ -216,77 +165,6 @@ func (fd *netFD) file(f *os.File, s string) (*os.File, error) {
return os.NewFile(uintptr(dfd), s), nil
}
-func (fd *netFD) setDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r'+'w')
-}
-
-func (fd *netFD) setReadDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r')
-}
-
-func (fd *netFD) setWriteDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'w')
-}
-
-func setDeadlineImpl(fd *netFD, t time.Time, mode int) error {
- d := t.Sub(time.Now())
- if mode == 'r' || mode == 'r'+'w' {
- fd.rtimedout.setFalse()
- }
- if mode == 'w' || mode == 'r'+'w' {
- fd.wtimedout.setFalse()
- }
- if t.IsZero() || d < 0 {
- // Stop timer
- if mode == 'r' || mode == 'r'+'w' {
- if fd.rtimer != nil {
- fd.rtimer.Stop()
- }
- fd.rtimer = nil
- }
- if mode == 'w' || mode == 'r'+'w' {
- if fd.wtimer != nil {
- fd.wtimer.Stop()
- }
- fd.wtimer = nil
- }
- } else {
- // Interrupt I/O operation once timer has expired
- if mode == 'r' || mode == 'r'+'w' {
- fd.rtimer = time.AfterFunc(d, func() {
- fd.rtimedout.setTrue()
- if fd.raio != nil {
- fd.raio.Cancel()
- }
- })
- }
- if mode == 'w' || mode == 'r'+'w' {
- fd.wtimer = time.AfterFunc(d, func() {
- fd.wtimedout.setTrue()
- if fd.waio != nil {
- fd.waio.Cancel()
- }
- })
- }
- }
- if !t.IsZero() && d < 0 {
- // Interrupt current I/O operation
- if mode == 'r' || mode == 'r'+'w' {
- fd.rtimedout.setTrue()
- if fd.raio != nil {
- fd.raio.Cancel()
- }
- }
- if mode == 'w' || mode == 'r'+'w' {
- fd.wtimedout.setTrue()
- if fd.waio != nil {
- fd.waio.Cancel()
- }
- }
- }
- return nil
-}
-
func setReadBuffer(fd *netFD, bytes int) error {
return syscall.EPLAN9
}
@@ -294,11 +172,3 @@ func setReadBuffer(fd *netFD, bytes int) error {
func setWriteBuffer(fd *netFD, bytes int) error {
return syscall.EPLAN9
}
-
-func isHangup(err error) bool {
- return err != nil && stringsHasSuffix(err.Error(), "Hangup")
-}
-
-func isInterrupted(err error) bool {
- return err != nil && stringsHasSuffix(err.Error(), "interrupted")
-}
diff --git a/libgo/go/net/fd_poll_nacl.go b/libgo/go/net/fd_poll_nacl.go
deleted file mode 100644
index 83987602a58..00000000000
--- a/libgo/go/net/fd_poll_nacl.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "runtime"
- "syscall"
- "time"
-)
-
-type pollDesc struct {
- fd *netFD
- closing bool
-}
-
-func (pd *pollDesc) init(fd *netFD) error { pd.fd = fd; return nil }
-
-func (pd *pollDesc) close() {}
-
-func (pd *pollDesc) evict() {
- pd.closing = true
- if pd.fd != nil {
- syscall.StopIO(pd.fd.sysfd)
- runtime.KeepAlive(pd.fd)
- }
-}
-
-func (pd *pollDesc) prepare(mode int) error {
- if pd.closing {
- return errClosing
- }
- return nil
-}
-
-func (pd *pollDesc) prepareRead() error { return pd.prepare('r') }
-
-func (pd *pollDesc) prepareWrite() error { return pd.prepare('w') }
-
-func (pd *pollDesc) wait(mode int) error {
- if pd.closing {
- return errClosing
- }
- return errTimeout
-}
-
-func (pd *pollDesc) waitRead() error { return pd.wait('r') }
-
-func (pd *pollDesc) waitWrite() error { return pd.wait('w') }
-
-func (pd *pollDesc) waitCanceled(mode int) {}
-
-func (pd *pollDesc) waitCanceledRead() {}
-
-func (pd *pollDesc) waitCanceledWrite() {}
-
-func (fd *netFD) setDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r'+'w')
-}
-
-func (fd *netFD) setReadDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r')
-}
-
-func (fd *netFD) setWriteDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'w')
-}
-
-func setDeadlineImpl(fd *netFD, t time.Time, mode int) error {
- d := t.UnixNano()
- if t.IsZero() {
- d = 0
- }
- if err := fd.incref(); err != nil {
- return err
- }
- switch mode {
- case 'r':
- syscall.SetReadDeadline(fd.sysfd, d)
- case 'w':
- syscall.SetWriteDeadline(fd.sysfd, d)
- case 'r' + 'w':
- syscall.SetReadDeadline(fd.sysfd, d)
- syscall.SetWriteDeadline(fd.sysfd, d)
- }
- fd.decref()
- return nil
-}
diff --git a/libgo/go/net/fd_poll_runtime.go b/libgo/go/net/fd_poll_runtime.go
deleted file mode 100644
index 4ea92cb1f87..00000000000
--- a/libgo/go/net/fd_poll_runtime.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build aix darwin dragonfly freebsd linux netbsd openbsd windows solaris
-
-package net
-
-import (
- "runtime"
- "sync"
- "syscall"
- "time"
-)
-
-// runtimeNano returns the current value of the runtime clock in nanoseconds.
-func runtimeNano() int64
-
-func runtime_pollServerInit()
-func runtime_pollOpen(fd uintptr) (uintptr, int)
-func runtime_pollClose(ctx uintptr)
-func runtime_pollWait(ctx uintptr, mode int) int
-func runtime_pollWaitCanceled(ctx uintptr, mode int) int
-func runtime_pollReset(ctx uintptr, mode int) int
-func runtime_pollSetDeadline(ctx uintptr, d int64, mode int)
-func runtime_pollUnblock(ctx uintptr)
-
-type pollDesc struct {
- runtimeCtx uintptr
-}
-
-var serverInit sync.Once
-
-func (pd *pollDesc) init(fd *netFD) error {
- serverInit.Do(runtime_pollServerInit)
- ctx, errno := runtime_pollOpen(uintptr(fd.sysfd))
- runtime.KeepAlive(fd)
- if errno != 0 {
- return syscall.Errno(errno)
- }
- pd.runtimeCtx = ctx
- return nil
-}
-
-func (pd *pollDesc) close() {
- if pd.runtimeCtx == 0 {
- return
- }
- runtime_pollClose(pd.runtimeCtx)
- pd.runtimeCtx = 0
-}
-
-// Evict evicts fd from the pending list, unblocking any I/O running on fd.
-func (pd *pollDesc) evict() {
- if pd.runtimeCtx == 0 {
- return
- }
- runtime_pollUnblock(pd.runtimeCtx)
-}
-
-func (pd *pollDesc) prepare(mode int) error {
- res := runtime_pollReset(pd.runtimeCtx, mode)
- return convertErr(res)
-}
-
-func (pd *pollDesc) prepareRead() error {
- return pd.prepare('r')
-}
-
-func (pd *pollDesc) prepareWrite() error {
- return pd.prepare('w')
-}
-
-func (pd *pollDesc) wait(mode int) error {
- res := runtime_pollWait(pd.runtimeCtx, mode)
- return convertErr(res)
-}
-
-func (pd *pollDesc) waitRead() error {
- return pd.wait('r')
-}
-
-func (pd *pollDesc) waitWrite() error {
- return pd.wait('w')
-}
-
-func (pd *pollDesc) waitCanceled(mode int) {
- runtime_pollWaitCanceled(pd.runtimeCtx, mode)
-}
-
-func (pd *pollDesc) waitCanceledRead() {
- pd.waitCanceled('r')
-}
-
-func (pd *pollDesc) waitCanceledWrite() {
- pd.waitCanceled('w')
-}
-
-func convertErr(res int) error {
- switch res {
- case 0:
- return nil
- case 1:
- return errClosing
- case 2:
- return errTimeout
- }
- println("unreachable: ", res)
- panic("unreachable")
-}
-
-func (fd *netFD) setDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r'+'w')
-}
-
-func (fd *netFD) setReadDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'r')
-}
-
-func (fd *netFD) setWriteDeadline(t time.Time) error {
- return setDeadlineImpl(fd, t, 'w')
-}
-
-func setDeadlineImpl(fd *netFD, t time.Time, mode int) error {
- diff := int64(time.Until(t))
- d := runtimeNano() + diff
- if d <= 0 && diff > 0 {
- // If the user has a deadline in the future, but the delay calculation
- // overflows, then set the deadline to the maximum possible value.
- d = 1<<63 - 1
- }
- if t.IsZero() {
- d = 0
- }
- if err := fd.incref(); err != nil {
- return err
- }
- runtime_pollSetDeadline(fd.pd.runtimeCtx, d, mode)
- fd.decref()
- return nil
-}
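
The overflow guard in the removed setDeadlineImpl ("If the user has a deadline in the future, but the delay calculation overflows...") is easy to miss. A standalone illustration of the wraparound it defends against, with fabricated values:

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    func main() {
        now := int64(math.MaxInt64 - 100)    // pretend runtimeNano() reading
        diff := int64(200 * time.Nanosecond) // user deadline 200ns from now
        d := now + diff                      // wraps: d is now negative
        if d <= 0 && diff > 0 {
            d = 1<<63 - 1 // clamp to the maximum representable deadline
        }
        fmt.Println(d == math.MaxInt64) // true
    }
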
diff --git a/libgo/go/net/fd_posix.go b/libgo/go/net/fd_posix.go
deleted file mode 100644
index 72304796e4d..00000000000
--- a/libgo/go/net/fd_posix.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package net
-
-import (
- "io"
- "syscall"
-)
-
-// eofError returns io.EOF when fd is available for reading end of
-// file.
-func (fd *netFD) eofError(n int, err error) error {
- if n == 0 && err == nil && fd.sotype != syscall.SOCK_DGRAM && fd.sotype != syscall.SOCK_RAW {
- return io.EOF
- }
- return err
-}
diff --git a/libgo/go/net/fd_posix_test.go b/libgo/go/net/fd_posix_test.go
deleted file mode 100644
index 85711ef1b70..00000000000
--- a/libgo/go/net/fd_posix_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package net
-
-import (
- "io"
- "syscall"
- "testing"
-)
-
-var eofErrorTests = []struct {
- n int
- err error
- fd *netFD
- expected error
-}{
- {100, nil, &netFD{sotype: syscall.SOCK_STREAM}, nil},
- {100, io.EOF, &netFD{sotype: syscall.SOCK_STREAM}, io.EOF},
- {100, errClosing, &netFD{sotype: syscall.SOCK_STREAM}, errClosing},
- {0, nil, &netFD{sotype: syscall.SOCK_STREAM}, io.EOF},
- {0, io.EOF, &netFD{sotype: syscall.SOCK_STREAM}, io.EOF},
- {0, errClosing, &netFD{sotype: syscall.SOCK_STREAM}, errClosing},
-
- {100, nil, &netFD{sotype: syscall.SOCK_DGRAM}, nil},
- {100, io.EOF, &netFD{sotype: syscall.SOCK_DGRAM}, io.EOF},
- {100, errClosing, &netFD{sotype: syscall.SOCK_DGRAM}, errClosing},
- {0, nil, &netFD{sotype: syscall.SOCK_DGRAM}, nil},
- {0, io.EOF, &netFD{sotype: syscall.SOCK_DGRAM}, io.EOF},
- {0, errClosing, &netFD{sotype: syscall.SOCK_DGRAM}, errClosing},
-
- {100, nil, &netFD{sotype: syscall.SOCK_SEQPACKET}, nil},
- {100, io.EOF, &netFD{sotype: syscall.SOCK_SEQPACKET}, io.EOF},
- {100, errClosing, &netFD{sotype: syscall.SOCK_SEQPACKET}, errClosing},
- {0, nil, &netFD{sotype: syscall.SOCK_SEQPACKET}, io.EOF},
- {0, io.EOF, &netFD{sotype: syscall.SOCK_SEQPACKET}, io.EOF},
- {0, errClosing, &netFD{sotype: syscall.SOCK_SEQPACKET}, errClosing},
-
- {100, nil, &netFD{sotype: syscall.SOCK_RAW}, nil},
- {100, io.EOF, &netFD{sotype: syscall.SOCK_RAW}, io.EOF},
- {100, errClosing, &netFD{sotype: syscall.SOCK_RAW}, errClosing},
- {0, nil, &netFD{sotype: syscall.SOCK_RAW}, nil},
- {0, io.EOF, &netFD{sotype: syscall.SOCK_RAW}, io.EOF},
- {0, errClosing, &netFD{sotype: syscall.SOCK_RAW}, errClosing},
-}
-
-func TestEOFError(t *testing.T) {
- for _, tt := range eofErrorTests {
- actual := tt.fd.eofError(tt.n, tt.err)
- if actual != tt.expected {
- t.Errorf("eofError(%v, %v, %v): expected %v, actual %v", tt.n, tt.err, tt.fd.sotype, tt.expected, actual)
- }
- }
-}
diff --git a/libgo/go/net/fd_unix.go b/libgo/go/net/fd_unix.go
index b6ee05976dd..e5afd1ae0ae 100644
--- a/libgo/go/net/fd_unix.go
+++ b/libgo/go/net/fd_unix.go
@@ -8,7 +8,7 @@ package net
import (
"context"
- "io"
+ "internal/poll"
"os"
"runtime"
"sync/atomic"
@@ -17,38 +17,33 @@ import (
// Network file descriptor.
type netFD struct {
- // locking/lifetime of sysfd + serialize access to Read and Write methods
- fdmu fdMutex
+ pfd poll.FD
// immutable until Close
- sysfd int
family int
sotype int
- isStream bool
isConnected bool
net string
laddr Addr
raddr Addr
-
- // writev cache.
- iovecs *[]syscall.Iovec
-
- // wait server
- pd pollDesc
-}
-
-func sysInit() {
}
func newFD(sysfd, family, sotype int, net string) (*netFD, error) {
- return &netFD{sysfd: sysfd, family: family, sotype: sotype, net: net, isStream: sotype == syscall.SOCK_STREAM}, nil
+ ret := &netFD{
+ pfd: poll.FD{
+ Sysfd: sysfd,
+ IsStream: sotype == syscall.SOCK_STREAM,
+ ZeroReadIsEOF: sotype != syscall.SOCK_DGRAM && sotype != syscall.SOCK_RAW,
+ },
+ family: family,
+ sotype: sotype,
+ net: net,
+ }
+ return ret, nil
}
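
ZeroReadIsEOF moves the old per-netFD eofError policy (deleted from fd_posix.go above) into poll.FD: a successful zero-byte read means end-of-file on a stream socket but is a legitimate empty packet on SOCK_DGRAM and SOCK_RAW. The rule, restated as a standalone function assumed to match the poll.FD behavior:

    package sketch

    import "io"

    // eofError mirrors the behavior selected by ZeroReadIsEOF: only
    // stream-like descriptors turn a successful 0-byte read into io.EOF.
    func eofError(n int, err error, zeroReadIsEOF bool) error {
        if n == 0 && err == nil && zeroReadIsEOF {
            return io.EOF
        }
        return err
    }
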
func (fd *netFD) init() error {
- if err := fd.pd.init(fd); err != nil {
- return err
- }
- return nil
+ return fd.pfd.Init(fd.net, true)
}
func (fd *netFD) setAddr(laddr, raddr Addr) {
@@ -68,22 +63,23 @@ func (fd *netFD) name() string {
return fd.net + ":" + ls + "->" + rs
}
-func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret error) {
+func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (rsa syscall.Sockaddr, ret error) {
// Do not need to call fd.writeLock here,
// because fd is not yet accessible to user,
// so no concurrent operations are possible.
- switch err := connectFunc(fd.sysfd, ra); err {
+ switch err := connectFunc(fd.pfd.Sysfd, ra); err {
case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
case nil, syscall.EISCONN:
select {
case <-ctx.Done():
- return mapErr(ctx.Err())
+ return nil, mapErr(ctx.Err())
default:
}
- if err := fd.init(); err != nil {
- return err
+ if err := fd.pfd.Init(fd.net, true); err != nil {
+ return nil, err
}
- return nil
+ runtime.KeepAlive(fd)
+ return nil, nil
case syscall.EINVAL:
// On Solaris we can see EINVAL if the socket has
// already been accepted and closed by the server.
@@ -91,18 +87,18 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret erro
// the socket will see EOF. For details and a test
// case in C see https://golang.org/issue/6828.
if runtime.GOOS == "solaris" {
- return nil
+ return nil, nil
}
fallthrough
default:
- return os.NewSyscallError("connect", err)
+ return nil, os.NewSyscallError("connect", err)
}
- if err := fd.init(); err != nil {
- return err
+ if err := fd.pfd.Init(fd.net, true); err != nil {
+ return nil, err
}
if deadline, _ := ctx.Deadline(); !deadline.IsZero() {
- fd.setWriteDeadline(deadline)
- defer fd.setWriteDeadline(noDeadline)
+ fd.pfd.SetWriteDeadline(deadline)
+ defer fd.pfd.SetWriteDeadline(noDeadline)
}
// Start the "interrupter" goroutine, if this context might be canceled.
@@ -119,7 +115,7 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret erro
defer func() {
close(done)
if ctxErr := <-interruptRes; ctxErr != nil && ret == nil {
- // The interrupter goroutine called setWriteDeadline,
+ // The interrupter goroutine called SetWriteDeadline,
// but the connect code below had returned from
// waitWrite already and did a successful connect (ret
// == nil). Because we've now poisoned the connection
@@ -135,7 +131,7 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret erro
// Force the runtime's poller to immediately give up
// waiting for writability, unblocking waitWrite
// below.
- fd.setWriteDeadline(aLongTimeAgo)
+ fd.pfd.SetWriteDeadline(aLongTimeAgo)
testHookCanceledDial()
interruptRes <- ctx.Err()
case <-done:
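
This interrupter goroutine is a reusable pattern: unblock an in-flight socket operation by moving its deadline into the past. A self-contained variant for any net.Conn (readWithContext is a hypothetical helper; aLongTimeAgo mirrors the unexported variable used above):

    package sketch

    import (
        "context"
        "net"
        "time"
    )

    var aLongTimeAgo = time.Unix(1, 0) // any instant safely in the past

    // readWithContext cancels a blocking Read when ctx is done by
    // slamming the read deadline into the past, the same trick the
    // connect path above plays with SetWriteDeadline.
    func readWithContext(ctx context.Context, c net.Conn, p []byte) (int, error) {
        done := make(chan struct{})
        interrupted := make(chan error, 1)
        go func() {
            select {
            case <-ctx.Done():
                c.SetReadDeadline(aLongTimeAgo) // forces Read to time out
                interrupted <- ctx.Err()
            case <-done:
                interrupted <- nil
            }
        }()
        n, err := c.Read(p)
        close(done)
        if ctxErr := <-interrupted; ctxErr != nil && err != nil {
            err = ctxErr // surface the cancellation, not the timeout
        }
        return n, err
    }
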
@@ -153,66 +149,45 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (ret erro
// SO_ERROR socket option to see if the connection
// succeeded or failed. See issue 7474 for further
// details.
- if err := fd.pd.waitWrite(); err != nil {
+ if err := fd.pfd.WaitWrite(); err != nil {
select {
case <-ctx.Done():
- return mapErr(ctx.Err())
+ return nil, mapErr(ctx.Err())
default:
}
- return err
+ return nil, err
}
- nerr, err := getsockoptIntFunc(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
+ nerr, err := getsockoptIntFunc(fd.pfd.Sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
if err != nil {
- return os.NewSyscallError("getsockopt", err)
+ return nil, os.NewSyscallError("getsockopt", err)
}
switch err := syscall.Errno(nerr); err {
case syscall.EINPROGRESS, syscall.EALREADY, syscall.EINTR:
- case syscall.Errno(0), syscall.EISCONN:
- if runtime.GOOS != "darwin" {
- return nil
- }
- // See golang.org/issue/14548.
- // On Darwin, multiple connect system calls on
- // a non-blocking socket never harm SO_ERROR.
- switch err := connectFunc(fd.sysfd, ra); err {
- case nil, syscall.EISCONN:
- return nil
+ case syscall.EISCONN:
+ return nil, nil
+ case syscall.Errno(0):
+ // The runtime poller can wake us up spuriously;
+ // see issues 14548 and 19289. Check that we are
+ // really connected; if not, wait again.
+ if rsa, err := syscall.Getpeername(fd.pfd.Sysfd); err == nil {
+ return rsa, nil
}
default:
- return os.NewSyscallError("getsockopt", err)
+ return nil, os.NewSyscallError("getsockopt", err)
}
+ runtime.KeepAlive(fd)
}
}
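
The Getpeername probe above is the only reliable way to tell a completed connect from a spurious poller wakeup once SO_ERROR reads as 0. A Unix-only sketch of the same probe as a free function (connectedYet is a hypothetical name):

    package main

    import (
        "fmt"
        "syscall"
    )

    // connectedYet reports whether the socket has really finished
    // connecting; ENOTCONN here would indicate a spurious wakeup.
    func connectedYet(sysfd int) bool {
        _, err := syscall.Getpeername(sysfd)
        return err == nil
    }

    func main() {
        fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
        if err != nil {
            panic(err)
        }
        fmt.Println(connectedYet(fds[0])) // true: a socketpair is born connected
    }
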
-func (fd *netFD) destroy() {
- // Poller may want to unregister fd in readiness notification mechanism,
- // so this must be executed before closeFunc.
- fd.pd.close()
- closeFunc(fd.sysfd)
- fd.sysfd = -1
- runtime.SetFinalizer(fd, nil)
-}
-
func (fd *netFD) Close() error {
- if !fd.fdmu.increfAndClose() {
- return errClosing
- }
- // Unblock any I/O. Once it all unblocks and returns,
- // so that it cannot be referring to fd.sysfd anymore,
- // the final decref will close fd.sysfd. This should happen
- // fairly quickly, since all the I/O is non-blocking, and any
- // attempts to block in the pollDesc will return errClosing.
- fd.pd.evict()
- fd.decref()
- return nil
+ runtime.SetFinalizer(fd, nil)
+ return fd.pfd.Close()
}
func (fd *netFD) shutdown(how int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("shutdown", syscall.Shutdown(fd.sysfd, how))
+ err := fd.pfd.Shutdown(how)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("shutdown", err)
}
func (fd *netFD) closeRead() error {
@@ -224,233 +199,59 @@ func (fd *netFD) closeWrite() error {
}
func (fd *netFD) Read(p []byte) (n int, err error) {
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- if len(p) == 0 {
- // If the caller wanted a zero byte read, return immediately
- // without trying. (But after acquiring the readLock.) Otherwise
- // syscall.Read returns 0, nil and eofError turns that into
- // io.EOF.
- // TODO(bradfitz): make it wait for readability? (Issue 15735)
- return 0, nil
- }
- if err := fd.pd.prepareRead(); err != nil {
- return 0, err
- }
- if fd.isStream && len(p) > 1<<30 {
- p = p[:1<<30]
- }
- for {
- n, err = syscall.Read(fd.sysfd, p)
- if err != nil {
- n = 0
- if err == syscall.EAGAIN {
- if err = fd.pd.waitRead(); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- break
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("read", err)
- }
- return
+ n, err = fd.pfd.Read(p)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("read", err)
}
func (fd *netFD) readFrom(p []byte) (n int, sa syscall.Sockaddr, err error) {
- if err := fd.readLock(); err != nil {
- return 0, nil, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(); err != nil {
- return 0, nil, err
- }
- for {
- n, sa, err = syscall.Recvfrom(fd.sysfd, p, 0)
- if err != nil {
- n = 0
- if err == syscall.EAGAIN {
- if err = fd.pd.waitRead(); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- break
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("recvfrom", err)
- }
- return
+ n, sa, err = fd.pfd.ReadFrom(p)
+ runtime.KeepAlive(fd)
+ return n, sa, wrapSyscallError("recvfrom", err)
}
func (fd *netFD) readMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err error) {
- if err := fd.readLock(); err != nil {
- return 0, 0, 0, nil, err
- }
- defer fd.readUnlock()
- if err := fd.pd.prepareRead(); err != nil {
- return 0, 0, 0, nil, err
- }
- for {
- n, oobn, flags, sa, err = syscall.Recvmsg(fd.sysfd, p, oob, 0)
- if err != nil {
- // TODO(dfc) should n and oobn be set to 0
- if err == syscall.EAGAIN {
- if err = fd.pd.waitRead(); err == nil {
- continue
- }
- }
- }
- err = fd.eofError(n, err)
- break
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("recvmsg", err)
- }
- return
+ n, oobn, flags, sa, err = fd.pfd.ReadMsg(p, oob)
+ runtime.KeepAlive(fd)
+ return n, oobn, flags, sa, wrapSyscallError("recvmsg", err)
}
func (fd *netFD) Write(p []byte) (nn int, err error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(); err != nil {
- return 0, err
- }
- for {
- var n int
- max := len(p)
- if fd.isStream && max-nn > 1<<30 {
- max = nn + 1<<30
- }
- n, err = syscall.Write(fd.sysfd, p[nn:max])
- if n > 0 {
- nn += n
- }
- if nn == len(p) {
- break
- }
- if err == syscall.EAGAIN {
- if err = fd.pd.waitWrite(); err == nil {
- continue
- }
- }
- if err != nil {
- break
- }
- if n == 0 {
- err = io.ErrUnexpectedEOF
- break
- }
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("write", err)
- }
- return nn, err
+ nn, err = fd.pfd.Write(p)
+ runtime.KeepAlive(fd)
+ return nn, wrapSyscallError("write", err)
}
func (fd *netFD) writeTo(p []byte, sa syscall.Sockaddr) (n int, err error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(); err != nil {
- return 0, err
- }
- for {
- err = syscall.Sendto(fd.sysfd, p, 0, sa)
- if err == syscall.EAGAIN {
- if err = fd.pd.waitWrite(); err == nil {
- continue
- }
- }
- break
- }
- if err == nil {
- n = len(p)
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("sendto", err)
- }
- return
+ n, err = fd.pfd.WriteTo(p, sa)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("sendto", err)
}
func (fd *netFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
- if err := fd.writeLock(); err != nil {
- return 0, 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(); err != nil {
- return 0, 0, err
- }
- for {
- n, err = syscall.SendmsgN(fd.sysfd, p, oob, sa, 0)
- if err == syscall.EAGAIN {
- if err = fd.pd.waitWrite(); err == nil {
- continue
- }
- }
- break
- }
- if err == nil {
- oobn = len(oob)
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("sendmsg", err)
- }
- return
+ n, oobn, err = fd.pfd.WriteMsg(p, oob, sa)
+ runtime.KeepAlive(fd)
+ return n, oobn, wrapSyscallError("sendmsg", err)
}
func (fd *netFD) accept() (netfd *netFD, err error) {
- if err := fd.readLock(); err != nil {
- return nil, err
- }
- defer fd.readUnlock()
-
- var s int
- var rsa syscall.Sockaddr
- if err = fd.pd.prepareRead(); err != nil {
- return nil, err
- }
- for {
- s, rsa, err = accept(fd.sysfd)
- if err != nil {
- nerr, ok := err.(*os.SyscallError)
- if !ok {
- return nil, err
- }
- switch nerr.Err {
- case syscall.EAGAIN:
- if err = fd.pd.waitRead(); err == nil {
- continue
- }
- case syscall.ECONNABORTED:
- // This means that a socket on the
- // listen queue was closed before we
- // Accept()ed it; it's a silly error,
- // so try again.
- continue
- }
- return nil, err
+ d, rsa, errcall, err := fd.pfd.Accept()
+ if err != nil {
+ if errcall != "" {
+ err = wrapSyscallError(errcall, err)
}
- break
+ return nil, err
}
- if netfd, err = newFD(s, fd.family, fd.sotype, fd.net); err != nil {
- closeFunc(s)
+ if netfd, err = newFD(d, fd.family, fd.sotype, fd.net); err != nil {
+ poll.CloseFunc(d)
return nil, err
}
if err = netfd.init(); err != nil {
fd.Close()
return nil, err
}
- lsa, _ := syscall.Getsockname(netfd.sysfd)
+ lsa, _ := syscall.Getsockname(netfd.pfd.Sysfd)
netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa))
return netfd, nil
}
@@ -511,7 +312,7 @@ func dupCloseOnExecOld(fd int) (newfd int, err error) {
}
func (fd *netFD) dup() (f *os.File, err error) {
- ns, err := dupCloseOnExec(fd.sysfd)
+ ns, err := dupCloseOnExec(fd.pfd.Sysfd)
if err != nil {
return nil, err
}
diff --git a/libgo/go/net/fd_windows.go b/libgo/go/net/fd_windows.go
index a976f2ac7f9..c2156b255e5 100644
--- a/libgo/go/net/fd_windows.go
+++ b/libgo/go/net/fd_windows.go
@@ -6,64 +6,13 @@ package net
import (
"context"
- "internal/race"
+ "internal/poll"
"os"
"runtime"
- "sync"
"syscall"
"unsafe"
)
-var (
- initErr error
- ioSync uint64
-)
-
-// CancelIo Windows API cancels all outstanding IO for a particular
-// socket on current thread. To overcome that limitation, we run
-// special goroutine, locked to OS single thread, that both starts
-// and cancels IO. It means, there are 2 unavoidable thread switches
-// for every IO.
-// Some newer versions of Windows has new CancelIoEx API, that does
-// not have that limitation and can be used from any thread. This
-// package uses CancelIoEx API, if present, otherwise it fallback
-// to CancelIo.
-
-var (
- canCancelIO bool // determines if CancelIoEx API is present
- skipSyncNotif bool
- hasLoadSetFileCompletionNotificationModes bool
-)
-
-func sysInit() {
- var d syscall.WSAData
- e := syscall.WSAStartup(uint32(0x202), &d)
- if e != nil {
- initErr = os.NewSyscallError("wsastartup", e)
- }
- canCancelIO = syscall.LoadCancelIoEx() == nil
- hasLoadSetFileCompletionNotificationModes = syscall.LoadSetFileCompletionNotificationModes() == nil
- if hasLoadSetFileCompletionNotificationModes {
- // It's not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS if non IFS providers are installed:
- // http://support.microsoft.com/kb/2568167
- skipSyncNotif = true
- protos := [2]int32{syscall.IPPROTO_TCP, 0}
- var buf [32]syscall.WSAProtocolInfo
- len := uint32(unsafe.Sizeof(buf))
- n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len)
- if err != nil {
- skipSyncNotif = false
- } else {
- for i := int32(0); i < n; i++ {
- if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 {
- skipSyncNotif = false
- break
- }
- }
- }
- }
-}
-
// canUseConnectEx reports whether we can use the ConnectEx Windows API call
// for the given network type.
func canUseConnectEx(net string) bool {
@@ -75,257 +24,39 @@ func canUseConnectEx(net string) bool {
return false
}
-// operation contains superset of data necessary to perform all async IO.
-type operation struct {
- // Used by IOCP interface, it must be first field
- // of the struct, as our code rely on it.
- o syscall.Overlapped
-
- // fields used by runtime.netpoll
- runtimeCtx uintptr
- mode int32
- errno int32
- qty uint32
-
- // fields used only by net package
- fd *netFD
- errc chan error
- buf syscall.WSABuf
- sa syscall.Sockaddr
- rsa *syscall.RawSockaddrAny
- rsan int32
- handle syscall.Handle
- flags uint32
- bufs []syscall.WSABuf
-}
-
-func (o *operation) InitBuf(buf []byte) {
- o.buf.Len = uint32(len(buf))
- o.buf.Buf = nil
- if len(buf) != 0 {
- o.buf.Buf = &buf[0]
- }
-}
-
-func (o *operation) InitBufs(buf *Buffers) {
- if o.bufs == nil {
- o.bufs = make([]syscall.WSABuf, 0, len(*buf))
- } else {
- o.bufs = o.bufs[:0]
- }
- for _, b := range *buf {
- var p *byte
- if len(b) > 0 {
- p = &b[0]
- }
- o.bufs = append(o.bufs, syscall.WSABuf{Len: uint32(len(b)), Buf: p})
- }
-}
-
-// ClearBufs clears all pointers to Buffers parameter captured
-// by InitBufs, so it can be released by garbage collector.
-func (o *operation) ClearBufs() {
- for i := range o.bufs {
- o.bufs[i].Buf = nil
- }
- o.bufs = o.bufs[:0]
-}
-
-// ioSrv executes net IO requests.
-type ioSrv struct {
- req chan ioSrvReq
-}
-
-type ioSrvReq struct {
- o *operation
- submit func(o *operation) error // if nil, cancel the operation
-}
-
-// ProcessRemoteIO will execute submit IO requests on behalf
-// of other goroutines, all on a single os thread, so it can
-// cancel them later. Results of all operations will be sent
-// back to their requesters via channel supplied in request.
-// It is used only when the CancelIoEx API is unavailable.
-func (s *ioSrv) ProcessRemoteIO() {
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- for r := range s.req {
- if r.submit != nil {
- r.o.errc <- r.submit(r.o)
- } else {
- r.o.errc <- syscall.CancelIo(r.o.fd.sysfd)
- }
- }
-}
-
-// ExecIO executes a single IO operation o. It submits and cancels
-// IO in the current thread for systems where Windows CancelIoEx API
-// is available. Alternatively, it passes the request onto
-// runtime netpoll and waits for completion or cancels request.
-func (s *ioSrv) ExecIO(o *operation, name string, submit func(o *operation) error) (int, error) {
- fd := o.fd
- // Notify runtime netpoll about starting IO.
- err := fd.pd.prepare(int(o.mode))
- if err != nil {
- return 0, err
- }
- // Start IO.
- if canCancelIO {
- err = submit(o)
- } else {
- // Send request to a special dedicated thread,
- // so it can stop the IO with CancelIO later.
- s.req <- ioSrvReq{o, submit}
- err = <-o.errc
- }
- switch err {
- case nil:
- // IO completed immediately
- if o.fd.skipSyncNotif {
- // No completion message will follow, so return immediately.
- return int(o.qty), nil
- }
- // Need to get our completion message anyway.
- case syscall.ERROR_IO_PENDING:
- // IO started, and we have to wait for its completion.
- err = nil
- default:
- return 0, err
- }
- // Wait for our request to complete.
- err = fd.pd.wait(int(o.mode))
- if err == nil {
- // All is good. Extract our IO results and return.
- if o.errno != 0 {
- err = syscall.Errno(o.errno)
- return 0, err
- }
- return int(o.qty), nil
- }
- // IO is interrupted by "close" or "timeout"
- netpollErr := err
- switch netpollErr {
- case errClosing, errTimeout:
- // will deal with those.
- default:
- panic("net: unexpected runtime.netpoll error: " + netpollErr.Error())
- }
- // Cancel our request.
- if canCancelIO {
- err := syscall.CancelIoEx(fd.sysfd, &o.o)
- // Assuming ERROR_NOT_FOUND is returned, if IO is completed.
- if err != nil && err != syscall.ERROR_NOT_FOUND {
- // TODO(brainman): maybe do something else, but panic.
- panic(err)
- }
- } else {
- s.req <- ioSrvReq{o, nil}
- <-o.errc
- }
- // Wait for cancelation to complete.
- fd.pd.waitCanceled(int(o.mode))
- if o.errno != 0 {
- err = syscall.Errno(o.errno)
- if err == syscall.ERROR_OPERATION_ABORTED { // IO Canceled
- err = netpollErr
- }
- return 0, err
- }
- // We issued a cancelation request. But, it seems, IO operation succeeded
- // before the cancelation request run. We need to treat the IO operation as
- // succeeded (the bytes are actually sent/recv from network).
- return int(o.qty), nil
-}
-
-// Start helper goroutines.
-var rsrv, wsrv *ioSrv
-var onceStartServer sync.Once
-
-func startServer() {
- rsrv = new(ioSrv)
- wsrv = new(ioSrv)
- if !canCancelIO {
- // Only CancelIo API is available. Lets start two special goroutines
- // locked to an OS thread, that both starts and cancels IO. One will
- // process read requests, while other will do writes.
- rsrv.req = make(chan ioSrvReq)
- go rsrv.ProcessRemoteIO()
- wsrv.req = make(chan ioSrvReq)
- go wsrv.ProcessRemoteIO()
- }
-}
-
// Network file descriptor.
type netFD struct {
- // locking/lifetime of sysfd + serialize access to Read and Write methods
- fdmu fdMutex
+ pfd poll.FD
// immutable until Close
- sysfd syscall.Handle
- family int
- sotype int
- isStream bool
- isConnected bool
- skipSyncNotif bool
- net string
- laddr Addr
- raddr Addr
-
- rop operation // read operation
- wop operation // write operation
-
- // wait server
- pd pollDesc
+ family int
+ sotype int
+ isConnected bool
+ net string
+ laddr Addr
+ raddr Addr
}
func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) {
- if initErr != nil {
- return nil, initErr
+ ret := &netFD{
+ pfd: poll.FD{
+ Sysfd: sysfd,
+ IsStream: sotype == syscall.SOCK_STREAM,
+ ZeroReadIsEOF: sotype != syscall.SOCK_DGRAM && sotype != syscall.SOCK_RAW,
+ },
+ family: family,
+ sotype: sotype,
+ net: net,
}
- onceStartServer.Do(startServer)
- return &netFD{sysfd: sysfd, family: family, sotype: sotype, net: net, isStream: sotype == syscall.SOCK_STREAM}, nil
+ return ret, nil
}
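
The constructor now pushes socket-type knowledge into poll.FD: IsStream marks SOCK_STREAM sockets, and ZeroReadIsEOF records whether a zero-byte read means the peer closed — true for streams, false for datagram and raw sockets, where empty packets are legal. A toy illustration of the distinction (interpretRead is invented for the example):

    package main

    import "fmt"

    // interpretRead mimics how a ZeroReadIsEOF-style flag changes the
    // meaning of a 0-byte read result.
    func interpretRead(n int, zeroReadIsEOF bool) string {
        if n == 0 && zeroReadIsEOF {
            return "EOF"
        }
        return fmt.Sprintf("%d bytes", n)
    }

    func main() {
        fmt.Println(interpretRead(0, true))  // EOF (TCP-like stream)
        fmt.Println(interpretRead(0, false)) // 0 bytes (UDP-like datagram)
    }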
func (fd *netFD) init() error {
- if err := fd.pd.init(fd); err != nil {
- return err
- }
- if hasLoadSetFileCompletionNotificationModes {
- // We do not use events, so we can skip them always.
- flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE)
- // It's not safe to skip completion notifications for UDP:
- // http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx
- if skipSyncNotif && fd.net == "tcp" {
- flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
- }
- err := syscall.SetFileCompletionNotificationModes(fd.sysfd, flags)
- if err == nil && flags&syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS != 0 {
- fd.skipSyncNotif = true
- }
- }
- // Disable SIO_UDP_CONNRESET behavior.
- // http://support.microsoft.com/kb/263823
- switch fd.net {
- case "udp", "udp4", "udp6":
- ret := uint32(0)
- flag := uint32(0)
- size := uint32(unsafe.Sizeof(flag))
- err := syscall.WSAIoctl(fd.sysfd, syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
- if err != nil {
- return os.NewSyscallError("wsaioctl", err)
- }
+ errcall, err := fd.pfd.Init(fd.net)
+ if errcall != "" {
+ err = wrapSyscallError(errcall, err)
}
- fd.rop.mode = 'r'
- fd.wop.mode = 'w'
- fd.rop.fd = fd
- fd.wop.fd = fd
- fd.rop.runtimeCtx = fd.pd.runtimeCtx
- fd.wop.runtimeCtx = fd.pd.runtimeCtx
- if !canCancelIO {
- fd.rop.errc = make(chan error)
- fd.wop.errc = make(chan error)
- }
- return nil
+ return err
}
func (fd *netFD) setAddr(laddr, raddr Addr) {
@@ -334,20 +65,21 @@ func (fd *netFD) setAddr(laddr, raddr Addr) {
runtime.SetFinalizer(fd, (*netFD).Close)
}
-func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
+// Always returns nil for connected peer address result.
+func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) (syscall.Sockaddr, error) {
// Do not need to call fd.writeLock here,
// because fd is not yet accessible to user,
// so no concurrent operations are possible.
if err := fd.init(); err != nil {
- return err
+ return nil, err
}
if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
- fd.setWriteDeadline(deadline)
- defer fd.setWriteDeadline(noDeadline)
+ fd.pfd.SetWriteDeadline(deadline)
+ defer fd.pfd.SetWriteDeadline(noDeadline)
}
if !canUseConnectEx(fd.net) {
- err := connectFunc(fd.sysfd, ra)
- return os.NewSyscallError("connect", err)
+ err := connectFunc(fd.pfd.Sysfd, ra)
+ return nil, os.NewSyscallError("connect", err)
}
// ConnectEx windows API requires an unconnected, previously bound socket.
if la == nil {
@@ -359,13 +91,10 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
default:
panic("unexpected type in connect")
}
- if err := syscall.Bind(fd.sysfd, la); err != nil {
- return os.NewSyscallError("bind", err)
+ if err := syscall.Bind(fd.pfd.Sysfd, la); err != nil {
+ return nil, os.NewSyscallError("bind", err)
}
}
- // Call ConnectEx API.
- o := &fd.wop
- o.sa = ra
// Wait for the goroutine converting context.Done into a write timeout
// to exist, otherwise our caller might cancel the context and
@@ -377,59 +106,37 @@ func (fd *netFD) connect(ctx context.Context, la, ra syscall.Sockaddr) error {
case <-ctx.Done():
// Force the runtime's poller to immediately give
// up waiting for writability.
- fd.setWriteDeadline(aLongTimeAgo)
+ fd.pfd.SetWriteDeadline(aLongTimeAgo)
<-done
case <-done:
}
}()
- _, err := wsrv.ExecIO(o, "ConnectEx", func(o *operation) error {
- return connectExFunc(o.fd.sysfd, o.sa, nil, 0, nil, &o.o)
- })
- if err != nil {
+ // Call ConnectEx API.
+ if err := fd.pfd.ConnectEx(ra); err != nil {
select {
case <-ctx.Done():
- return mapErr(ctx.Err())
+ return nil, mapErr(ctx.Err())
default:
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("connectex", err)
}
- return err
+ return nil, err
}
}
// Refresh socket properties.
- return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd))))
-}
-
-func (fd *netFD) destroy() {
- if fd.sysfd == syscall.InvalidHandle {
- return
- }
- // Poller may want to unregister fd in readiness notification mechanism,
- // so this must be executed before closeFunc.
- fd.pd.close()
- closeFunc(fd.sysfd)
- fd.sysfd = syscall.InvalidHandle
- // no need for a finalizer anymore
- runtime.SetFinalizer(fd, nil)
+ return nil, os.NewSyscallError("setsockopt", syscall.Setsockopt(fd.pfd.Sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.pfd.Sysfd)), int32(unsafe.Sizeof(fd.pfd.Sysfd))))
}
func (fd *netFD) Close() error {
- if !fd.fdmu.increfAndClose() {
- return errClosing
- }
- // unblock pending reader and writer
- fd.pd.evict()
- fd.decref()
- return nil
+ runtime.SetFinalizer(fd, nil)
+ return fd.pfd.Close()
}
func (fd *netFD) shutdown(how int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return syscall.Shutdown(fd.sysfd, how)
+ err := fd.pfd.Shutdown(how)
+ runtime.KeepAlive(fd)
+ return err
}
func (fd *netFD) closeRead() error {
@@ -441,72 +148,21 @@ func (fd *netFD) closeWrite() error {
}
func (fd *netFD) Read(buf []byte) (int, error) {
- if err := fd.readLock(); err != nil {
- return 0, err
- }
- defer fd.readUnlock()
- o := &fd.rop
- o.InitBuf(buf)
- n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
- return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
- })
- if race.Enabled {
- race.Acquire(unsafe.Pointer(&ioSync))
- }
- if len(buf) != 0 {
- err = fd.eofError(n, err)
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsarecv", err)
- }
- return n, err
+ n, err := fd.pfd.Read(buf)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("wsarecv", err)
}
func (fd *netFD) readFrom(buf []byte) (int, syscall.Sockaddr, error) {
- if len(buf) == 0 {
- return 0, nil, nil
- }
- if err := fd.readLock(); err != nil {
- return 0, nil, err
- }
- defer fd.readUnlock()
- o := &fd.rop
- o.InitBuf(buf)
- n, err := rsrv.ExecIO(o, "WSARecvFrom", func(o *operation) error {
- if o.rsa == nil {
- o.rsa = new(syscall.RawSockaddrAny)
- }
- o.rsan = int32(unsafe.Sizeof(*o.rsa))
- return syscall.WSARecvFrom(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
- })
- err = fd.eofError(n, err)
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsarecvfrom", err)
- }
- if err != nil {
- return n, nil, err
- }
- sa, _ := o.rsa.Sockaddr()
- return n, sa, nil
+ n, sa, err := fd.pfd.ReadFrom(buf)
+ runtime.KeepAlive(fd)
+ return n, sa, wrapSyscallError("wsarecvfrom", err)
}
func (fd *netFD) Write(buf []byte) (int, error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if race.Enabled {
- race.ReleaseMerge(unsafe.Pointer(&ioSync))
- }
- o := &fd.wop
- o.InitBuf(buf)
- n, err := wsrv.ExecIO(o, "WSASend", func(o *operation) error {
- return syscall.WSASend(o.fd.sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
- })
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsasend", err)
- }
- return n, err
+ n, err := fd.pfd.Write(buf)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("wsasend", err)
}
func (c *conn) writeBuffers(v *Buffers) (int64, error) {
@@ -515,67 +171,39 @@ func (c *conn) writeBuffers(v *Buffers) (int64, error) {
}
n, err := c.fd.writeBuffers(v)
if err != nil {
- return n, &OpError{Op: "WSASend", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ return n, &OpError{Op: "wsasend", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
}
return n, nil
}
func (fd *netFD) writeBuffers(buf *Buffers) (int64, error) {
- if len(*buf) == 0 {
- return 0, nil
- }
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if race.Enabled {
- race.ReleaseMerge(unsafe.Pointer(&ioSync))
- }
- o := &fd.wop
- o.InitBufs(buf)
- n, err := wsrv.ExecIO(o, "WSASend", func(o *operation) error {
- return syscall.WSASend(o.fd.sysfd, &o.bufs[0], uint32(len(*buf)), &o.qty, 0, &o.o, nil)
- })
- o.ClearBufs()
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsasend", err)
- }
- testHookDidWritev(n)
- buf.consume(int64(n))
- return int64(n), err
+ n, err := fd.pfd.Writev((*[][]byte)(buf))
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("wsasend", err)
}
func (fd *netFD) writeTo(buf []byte, sa syscall.Sockaddr) (int, error) {
- if len(buf) == 0 {
- return 0, nil
- }
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- o := &fd.wop
- o.InitBuf(buf)
- o.sa = sa
- n, err := wsrv.ExecIO(o, "WSASendto", func(o *operation) error {
- return syscall.WSASendto(o.fd.sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
- })
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsasendto", err)
- }
- return n, err
+ n, err := fd.pfd.WriteTo(buf, sa)
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("wsasendto", err)
}
-func (fd *netFD) acceptOne(rawsa []syscall.RawSockaddrAny, o *operation) (*netFD, error) {
- // Get new socket.
- s, err := sysSocket(fd.family, fd.sotype, 0)
+func (fd *netFD) accept() (*netFD, error) {
+ s, rawsa, rsan, errcall, err := fd.pfd.Accept(func() (syscall.Handle, error) {
+ return sysSocket(fd.family, fd.sotype, 0)
+ })
+
if err != nil {
+ if errcall != "" {
+ err = wrapSyscallError(errcall, err)
+ }
return nil, err
}
// Associate our new socket with IOCP.
netfd, err := newFD(s, fd.family, fd.sotype, fd.net)
if err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, err
}
if err := netfd.init(); err != nil {
@@ -583,71 +211,11 @@ func (fd *netFD) acceptOne(rawsa []syscall.RawSockaddrAny, o *operation) (*netFD
return nil, err
}
- // Submit accept request.
- o.handle = s
- o.rsan = int32(unsafe.Sizeof(rawsa[0]))
- _, err = rsrv.ExecIO(o, "AcceptEx", func(o *operation) error {
- return acceptFunc(o.fd.sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
- })
- if err != nil {
- netfd.Close()
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("acceptex", err)
- }
- return nil, err
- }
-
- // Inherit properties of the listening socket.
- err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
- if err != nil {
- netfd.Close()
- return nil, os.NewSyscallError("setsockopt", err)
- }
- runtime.KeepAlive(fd)
- return netfd, nil
-}
-
-func (fd *netFD) accept() (*netFD, error) {
- if err := fd.readLock(); err != nil {
- return nil, err
- }
- defer fd.readUnlock()
-
- o := &fd.rop
- var netfd *netFD
- var err error
- var rawsa [2]syscall.RawSockaddrAny
- for {
- netfd, err = fd.acceptOne(rawsa[:], o)
- if err == nil {
- break
- }
- // Sometimes we see WSAECONNRESET and ERROR_NETNAME_DELETED is
- // returned here. These happen if connection reset is received
- // before AcceptEx could complete. These errors relate to new
- // connection, not to AcceptEx, so ignore broken connection and
- // try AcceptEx again for more connections.
- nerr, ok := err.(*os.SyscallError)
- if !ok {
- return nil, err
- }
- errno, ok := nerr.Err.(syscall.Errno)
- if !ok {
- return nil, err
- }
- switch errno {
- case syscall.ERROR_NETNAME_DELETED, syscall.WSAECONNRESET:
- // ignore these and try again
- default:
- return nil, err
- }
- }
-
// Get local and peer addr out of AcceptEx buffer.
var lrsa, rrsa *syscall.RawSockaddrAny
var llen, rlen int32
syscall.GetAcceptExSockaddrs((*byte)(unsafe.Pointer(&rawsa[0])),
- 0, uint32(o.rsan), uint32(o.rsan), &lrsa, &llen, &rrsa, &rlen)
+ 0, rsan, rsan, &lrsa, &llen, &rrsa, &rlen)
lsa, _ := lrsa.Sockaddr()
rsa, _ := rrsa.Sockaddr()
diff --git a/libgo/go/net/file_test.go b/libgo/go/net/file_test.go
index 6566ce21a1f..abf8b3a6995 100644
--- a/libgo/go/net/file_test.go
+++ b/libgo/go/net/file_test.go
@@ -90,7 +90,7 @@ func TestFileConn(t *testing.T) {
f, err = c1.File()
}
if err := c1.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Error(err)
@@ -256,7 +256,7 @@ func TestFilePacketConn(t *testing.T) {
f, err = c1.File()
}
if err := c1.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Error(err)
diff --git a/libgo/go/net/file_unix.go b/libgo/go/net/file_unix.go
index b47a6143e9a..3655a8930a1 100644
--- a/libgo/go/net/file_unix.go
+++ b/libgo/go/net/file_unix.go
@@ -7,6 +7,7 @@
package net
import (
+ "internal/poll"
"os"
"syscall"
)
@@ -17,7 +18,7 @@ func dupSocket(f *os.File) (int, error) {
return -1, err
}
if err := syscall.SetNonblock(s, true); err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return -1, os.NewSyscallError("setnonblock", err)
}
return s, nil
@@ -31,7 +32,7 @@ func newFileFD(f *os.File) (*netFD, error) {
family := syscall.AF_UNSPEC
sotype, err := syscall.GetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_TYPE)
if err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, os.NewSyscallError("getsockopt", err)
}
lsa, _ := syscall.Getsockname(s)
@@ -44,12 +45,12 @@ func newFileFD(f *os.File) (*netFD, error) {
case *syscall.SockaddrUnix:
family = syscall.AF_UNIX
default:
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, syscall.EPROTONOSUPPORT
}
fd, err := newFD(s, family, sotype, "")
if err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, err
}
laddr := fd.addrFunc()(lsa)
diff --git a/libgo/go/net/hook_cloexec.go b/libgo/go/net/hook_cloexec.go
deleted file mode 100644
index 870f0d78b12..00000000000
--- a/libgo/go/net/hook_cloexec.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build freebsd linux
-
-package net
-
-import "syscall"
-
-var (
- // Placeholders for socket system calls.
- accept4Func func(int, int) (int, syscall.Sockaddr, error) = syscall.Accept4
-)
diff --git a/libgo/go/net/hook_unix.go b/libgo/go/net/hook_unix.go
index b2522a2ed23..7d58d0fcf20 100644
--- a/libgo/go/net/hook_unix.go
+++ b/libgo/go/net/hook_unix.go
@@ -13,10 +13,8 @@ var (
testHookCanceledDial = func() {} // for golang.org/issue/16523
// Placeholders for socket system calls.
- socketFunc func(int, int, int) (int, error) = syscall.Socket
- closeFunc func(int) error = syscall.Close
- connectFunc func(int, syscall.Sockaddr) error = syscall.Connect
- listenFunc func(int, int) error = syscall.Listen
- acceptFunc func(int) (int, syscall.Sockaddr, error) = syscall.Accept
- getsockoptIntFunc func(int, int, int) (int, error) = syscall.GetsockoptInt
+ socketFunc func(int, int, int) (int, error) = syscall.Socket
+ connectFunc func(int, syscall.Sockaddr) error = syscall.Connect
+ listenFunc func(int, int) error = syscall.Listen
+ getsockoptIntFunc func(int, int, int) (int, error) = syscall.GetsockoptInt
)
diff --git a/libgo/go/net/hook_windows.go b/libgo/go/net/hook_windows.go
index 63ea35ab8c4..4e64dcef517 100644
--- a/libgo/go/net/hook_windows.go
+++ b/libgo/go/net/hook_windows.go
@@ -13,10 +13,7 @@ var (
testHookDialChannel = func() { time.Sleep(time.Millisecond) } // see golang.org/issue/5349
// Placeholders for socket system calls.
- socketFunc func(int, int, int) (syscall.Handle, error) = syscall.Socket
- closeFunc func(syscall.Handle) error = syscall.Closesocket
- connectFunc func(syscall.Handle, syscall.Sockaddr) error = syscall.Connect
- connectExFunc func(syscall.Handle, syscall.Sockaddr, *byte, uint32, *uint32, *syscall.Overlapped) error = syscall.ConnectEx
- listenFunc func(syscall.Handle, int) error = syscall.Listen
- acceptFunc func(syscall.Handle, syscall.Handle, *byte, uint32, uint32, uint32, *uint32, *syscall.Overlapped) error = syscall.AcceptEx
+ socketFunc func(int, int, int) (syscall.Handle, error) = syscall.Socket
+ connectFunc func(syscall.Handle, syscall.Sockaddr) error = syscall.Connect
+ listenFunc func(syscall.Handle, int) error = syscall.Listen
)
diff --git a/libgo/go/net/http/cgi/host_test.go b/libgo/go/net/http/cgi/host_test.go
index f0583729eba..133630013e5 100644
--- a/libgo/go/net/http/cgi/host_test.go
+++ b/libgo/go/net/http/cgi/host_test.go
@@ -409,7 +409,7 @@ func TestCopyError(t *testing.T) {
}
childRunning := func() bool {
- return isProcessRunning(t, pid)
+ return isProcessRunning(pid)
}
if !childRunning() {
diff --git a/libgo/go/net/http/cgi/posix_test.go b/libgo/go/net/http/cgi/posix_test.go
index 5ff9e7d5eb7..9396ce036af 100644
--- a/libgo/go/net/http/cgi/posix_test.go
+++ b/libgo/go/net/http/cgi/posix_test.go
@@ -9,10 +9,9 @@ package cgi
import (
"os"
"syscall"
- "testing"
)
-func isProcessRunning(t *testing.T, pid int) bool {
+func isProcessRunning(pid int) bool {
p, err := os.FindProcess(pid)
if err != nil {
return false
diff --git a/libgo/go/net/http/client.go b/libgo/go/net/http/client.go
index 0005538e70b..4c9084ae512 100644
--- a/libgo/go/net/http/client.go
+++ b/libgo/go/net/http/client.go
@@ -38,20 +38,20 @@ import (
// When following redirects, the Client will forward all headers set on the
// initial Request except:
//
-// * when forwarding sensitive headers like "Authorization",
-// "WWW-Authenticate", and "Cookie" to untrusted targets.
-// These headers will be ignored when following a redirect to a domain
-// that is not a subdomain match or exact match of the initial domain.
-// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
-// will forward the sensitive headers, but a redirect to "bar.com" will not.
-//
-// * when forwarding the "Cookie" header with a non-nil cookie Jar.
-// Since each redirect may mutate the state of the cookie jar,
-// a redirect may possibly alter a cookie set in the initial request.
-// When forwarding the "Cookie" header, any mutated cookies will be omitted,
-// with the expectation that the Jar will insert those mutated cookies
-// with the updated values (assuming the origin matches).
-// If Jar is nil, the initial cookies are forwarded without change.
+// • when forwarding sensitive headers like "Authorization",
+// "WWW-Authenticate", and "Cookie" to untrusted targets.
+// These headers will be ignored when following a redirect to a domain
+// that is not a subdomain match or exact match of the initial domain.
+// For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com"
+// will forward the sensitive headers, but a redirect to "bar.com" will not.
+//
+// • when forwarding the "Cookie" header with a non-nil cookie Jar.
+// Since each redirect may mutate the state of the cookie jar,
+// a redirect may possibly alter a cookie set in the initial request.
+// When forwarding the "Cookie" header, any mutated cookies will be omitted,
+// with the expectation that the Jar will insert those mutated cookies
+// with the updated values (assuming the origin matches).
+// If Jar is nil, the initial cookies are forwarded without change.
//
type Client struct {
// Transport specifies the mechanism by which individual
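
The rewritten list above states the subdomain rule for sensitive headers in words; it is easy to restate in code. A hedged sketch of the rule's spirit, not necessarily net/http's exact internal check:

    package main

    import (
        "fmt"
        "strings"
    )

    // isDomainOrSubdomain: a redirect target keeps sensitive headers
    // only if it equals the initial domain or is a subdomain of it.
    func isDomainOrSubdomain(sub, parent string) bool {
        if sub == parent {
            return true
        }
        return strings.HasSuffix(sub, "."+parent)
    }

    func main() {
        fmt.Println(isDomainOrSubdomain("sub.foo.com", "foo.com")) // true: forward
        fmt.Println(isDomainOrSubdomain("bar.com", "foo.com"))     // false: drop
    }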
@@ -494,17 +494,21 @@ func (c *Client) Do(req *Request) (*Response, error) {
}
var (
- deadline = c.deadline()
- reqs []*Request
- resp *Response
- copyHeaders = c.makeHeadersCopier(req)
+ deadline = c.deadline()
+ reqs []*Request
+ resp *Response
+ copyHeaders = c.makeHeadersCopier(req)
+ reqBodyClosed = false // have we closed the current req.Body?
// Redirect behavior:
redirectMethod string
includeBody bool
)
uerr := func(err error) error {
- req.closeBody()
+ // the body may have been closed already by c.send()
+ if !reqBodyClosed {
+ req.closeBody()
+ }
method := valueOrDefault(reqs[0].Method, "GET")
var urlStr string
if resp != nil && resp.Request != nil {
@@ -524,10 +528,12 @@ func (c *Client) Do(req *Request) (*Response, error) {
if len(reqs) > 0 {
loc := resp.Header.Get("Location")
if loc == "" {
+ resp.closeBody()
return nil, uerr(fmt.Errorf("%d response missing Location header", resp.StatusCode))
}
u, err := req.URL.Parse(loc)
if err != nil {
+ resp.closeBody()
return nil, uerr(fmt.Errorf("failed to parse Location header %q: %v", loc, err))
}
ireq := reqs[0]
@@ -542,6 +548,7 @@ func (c *Client) Do(req *Request) (*Response, error) {
if includeBody && ireq.GetBody != nil {
req.Body, err = ireq.GetBody()
if err != nil {
+ resp.closeBody()
return nil, uerr(err)
}
req.ContentLength = ireq.ContentLength
@@ -593,6 +600,8 @@ func (c *Client) Do(req *Request) (*Response, error) {
var err error
var didTimeout func() bool
if resp, didTimeout, err = c.send(req, deadline); err != nil {
+ // c.send() always closes req.Body
+ reqBodyClosed = true
if !deadline.IsZero() && didTimeout() {
err = &httpError{
err: err.Error() + " (Client.Timeout exceeded while awaiting headers)",
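
The reqBodyClosed flag added above is a small double-close guard: c.send always closes req.Body on error, so the uerr cleanup path must remember that and skip its own closeBody call. The same pattern in miniature (types invented for illustration):

    package main

    import "fmt"

    type body struct{ closes int }

    func (b *body) Close() error { b.closes++; return nil }

    func main() {
        b := new(body)
        bodyClosed := false

        // send path: closes the body when the request fails.
        b.Close()
        bodyClosed = true

        // error-wrapping path: close only if not already closed.
        if !bodyClosed {
            b.Close()
        }
        fmt.Println("closes:", b.closes) // closes: 1, not 2
    }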
diff --git a/libgo/go/net/http/client_test.go b/libgo/go/net/http/client_test.go
index 4f674dd8d6c..b9a1c31e43a 100644
--- a/libgo/go/net/http/client_test.go
+++ b/libgo/go/net/http/client_test.go
@@ -10,7 +10,6 @@ import (
"bytes"
"context"
"crypto/tls"
- "crypto/x509"
"encoding/base64"
"errors"
"fmt"
@@ -73,7 +72,7 @@ func TestClient(t *testing.T) {
ts := httptest.NewServer(robotsTxtHandler)
defer ts.Close()
- c := &Client{Transport: &Transport{DisableKeepAlives: true}}
+ c := ts.Client()
r, err := c.Get(ts.URL)
var b []byte
if err == nil {
@@ -220,10 +219,7 @@ func TestClientRedirects(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
-
- c := &Client{Transport: tr}
+ c := ts.Client()
_, err := c.Get(ts.URL)
if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g {
t.Errorf("with default client Get, expected error %q, got %q", e, g)
@@ -252,13 +248,10 @@ func TestClientRedirects(t *testing.T) {
var checkErr error
var lastVia []*Request
var lastReq *Request
- c = &Client{
- Transport: tr,
- CheckRedirect: func(req *Request, via []*Request) error {
- lastReq = req
- lastVia = via
- return checkErr
- },
+ c.CheckRedirect = func(req *Request, via []*Request) error {
+ lastReq = req
+ lastVia = via
+ return checkErr
}
res, err := c.Get(ts.URL)
if err != nil {
@@ -304,6 +297,7 @@ func TestClientRedirects(t *testing.T) {
}
}
+// Tests that Client redirects' contexts are derived from the original request's context.
func TestClientRedirectContext(t *testing.T) {
setParallel(t)
defer afterTest(t)
@@ -312,19 +306,16 @@ func TestClientRedirectContext(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
-
ctx, cancel := context.WithCancel(context.Background())
- c := &Client{
- Transport: tr,
- CheckRedirect: func(req *Request, via []*Request) error {
- cancel()
- if len(via) > 2 {
- return errors.New("too many redirects")
- }
+ c := ts.Client()
+ c.CheckRedirect = func(req *Request, via []*Request) error {
+ cancel()
+ select {
+ case <-req.Context().Done():
return nil
- },
+ case <-time.After(5 * time.Second):
+ return errors.New("redirected request's context never expired after root request canceled")
+ }
}
req, _ := NewRequest("GET", ts.URL, nil)
req = req.WithContext(ctx)
@@ -458,11 +449,12 @@ func testRedirectsByMethod(t *testing.T, method string, table []redirectTest, wa
}))
defer ts.Close()
+ c := ts.Client()
for _, tt := range table {
content := tt.redirectBody
req, _ := NewRequest(method, ts.URL+tt.suffix, strings.NewReader(content))
req.GetBody = func() (io.ReadCloser, error) { return ioutil.NopCloser(strings.NewReader(content)), nil }
- res, err := DefaultClient.Do(req)
+ res, err := c.Do(req)
if err != nil {
t.Fatal(err)
@@ -516,17 +508,12 @@ func TestClientRedirectUseResponse(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
-
- c := &Client{
- Transport: tr,
- CheckRedirect: func(req *Request, via []*Request) error {
- if req.Response == nil {
- t.Error("expected non-nil Request.Response")
- }
- return ErrUseLastResponse
- },
+ c := ts.Client()
+ c.CheckRedirect = func(req *Request, via []*Request) error {
+ if req.Response == nil {
+ t.Error("expected non-nil Request.Response")
+ }
+ return ErrUseLastResponse
}
res, err := c.Get(ts.URL)
if err != nil {
@@ -555,7 +542,8 @@ func TestClientRedirect308NoLocation(t *testing.T) {
w.WriteHeader(308)
}))
defer ts.Close()
- res, err := Get(ts.URL)
+ c := ts.Client()
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -582,8 +570,9 @@ func TestClientRedirect308NoGetBody(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ c := ts.Client()
req.GetBody = nil // so it can't rewind.
- res, err := DefaultClient.Do(req)
+ res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
@@ -673,12 +662,8 @@ func TestRedirectCookiesJar(t *testing.T) {
var ts *httptest.Server
ts = httptest.NewServer(echoCookiesRedirectHandler)
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{
- Transport: tr,
- Jar: new(TestJar),
- }
+ c := ts.Client()
+ c.Jar = new(TestJar)
u, _ := url.Parse(ts.URL)
c.Jar.SetCookies(u, []*Cookie{expectedCookies[0]})
resp, err := c.Get(ts.URL)
@@ -722,13 +707,10 @@ func TestJarCalls(t *testing.T) {
}))
defer ts.Close()
jar := new(RecordingJar)
- c := &Client{
- Jar: jar,
- Transport: &Transport{
- Dial: func(_ string, _ string) (net.Conn, error) {
- return net.Dial("tcp", ts.Listener.Addr().String())
- },
- },
+ c := ts.Client()
+ c.Jar = jar
+ c.Transport.(*Transport).Dial = func(_ string, _ string) (net.Conn, error) {
+ return net.Dial("tcp", ts.Listener.Addr().String())
}
_, err := c.Get("http://firsthost.fake/")
if err != nil {
@@ -840,7 +822,8 @@ func TestClientWrites(t *testing.T) {
}
return c, err
}
- c := &Client{Transport: &Transport{Dial: dialer}}
+ c := ts.Client()
+ c.Transport.(*Transport).Dial = dialer
_, err := c.Get(ts.URL)
if err != nil {
@@ -873,14 +856,11 @@ func TestClientInsecureTransport(t *testing.T) {
// TODO(bradfitz): add tests for skipping hostname checks too?
// would require a new cert for testing, and probably
// redundant with these tests.
+ c := ts.Client()
for _, insecure := range []bool{true, false} {
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: insecure,
- },
+ c.Transport.(*Transport).TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: insecure,
}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
res, err := c.Get(ts.URL)
if (err == nil) != insecure {
t.Errorf("insecure=%v: got unexpected err=%v", insecure, err)
@@ -914,22 +894,6 @@ func TestClientErrorWithRequestURI(t *testing.T) {
}
}
-func newTLSTransport(t *testing.T, ts *httptest.Server) *Transport {
- certs := x509.NewCertPool()
- for _, c := range ts.TLS.Certificates {
- roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1])
- if err != nil {
- t.Fatalf("error parsing server's root cert: %v", err)
- }
- for _, root := range roots {
- certs.AddCert(root)
- }
- }
- return &Transport{
- TLSClientConfig: &tls.Config{RootCAs: certs},
- }
-}
-
func TestClientWithCorrectTLSServerName(t *testing.T) {
defer afterTest(t)
@@ -941,9 +905,8 @@ func TestClientWithCorrectTLSServerName(t *testing.T) {
}))
defer ts.Close()
- trans := newTLSTransport(t, ts)
- trans.TLSClientConfig.ServerName = serverName
- c := &Client{Transport: trans}
+ c := ts.Client()
+ c.Transport.(*Transport).TLSClientConfig.ServerName = serverName
if _, err := c.Get(ts.URL); err != nil {
t.Fatalf("expected successful TLS connection, got error: %v", err)
}
@@ -956,9 +919,8 @@ func TestClientWithIncorrectTLSServerName(t *testing.T) {
errc := make(chanWriter, 10) // but only expecting 1
ts.Config.ErrorLog = log.New(errc, "", 0)
- trans := newTLSTransport(t, ts)
- trans.TLSClientConfig.ServerName = "badserver"
- c := &Client{Transport: trans}
+ c := ts.Client()
+ c.Transport.(*Transport).TLSClientConfig.ServerName = "badserver"
_, err := c.Get(ts.URL)
if err == nil {
t.Fatalf("expected an error")
@@ -992,13 +954,12 @@ func TestTransportUsesTLSConfigServerName(t *testing.T) {
}))
defer ts.Close()
- tr := newTLSTransport(t, ts)
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
tr.TLSClientConfig.ServerName = "example.com" // one of httptest's Server cert names
tr.Dial = func(netw, addr string) (net.Conn, error) {
return net.Dial(netw, ts.Listener.Addr().String())
}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
res, err := c.Get("https://some-other-host.tld/")
if err != nil {
t.Fatal(err)
@@ -1013,13 +974,12 @@ func TestResponseSetsTLSConnectionState(t *testing.T) {
}))
defer ts.Close()
- tr := newTLSTransport(t, ts)
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA}
tr.Dial = func(netw, addr string) (net.Conn, error) {
return net.Dial(netw, ts.Listener.Addr().String())
}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
res, err := c.Get("https://example.com/")
if err != nil {
t.Fatal(err)
@@ -1114,14 +1074,12 @@ func TestEmptyPasswordAuth(t *testing.T) {
}
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
req.URL.User = url.User(gopher)
+ c := ts.Client()
resp, err := c.Do(req)
if err != nil {
t.Fatal(err)
@@ -1498,21 +1456,17 @@ func TestClientCopyHeadersOnRedirect(t *testing.T) {
defer ts2.Close()
ts2URL = ts2.URL
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{
- Transport: tr,
- CheckRedirect: func(r *Request, via []*Request) error {
- want := Header{
- "User-Agent": []string{ua},
- "X-Foo": []string{xfoo},
- "Referer": []string{ts2URL},
- }
- if !reflect.DeepEqual(r.Header, want) {
- t.Errorf("CheckRedirect Request.Header = %#v; want %#v", r.Header, want)
- }
- return nil
- },
+ c := ts1.Client()
+ c.CheckRedirect = func(r *Request, via []*Request) error {
+ want := Header{
+ "User-Agent": []string{ua},
+ "X-Foo": []string{xfoo},
+ "Referer": []string{ts2URL},
+ }
+ if !reflect.DeepEqual(r.Header, want) {
+ t.Errorf("CheckRedirect Request.Header = %#v; want %#v", r.Header, want)
+ }
+ return nil
}
req, _ := NewRequest("GET", ts2.URL, nil)
@@ -1601,13 +1555,9 @@ func TestClientAltersCookiesOnRedirect(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
jar, _ := cookiejar.New(nil)
- c := &Client{
- Transport: tr,
- Jar: jar,
- }
+ c := ts.Client()
+ c.Jar = jar
u, _ := url.Parse(ts.URL)
req, _ := NewRequest("GET", ts.URL, nil)
@@ -1725,9 +1675,7 @@ func TestClientRedirectTypes(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
-
+ c := ts.Client()
for i, tt := range tests {
handlerc <- func(w ResponseWriter, r *Request) {
w.Header().Set("Location", ts.URL)
@@ -1740,7 +1688,6 @@ func TestClientRedirectTypes(t *testing.T) {
continue
}
- c := &Client{Transport: tr}
c.CheckRedirect = func(req *Request, via []*Request) error {
if got, want := req.Method, tt.wantMethod; got != want {
return fmt.Errorf("#%d: got next method %q; want %q", i, got, want)
@@ -1780,8 +1727,8 @@ func (b issue18239Body) Close() error {
return nil
}
-// Issue 18239: make sure the Transport doesn't retry requests with bodies.
-// (Especially if Request.GetBody is not defined.)
+// Issue 18239: make sure the Transport doesn't retry requests with bodies
+// if Request.GetBody is not defined.
func TestTransportBodyReadError(t *testing.T) {
setParallel(t)
defer afterTest(t)
@@ -1794,9 +1741,8 @@ func TestTransportBodyReadError(t *testing.T) {
w.Header().Set("X-Body-Read", fmt.Sprintf("%v, %v", n, err))
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
// Do one initial successful request to create an idle TCP connection
// for the subsequent request to reuse. (The Transport only retries
@@ -1816,6 +1762,7 @@ func TestTransportBodyReadError(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+ req = req.WithT(t)
_, err = tr.RoundTrip(req)
if err != someErr {
t.Errorf("Got error: %v; want Request.Body read error: %v", err, someErr)
diff --git a/libgo/go/net/http/clientserver_test.go b/libgo/go/net/http/clientserver_test.go
index 580115ca9c0..20feaa70ff6 100644
--- a/libgo/go/net/http/clientserver_test.go
+++ b/libgo/go/net/http/clientserver_test.go
@@ -1385,3 +1385,30 @@ func testServerUndeclaredTrailers(t *testing.T, h2 bool) {
t.Errorf("Trailer = %#v; want %#v", res.Trailer, want)
}
}
+
+func TestBadResponseAfterReadingBody(t *testing.T) {
+ defer afterTest(t)
+ cst := newClientServerTest(t, false, HandlerFunc(func(w ResponseWriter, r *Request) {
+ _, err := io.Copy(ioutil.Discard, r.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c, _, err := w.(Hijacker).Hijack()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ fmt.Fprintln(c, "some bogus crap")
+ }))
+ defer cst.close()
+
+ closes := 0
+ res, err := cst.c.Post(cst.ts.URL, "text/plain", countCloseReader{&closes, strings.NewReader("hello")})
+ if err == nil {
+ res.Body.Close()
+ t.Fatal("expected an error to be returned from Post")
+ }
+ if closes != 1 {
+ t.Errorf("closes = %d; want 1", closes)
+ }
+}
diff --git a/libgo/go/net/http/cookie.go b/libgo/go/net/http/cookie.go
index 5a67476cd42..cf522488c15 100644
--- a/libgo/go/net/http/cookie.go
+++ b/libgo/go/net/http/cookie.go
@@ -328,7 +328,7 @@ func sanitizeCookieValue(v string) string {
if len(v) == 0 {
return v
}
- if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
+ if strings.IndexByte(v, ' ') >= 0 || strings.IndexByte(v, ',') >= 0 {
return `"` + v + `"`
}
return v
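
The old check only quoted values whose first or last byte was a space or comma; the new one quotes whenever either byte appears anywhere in the value, which is what the updated tests below expect. Restated standalone:

    package main

    import (
        "fmt"
        "strings"
    )

    // quoteIfNeeded mirrors the new sanitizeCookieValue rule: quote any
    // value containing a space or comma, wherever it occurs.
    func quoteIfNeeded(v string) string {
        if strings.IndexByte(v, ' ') >= 0 || strings.IndexByte(v, ',') >= 0 {
            return `"` + v + `"`
        }
        return v
    }

    func main() {
        fmt.Println(quoteIfNeeded("a z")) // "a z"
        fmt.Println(quoteIfNeeded("a,z")) // "a,z"
        fmt.Println(quoteIfNeeded("az"))  // az
    }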
diff --git a/libgo/go/net/http/cookie_test.go b/libgo/go/net/http/cookie_test.go
index b3e54f8db32..9d199a3752e 100644
--- a/libgo/go/net/http/cookie_test.go
+++ b/libgo/go/net/http/cookie_test.go
@@ -69,7 +69,7 @@ var writeSetCookiesTests = []struct {
// are disallowed by RFC 6265 but are common in the wild.
{
&Cookie{Name: "special-1", Value: "a z"},
- `special-1=a z`,
+ `special-1="a z"`,
},
{
&Cookie{Name: "special-2", Value: " z"},
@@ -85,7 +85,7 @@ var writeSetCookiesTests = []struct {
},
{
&Cookie{Name: "special-5", Value: "a,z"},
- `special-5=a,z`,
+ `special-5="a,z"`,
},
{
&Cookie{Name: "special-6", Value: ",z"},
@@ -398,9 +398,12 @@ func TestCookieSanitizeValue(t *testing.T) {
{"foo\"bar", "foobar"},
{"\x00\x7e\x7f\x80", "\x7e"},
{`"withquotes"`, "withquotes"},
- {"a z", "a z"},
+ {"a z", `"a z"`},
{" z", `" z"`},
{"a ", `"a "`},
+ {"a,z", `"a,z"`},
+ {",z", `",z"`},
+ {"a,", `"a,"`},
}
for _, tt := range tests {
if got := sanitizeCookieValue(tt.in); got != tt.want {
diff --git a/libgo/go/net/http/cookiejar/jar.go b/libgo/go/net/http/cookiejar/jar.go
index f89abbcd186..ef8c35bf0a1 100644
--- a/libgo/go/net/http/cookiejar/jar.go
+++ b/libgo/go/net/http/cookiejar/jar.go
@@ -331,7 +331,7 @@ func jarKey(host string, psl PublicSuffixList) string {
var i int
if psl == nil {
i = strings.LastIndex(host, ".")
- if i == -1 {
+ if i <= 0 {
return host
}
} else {
@@ -345,6 +345,9 @@ func jarKey(host string, psl PublicSuffixList) string {
// Storing cookies under host is a safe stopgap.
return host
}
+ // Only len(suffix) is used to determine the jar key from
+ // here on, so it is okay if psl.PublicSuffix("www.buggy.psl")
+ // returns "com" as the jar key is generated from host.
}
prevDot := strings.LastIndex(host[:i-1], ".")
return host[prevDot+1:]
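
The change from i == -1 to i <= 0 guards hosts whose only dot is the leading byte (".net", "."): with i == 0, host[:i-1] would slice out of range. A standalone restatement of the nil-PSL path (see TestIssue19384 below):

    package main

    import (
        "fmt"
        "strings"
    )

    // jarKey sketches the nil-PSL branch above, including the i <= 0
    // guard that keeps dot-leading hosts from panicking on host[:i-1].
    func jarKey(host string) string {
        i := strings.LastIndex(host, ".")
        if i <= 0 {
            return host
        }
        prevDot := strings.LastIndex(host[:i-1], ".")
        return host[prevDot+1:]
    }

    func main() {
        fmt.Println(jarKey("www.example.com")) // example.com
        fmt.Println(jarKey(".net"))            // .net (guarded, no panic)
        fmt.Println(jarKey("localhost"))       // localhost
    }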
diff --git a/libgo/go/net/http/cookiejar/jar_test.go b/libgo/go/net/http/cookiejar/jar_test.go
index 3aa601586e3..47fb1abdaaf 100644
--- a/libgo/go/net/http/cookiejar/jar_test.go
+++ b/libgo/go/net/http/cookiejar/jar_test.go
@@ -19,6 +19,9 @@ var tNow = time.Date(2013, 1, 1, 12, 0, 0, 0, time.UTC)
// testPSL implements PublicSuffixList with just two rules: "co.uk"
// and the default rule "*".
+// The implementation has two intentional bugs:
+// PublicSuffix("www.buggy.psl") == "xy"
+// PublicSuffix("www2.buggy.psl") == "com"
type testPSL struct{}
func (testPSL) String() string {
@@ -28,6 +31,12 @@ func (testPSL) PublicSuffix(d string) string {
if d == "co.uk" || strings.HasSuffix(d, ".co.uk") {
return "co.uk"
}
+ if d == "www.buggy.psl" {
+ return "xy"
+ }
+ if d == "www2.buggy.psl" {
+ return "com"
+ }
return d[strings.LastIndex(d, ".")+1:]
}
@@ -125,6 +134,17 @@ var canonicalHostTests = map[string]string{
"[2001:4860:0:::68]:8080": "2001:4860:0:::68",
"www.bücher.de": "www.xn--bcher-kva.de",
"www.example.com.": "www.example.com",
+ // TODO: Fix canonicalHost so that all of the following malformed
+ // domain names trigger an error. (This list is not exhaustive, e.g.
+ // malformed internationalized domain names are missing.)
+ ".": "",
+ "..": ".",
+ "...": "..",
+ ".net": ".net",
+ ".net.": ".net",
+ "a..": "a.",
+ "b.a..": "b.a.",
+ "weird.stuff...": "weird.stuff..",
"[bad.unmatched.bracket:": "error",
}
@@ -133,7 +153,7 @@ func TestCanonicalHost(t *testing.T) {
got, err := canonicalHost(h)
if want == "error" {
if err == nil {
- t.Errorf("%q: got nil error, want non-nil", h)
+ t.Errorf("%q: got %q and nil error, want non-nil", h, got)
}
continue
}
@@ -176,6 +196,17 @@ var jarKeyTests = map[string]string{
"co.uk": "co.uk",
"uk": "uk",
"192.168.0.5": "192.168.0.5",
+ "www.buggy.psl": "www.buggy.psl",
+ "www2.buggy.psl": "buggy.psl",
+ // The following are actual outputs of canonicalHost for
+ // malformed inputs to canonicalHost (see above).
+ "": "",
+ ".": ".",
+ "..": ".",
+ ".net": ".net",
+ "a.": "a.",
+ "b.a.": "a.",
+ "weird.stuff..": ".",
}
func TestJarKey(t *testing.T) {
@@ -197,6 +228,15 @@ var jarKeyNilPSLTests = map[string]string{
"co.uk": "co.uk",
"uk": "uk",
"192.168.0.5": "192.168.0.5",
+ // The following are actual outputs of canonicalHost for
+ // malformed inputs to canonicalHost.
+ "": "",
+ ".": ".",
+ "..": "..",
+ ".net": ".net",
+ "a.": "a.",
+ "b.a.": "a.",
+ "weird.stuff..": "stuff..",
}
func TestJarKeyNilPSL(t *testing.T) {
@@ -1265,3 +1305,18 @@ func TestDomainHandling(t *testing.T) {
test.run(t, jar)
}
}
+
+func TestIssue19384(t *testing.T) {
+ cookies := []*http.Cookie{{Name: "name", Value: "value"}}
+ for _, host := range []string{"", ".", "..", "..."} {
+ jar, _ := New(nil)
+ u := &url.URL{Scheme: "http", Host: host, Path: "/"}
+ if got := jar.Cookies(u); len(got) != 0 {
+ t.Errorf("host %q, got %v", host, got)
+ }
+ jar.SetCookies(u, cookies)
+ if got := jar.Cookies(u); len(got) != 1 || got[0].Value != "value" {
+ t.Errorf("host %q, got %v", host, got)
+ }
+ }
+}
diff --git a/libgo/go/net/http/export_test.go b/libgo/go/net/http/export_test.go
index b61f58b2db4..2ef145e5342 100644
--- a/libgo/go/net/http/export_test.go
+++ b/libgo/go/net/http/export_test.go
@@ -8,24 +8,29 @@
package http
import (
+ "context"
"net"
"sort"
"sync"
+ "testing"
"time"
)
var (
- DefaultUserAgent = defaultUserAgent
- NewLoggingConn = newLoggingConn
- ExportAppendTime = appendTime
- ExportRefererForURL = refererForURL
- ExportServerNewConn = (*Server).newConn
- ExportCloseWriteAndWait = (*conn).closeWriteAndWait
- ExportErrRequestCanceled = errRequestCanceled
- ExportErrRequestCanceledConn = errRequestCanceledConn
- ExportServeFile = serveFile
- ExportScanETag = scanETag
- ExportHttp2ConfigureServer = http2ConfigureServer
+ DefaultUserAgent = defaultUserAgent
+ NewLoggingConn = newLoggingConn
+ ExportAppendTime = appendTime
+ ExportRefererForURL = refererForURL
+ ExportServerNewConn = (*Server).newConn
+ ExportCloseWriteAndWait = (*conn).closeWriteAndWait
+ ExportErrRequestCanceled = errRequestCanceled
+ ExportErrRequestCanceledConn = errRequestCanceledConn
+ ExportErrServerClosedIdle = errServerClosedIdle
+ ExportServeFile = serveFile
+ ExportScanETag = scanETag
+ ExportHttp2ConfigureServer = http2ConfigureServer
+ Export_shouldCopyHeaderOnRedirect = shouldCopyHeaderOnRedirect
+ Export_writeStatusLine = writeStatusLine
)
func init() {
@@ -186,8 +191,6 @@ func ExportHttp2ConfigureTransport(t *Transport) error {
return nil
}
-var Export_shouldCopyHeaderOnRedirect = shouldCopyHeaderOnRedirect
-
func (s *Server) ExportAllConnsIdle() bool {
s.mu.Lock()
defer s.mu.Unlock()
@@ -199,3 +202,7 @@ func (s *Server) ExportAllConnsIdle() bool {
}
return true
}
+
+func (r *Request) WithT(t *testing.T) *Request {
+ return r.WithContext(context.WithValue(r.Context(), tLogKey{}, t.Logf))
+}
diff --git a/libgo/go/net/http/fcgi/child.go b/libgo/go/net/http/fcgi/child.go
index 88704245db8..30a6b2ce2df 100644
--- a/libgo/go/net/http/fcgi/child.go
+++ b/libgo/go/net/http/fcgi/child.go
@@ -7,6 +7,7 @@ package fcgi
// This file implements FastCGI from the perspective of a child process.
import (
+ "context"
"errors"
"fmt"
"io"
@@ -31,6 +32,10 @@ type request struct {
keepConn bool
}
+// envVarsContextKey uniquely identifies a mapping of CGI
+// environment variables to their values in a request context
+type envVarsContextKey struct{}
+
func newRequest(reqId uint16, flags uint8) *request {
r := &request{
reqId: reqId,
@@ -259,6 +264,18 @@ func (c *child) handleRecord(rec *record) error {
}
}
+// filterOutUsedEnvVars returns a new map of env vars without the
+// variables in the given envVars map that are read for creating each http.Request
+func filterOutUsedEnvVars(envVars map[string]string) map[string]string {
+ withoutUsedEnvVars := make(map[string]string)
+ for k, v := range envVars {
+ if addFastCGIEnvToContext(k) {
+ withoutUsedEnvVars[k] = v
+ }
+ }
+ return withoutUsedEnvVars
+}
+
func (c *child) serveRequest(req *request, body io.ReadCloser) {
r := newResponse(c, req)
httpReq, err := cgi.RequestFromMap(req.params)
@@ -268,6 +285,9 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) {
c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error()))
} else {
httpReq.Body = body
+ withoutUsedEnvVars := filterOutUsedEnvVars(req.params)
+ envVarCtx := context.WithValue(httpReq.Context(), envVarsContextKey{}, withoutUsedEnvVars)
+ httpReq = httpReq.WithContext(envVarCtx)
c.handler.ServeHTTP(r, httpReq)
}
r.Close()
@@ -329,3 +349,39 @@ func Serve(l net.Listener, handler http.Handler) error {
go c.serve()
}
}
+
+// ProcessEnv returns FastCGI environment variables associated with the request r
+// for which no effort was made to be included in the request itself - the data
+// is hidden in the request's context. As an example, if REMOTE_USER is set for a
+// request, it will not be found anywhere in r, but it will be included in
+// ProcessEnv's response (via r's context).
+func ProcessEnv(r *http.Request) map[string]string {
+ env, _ := r.Context().Value(envVarsContextKey{}).(map[string]string)
+ return env
+}
+
+// addFastCGIEnvToContext reports whether to include the FastCGI environment variable s
+// in the http.Request.Context, accessible via ProcessEnv.
+func addFastCGIEnvToContext(s string) bool {
+ // Exclude things supported by net/http natively:
+ switch s {
+ case "CONTENT_LENGTH", "CONTENT_TYPE", "HTTPS",
+ "PATH_INFO", "QUERY_STRING", "REMOTE_ADDR",
+ "REMOTE_HOST", "REMOTE_PORT", "REQUEST_METHOD",
+ "REQUEST_URI", "SCRIPT_NAME", "SERVER_PROTOCOL":
+ return false
+ }
+ if strings.HasPrefix(s, "HTTP_") {
+ return false
+ }
+ // Explicitly include FastCGI-specific things.
+ // This list is redundant with the default "return true" below.
+ // Consider this documentation of the sorts of things we expect
+ // to maybe see.
+ switch s {
+ case "REMOTE_USER":
+ return true
+ }
+ // Unknown, so include it to be safe.
+ return true
+}
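
A possible use of the new API: a FastCGI handler reading REMOTE_USER, which never appears in the http.Request itself but survives in its context. A hedged sketch — the listen address is made up:

    package main

    import (
        "fmt"
        "net"
        "net/http"
        "net/http/fcgi"
    )

    func main() {
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            env := fcgi.ProcessEnv(r) // FastCGI vars hidden in r's context
            fmt.Fprintf(w, "REMOTE_USER=%s\n", env["REMOTE_USER"])
        })
        l, err := net.Listen("tcp", "127.0.0.1:9000")
        if err != nil {
            panic(err)
        }
        _ = fcgi.Serve(l, h) // blocks, serving FastCGI requests
    }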
diff --git a/libgo/go/net/http/fcgi/fcgi.go b/libgo/go/net/http/fcgi/fcgi.go
index 5057d700981..8f3449a991a 100644
--- a/libgo/go/net/http/fcgi/fcgi.go
+++ b/libgo/go/net/http/fcgi/fcgi.go
@@ -24,7 +24,7 @@ import (
)
// recType is a record type, as defined by
-// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8
+// https://web.archive.org/web/20150420080736/http://www.fastcgi.com/drupal/node/6?q=node/22#S8
type recType uint8
const (
diff --git a/libgo/go/net/http/fcgi/fcgi_test.go b/libgo/go/net/http/fcgi/fcgi_test.go
index b6013bfdd51..e9d2b34023c 100644
--- a/libgo/go/net/http/fcgi/fcgi_test.go
+++ b/libgo/go/net/http/fcgi/fcgi_test.go
@@ -278,3 +278,69 @@ func TestMalformedParams(t *testing.T) {
c := newChild(rw, http.DefaultServeMux)
c.serve()
}
+
+// a series of FastCGI records that start and end a request
+var streamFullRequestStdin = bytes.Join([][]byte{
+ // set up request
+ makeRecord(typeBeginRequest, 1,
+ []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}),
+ // add required parameters
+ makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")),
+ makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")),
+ // set optional parameters
+ makeRecord(typeParams, 1, nameValuePair11("REMOTE_USER", "jane.doe")),
+ makeRecord(typeParams, 1, nameValuePair11("QUERY_STRING", "/foo/bar")),
+ makeRecord(typeParams, 1, nil),
+ // begin sending body of request
+ makeRecord(typeStdin, 1, []byte("0123456789abcdef")),
+ // end request
+ makeRecord(typeEndRequest, 1, nil),
+},
+ nil)
+
+var envVarTests = []struct {
+ input []byte
+ envVar string
+ expectedVal string
+ expectedFilteredOut bool
+}{
+ {
+ streamFullRequestStdin,
+ "REMOTE_USER",
+ "jane.doe",
+ false,
+ },
+ {
+ streamFullRequestStdin,
+ "QUERY_STRING",
+ "",
+ true,
+ },
+}
+
+// Test that environment variables set for a request can be
+// read by a handler. Ensures that variables not set will not
+// be exposed to a handler.
+func TestChildServeReadsEnvVars(t *testing.T) {
+ for _, tt := range envVarTests {
+ input := make([]byte, len(tt.input))
+ copy(input, tt.input)
+ rc := nopWriteCloser{bytes.NewBuffer(input)}
+ done := make(chan bool)
+ c := newChild(rc, http.HandlerFunc(func(
+ w http.ResponseWriter,
+ r *http.Request,
+ ) {
+ env := ProcessEnv(r)
+ if _, ok := env[tt.envVar]; ok && tt.expectedFilteredOut {
+ t.Errorf("Expected environment variable %s to not be set, but set to %s",
+ tt.envVar, env[tt.envVar])
+ } else if env[tt.envVar] != tt.expectedVal {
+ t.Errorf("Expected %s, got %s", tt.expectedVal, env[tt.envVar])
+ }
+ done <- true
+ }))
+ go c.serve()
+ <-done
+ }
+}
diff --git a/libgo/go/net/http/filetransport_test.go b/libgo/go/net/http/filetransport_test.go
index 6f1a537e2ed..2a2f32c7694 100644
--- a/libgo/go/net/http/filetransport_test.go
+++ b/libgo/go/net/http/filetransport_test.go
@@ -49,6 +49,7 @@ func TestFileTransport(t *testing.T) {
t.Fatalf("for %s, nil Body", urlstr)
}
slurp, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
check("ReadAll "+urlstr, err)
if string(slurp) != "Bar" {
t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar")
diff --git a/libgo/go/net/http/fs.go b/libgo/go/net/http/fs.go
index bf63bb5441f..5819334b5f4 100644
--- a/libgo/go/net/http/fs.go
+++ b/libgo/go/net/http/fs.go
@@ -30,21 +30,51 @@ import (
// value is a filename on the native file system, not a URL, so it is separated
// by filepath.Separator, which isn't necessarily '/'.
//
+// Note that Dir will allow access to files and directories starting with a
+// period, which could expose sensitive directories like a .git directory or
+// sensitive files like .htpasswd. To exclude files with a leading period,
+// remove the files/directories from the server or create a custom FileSystem
+// implementation.
+//
// An empty Dir is treated as ".".
type Dir string
+// mapDirOpenError maps the provided non-nil error from opening name
+// to a possibly better non-nil error. In particular, it turns OS-specific errors
+// about opening files in non-directories into os.ErrNotExist. See Issue 18984.
+func mapDirOpenError(originalErr error, name string) error {
+ if os.IsNotExist(originalErr) || os.IsPermission(originalErr) {
+ return originalErr
+ }
+
+ parts := strings.Split(name, string(filepath.Separator))
+ for i := range parts {
+ if parts[i] == "" {
+ continue
+ }
+ fi, err := os.Stat(strings.Join(parts[:i+1], string(filepath.Separator)))
+ if err != nil {
+ return originalErr
+ }
+ if !fi.IsDir() {
+ return os.ErrNotExist
+ }
+ }
+ return originalErr
+}
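
What mapDirOpenError buys: opening "a/b" where "a" is a regular file fails with an OS-specific error such as ENOTDIR; stat-ing each path prefix lets Dir.Open report a uniform os.ErrNotExist instead. Illustration, assuming a regular file testdata/file exists as in the tests below:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        fs := http.Dir("testdata")
        // "file" is a regular file, so nothing can live beneath it.
        _, err := fs.Open("/file/not-a-file")
        fmt.Println(err) // file does not exist (os.ErrNotExist)
    }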
+
func (d Dir) Open(name string) (File, error) {
- if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) ||
- strings.Contains(name, "\x00") {
+ if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
return nil, errors.New("http: invalid character in file path")
}
dir := string(d)
if dir == "" {
dir = "."
}
- f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+ fullName := filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))
+ f, err := os.Open(fullName)
if err != nil {
- return nil, err
+ return nil, mapDirOpenError(err, fullName)
}
return f, nil
}
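
The new Dir doc comment above suggests a custom FileSystem to hide dot-prefixed files; a minimal sketch of one (dotFileHidingFS is invented here, not part of the patch):

    package main

    import (
        "net/http"
        "os"
        "strings"
    )

    type dotFileHidingFS struct{ fs http.FileSystem }

    // Open rejects any name with a dot-prefixed path element, hiding
    // entries such as .git or .htpasswd from the file server.
    func (f dotFileHidingFS) Open(name string) (http.File, error) {
        for _, part := range strings.Split(name, "/") {
            if strings.HasPrefix(part, ".") {
                return nil, os.ErrNotExist
            }
        }
        return f.fs.Open(name)
    }

    func main() {
        http.Handle("/", http.FileServer(dotFileHidingFS{http.Dir("./public")}))
        _ = http.ListenAndServe("127.0.0.1:8080", nil)
    }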
@@ -291,7 +321,7 @@ func scanETag(s string) (etag string, remain string) {
case c == '"':
return string(s[:i+1]), s[i+1:]
default:
- break
+ return "", ""
}
}
return "", ""
@@ -349,7 +379,7 @@ func checkIfMatch(w ResponseWriter, r *Request) condResult {
return condFalse
}
-func checkIfUnmodifiedSince(w ResponseWriter, r *Request, modtime time.Time) condResult {
+func checkIfUnmodifiedSince(r *Request, modtime time.Time) condResult {
ius := r.Header.Get("If-Unmodified-Since")
if ius == "" || isZeroTime(modtime) {
return condNone
@@ -394,7 +424,7 @@ func checkIfNoneMatch(w ResponseWriter, r *Request) condResult {
return condTrue
}
-func checkIfModifiedSince(w ResponseWriter, r *Request, modtime time.Time) condResult {
+func checkIfModifiedSince(r *Request, modtime time.Time) condResult {
if r.Method != "GET" && r.Method != "HEAD" {
return condNone
}
@@ -479,7 +509,7 @@ func checkPreconditions(w ResponseWriter, r *Request, modtime time.Time) (done b
// This function carefully follows RFC 7232 section 6.
ch := checkIfMatch(w, r)
if ch == condNone {
- ch = checkIfUnmodifiedSince(w, r, modtime)
+ ch = checkIfUnmodifiedSince(r, modtime)
}
if ch == condFalse {
w.WriteHeader(StatusPreconditionFailed)
@@ -495,7 +525,7 @@ func checkPreconditions(w ResponseWriter, r *Request, modtime time.Time) (done b
return true, ""
}
case condNone:
- if checkIfModifiedSince(w, r, modtime) == condFalse {
+ if checkIfModifiedSince(r, modtime) == condFalse {
writeNotModified(w)
return true, ""
}
@@ -580,7 +610,7 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec
// Still a directory? (we didn't find an index.html file)
if d.IsDir() {
- if checkIfModifiedSince(w, r, d.ModTime()) == condFalse {
+ if checkIfModifiedSince(r, d.ModTime()) == condFalse {
writeNotModified(w)
return
}
diff --git a/libgo/go/net/http/fs_test.go b/libgo/go/net/http/fs_test.go
index bba56821156..f6eab0fcc31 100644
--- a/libgo/go/net/http/fs_test.go
+++ b/libgo/go/net/http/fs_test.go
@@ -74,6 +74,7 @@ func TestServeFile(t *testing.T) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
+ c := ts.Client()
var err error
@@ -91,7 +92,7 @@ func TestServeFile(t *testing.T) {
req.Method = "GET"
// straight GET
- _, body := getBody(t, "straight get", req)
+ _, body := getBody(t, "straight get", req, c)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
@@ -102,7 +103,7 @@ Cases:
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
- resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
+ resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req, c)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
@@ -704,7 +705,8 @@ func TestDirectoryIfNotModified(t *testing.T) {
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
- res, err = DefaultClient.Do(req)
+ c := ts.Client()
+ res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
@@ -716,7 +718,7 @@ func TestDirectoryIfNotModified(t *testing.T) {
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
- res, err = DefaultClient.Do(req)
+ res, err = c.Do(req)
if err != nil {
t.Fatal(err)
}
@@ -995,7 +997,9 @@ func TestServeContent(t *testing.T) {
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
- res, err := DefaultClient.Do(req)
+
+ c := ts.Client()
+ res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
@@ -1050,8 +1054,9 @@ func TestServeContentErrorMessages(t *testing.T) {
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
+ c := ts.Client()
for _, code := range []int{403, 404, 500} {
- res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
+ res, err := c.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
@@ -1090,8 +1095,11 @@ func TestLinuxSendfile(t *testing.T) {
// strace on the above platforms doesn't support sendfile64
// and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
- case "mips64":
- t.Skip("TODO: update this test to be robust against various versions of strace on mips64. See golang.org/issue/33430")
+ }
+
+ // Attempt to run strace, and skip on failure - this test requires SYS_PTRACE.
+ if err := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=^$").Run(); err != nil {
+ t.Skipf("skipping; failed to run strace: %v", err)
}
var buf bytes.Buffer
@@ -1125,8 +1133,8 @@ func TestLinuxSendfile(t *testing.T) {
}
}
-func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
- r, err := DefaultClient.Do(&req)
+func getBody(t *testing.T, testName string, req Request, client *Client) (*Response, []byte) {
+ r, err := client.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
@@ -1161,6 +1169,50 @@ func TestLinuxSendfileChild(*testing.T) {
}
}
+// Issue 18984: tests that requests for paths beyond files return not-found errors
+func TestFileServerNotDirError(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(FileServer(Dir("testdata")))
+ defer ts.Close()
+
+ res, err := Get(ts.URL + "/index.html/not-a-file")
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ if res.StatusCode != 404 {
+ t.Errorf("StatusCode = %v; want 404", res.StatusCode)
+ }
+
+ test := func(name string, dir Dir) {
+ t.Run(name, func(t *testing.T) {
+ _, err = dir.Open("/index.html/not-a-file")
+ if err == nil {
+ t.Fatal("err == nil; want != nil")
+ }
+ if !os.IsNotExist(err) {
+ t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
+ }
+
+ _, err = dir.Open("/index.html/not-a-dir/not-a-file")
+ if err == nil {
+ t.Fatal("err == nil; want != nil")
+ }
+ if !os.IsNotExist(err) {
+ t.Errorf("err = %v; os.IsNotExist(err) = %v; want true", err, os.IsNotExist(err))
+ }
+ })
+ }
+
+ absPath, err := filepath.Abs("testdata")
+ if err != nil {
+ t.Fatal("get abs path:", err)
+ }
+
+ test("RelativePath", Dir("testdata"))
+ test("AbsolutePath", Dir(absPath))
+}
+
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
@@ -1210,10 +1262,10 @@ func Test_scanETag(t *testing.T) {
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
- {"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
+ {`"spaced etag"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
diff --git a/libgo/go/net/http/h2_bundle.go b/libgo/go/net/http/h2_bundle.go
index 6fbbcd0fc76..373f55098a3 100644
--- a/libgo/go/net/http/h2_bundle.go
+++ b/libgo/go/net/http/h2_bundle.go
@@ -48,6 +48,642 @@ import (
"golang_org/x/net/lex/httplex"
)
+// A list of the possible cipher suite ids. Taken from
+// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ http2cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ http2cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ http2cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ http2cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ http2cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ http2cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ http2cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ http2cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ http2cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ http2cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ http2cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ http2cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ http2cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ http2cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ http2cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ http2cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ http2cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ http2cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ http2cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ http2cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+// References:
+// https://tools.ietf.org/html/rfc7540#appendix-A
+// Reject cipher suites from Appendix A.
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type"
+func http2isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case http2cipher_TLS_NULL_WITH_NULL_NULL,
+ http2cipher_TLS_RSA_WITH_NULL_MD5,
+ http2cipher_TLS_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_MD5,
+ http2cipher_TLS_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ http2cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ http2cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ http2cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ http2cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ http2cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ http2cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ http2cipher_TLS_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ http2cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ http2cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ http2cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ http2cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM,
+ http2cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ http2cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM,
+ http2cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ http2cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
+
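
To show how a predicate like this is applied: a server reviewing a finished handshake would check the negotiated suite from tls.ConnectionState. The sketch below uses a hypothetical exported stand-in, since http2isBadCipher is unexported inside the bundle, and only lists two suites for brevity.

package main

import (
	"crypto/tls"
	"fmt"
)

// isBadCipher stands in for the bundled http2isBadCipher predicate.
func isBadCipher(id uint16) bool {
	switch id {
	case tls.TLS_RSA_WITH_AES_128_CBC_SHA, // no ephemeral key exchange
		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: // block cipher from Appendix A
		return true
	}
	return false
}

func main() {
	state := tls.ConnectionState{CipherSuite: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}
	if isBadCipher(state.CipherSuite) {
		fmt.Println("reject: suite is blacklisted by RFC 7540 Appendix A")
	} else {
		fmt.Println("suite acceptable for HTTP/2")
	}
}
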
// ClientConnPool manages a pool of HTTP/2 client connections.
type http2ClientConnPool interface {
GetClientConn(req *Request, addr string) (*http2ClientConn, error)
@@ -126,7 +762,7 @@ type http2dialCall struct {
// requires p.mu is held.
func (p *http2clientConnPool) getStartDialLocked(addr string) *http2dialCall {
if call, ok := p.dialing[addr]; ok {
-
+ // A dial is already in-flight. Don't start another.
return call
}
call := &http2dialCall{p: p, done: make(chan struct{})}
@@ -254,7 +890,12 @@ func (p *http2clientConnPool) MarkDead(cc *http2ClientConn) {
func (p *http2clientConnPool) closeIdleConnections() {
p.mu.Lock()
defer p.mu.Unlock()
-
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
for _, vv := range p.conns {
for _, cc := range vv {
cc.closeIfIdle()
@@ -269,7 +910,8 @@ func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) [
out = append(out, v)
}
}
-
+ // If we filtered it out, zero out the last item to prevent
+ // the GC from seeing it.
if len(in) != len(out) {
in[len(in)-1] = nil
}
@@ -277,7 +919,7 @@ func http2filterOutClientConn(in []*http2ClientConn, exclude *http2ClientConn) [
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
-// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type http2noDialClientConnPool struct{ *http2clientConnPool }
@@ -310,7 +952,10 @@ func http2configureTransport(t1 *Transport) (*http2Transport, error) {
go c.Close()
return http2erringRoundTripper{err}
} else if !used {
-
+ // Turns out we don't need this c.
+	// For example, two goroutines made requests to the same host
+	// at the same time, both kicking off TCP dials (since the
+	// protocol was unknown).
go c.Close()
}
return t2
@@ -326,7 +971,7 @@ func http2configureTransport(t1 *Transport) (*http2Transport, error) {
}
// registerHTTPSProtocol calls Transport.RegisterProtocol but
-// convering panics into errors.
+// converting panics into errors.
func http2registerHTTPSProtocol(t *Transport, rt RoundTripper) (err error) {
defer func() {
if e := recover(); e != nil {
@@ -349,6 +994,141 @@ func (rt http2noDialH2RoundTripper) RoundTrip(req *Request) (*Response, error) {
return res, err
}
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+ http2dataChunkSizeClasses = []int{
+ 1 << 10,
+ 2 << 10,
+ 4 << 10,
+ 8 << 10,
+ 16 << 10,
+ }
+ http2dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return make([]byte, 1<<10) }},
+ {New: func() interface{} { return make([]byte, 2<<10) }},
+ {New: func() interface{} { return make([]byte, 4<<10) }},
+ {New: func() interface{} { return make([]byte, 8<<10) }},
+ {New: func() interface{} { return make([]byte, 16<<10) }},
+ }
+)
+
+func http2getDataBufferChunk(size int64) []byte {
+ i := 0
+ for ; i < len(http2dataChunkSizeClasses)-1; i++ {
+ if size <= int64(http2dataChunkSizeClasses[i]) {
+ break
+ }
+ }
+ return http2dataChunkPools[i].Get().([]byte)
+}
+
+func http2putDataBufferChunk(p []byte) {
+ for i, n := range http2dataChunkSizeClasses {
+ if len(p) == n {
+ http2dataChunkPools[i].Put(p)
+ return
+ }
+ }
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
+
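
To make the class selection concrete, here is a small self-contained sketch of the same round-up rule (sample sizes are arbitrary; a request larger than the biggest class still gets a 16 KiB chunk, and Write then loops to allocate more):

package main

import "fmt"

var classes = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// classFor mirrors the loop in http2getDataBufferChunk: pick the first
// class that fits, or the largest class if none does.
func classFor(size int64) int {
	i := 0
	for ; i < len(classes)-1; i++ {
		if size <= int64(classes[i]) {
			break
		}
	}
	return classes[i]
}

func main() {
	for _, n := range []int64{100, 1024, 1025, 9000, 64 << 10} {
		fmt.Printf("want %6d bytes -> chunk of %5d\n", n, classFor(n))
	}
	// 100 -> 1024, 1024 -> 1024, 1025 -> 2048, 9000 -> 16384, 65536 -> 16384
}
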
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type http2dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var http2errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *http2dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, http2errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ http2putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *http2dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *http2dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+		// If the last chunk is full (or there are no chunks yet), allocate
+		// a new chunk. Try to allocate enough to fully copy p plus any
+		// additional bytes we expect to receive. However, this may allocate
+		// less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *http2dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := http2getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
+
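
A sketch of the Write/Read contract (illustrative only: http2dataBuffer is unexported, so this would have to live inside the package, with fmt assumed imported):

func exampleDataBuffer() {
	var b http2dataBuffer
	b.expected = 4 << 10 // hint that more bytes are expected

	b.Write([]byte("hello "))
	b.Write([]byte("world")) // fits in the same 4 KiB chunk

	p := make([]byte, 32)
	n, _ := b.Read(p) // copies across chunk boundaries if needed
	fmt.Printf("%q remaining=%d\n", p[:n], b.Len()) // "hello world" remaining=0
}
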
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type http2ErrCode uint32
@@ -429,11 +1209,16 @@ type http2goAwayFlowError struct{}
func (http2goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
// Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(ErrCodeProtocol).
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
type http2connError struct {
- Code http2ErrCode
- Reason string
+ Code http2ErrCode // the ConnectionError error code
+ Reason string // additional reason
}
func (e http2connError) Error() string {
@@ -469,56 +1254,6 @@ var (
http2errPseudoAfterRegular = errors.New("pseudo header field after regular")
)
-// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
-// It never allocates, but moves old data as new data is written.
-type http2fixedBuffer struct {
- buf []byte
- r, w int
-}
-
-var (
- http2errReadEmpty = errors.New("read from empty fixedBuffer")
- http2errWriteFull = errors.New("write on full fixedBuffer")
-)
-
-// Read copies bytes from the buffer into p.
-// It is an error to read when no data is available.
-func (b *http2fixedBuffer) Read(p []byte) (n int, err error) {
- if b.r == b.w {
- return 0, http2errReadEmpty
- }
- n = copy(p, b.buf[b.r:b.w])
- b.r += n
- if b.r == b.w {
- b.r = 0
- b.w = 0
- }
- return n, nil
-}
-
-// Len returns the number of bytes of the unread portion of the buffer.
-func (b *http2fixedBuffer) Len() int {
- return b.w - b.r
-}
-
-// Write copies bytes from p into the buffer.
-// It is an error to write more data than the buffer can hold.
-func (b *http2fixedBuffer) Write(p []byte) (n int, err error) {
-
- if b.r > 0 && len(p) > len(b.buf)-b.w {
- copy(b.buf, b.buf[b.r:b.w])
- b.w -= b.r
- b.r = 0
- }
-
- n = copy(b.buf[b.w:], p)
- b.w += n
- if n < len(p) {
- err = http2errWriteFull
- }
- return n, err
-}
-
// flow is the flow control window's size.
type http2flow struct {
// n is the number of DATA bytes we're allowed to send.
@@ -666,7 +1401,7 @@ var http2flagName = map[http2FrameType]map[http2Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type http2frameParser func(fh http2FrameHeader, payload []byte) (http2Frame, error)
+type http2frameParser func(fc *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error)
var http2frameParsers = map[http2FrameType]http2frameParser{
http2FrameData: http2parseDataFrame,
@@ -855,25 +1590,33 @@ type http2Framer struct {
// If the limit is hit, MetaHeadersFrame.Truncated is set true.
MaxHeaderListSize uint32
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
logReads, logWrites bool
debugFramer *http2Framer // only use for logging written writes
debugFramerBuf *bytes.Buffer
debugReadLoggerf func(string, ...interface{})
debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *http2frameCache // nil if frames aren't reused (default)
}
func (fr *http2Framer) maxHeaderListSize() uint32 {
if fr.MaxHeaderListSize == 0 {
- return 16 << 20
+ return 16 << 20 // sane default, per docs
}
return fr.MaxHeaderListSize
}
func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamID uint32) {
-
+ // Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0,
+ 0, // 3 bytes of length, filled in in endWrite
0,
0,
byte(ftype),
@@ -885,7 +1628,8 @@ func (f *http2Framer) startWrite(ftype http2FrameType, flags http2Flags, streamI
}
func (f *http2Framer) endWrite() error {
-
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
length := len(f.wbuf) - http2frameHeaderLen
if length >= (1 << 24) {
return http2ErrFrameTooLarge
@@ -909,8 +1653,9 @@ func (f *http2Framer) logWrite() {
if f.debugFramer == nil {
f.debugFramerBuf = new(bytes.Buffer)
f.debugFramer = http2NewFramer(nil, f.debugFramerBuf)
- f.debugFramer.logReads = false
-
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
f.debugFramer.AllowIllegalReads = true
}
f.debugFramerBuf.Write(f.wbuf)
@@ -937,6 +1682,27 @@ const (
http2maxFrameSize = 1<<24 - 1
)
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *http2Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &http2frameCache{}
+}
+
+type http2frameCache struct {
+ dataFrame http2DataFrame
+}
+
+func (fc *http2frameCache) getDataFrame() *http2DataFrame {
+ if fc == nil {
+ return &http2DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
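
The bundle tracks golang.org/x/net/http2, where this knob is public; a usage sketch against that package (note the contract in the doc comment: a returned frame is only valid until the next ReadFrame, so copy anything you need to keep):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)
	fr.SetReuseFrames() // DATA frames are now recycled between reads

	fr.WriteData(1, false, []byte("payload"))

	f, err := fr.ReadFrame()
	if err != nil {
		panic(err)
	}
	df := f.(*http2.DataFrame)
	saved := append([]byte(nil), df.Data()...) // copy before the next ReadFrame
	fmt.Printf("%q\n", saved)
}
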
// NewFramer returns a Framer that writes frames to w and reads them from r.
func http2NewFramer(w io.Writer, r io.Reader) *http2Framer {
fr := &http2Framer{
@@ -1016,7 +1782,7 @@ func (fr *http2Framer) ReadFrame() (http2Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
- f, err := http2typeFrameParser(fh.Type)(fh, payload)
+ f, err := http2typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(http2connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@@ -1104,14 +1870,18 @@ func (f *http2DataFrame) Data() []byte {
return f.data
}
-func http2parseDataFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) {
+func http2parseDataFrame(fc *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if fh.StreamID == 0 {
-
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
return nil, http2connError{http2ErrCodeProtocol, "DATA frame with stream ID 0"}
}
- f := &http2DataFrame{
- http2FrameHeader: fh,
- }
+ f := fc.getDataFrame()
+ f.http2FrameHeader = fh
+
var padSize byte
if fh.Flags.Has(http2FlagDataPadded) {
var err error
@@ -1121,7 +1891,10 @@ func http2parseDataFrame(fh http2FrameHeader, payload []byte) (http2Frame, error
}
}
if int(padSize) > len(payload) {
-
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
return nil, http2connError{http2ErrCodeProtocol, "pad size larger than data payload"}
}
f.data = payload[:len(payload)-int(padSize)]
@@ -1132,6 +1905,7 @@ var (
http2errStreamID = errors.New("invalid stream ID")
http2errDepStreamID = errors.New("invalid dependent stream ID")
http2errPadLength = errors.New("pad length too large")
+ http2errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
func http2validStreamIDOrZero(streamID uint32) bool {
@@ -1155,6 +1929,7 @@ func (f *http2Framer) WriteData(streamID uint32, endStream bool, data []byte) er
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@@ -1163,8 +1938,18 @@ func (f *http2Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad
if !http2validStreamID(streamID) && !f.AllowIllegalWrites {
return http2errStreamID
}
- if len(pad) > 255 {
- return http2errPadLength
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return http2errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return http2errPadBytes
+ }
+ }
+ }
}
var flags http2Flags
if endStream {
@@ -1192,22 +1977,35 @@ type http2SettingsFrame struct {
p []byte
}
-func http2parseSettingsFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseSettingsFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.Flags.Has(http2FlagSettingsAck) && fh.Length > 0 {
-
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
if fh.StreamID != 0 {
-
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
if len(p)%6 != 0 {
-
+ // Expecting a whole number of 6-byte settings (a 2-byte ID and a 4-byte value each).
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
f := &http2SettingsFrame{http2FrameHeader: fh, p: p}
if v, ok := f.Value(http2SettingInitialWindowSize); ok && v > (1<<31)-1 {
-
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
return nil, http2ConnectionError(http2ErrCodeFlowControl)
}
return f, nil
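
The `len(p)%6 != 0` check works because every SETTINGS entry is a fixed 6 bytes: a big-endian 2-byte identifier followed by a 4-byte value. A sketch of decoding such a payload (hypothetical helper):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeSettings walks a SETTINGS payload: each entry is a 2-byte ID
// followed by a 4-byte value, so the length must be a multiple of 6.
func decodeSettings(p []byte) ([][2]uint32, bool) {
	if len(p)%6 != 0 {
		return nil, false // FRAME_SIZE_ERROR in the parser above
	}
	var out [][2]uint32
	for ; len(p) > 0; p = p[6:] {
		id := uint32(binary.BigEndian.Uint16(p[:2]))
		val := binary.BigEndian.Uint32(p[2:6])
		out = append(out, [2]uint32{id, val})
	}
	return out, true
}

func main() {
	// SETTINGS_INITIAL_WINDOW_SIZE (0x4) = 65535
	p := []byte{0x00, 0x04, 0x00, 0x00, 0xff, 0xff}
	fmt.Println(decodeSettings(p)) // [[4 65535]] true
}
```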
@@ -1281,7 +2079,7 @@ type http2PingFrame struct {
func (f *http2PingFrame) IsAck() bool { return f.Flags.Has(http2FlagPingAck) }
-func http2parsePingFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) {
+func http2parsePingFrame(_ *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if len(payload) != 8 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
@@ -1321,7 +2119,7 @@ func (f *http2GoAwayFrame) DebugData() []byte {
return f.debugData
}
-func http2parseGoAwayFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseGoAwayFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.StreamID != 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
@@ -1361,7 +2159,7 @@ func (f *http2UnknownFrame) Payload() []byte {
return f.p
}
-func http2parseUnknownFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseUnknownFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
return &http2UnknownFrame{fh, p}, nil
}
@@ -1372,13 +2170,18 @@ type http2WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
-func http2parseWindowUpdateFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseWindowUpdateFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if len(p) != 4 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
- inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
if inc == 0 {
-
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
if fh.StreamID == 0 {
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
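
A WINDOW_UPDATE payload is exactly 4 bytes; the high bit is reserved and masked off, and a zero increment is an error. A minimal sketch of the decode path (hypothetical names):

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeIncrement extracts the 31-bit window increment, masking the
// reserved high bit, and rejects a zero increment like the parser above.
func decodeIncrement(p []byte) (uint32, error) {
	if len(p) != 4 {
		return 0, errors.New("FRAME_SIZE_ERROR: want 4 bytes")
	}
	inc := binary.BigEndian.Uint32(p) & 0x7fffffff
	if inc == 0 {
		return 0, errors.New("PROTOCOL_ERROR: zero increment")
	}
	return inc, nil
}

func main() {
	// Reserved high bit set; it is ignored and the increment is 4096.
	fmt.Println(decodeIncrement([]byte{0x80, 0x00, 0x10, 0x00})) // 4096 <nil>
}
```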
@@ -1395,7 +2198,7 @@ func http2parseWindowUpdateFrame(fh http2FrameHeader, p []byte) (http2Frame, err
// If the Stream ID is zero, the window update applies to the
// connection as a whole.
func (f *http2Framer) WriteWindowUpdate(streamID, incr uint32) error {
-
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
return errors.New("illegal window increment value")
}
@@ -1432,12 +2235,15 @@ func (f *http2HeadersFrame) HasPriority() bool {
return f.http2FrameHeader.Flags.Has(http2FlagHeadersPriority)
}
-func http2parseHeadersFrame(fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
+func http2parseHeadersFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
hf := &http2HeadersFrame{
http2FrameHeader: fh,
}
if fh.StreamID == 0 {
-
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
return nil, http2connError{http2ErrCodeProtocol, "HEADERS frame with stream ID 0"}
}
var padLength uint8
@@ -1453,7 +2259,7 @@ func http2parseHeadersFrame(fh http2FrameHeader, p []byte) (_ http2Frame, err er
return nil, err
}
hf.Priority.StreamDep = v & 0x7fffffff
- hf.Priority.Exclusive = (v != hf.Priority.StreamDep)
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
p, hf.Priority.Weight, err = http2readByte(p)
if err != nil {
return nil, err
@@ -1556,7 +2362,7 @@ type http2PriorityParam struct {
Exclusive bool
// Weight is the stream's zero-indexed weight. It should be
- // set together with StreamDep, or neither should be set. Per
+ // set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
@@ -1566,7 +2372,7 @@ func (p http2PriorityParam) IsZero() bool {
return p == http2PriorityParam{}
}
-func http2parsePriorityFrame(fh http2FrameHeader, payload []byte) (http2Frame, error) {
+func http2parsePriorityFrame(_ *http2frameCache, fh http2FrameHeader, payload []byte) (http2Frame, error) {
if fh.StreamID == 0 {
return nil, http2connError{http2ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
@@ -1574,13 +2380,13 @@ func http2parsePriorityFrame(fh http2FrameHeader, payload []byte) (http2Frame, e
return nil, http2connError{http2ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
}
v := binary.BigEndian.Uint32(payload[:4])
- streamID := v & 0x7fffffff
+ streamID := v & 0x7fffffff // mask off high bit
return &http2PriorityFrame{
http2FrameHeader: fh,
http2PriorityParam: http2PriorityParam{
Weight: payload[4],
StreamDep: streamID,
- Exclusive: streamID != v,
+ Exclusive: streamID != v, // was high bit set?
},
}, nil
}
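
The 5-byte PRIORITY payload packs the exclusive flag into the high bit of the 4-byte dependency word, followed by one weight byte, which is why `Exclusive` is recovered by comparing the masked and unmasked values. A standalone sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodePriority splits the 5-byte PRIORITY payload the same way as the
// parser above: exclusive bit, 31-bit stream dependency, 1-byte weight.
func decodePriority(p [5]byte) (exclusive bool, dep uint32, weight uint8) {
	v := binary.BigEndian.Uint32(p[:4])
	dep = v & 0x7fffffff
	return v != dep, dep, p[4]
}

func main() {
	// Exclusive dependency on stream 3, weight byte 15 (i.e. weight 16).
	fmt.Println(decodePriority([5]byte{0x80, 0, 0, 3, 15})) // true 3 15
}
```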
@@ -1613,7 +2419,7 @@ type http2RSTStreamFrame struct {
ErrCode http2ErrCode
}
-func http2parseRSTStreamFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseRSTStreamFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if len(p) != 4 {
return nil, http2ConnectionError(http2ErrCodeFrameSize)
}
@@ -1643,7 +2449,7 @@ type http2ContinuationFrame struct {
headerFragBuf []byte
}
-func http2parseContinuationFrame(fh http2FrameHeader, p []byte) (http2Frame, error) {
+func http2parseContinuationFrame(_ *http2frameCache, fh http2FrameHeader, p []byte) (http2Frame, error) {
if fh.StreamID == 0 {
return nil, http2connError{http2ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
@@ -1693,12 +2499,17 @@ func (f *http2PushPromiseFrame) HeadersEnded() bool {
return f.http2FrameHeader.Flags.Has(http2FlagPushPromiseEndHeaders)
}
-func http2parsePushPromise(fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
+func http2parsePushPromise(_ *http2frameCache, fh http2FrameHeader, p []byte) (_ http2Frame, err error) {
pp := &http2PushPromiseFrame{
http2FrameHeader: fh,
}
if pp.StreamID == 0 {
-
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
// The PUSH_PROMISE frame includes optional padding.
@@ -1717,7 +2528,7 @@ func http2parsePushPromise(fh http2FrameHeader, p []byte) (_ http2Frame, err err
pp.PromiseID = pp.PromiseID & (1<<31 - 1)
if int(padLength) > len(p) {
-
+ // like the DATA frame, error out if padding is longer than the body.
return nil, http2ConnectionError(http2ErrCodeProtocol)
}
pp.headerFragBuf = p[:len(p)-int(padLength)]
@@ -1887,7 +2698,9 @@ func (mh *http2MetaHeadersFrame) checkPseudos() error {
default:
return http2pseudoHeaderError(hf.Name)
}
-
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
for _, hf2 := range pf[:i] {
if hf.Name == hf2.Name {
return http2duplicatePseudoHeaderError(hf.Name)
@@ -1905,7 +2718,8 @@ func (fr *http2Framer) maxHeaderStringLen() int {
if uint32(int(v)) == v {
return int(v)
}
-
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
return 0
}
@@ -1960,7 +2774,7 @@ func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFr
mh.Fields = append(mh.Fields, hf)
})
-
+ // Lose reference to MetaHeadersFrame:
defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
var hc http2headersOrContinuation = hf
@@ -1976,7 +2790,7 @@ func (fr *http2Framer) readMetaFrame(hf *http2HeadersFrame) (*http2MetaHeadersFr
if f, err := fr.ReadFrame(); err != nil {
return nil, err
} else {
- hc = f.(*http2ContinuationFrame)
+ hc = f.(*http2ContinuationFrame) // guaranteed by checkFrameOrder
}
}
@@ -2018,7 +2832,7 @@ func http2summarizeFrame(f http2Frame) string {
return nil
})
if n > 0 {
- buf.Truncate(buf.Len() - 1)
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
}
case *http2DataFrame:
data := f.Data()
@@ -2050,29 +2864,6 @@ func http2transportExpectContinueTimeout(t1 *Transport) time.Duration {
return t1.ExpectContinueTimeout
}
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func http2isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
-
- return true
- default:
- return false
- }
-}
-
type http2contextContext interface {
context.Context
}
@@ -2164,7 +2955,11 @@ func (cc *http2ClientConn) Ping(ctx context.Context) error {
return cc.ping(ctx)
}
-func http2cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() }
+func http2cloneTLSConfig(c *tls.Config) *tls.Config {
+ c2 := c.Clone()
+ c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
+ return c2
+}
var _ Pusher = (*http2responseWriter)(nil)
@@ -2201,6 +2996,13 @@ func http2reqBodyIsNoBody(body io.ReadCloser) bool {
return body == NoBody
}
+func http2go18httpNoBody() io.ReadCloser { return NoBody } // for tests only
+
+func http2configureServer19(s *Server, conf *http2Server) error {
+ s.RegisterOnShutdown(conf.state.startGracefulShutdown)
+ return nil
+}
+
var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
type http2goroutineLock uint64
@@ -2237,7 +3039,7 @@ func http2curGoroutineID() uint64 {
defer http2littleBuf.Put(bp)
b := *bp
b = b[:runtime.Stack(b, false)]
-
+ // Parse the 4707 out of "goroutine 4707 ["
b = bytes.TrimPrefix(b, http2goroutineSpace)
i := bytes.IndexByte(b, ' ')
if i < 0 {
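
curGoroutineID works by scraping the decimal ID out of the first line of runtime.Stack output, which always begins `goroutine N [`. A self-contained sketch of the same trick (for illustration only; parsing stack text is inherently fragile):

```go
package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

func main() {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	// The dump's first line is "goroutine N [running]:".
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	if i := bytes.IndexByte(buf, ' '); i >= 0 {
		id, _ := strconv.ParseUint(string(buf[:i]), 10, 64)
		fmt.Println("goroutine id:", id)
	}
}
```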
@@ -2273,9 +3075,10 @@ func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error)
goto Error
case 2 <= base && base <= 36:
+ // valid base; nothing to do
case base == 0:
-
+ // Look for octal, hex prefix.
switch {
case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
base = 16
@@ -2321,7 +3124,7 @@ func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error)
}
if n >= cutoff {
-
+ // n*base overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
@@ -2330,7 +3133,7 @@ func http2parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error)
n1 := n + uint64(v)
if n1 < n || n1 > maxVal {
-
+ // n+v overflows
n = 1<<64 - 1
err = strconv.ErrRange
goto Error
@@ -2514,7 +3317,7 @@ func (s http2Setting) String() string {
// Valid reports whether the setting is valid.
func (s http2Setting) Valid() error {
-
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
switch s.ID {
case http2SettingEnablePush:
if s.Val != 1 && s.Val != 0 {
@@ -2758,7 +3561,8 @@ func (s *http2sorter) Keys(h Header) []string {
}
func (s *http2sorter) SortStrings(ss []string) {
-
+ // Our sorter works on s.v, which sorter owns, so
+ // stash it away while we sort the user's buffer.
save := s.v
s.v = ss
sort.Sort(s)
@@ -2768,27 +3572,31 @@ func (s *http2sorter) SortStrings(ss []string) {
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
-// *) a non-empty string starting with '/', but not with with "//",
+// *) a non-empty string starting with '/'
// *) the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
func http2validPseudoPath(v string) bool {
- return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*"
+ return (len(v) > 0 && v[0] == '/') || v == "*"
}
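
With the `//` restriction dropped (golang.org/issue/19103), the predicate only requires a leading '/' or the literal "*". A quick standalone check of the relaxed behavior:

```go
package main

import "fmt"

// validPseudoPath is a standalone copy of the relaxed predicate above.
func validPseudoPath(v string) bool {
	return (len(v) > 0 && v[0] == '/') || v == "*"
}

func main() {
	for _, v := range []string{"/", "//a", "*", "", "a"} {
		fmt.Printf("%q -> %v\n", v, validPseudoPath(v))
	}
	// "//a" is now accepted; "" and "a" still are not.
}
```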
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type http2pipe struct {
mu sync.Mutex
- c sync.Cond // c.L lazily initialized to &p.mu
- b http2pipeBuffer
- err error // read error once empty. non-nil means closed.
- breakErr error // immediate read error (caller doesn't see rest of b)
- donec chan struct{} // closed on error
- readFn func() // optional code to run in Read before error
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b http2pipeBuffer // nil when done reading
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
}
type http2pipeBuffer interface {
@@ -2800,6 +3608,9 @@ type http2pipeBuffer interface {
func (p *http2pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
+ if p.b == nil {
+ return 0
+ }
return p.b.Len()
}
@@ -2815,14 +3626,15 @@ func (p *http2pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil {
return 0, p.breakErr
}
- if p.b.Len() > 0 {
+ if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
if p.readFn != nil {
- p.readFn()
- p.readFn = nil
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
}
+ p.b = nil
return 0, p.err
}
p.c.Wait()
@@ -2843,6 +3655,9 @@ func (p *http2pipe) Write(d []byte) (n int, err error) {
if p.err != nil {
return 0, http2errClosedPipeWrite
}
+ if p.breakErr != nil {
+ return len(d), nil // discard when there is no reader
+ }
return p.b.Write(d)
}
@@ -2873,10 +3688,13 @@ func (p *http2pipe) closeWithError(dst *error, err error, fn func()) {
}
defer p.c.Signal()
if *dst != nil {
-
+ // Already been done.
return
}
p.readFn = fn
+ if dst == &p.breakErr {
+ p.b = nil
+ }
*dst = err
p.closeDoneLocked()
}
@@ -2886,7 +3704,8 @@ func (p *http2pipe) closeDoneLocked() {
if p.donec == nil {
return
}
-
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
select {
case <-p.donec:
default:
@@ -2912,7 +3731,7 @@ func (p *http2pipe) Done() <-chan struct{} {
if p.donec == nil {
p.donec = make(chan struct{})
if p.err != nil || p.breakErr != nil {
-
+ // Already hit an error.
p.closeDoneLocked()
}
}
@@ -2980,9 +3799,41 @@ type http2Server struct {
// activity for the purposes of IdleTimeout.
IdleTimeout time.Duration
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connections. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
// NewWriteScheduler constructs a write scheduler for a connection.
// If nil, a default scheduler is chosen.
NewWriteScheduler func() http2WriteScheduler
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which will make the
+ // struct non-copyable, which might break some callers.
+ state *http2serverInternalState
+}
+
+func (s *http2Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection > http2initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *http2Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
}
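
Both getters fall back to 1 MB when the configured value is unset or out of range; in particular, a connection-level value not above the RFC 7540 default of 65535 is ignored. A sketch of that fallback rule (hypothetical helper):

```go
package main

import "fmt"

const initialWindowSize = 65535 // RFC 7540 default

// connRecvWindow mirrors the fallback above: values that are unset (zero)
// or not above the RFC default fall back to 1 MB.
func connRecvWindow(configured int32) int32 {
	if configured > initialWindowSize {
		return configured
	}
	return 1 << 20
}

func main() {
	fmt.Println(connRecvWindow(0))       // 1048576 (default)
	fmt.Println(connRecvWindow(1 << 22)) // 4194304 (honored)
}
```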
func (s *http2Server) maxReadFrameSize() uint32 {
@@ -2999,6 +3850,40 @@ func (s *http2Server) maxConcurrentStreams() uint32 {
return http2defaultMaxStreams
}
+type http2serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*http2serverConn]struct{}
+}
+
+func (s *http2serverInternalState) registerConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) unregisterConn(sc *http2serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *http2serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
+
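startGracefulShutdown fans the shutdown signal out to every registered connection under the mutex, and http2configureServer19 above hooks it into Server.RegisterOnShutdown so that http.Server.Shutdown drains HTTP/2 connections too. A simplified sketch of the register/fan-out pattern (hypothetical types):

```go
package main

import (
	"fmt"
	"sync"
)

// connSet mimics serverInternalState: a mutex-guarded set of live
// connections that a shutdown signal is fanned out to.
type connSet struct {
	mu    sync.Mutex
	conns map[string]struct{} // stand-in for *serverConn
}

func (s *connSet) register(c string)   { s.mu.Lock(); s.conns[c] = struct{}{}; s.mu.Unlock() }
func (s *connSet) unregister(c string) { s.mu.Lock(); delete(s.conns, c); s.mu.Unlock() }

func (s *connSet) startGracefulShutdown() {
	s.mu.Lock()
	for c := range s.conns {
		fmt.Println("GOAWAY ->", c) // sc.startGracefulShutdown() in the real code
	}
	s.mu.Unlock()
}

func main() {
	s := &connSet{conns: make(map[string]struct{})}
	s.register("conn1")
	s.register("conn2")
	s.startGracefulShutdown()
}
```
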
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -3011,9 +3896,13 @@ func http2ConfigureServer(s *Server, conf *http2Server) error {
if conf == nil {
conf = new(http2Server)
}
+ conf.state = &http2serverInternalState{activeConns: make(map[*http2serverConn]struct{})}
if err := http2configureServer18(s, conf); err != nil {
return err
}
+ if err := http2configureServer19(s, conf); err != nil {
+ return err
+ }
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
@@ -3039,6 +3928,13 @@ func http2ConfigureServer(s *Server, conf *http2Server) error {
}
}
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
s.TLSConfig.PreferServerCipherSuites = true
haveNPN := false
@@ -3118,29 +4014,37 @@ func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
defer cancel()
sc := &http2serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- baseCtx: baseCtx,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: http2newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*http2stream),
- readFrameCh: make(chan http2readFrameResult),
- wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
- wantStartPushCh: make(chan http2startPushRequest, 8),
- wroteFrameCh: make(chan http2frameWriteResult, 1),
- bodyReadCh: make(chan http2bodyReadMsg),
- doneServing: make(chan struct{}),
- clientMaxStreams: math.MaxUint32,
- advMaxStreams: s.maxConcurrentStreams(),
- initialWindowSize: http2initialWindowSize,
- maxFrameSize: http2initialMaxFrameSize,
- headerTableSize: http2initialHeaderTableSize,
- serveG: http2newGoroutineLock(),
- pushEnabled: true,
- }
-
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: http2newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*http2stream),
+ readFrameCh: make(chan http2readFrameResult),
+ wantWriteFrameCh: make(chan http2FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan http2frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan http2bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: http2initialWindowSize,
+ maxFrameSize: http2initialMaxFrameSize,
+ headerTableSize: http2initialHeaderTableSize,
+ serveG: http2newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
if sc.hs.WriteTimeout != 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
@@ -3151,6 +4055,9 @@ func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
sc.writeSched = http2NewRandomWriteScheduler()
}
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(http2initialWindowSize)
sc.inflow.add(http2initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@@ -3164,18 +4071,44 @@ func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
if tc, ok := c.(http2connectionStater); ok {
sc.tlsState = new(tls.ConnectionState)
*sc.tlsState = tc.ConnectionState()
-
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
if sc.tlsState.Version < tls.VersionTLS12 {
sc.rejectConn(http2ErrCodeInadequateSecurity, "TLS version too low")
return
}
if sc.tlsState.ServerName == "" {
-
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
}
if !s.PermitProhibitedCipherSuites && http2isBadCipher(sc.tlsState.CipherSuite) {
-
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
sc.rejectConn(http2ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
return
}
@@ -3189,7 +4122,7 @@ func (s *http2Server) ServeConn(c net.Conn, opts *http2ServeConnOpts) {
func (sc *http2serverConn) rejectConn(err http2ErrCode, debug string) {
sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
-
+ // ignoring errors. hanging up anyway.
sc.framer.WriteGoAway(0, err, []byte(debug))
sc.bw.Flush()
sc.conn.Close()
@@ -3207,10 +4140,9 @@ type http2serverConn struct {
doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan http2readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan http2FrameWriteRequest // from handlers -> serve
- wantStartPushCh chan http2startPushRequest // from handlers -> serve
wroteFrameCh chan http2frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
bodyReadCh chan http2bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
flow http2flow // conn-wide (not stream-specific) outbound flow control
inflow http2flow // conn-wide inbound flow control
tlsState *tls.ConnectionState // shared by all handlers, like net/http
@@ -3218,38 +4150,39 @@ type http2serverConn struct {
writeSched http2WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
- serveG http2goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
- curClientStreams uint32 // number of open streams initiated by the client
- curPushedStreams uint32 // number of open streams initiated by server push
- maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
- maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
- streams map[uint32]*http2stream
- initialWindowSize int32
- maxFrameSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started writing a frame (on serve goroutine or separate)
- writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- inGoAway bool // we've started to or sent GOAWAY
- inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode http2ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
- idleTimer *time.Timer // nil if unused
- idleTimerCh <-chan time.Time // nil if unused
+ serveG http2goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*http2stream
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode http2ErrCode
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
}
func (sc *http2serverConn) maxHeaderListSize() uint32 {
@@ -3294,10 +4227,10 @@ type http2stream struct {
numTrailerValues int64
weight uint8
state http2streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ writeDeadline *time.Timer // nil if unused
trailer Header // accumulated trailers
reqTrailer Header // handler's Request.Trailer
@@ -3315,11 +4248,16 @@ func (sc *http2serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
func (sc *http2serverConn) state(streamID uint32) (http2streamState, *http2stream) {
sc.serveG.check()
-
+ // http://tools.ietf.org/html/rfc7540#section-5.1
if st, ok := sc.streams[streamID]; ok {
return st.state, st
}
-
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
if streamID%2 == 1 {
if streamID <= sc.maxClientStreamID {
return http2stateClosed, nil
@@ -3373,11 +4311,18 @@ func http2isClosedConnError(err error) bool {
return false
}
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
str := err.Error()
if strings.Contains(str, "use of closed network connection") {
return true
}
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
if runtime.GOOS == "windows" {
if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
@@ -3397,7 +4342,7 @@ func (sc *http2serverConn) condlogf(err error, format string, args ...interface{
return
}
if err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err) {
-
+ // Boring, expected errors.
sc.vlogf(format, args...)
} else {
sc.logf(format, args...)
@@ -3487,7 +4432,7 @@ func (sc *http2serverConn) stopShutdownTimer() {
}
func (sc *http2serverConn) notePanic() {
-
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
if http2testHookOnPanicMu != nil {
http2testHookOnPanicMu.Lock()
defer http2testHookOnPanicMu.Unlock()
@@ -3507,7 +4452,7 @@ func (sc *http2serverConn) serve() {
defer sc.conn.Close()
defer sc.closeAllStreamsOnConnClose()
defer sc.stopShutdownTimer()
- defer close(sc.doneServing)
+ defer close(sc.doneServing) // unblocks handlers trying to send
if http2VerboseLogs {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
@@ -3518,44 +4463,48 @@ func (sc *http2serverConn) serve() {
{http2SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{http2SettingMaxConcurrentStreams, sc.advMaxStreams},
{http2SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {http2SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := sc.srv.initialConnRecvWindowSize() - http2initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
-
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
sc.setConnState(StateActive)
sc.setConnState(StateIdle)
if sc.srv.IdleTimeout != 0 {
- sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
- sc.idleTimerCh = sc.idleTimer.C
}
- var gracefulShutdownCh chan struct{}
- if sc.hs != nil {
- ch := http2h1ServerShutdownChan(sc.hs)
- if ch != nil {
- gracefulShutdownCh = make(chan struct{})
- go sc.awaitGracefulShutdown(ch, gracefulShutdownCh)
- }
- }
+ go sc.readFrames() // closed by defer sc.conn.Close above
- go sc.readFrames()
+ settingsTimer := time.AfterFunc(http2firstSettingsTimeout, sc.onSettingsTimer)
+ defer settingsTimer.Stop()
- settingsTimer := time.NewTimer(http2firstSettingsTimeout)
loopNum := 0
for {
loopNum++
select {
case wr := <-sc.wantWriteFrameCh:
+ if se, ok := wr.write.(http2StreamError); ok {
+ sc.resetStream(se)
+ break
+ }
sc.writeFrame(wr)
- case spr := <-sc.wantStartPushCh:
- sc.startPush(spr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
@@ -3563,26 +4512,37 @@ func (sc *http2serverConn) serve() {
return
}
res.readMore()
- if settingsTimer.C != nil {
+ if settingsTimer != nil {
settingsTimer.Stop()
- settingsTimer.C = nil
+ settingsTimer = nil
}
case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n)
- case <-settingsTimer.C:
- sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
- return
- case <-gracefulShutdownCh:
- gracefulShutdownCh = nil
- sc.startGracefulShutdown()
- case <-sc.shutdownTimerCh:
- sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
- return
- case <-sc.idleTimerCh:
- sc.vlogf("connection is idle")
- sc.goAway(http2ErrCodeNo)
- case fn := <-sc.testHookCh:
- fn(loopNum)
+ case msg := <-sc.serveMsgCh:
+ switch v := msg.(type) {
+ case func(int):
+ v(loopNum) // for testing
+ case *http2serverMessage:
+ switch v {
+ case http2settingsTimerMsg:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case http2idleTimerMsg:
+ sc.vlogf("connection is idle")
+ sc.goAway(http2ErrCodeNo)
+ case http2shutdownTimerMsg:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case http2gracefulShutdownMsg:
+ sc.startGracefulShutdownInternal()
+ default:
+ panic("unknown timer")
+ }
+ case *http2startPushRequest:
+ sc.startPush(v)
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
}
if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
@@ -3599,12 +4559,36 @@ func (sc *http2serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, priva
}
}
+type http2serverMessage int
+
+// Message values sent to serveMsgCh.
+var (
+ http2settingsTimerMsg = new(http2serverMessage)
+ http2idleTimerMsg = new(http2serverMessage)
+ http2shutdownTimerMsg = new(http2serverMessage)
+ http2gracefulShutdownMsg = new(http2serverMessage)
+)
+
+func (sc *http2serverConn) onSettingsTimer() { sc.sendServeMsg(http2settingsTimerMsg) }
+
+func (sc *http2serverConn) onIdleTimer() { sc.sendServeMsg(http2idleTimerMsg) }
+
+func (sc *http2serverConn) onShutdownTimer() { sc.sendServeMsg(http2shutdownTimerMsg) }
+
+func (sc *http2serverConn) sendServeMsg(msg interface{}) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.serveMsgCh <- msg:
+ case <-sc.doneServing:
+ }
+}
+
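The timer callbacks all funnel into the single serveMsgCh; distinct `*http2serverMessage` pointers serve as cheap comparable sentinels the serve loop can switch on, alongside `func(int)` test hooks. A standalone sketch of this sentinel-message pattern:

```go
package main

import "fmt"

type serverMessage int

// Distinct pointer values used purely as identities.
var (
	settingsTimerMsg = new(serverMessage)
	idleTimerMsg     = new(serverMessage)
)

func handle(msg interface{}) {
	switch v := msg.(type) {
	case func(int):
		v(0) // test hooks run inline
	case *serverMessage:
		switch v {
		case settingsTimerMsg:
			fmt.Println("SETTINGS timeout")
		case idleTimerMsg:
			fmt.Println("idle timeout")
		}
	}
}

func main() {
	handle(idleTimerMsg)
	handle(func(int) { fmt.Println("hook") })
}
```
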
// readPreface reads the ClientPreface greeting from the peer
// or returns an error on timeout or an invalid greeting.
func (sc *http2serverConn) readPreface() error {
errc := make(chan error, 1)
go func() {
-
+ // Read the client preface
buf := make([]byte, len(http2ClientPreface))
if _, err := io.ReadFull(sc.conn, buf); err != nil {
errc <- err
@@ -3614,7 +4598,7 @@ func (sc *http2serverConn) readPreface() error {
errc <- nil
}
}()
- timer := time.NewTimer(http2prefaceTimeout)
+ timer := time.NewTimer(http2prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
case <-timer.C:
@@ -3658,7 +4642,13 @@ func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte
case <-sc.doneServing:
return http2errClientDisconnected
case <-stream.cw:
-
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
select {
case err = <-ch:
frameWriteDone = true
@@ -3681,12 +4671,13 @@ func (sc *http2serverConn) writeDataFromHandler(stream *http2stream, data []byte
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
func (sc *http2serverConn) writeFrameFromHandler(wr http2FrameWriteRequest) error {
- sc.serveG.checkNotOn()
+ sc.serveG.checkNotOn() // NOT
select {
case sc.wantWriteFrameCh <- wr:
return nil
case <-sc.doneServing:
-
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
return http2errClientDisconnected
}
}
@@ -3705,6 +4696,24 @@ func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
// If true, wr will not be written and wr.done will not be signaled.
var ignoreWrite bool
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
if wr.StreamID() != 0 {
_, isReset := wr.write.(http2StreamError)
if state, _ := sc.state(wr.StreamID()); state == http2stateClosed && !isReset {
@@ -3712,12 +4721,15 @@ func (sc *http2serverConn) writeFrame(wr http2FrameWriteRequest) {
}
}
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
switch wr.write.(type) {
case *http2writeResHeaders:
wr.stream.wroteHeaders = true
case http2write100ContinueHeadersFrame:
if wr.stream.wroteHeaders {
-
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
if wr.done != nil {
panic("wr.done != nil for write100ContinueHeadersFrame")
}
@@ -3746,7 +4758,8 @@ func (sc *http2serverConn) startFrameWrite(wr http2FrameWriteRequest) {
case http2stateHalfClosedLocal:
switch wr.write.(type) {
case http2StreamError, http2handlerPanicRST, http2writeWindowUpdate:
-
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
default:
panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
}
@@ -3800,16 +4813,29 @@ func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
}
switch st.state {
case http2stateOpen:
-
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
st.state = http2stateHalfClosedLocal
- sc.resetStream(http2streamError(st.id, http2ErrCodeCancel))
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(http2streamError(st.id, http2ErrCodeNo))
case http2stateHalfClosedRemote:
sc.closeStream(st, http2errHandlerComplete)
}
} else {
switch v := wr.write.(type) {
case http2StreamError:
-
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
if st, ok := sc.streams[v.StreamID]; ok {
sc.closeStream(st, v)
}
@@ -3818,6 +4844,7 @@ func (sc *http2serverConn) wroteFrame(res http2frameWriteResult) {
}
}
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
wr.replyToWriter(res.err)
sc.scheduleFrameWrite()
@@ -3865,7 +4892,7 @@ func (sc *http2serverConn) scheduleFrameWrite() {
}
if sc.needsFrameFlush {
sc.startFrameWrite(http2FrameWriteRequest{write: http2flushFrameWriter{}})
- sc.needsFrameFlush = false
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
continue
}
break
@@ -3873,10 +4900,19 @@ func (sc *http2serverConn) scheduleFrameWrite() {
sc.inFrameScheduleLoop = false
}
-// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the
-// client we're gracefully shutting down. The connection isn't closed
-// until all current streams are done.
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
func (sc *http2serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(http2gracefulShutdownMsg) })
+}
+
+func (sc *http2serverConn) startGracefulShutdownInternal() {
sc.goAwayIn(http2ErrCodeNo, 0)
}
@@ -3886,7 +4922,7 @@ func (sc *http2serverConn) goAway(code http2ErrCode) {
if code != http2ErrCodeNo {
forceCloseIn = 250 * time.Millisecond
} else {
-
+ // TODO: configurable
forceCloseIn = 1 * time.Second
}
sc.goAwayIn(code, forceCloseIn)
@@ -3908,8 +4944,7 @@ func (sc *http2serverConn) goAwayIn(code http2ErrCode, forceCloseIn time.Duratio
func (sc *http2serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = time.NewTimer(d)
- sc.shutdownTimerCh = sc.shutdownTimer.C
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *http2serverConn) resetStream(se http2StreamError) {
@@ -3929,11 +4964,18 @@ func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool
if err != nil {
if err == http2ErrFrameTooLarge {
sc.goAway(http2ErrCodeFrameSize)
- return true
+ return true // goAway will close the loop
}
clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || http2isClosedConnError(err)
if clientGone {
-
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
return false
}
} else {
@@ -3957,7 +4999,7 @@ func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool
case http2ConnectionError:
sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
sc.goAway(http2ErrCode(ev))
- return true
+ return true // goAway will handle shutdown
default:
if res.err != nil {
sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
@@ -3971,6 +5013,7 @@ func (sc *http2serverConn) processFrameFromReader(res http2readFrameResult) bool
func (sc *http2serverConn) processFrame(f http2Frame) error {
sc.serveG.check()
+ // First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*http2SettingsFrame); !ok {
return http2ConnectionError(http2ErrCodeProtocol)
@@ -3996,7 +5039,8 @@ func (sc *http2serverConn) processFrame(f http2Frame) error {
case *http2GoAwayFrame:
return sc.processGoAway(f)
case *http2PushPromiseFrame:
-
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return http2ConnectionError(http2ErrCodeProtocol)
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
@@ -4007,11 +5051,16 @@ func (sc *http2serverConn) processFrame(f http2Frame) error {
func (sc *http2serverConn) processPing(f *http2PingFrame) error {
sc.serveG.check()
if f.IsAck() {
-
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
return nil
}
if f.StreamID != 0 {
-
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if sc.inGoAway && sc.goAwayCode != http2ErrCodeNo {
@@ -4024,20 +5073,27 @@ func (sc *http2serverConn) processPing(f *http2PingFrame) error {
func (sc *http2serverConn) processWindowUpdate(f *http2WindowUpdateFrame) error {
sc.serveG.check()
switch {
- case f.StreamID != 0:
+ case f.StreamID != 0: // stream-level flow control
state, st := sc.state(f.StreamID)
if state == http2stateIdle {
-
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if st == nil {
-
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
return nil
}
if !st.flow.add(int32(f.Increment)) {
return http2streamError(f.StreamID, http2ErrCodeFlowControl)
}
- default:
+ default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
return http2goAwayFlowError{}
}
@@ -4051,7 +5107,11 @@ func (sc *http2serverConn) processResetStream(f *http2RSTStreamFrame) error {
state, st := sc.state(f.StreamID)
if state == http2stateIdle {
-
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
return http2ConnectionError(http2ErrCodeProtocol)
}
if st != nil {
@@ -4067,6 +5127,9 @@ func (sc *http2serverConn) closeStream(st *http2stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = http2stateClosed
+ if st.writeDeadline != nil {
+ st.writeDeadline.Stop()
+ }
if st.isPushed() {
sc.curPushedStreams--
} else {
@@ -4079,16 +5142,17 @@ func (sc *http2serverConn) closeStream(st *http2stream, err error) {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if http2h1ServerKeepAlivesDisabled(sc.hs) {
- sc.startGracefulShutdown()
+ sc.startGracefulShutdownInternal()
}
}
if p := st.body; p != nil {
-
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
sc.sendWindowUpdate(nil, p.Len())
p.CloseWithError(err)
}
- st.cw.Close()
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.CloseStream(st.id)
}
@@ -4097,7 +5161,9 @@ func (sc *http2serverConn) processSettings(f *http2SettingsFrame) error {
if f.IsAck() {
sc.unackedSettings--
if sc.unackedSettings < 0 {
-
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
return http2ConnectionError(http2ErrCodeProtocol)
}
return nil
@@ -4129,11 +5195,13 @@ func (sc *http2serverConn) processSetting(s http2Setting) error {
case http2SettingInitialWindowSize:
return sc.processSettingInitialWindowSize(s.Val)
case http2SettingMaxFrameSize:
- sc.maxFrameSize = int32(s.Val)
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case http2SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
default:
-
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
if http2VerboseLogs {
sc.vlogf("http2: server ignoring unknown setting %v", s)
}
@@ -4143,13 +5211,26 @@ func (sc *http2serverConn) processSetting(s http2Setting) error {
func (sc *http2serverConn) processSettingInitialWindowSize(val uint32) error {
sc.serveG.check()
-
- old := sc.initialWindowSize
- sc.initialWindowSize = int32(val)
- growth := sc.initialWindowSize - old
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
-
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
return http2ConnectionError(http2ErrCodeFlowControl)
}
}
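
The growth delta can be negative, and per RFC 7540 a stream's window may legally go negative after a shrink; only exceeding 2^31-1 is an error. A sketch of the adjustment (hypothetical helper):

```go
package main

import (
	"errors"
	"fmt"
)

const maxWindow = 1<<31 - 1

// adjustWindows applies the delta between the old and new
// SETTINGS_INITIAL_WINDOW_SIZE to each stream window, failing on
// overflow like the code above. Windows may go negative.
func adjustWindows(windows []int32, oldSize, newSize int32) error {
	growth := newSize - oldSize // may be negative
	for i := range windows {
		if int64(windows[i])+int64(growth) > maxWindow {
			return errors.New("FLOW_CONTROL_ERROR")
		}
		windows[i] += growth
	}
	return nil
}

func main() {
	w := []int32{65535, 1000}
	fmt.Println(adjustWindows(w, 65535, 32768), w) // <nil> [32768 -31767]
}
```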
@@ -4163,23 +5244,40 @@ func (sc *http2serverConn) processData(f *http2DataFrame) error {
}
data := f.Data()
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
state, st := sc.state(id)
if id == 0 || state == http2stateIdle {
-
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
if st == nil || state != http2stateOpen || st.gotTrailerHeader || st.resetQueued {
-
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
if sc.inflow.available() < int32(f.Length) {
return http2streamError(id, http2ErrCodeFlowControl)
}
-
+ // Deduct the flow control from inflow, since we're
+ // going to immediately add it back in
+ // sendWindowUpdate, which also schedules sending the
+ // frames.
sc.inflow.take(int32(f.Length))
- sc.sendWindowUpdate(nil, int(f.Length))
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
if st != nil && st.resetQueued {
-
+ // Already have a stream error in flight. Don't send another.
return nil
}
return http2streamError(id, http2ErrCodeStreamClosed)
@@ -4188,12 +5286,13 @@ func (sc *http2serverConn) processData(f *http2DataFrame) error {
panic("internal error: should have a body in this state")
}
+ // Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
return http2streamError(id, http2ErrCodeStreamClosed)
}
if f.Length > 0 {
-
+ // Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
return http2streamError(id, http2ErrCodeFlowControl)
}
@@ -4210,6 +5309,8 @@ func (sc *http2serverConn) processData(f *http2DataFrame) error {
st.bodyBytes += int64(len(data))
}
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
if pad := int32(f.Length) - int32(len(data)); pad > 0 {
sc.sendWindowUpdate32(nil, pad)
sc.sendWindowUpdate32(st, pad)
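
The refund works because f.Length covers the pad-length byte, the data, and the padding, while only len(data) reaches the request body; the difference is credited back at both flow-control levels right away. A small arithmetic sketch:

```go
package main

import "fmt"

func main() {
	// A padded DATA frame: 1 pad-length byte + 3 data bytes + 4 pad bytes.
	frameLength := int32(1 + 3 + 4) // what flow control was charged
	dataLen := int32(3)             // what the body will actually consume
	if pad := frameLength - dataLen; pad > 0 {
		// Refund the non-data portion now, at the connection and
		// stream level, since body reads will never return it.
		fmt.Println("WINDOW_UPDATE credit:", pad) // 5
	}
}
```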
@@ -4228,8 +5329,9 @@ func (sc *http2serverConn) processGoAway(f *http2GoAwayFrame) error {
} else {
sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
}
- sc.startGracefulShutdown()
-
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
sc.pushEnabled = false
return nil
}
@@ -4260,32 +5362,51 @@ func (st *http2stream) endStream() {
func (st *http2stream) copyTrailersToHandlerRequest() {
for k, vv := range st.trailer {
if _, ok := st.reqTrailer[k]; ok {
-
+ // Only copy it over if it was pre-declared.
st.reqTrailer[k] = vv
}
}
}
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *http2stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(http2FrameWriteRequest{write: http2streamError(st.id, http2ErrCodeInternal)})
+}
+
func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
sc.serveG.check()
id := f.StreamID
if sc.inGoAway {
-
+ // Ignore.
return nil
}
-
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
return http2ConnectionError(http2ErrCodeProtocol)
}
-
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
if st := sc.streams[f.StreamID]; st != nil {
if st.resetQueued {
-
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
return nil
}
return st.processTrailerHeaders(f)
}
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
return http2ConnectionError(http2ErrCodeProtocol)
}
@@ -4295,12 +5416,22 @@ func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
sc.idleTimer.Stop()
}
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
-
+ // They should know better.
return http2streamError(id, http2ErrCodeProtocol)
}
-
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
return http2streamError(id, http2ErrCodeRefusedStream)
}
@@ -4325,17 +5456,24 @@ func (sc *http2serverConn) processHeaders(f *http2MetaHeadersFrame) error {
if st.reqTrailer != nil {
st.trailer = make(Header)
}
- st.body = req.Body.(*http2requestBody).pipe
+ st.body = req.Body.(*http2requestBody).pipe // may be nil
st.declBodyBytes = req.ContentLength
handler := sc.handler.ServeHTTP
if f.Truncated {
-
+ // Their header list was too long. Send a 431 error.
handler = http2handleHeaderListTooLong
} else if err := http2checkValidHTTP2RequestHeaders(req.Header); err != nil {
handler = http2new400Handler(err)
}
+ // The net/http package sets the read deadline from the
+ // http.Server.ReadTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's the saner option anyway.
if sc.hs.ReadTimeout != 0 {
sc.conn.SetReadDeadline(time.Time{})
}
@@ -4362,7 +5500,9 @@ func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
for _, hf := range f.RegularFields() {
key := sc.canonicalHeader(hf.Name)
if !http2ValidTrailerHeader(key) {
-
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
return http2streamError(st.id, http2ErrCodeProtocol)
}
st.trailer[key] = append(st.trailer[key], hf.Value)
@@ -4374,7 +5514,10 @@ func (st *http2stream) processTrailerHeaders(f *http2MetaHeadersFrame) error {
func http2checkPriority(streamID uint32, p http2PriorityParam) error {
if streamID == p.StreamDep {
-
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
return http2streamError(streamID, http2ErrCodeProtocol)
}
return nil
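Self-dependency is the only cycle this check rejects directly; longer cycles are untangled when the priority tree is adjusted. A standalone sketch of the same rule, where `checkPriority` is a simplified stand-in for the bundled helper:

```go
package main

import (
	"errors"
	"fmt"
)

// checkPriority mirrors the bundled check: only a direct
// self-dependency is rejected (RFC 7540 section 5.3.1).
func checkPriority(streamID, streamDep uint32) error {
	if streamID == streamDep {
		return errors.New("stream error: PROTOCOL_ERROR (self-dependency)")
	}
	return nil
}

func main() {
	fmt.Println(checkPriority(5, 5)) // error
	fmt.Println(checkPriority(5, 3)) // <nil>
}
```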
@@ -4406,10 +5549,13 @@ func (sc *http2serverConn) newStream(id, pusherID uint32, state http2streamState
cancelCtx: cancelCtx,
}
st.cw.Init()
- st.flow.conn = &sc.flow
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow
- st.inflow.add(http2initialWindowSize)
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+ if sc.hs.WriteTimeout != 0 {
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ }
sc.streams[id] = st
sc.writeSched.OpenStream(st.id, http2OpenStreamOptions{PusherID: pusherID})
@@ -4441,13 +5587,22 @@ func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHead
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
-
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // "Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
-
+ // HEAD requests can't have bodies
return nil, nil, http2streamError(f.StreamID, http2ErrCodeProtocol)
}
@@ -4464,16 +5619,14 @@ func (sc *http2serverConn) newWriterAndRequest(st *http2stream, f *http2MetaHead
return nil, nil, err
}
if bodyOpen {
- st.reqBuf = http2getRequestBodyBuf()
- req.Body.(*http2requestBody).pipe = &http2pipe{
- b: &http2fixedBuffer{buf: st.reqBuf},
- }
-
if vv, ok := rp.header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
}
+ req.Body.(*http2requestBody).pipe = &http2pipe{
+ b: &http2dataBuffer{expected: req.ContentLength},
+ }
}
return rw, req, nil
}
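This hunk drops the fixed-size pooled request-body buffer in favor of an http2dataBuffer sized from the declared Content-Length, so memory tracks actual body data. A toy stand-in showing the idea — allocate nothing up front and treat the declared length only as a capped hint (`growBuffer` and its cap are hypothetical, not the real chunked dataBuffer):

```go
package main

import (
	"bytes"
	"fmt"
)

// growBuffer is a hypothetical stand-in for http2dataBuffer: nothing
// is allocated up front, and the declared Content-Length is only a
// capped allocation hint rather than a fixed per-request reservation.
type growBuffer struct {
	buf bytes.Buffer
}

func newGrowBuffer(expected int64) *growBuffer {
	b := &growBuffer{}
	if expected > 0 && expected < 16<<10 {
		b.buf.Grow(int(expected)) // hint, capped to keep small bodies cheap
	}
	return b
}

func (b *growBuffer) Write(p []byte) (int, error) { return b.buf.Write(p) }

func main() {
	b := newGrowBuffer(11)
	b.Write([]byte("hello world"))
	fmt.Println(b.buf.String())
}
```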
@@ -4496,7 +5649,7 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re
if needsContinue {
rp.header.Del("Expect")
}
-
+ // Merge Cookie headers into one "; "-delimited value.
if cookies := rp.header["Cookie"]; len(cookies) > 1 {
rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
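Per RFC 7540 section 8.1.2.5, a client may split the cookie across multiple header fields; the merge above reassembles them before handlers see the request. The same join in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Cookie split across multiple header fields, as HTTP/2 permits.
	cookies := []string{"a=1", "b=2", "c=3"}
	if len(cookies) > 1 {
		fmt.Println(strings.Join(cookies, "; ")) // a=1; b=2; c=3
	}
}
```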
@@ -4508,7 +5661,8 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re
key = CanonicalHeaderKey(strings.TrimSpace(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
-
+ // Bogus. (copy of http1 rules)
+ // Ignore.
default:
if trailer == nil {
trailer = make(Header)
@@ -4523,7 +5677,7 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re
var requestURI string
if rp.method == "CONNECT" {
url_ = &url.URL{Host: rp.authority}
- requestURI = rp.authority
+ requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
var err error
url_, err = url.ParseRequestURI(rp.path)
@@ -4556,7 +5710,7 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re
rws := http2responseWriterStatePool.Get().(*http2responseWriterState)
bwSave := rws.bw
- *rws = http2responseWriterState{}
+ *rws = http2responseWriterState{} // zero all the fields
rws.conn = sc
rws.bw = bwSave
rws.bw.Reset(http2chunkWriter{rws})
@@ -4568,24 +5722,6 @@ func (sc *http2serverConn) newWriterAndRequestNoBody(st *http2stream, rp http2re
return rw, req, nil
}
-var http2reqBodyCache = make(chan []byte, 8)
-
-func http2getRequestBodyBuf() []byte {
- select {
- case b := <-http2reqBodyCache:
- return b
- default:
- return make([]byte, http2initialWindowSize)
- }
-}
-
-func http2putRequestBodyBuf(b []byte) {
- select {
- case http2reqBodyCache <- b:
- default:
- }
-}
-
// Run on its own goroutine.
func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, handler func(ResponseWriter, *Request)) {
didPanic := true
@@ -4597,7 +5733,7 @@ func (sc *http2serverConn) runHandler(rw *http2responseWriter, req *Request, han
write: http2handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
-
+ // Same as net/http:
if http2shouldLogPanic(e) {
const size = 64 << 10
buf := make([]byte, size)
@@ -4625,10 +5761,13 @@ func http2handleHeaderListTooLong(w ResponseWriter, r *Request) {
// called from handler goroutines.
// h may be nil.
func (sc *http2serverConn) writeHeaders(st *http2stream, headerData *http2writeResHeaders) error {
- sc.serveG.checkNotOn()
+ sc.serveG.checkNotOn() // NOT on
var errc chan error
if headerData.h != nil {
-
+ // If there's a header map (which we don't own), we have to block on
+ // this frame being written, so that an http.Flush mid-handler
+ // writes out the correct value of keys before a handler later potentially
+ // mutates it.
errc = http2errChanPool.Get().(chan error)
}
if err := sc.writeFrameFromHandler(http2FrameWriteRequest{
@@ -4671,26 +5810,21 @@ type http2bodyReadMsg struct {
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
func (sc *http2serverConn) noteBodyReadFromHandler(st *http2stream, n int, err error) {
- sc.serveG.checkNotOn()
+ sc.serveG.checkNotOn() // NOT on
if n > 0 {
select {
case sc.bodyReadCh <- http2bodyReadMsg{st, n}:
case <-sc.doneServing:
}
}
- if err == io.EOF {
- if buf := st.reqBuf; buf != nil {
- st.reqBuf = nil
- http2putRequestBodyBuf(buf)
- }
- }
}
func (sc *http2serverConn) noteBodyRead(st *http2stream, n int) {
sc.serveG.check()
- sc.sendWindowUpdate(nil, n)
+ sc.sendWindowUpdate(nil, n) // conn-level
if st.state != http2stateHalfClosedRemote && st.state != http2stateClosed {
-
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
sc.sendWindowUpdate(st, n)
}
}
@@ -4777,8 +5911,8 @@ func (b *http2requestBody) Read(p []byte) (n int, err error) {
return
}
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
@@ -4812,6 +5946,7 @@ type http2responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
+ dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@@ -4832,7 +5967,7 @@ func (rws *http2responseWriterState) hasTrailers() bool { return len(rws.trailer
func (rws *http2responseWriterState) declareTrailer(k string) {
k = CanonicalHeaderKey(k)
if !http2ValidTrailerHeader(k) {
-
+ // Forbidden by RFC 2616 14.40.
rws.conn.logf("ignoring invalid trailer %q", k)
return
}
@@ -4874,7 +6009,7 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
}
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
-
+ // TODO(bradfitz): be faster here, like net/http? measure.
date = time.Now().UTC().Format(TimeFormat)
}
@@ -4893,6 +6028,7 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
+ rws.dirty = true
return 0, err
}
if endStream {
@@ -4912,8 +6048,9 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
endStream := rws.handlerDone && !rws.hasTrailers()
if len(p) > 0 || endStream {
-
+ // Only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ rws.dirty = true
return 0, err
}
}
@@ -4925,6 +6062,9 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
+ if err != nil {
+ rws.dirty = true
+ }
return len(p), err
}
return len(p), nil
@@ -4952,7 +6092,7 @@ const http2TrailerPrefix = "Trailer:"
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
+// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
@@ -4989,11 +6129,14 @@ func (w *http2responseWriter) Flush() {
}
if rws.bw.Buffered() > 0 {
if err := rws.bw.Flush(); err != nil {
-
+ // Ignore the error. The frame writer already knows.
return
}
} else {
-
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
rws.writeChunk(nil)
}
}
@@ -5010,7 +6153,7 @@ func (w *http2responseWriter) CloseNotify() <-chan bool {
rws.closeNotifierCh = ch
cw := rws.stream.cw
go func() {
- cw.Wait()
+ cw.Wait() // wait for close
ch <- true
}()
}
@@ -5061,7 +6204,7 @@ func http2cloneHeader(h Header) Header {
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
-// * (Handler migth call Flush)
+// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
@@ -5085,9 +6228,9 @@ func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n
if !http2bodyAllowedForStatus(rws.status) {
return 0, ErrBodyNotAllowed
}
- rws.wroteBytes += int64(len(dataB)) + int64(len(dataS))
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
-
+ // TODO: send a RST_STREAM
return 0, errors.New("http2: handler wrote more than declared Content-Length")
}
@@ -5100,10 +6243,19 @@ func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n
func (w *http2responseWriter) handlerDone() {
rws := w.rws
+ dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
- http2responseWriterStatePool.Put(rws)
+ if !dirty {
+ // Only recycle the pool if all prior Write calls to
+ // the serverConn goroutine completed successfully. If
+ // they returned earlier due to resets from the peer
+ // there might still be write goroutines outstanding
+ // from the serverConn referencing the rws memory. See
+ // issue 20704.
+ http2responseWriterStatePool.Put(rws)
+ }
}
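The dirty-flag guard above is a general sync.Pool discipline: never Put an object back while another goroutine might still reference it. A minimal sketch of the pattern under assumed names (`state`, `done`):

```go
package main

import (
	"fmt"
	"sync"
)

type state struct {
	dirty bool // a failed Write may have leaked a reference elsewhere
	n     int
}

var pool = sync.Pool{New: func() interface{} { return new(state) }}

func done(s *state) {
	dirty := s.dirty
	*s = state{} // zero for reuse
	if !dirty {
		pool.Put(s) // safe: no other goroutine can still hold s
	}
	// If dirty, drop it and let the GC collect it (issue 20704's fix).
}

func main() {
	s := pool.Get().(*state)
	s.n = 42
	done(s)
	fmt.Println("recycled cleanly")
}
```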
// Push errors.
@@ -5124,10 +6276,13 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error {
sc := st.sc
sc.serveG.checkNotOn()
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
if st.isPushed() {
return http2ErrRecursivePush
}
+ // Default options.
if opts.Method == "" {
opts.Method = "GET"
}
@@ -5139,6 +6294,7 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error {
wantScheme = "https"
}
+ // Validate the request.
u, err := url.Parse(target)
if err != nil {
return err
@@ -5161,7 +6317,10 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error {
if strings.HasPrefix(k, ":") {
return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
}
-
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
switch strings.ToLower(k) {
case "content-length", "content-encoding", "trailer", "te", "expect", "host":
return fmt.Errorf("promised request headers cannot include %q", k)
@@ -5171,11 +6330,14 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error {
return err
}
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
if opts.Method != "GET" && opts.Method != "HEAD" {
return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
}
- msg := http2startPushRequest{
+ msg := &http2startPushRequest{
parent: st,
method: opts.Method,
url: u,
@@ -5188,7 +6350,7 @@ func (w *http2responseWriter) push(target string, opts http2pushOptions) error {
return http2errClientDisconnected
case <-st.cw:
return http2errStreamClosed
- case sc.wantStartPushCh <- msg:
+ case sc.serveMsgCh <- msg:
}
select {
@@ -5210,48 +6372,66 @@ type http2startPushRequest struct {
done chan error
}
-func (sc *http2serverConn) startPush(msg http2startPushRequest) {
+func (sc *http2serverConn) startPush(msg *http2startPushRequest) {
sc.serveG.check()
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
if msg.parent.state != http2stateOpen && msg.parent.state != http2stateHalfClosedRemote {
-
+ // responseWriter.Push checks that the stream is peer-initiated.
msg.done <- http2errStreamClosed
return
}
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
if !sc.pushEnabled {
msg.done <- ErrNotSupported
return
}
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
allocatePromisedID := func() (uint32, error) {
sc.serveG.check()
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
if !sc.pushEnabled {
return 0, ErrNotSupported
}
-
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
if sc.curPushedStreams+1 > sc.clientMaxStreams {
return 0, http2ErrPushLimitReached
}
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
if sc.maxPushPromiseID+2 >= 1<<31 {
- sc.startGracefulShutdown()
+ sc.startGracefulShutdownInternal()
return 0, http2ErrPushLimitReached
}
sc.maxPushPromiseID += 2
promisedID := sc.maxPushPromiseID
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, http2stateHalfClosedRemote)
rw, req, err := sc.newWriterAndRequestNoBody(promised, http2requestParam{
method: msg.method,
scheme: msg.url.Scheme,
authority: msg.url.Host,
path: msg.url.RequestURI(),
- header: http2cloneHeader(msg.header),
+ header: http2cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
-
+ // Should not happen, since we've already validated msg.url.
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
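The push stream-ID allocation in isolation: even IDs, strictly increasing, and a graceful-shutdown signal instead of wrapping past 2^31-1. A sketch where `allocator` is a hypothetical stand-in for the serverConn bookkeeping:

```go
package main

import (
	"errors"
	"fmt"
)

var errPushLimit = errors.New("push limit reached")

// allocator mimics the maxPushPromiseID bookkeeping: even IDs only,
// strictly increasing, GOAWAY before the 2^31-1 ceiling is crossed.
type allocator struct {
	maxPushPromiseID uint32 // last even ID handed out (0 initially)
}

func (a *allocator) next() (uint32, error) {
	if a.maxPushPromiseID+2 >= 1<<31 {
		// The real server calls startGracefulShutdownInternal here,
		// forcing clients onto a fresh connection for new streams.
		return 0, errPushLimit
	}
	a.maxPushPromiseID += 2
	return a.maxPushPromiseID, nil
}

func main() {
	a := &allocator{}
	id1, _ := a.next()
	id2, _ := a.next()
	fmt.Println(id1, id2) // 2 4
}
```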
@@ -5356,31 +6536,6 @@ var http2badTrailer = map[string]bool{
"Www-Authenticate": true,
}
-// h1ServerShutdownChan returns a channel that will be closed when the
-// provided *http.Server wants to shut down.
-//
-// This is a somewhat hacky way to get at http1 innards. It works
-// when the http2 code is bundled into the net/http package in the
-// standard library. The alternatives ended up making the cmd/go tool
-// depend on http Servers. This is the lightest option for now.
-// This is tested via the TestServeShutdown* tests in net/http.
-func http2h1ServerShutdownChan(hs *Server) <-chan struct{} {
- if fn := http2testh1ServerShutdownChan; fn != nil {
- return fn(hs)
- }
- var x interface{} = hs
- type I interface {
- getDoneChan() <-chan struct{}
- }
- if hs, ok := x.(I); ok {
- return hs.getDoneChan()
- }
- return nil
-}
-
-// optional test hook for h1ServerShutdownChan.
-var http2testh1ServerShutdownChan func(hs *Server) <-chan struct{}
-
// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way.
@@ -5486,7 +6641,7 @@ var http2errTransportVersion = errors.New("http2: ConfigureTransport is only sup
// It requires Go 1.6 or later and returns an error if the net/http package is too old
// or if t1 has already been HTTP/2-enabled.
func http2ConfigureTransport(t1 *Transport) error {
- _, err := http2configureTransport(t1)
+ _, err := http2configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
return err
}
@@ -5669,7 +6824,7 @@ func (t *http2Transport) RoundTrip(req *Request) (*Response, error) {
// and returns a host:port. The port 443 is added if needed.
func http2authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
- if err != nil {
+ if err != nil { // authority didn't have a port
port = "443"
if scheme == "http" {
port = "80"
@@ -5679,7 +6834,7 @@ func http2authorityAddr(scheme string, authority string) (addr string) {
if a, err := idna.ToASCII(host); err == nil {
host = a
}
-
+ // IPv6 address literal, without a port:
if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
return host + ":" + port
}
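A self-contained sketch of the same address normalization, minus the idna step (the `host = authority` fallback in the error branch is assumed from the surrounding context, which the hunk doesn't show):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// authorityAddr mirrors the bundled helper: add a default port when
// the authority has none, and handle bare IPv6 literals, which
// net.JoinHostPort would double-bracket.
func authorityAddr(scheme, authority string) string {
	host, port, err := net.SplitHostPort(authority)
	if err != nil { // authority didn't have a port
		port = "443"
		if scheme == "http" {
			port = "80"
		}
		host = authority
	}
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		return host + ":" + port // IPv6 literal, without a port
	}
	return net.JoinHostPort(host, port)
}

func main() {
	fmt.Println(authorityAddr("https", "example.com"))     // example.com:443
	fmt.Println(authorityAddr("http", "example.com:8080")) // example.com:8080
	fmt.Println(authorityAddr("https", "[::1]"))           // [::1]:443
}
```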
@@ -5742,12 +6897,14 @@ func http2shouldRetryRequest(req *Request, err error) (*Request, error) {
case http2errClientConnUnusable, http2errClientConnGotGoAway:
return req, nil
case http2errClientConnGotGoAwayAfterSomeReqBody:
-
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
if req.Body == nil || http2reqBodyIsNoBody(req.Body) {
return req, nil
}
-
- getBody := http2reqGetBody(req)
+ // Otherwise we depend on the Request having its GetBody
+ // func defined.
+ getBody := http2reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody == nil {
return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
}
@@ -5840,9 +6997,9 @@ func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2Client
tconn: c,
readerDone: make(chan struct{}),
nextStreamID: 1,
- maxFrameSize: 16 << 10,
- initialWindowSize: 65535,
- maxConcurrentStreams: 1000,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*http2clientStream),
singleUse: singleUse,
wantSettingsAck: true,
@@ -5859,12 +7016,16 @@ func (t *http2Transport) newClientConn(c net.Conn, singleUse bool) (*http2Client
cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(http2initialWindowSize))
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(http2stickyErrWriter{c, &cc.werr})
cc.br = bufio.NewReader(c)
cc.fr = http2NewFramer(cc.bw, cc.br)
cc.fr.ReadMetaHeaders = hpack.NewDecoder(http2initialHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)
if cs, ok := c.(http2connectionStater); ok {
@@ -5900,6 +7061,7 @@ func (cc *http2ClientConn) setGoAway(f *http2GoAwayFrame) {
old := cc.goAway
cc.goAway = f
+ // Merge the previous and current GoAway error frames.
if cc.goAwayDebug == "" {
cc.goAwayDebug = string(f.DebugData())
}
@@ -5932,7 +7094,7 @@ func (cc *http2ClientConn) canTakeNewRequestLocked() bool {
cc.nextStreamID < math.MaxInt32
}
-// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
// only be called when we're idle, but because we're coming from a new
// goroutine, there could be a new request coming in at the same time,
// so this simply calls the synchronized closeIfIdle to shut down this
@@ -5950,7 +7112,7 @@ func (cc *http2ClientConn) closeIfIdle() {
}
cc.closed = true
nextID := cc.nextStreamID
-
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
if http2VerboseLogs {
@@ -5996,7 +7158,7 @@ func (cc *http2ClientConn) putFrameScratchBuffer(buf []byte) {
return
}
}
-
+ // forget about it.
}
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
@@ -6024,7 +7186,10 @@ func (cc *http2ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
}
-
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
return 0
}
@@ -6048,7 +7213,7 @@ func http2checkConnHeaders(req *Request) error {
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func http2actualContentLength(req *Request) int64 {
- if req.Body == nil {
+ if req.Body == nil || http2reqBodyIsNoBody(req.Body) {
return 0
}
if req.ContentLength != 0 {
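The new hasBody derivation below rests on this helper's three-way contract: 0 means definitely empty, -1 means unknown, anything else is the declared length. A sketch using http.NoBody (Go 1.8+) directly in place of the version-shimmed http2reqBodyIsNoBody:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// actualContentLength mirrors the transport helper: 0 means
// definitely empty, -1 means unknown.
func actualContentLength(req *http.Request) int64 {
	if req.Body == nil || req.Body == http.NoBody {
		return 0
	}
	if req.ContentLength != 0 {
		return req.ContentLength
	}
	return -1
}

func main() {
	r1, _ := http.NewRequest("GET", "http://example.com/", nil)
	r2, _ := http.NewRequest("POST", "http://example.com/", strings.NewReader("hi"))
	fmt.Println(actualContentLength(r1)) // 0
	fmt.Println(actualContentLength(r2)) // 2
}
```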
@@ -6079,8 +7244,8 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
}
body := req.Body
- hasBody := body != nil
contentLen := http2actualContentLength(req)
+ hasBody := contentLen != 0
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
@@ -6088,10 +7253,24 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
-
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
requestedGzip = true
}
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.mu.Unlock()
@@ -6114,11 +7293,12 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
if werr != nil {
if hasBody {
- req.Body.Close()
+ req.Body.Close() // per RoundTripper contract
bodyWriter.cancel()
}
cc.forgetStreamID(cs.ID)
-
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
http2traceWroteRequest(cs.trace, werr)
return nil, werr
}
@@ -6142,7 +7322,15 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
handleReadLoopResponse := func(re http2resAndError) (*Response, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
-
+ // On error or status code 3xx, 4xx, 5xx, etc., abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(http2errStopReqBodyWrite)
}
@@ -6209,9 +7397,12 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
return handleReadLoopResponse(re)
default:
}
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
return nil, cs.resetErr
case err := <-bodyWriter.resc:
-
+ // Prefer the read loop's response, if available. Issue 16102.
select {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
@@ -6232,7 +7423,7 @@ func (cc *http2ClientConn) RoundTrip(req *Request) (*Response, error) {
// requires cc.wmu be held
func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
- first := true
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
frameSize := int(cc.maxFrameSize)
for len(hdrs) > 0 && cc.werr == nil {
chunk := hdrs
@@ -6253,7 +7444,10 @@ func (cc *http2ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []
cc.fr.WriteContinuation(streamID, endHeaders, chunk)
}
}
-
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
cc.bw.Flush()
return cc.werr
}
@@ -6269,13 +7463,16 @@ var (
func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
cc := cs.cc
- sentEnd := false
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
buf := cc.frameScratchBuffer()
defer cc.putFrameScratchBuffer(buf)
defer func() {
http2traceWroteRequest(cs.trace, err)
-
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
cerr := bodyCloser.Close()
if err == nil {
err = cerr
@@ -6314,7 +7511,12 @@ func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Clos
sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil {
-
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+ // opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
err = cc.bw.Flush()
}
cc.wmu.Unlock()
@@ -6325,7 +7527,9 @@ func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Clos
}
if sentEnd {
-
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
return nil
}
@@ -6339,6 +7543,8 @@ func (cs *http2clientStream) writeRequestBody(body io.Reader, bodyCloser io.Clos
cc.wmu.Lock()
defer cc.wmu.Unlock()
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
if len(trls) > 0 {
err = cc.writeHeaders(cs.ID, true, trls)
} else {
@@ -6372,7 +7578,7 @@ func (cs *http2clientStream) awaitFlowControl(maxBytes int) (taken int32, err er
take := a
if int(take) > maxBytes {
- take = int32(maxBytes)
+ take = int32(maxBytes) // can't truncate int; take is int32
}
if take > int32(cc.maxFrameSize) {
take = int32(cc.maxFrameSize)
@@ -6420,6 +7626,9 @@ func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trail
}
}
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
for k, vv := range req.Header {
if !httplex.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("invalid HTTP header name %q", k)
@@ -6431,6 +7640,11 @@ func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trail
}
}
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method)
if req.Method != "CONNECT" {
@@ -6446,13 +7660,20 @@ func (cc *http2ClientConn) encodeHeaders(req *Request, addGzipHeader bool, trail
lowKey := strings.ToLower(k)
switch lowKey {
case "host", "content-length":
-
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
continue
case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
-
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
continue
case "user-agent":
-
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
didUA = true
if len(vv) < 1 {
continue
@@ -6490,7 +7711,8 @@ func http2shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength < 0 {
return false
}
-
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
@@ -6503,7 +7725,8 @@ func http2shouldSendReqContentLength(method string, contentLength int64) bool {
func (cc *http2ClientConn) encodeTrailers(req *Request) []byte {
cc.hbuf.Reset()
for k, vv := range req.Trailer {
-
+ // Transfer-Encoding, etc. have already been filtered at the
+ // start of RoundTrip
lowKey := strings.ToLower(k)
for _, v := range vv {
cc.writeHeader(lowKey, v)
@@ -6557,7 +7780,7 @@ func (cc *http2ClientConn) streamByID(id uint32, andRemove bool) *http2clientStr
cc.idleTimer.Reset(cc.idleTimeout)
}
close(cs.done)
- cc.cond.Broadcast()
+ cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
return cs
}
@@ -6616,6 +7839,9 @@ func (rl *http2clientConnReadLoop) cleanup() {
cc.idleTimer.Stop()
}
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
err := cc.readerErr
cc.mu.Lock()
if cc.goAway != nil && http2isEOFOrNetReadError(err) {
@@ -6645,7 +7871,7 @@ func (rl *http2clientConnReadLoop) cleanup() {
func (rl *http2clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
- gotReply := false
+ gotReply := false // ever saw a HEADERS reply
gotSettings := false
for {
f, err := cc.fr.ReadFrame()
@@ -6653,7 +7879,7 @@ func (rl *http2clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(http2StreamError); ok {
- if cs := cc.streamByID(se.StreamID, true); cs != nil {
+ if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
cs.cc.writeStreamReset(cs.ID, se.Code, err)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
@@ -6674,7 +7900,7 @@ func (rl *http2clientConnReadLoop) run() error {
}
gotSettings = true
}
- maybeIdle := false
+ maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
case *http2MetaHeadersFrame:
@@ -6717,12 +7943,17 @@ func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) erro
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
if cs == nil {
-
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
return nil
}
if !cs.firstByte {
if cs.trace != nil {
-
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
http2traceFirstResponseByte(cs.trace)
}
cs.firstByte = true
@@ -6738,13 +7969,13 @@ func (rl *http2clientConnReadLoop) processHeaders(f *http2MetaHeadersFrame) erro
if _, ok := err.(http2ConnectionError); ok {
return err
}
-
+ // Any other error type is a stream error.
cs.cc.writeStreamReset(f.StreamID, http2ErrCodeProtocol, err)
cs.resc <- http2resAndError{err: err}
- return nil
+ return nil // return nil from process* funcs to keep conn alive
}
if res == nil {
-
+ // (nil, nil) special case. See handleResponse docs.
return nil
}
if res.Body != http2noBody {
@@ -6779,9 +8010,9 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http
if statusCode == 100 {
http2traceGot100Continue(cs.trace)
if cs.on100 != nil {
- cs.on100()
+ cs.on100() // forces any write delay timer to fire
}
- cs.pastHeaders = false
+ cs.pastHeaders = false // do it all again
return nil, nil
}
@@ -6817,10 +8048,12 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http
if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
res.ContentLength = clen64
} else {
-
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // safer smuggling-wise to ignore.
}
} else if len(clens) > 1 {
-
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // safer smuggling-wise to ignore.
}
}
@@ -6829,8 +8062,7 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http
return res, nil
}
- buf := new(bytes.Buffer)
- cs.bufPipe = http2pipe{b: buf}
+ cs.bufPipe = http2pipe{b: &http2dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = http2transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
@@ -6847,16 +8079,18 @@ func (rl *http2clientConnReadLoop) handleResponse(cs *http2clientStream, f *http
func (rl *http2clientConnReadLoop) processTrailers(cs *http2clientStream, f *http2MetaHeadersFrame) error {
if cs.pastTrailers {
-
+ // Too many HEADERS frames for this stream.
return http2ConnectionError(http2ErrCodeProtocol)
}
cs.pastTrailers = true
if !f.StreamEnded() {
-
+ // We expect that any HEADERS frame for trailers also
+ // has END_STREAM.
return http2ConnectionError(http2ErrCodeProtocol)
}
if len(f.PseudoFields()) > 0 {
-
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
return http2ConnectionError(http2ErrCodeProtocol)
}
@@ -6904,7 +8138,7 @@ func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
}
}
if n == 0 {
-
+ // No flow control tokens to send back.
return
}
@@ -6912,13 +8146,15 @@ func (b http2transportResponseBody) Read(p []byte) (n int, err error) {
defer cc.mu.Unlock()
var connAdd, streamAdd int32
-
+ // Check the conn-level first, before the stream-level.
if v := cc.inflow.available(); v < http2transportDefaultConnFlow/2 {
connAdd = http2transportDefaultConnFlow - v
cc.inflow.add(connAdd)
}
- if err == nil {
-
+ if err == nil { // No need to refresh if the stream is over or failed.
+ // Consider any buffered body data (read from the conn but not
+ // consumed by the client) when computing flow control for this
+ // stream.
v := int(cs.inflow.available()) + cs.bufPipe.Len()
if v < http2transportDefaultStreamFlow-http2transportDefaultStreamMinRefresh {
streamAdd = int32(http2transportDefaultStreamFlow - v)
@@ -6953,8 +8189,9 @@ func (b http2transportResponseBody) Close() error {
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, http2ErrCodeCancel)
+ cs.didReset = true
}
-
+ // Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
cc.fr.WriteWindowUpdate(0, uint32(unread))
@@ -6977,11 +8214,16 @@ func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
neverSent := cc.nextStreamID
cc.mu.Unlock()
if f.StreamID >= neverSent {
-
+ // We never asked for this.
cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
return http2ConnectionError(http2ErrCodeProtocol)
}
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+ // But at least return their flow control:
if f.Length > 0 {
cc.mu.Lock()
cc.inflow.add(int32(f.Length))
@@ -6995,12 +8237,7 @@ func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
return nil
}
if f.Length > 0 {
- if len(data) > 0 && cs.bufPipe.b == nil {
-
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
- return http2ConnectionError(http2ErrCodeProtocol)
- }
-
+ // Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {
cs.inflow.take(int32(f.Length))
@@ -7008,17 +8245,29 @@ func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
cc.mu.Unlock()
return http2ConnectionError(http2ErrCodeFlowControl)
}
-
- if pad := int32(f.Length) - int32(len(data)); pad > 0 {
- cs.inflow.add(pad)
- cc.inflow.add(pad)
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ var refund int
+ if pad := int(f.Length) - len(data); pad > 0 {
+ refund += pad
+ }
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset := cs.didReset
+ if didReset {
+ refund += len(data)
+ }
+ if refund > 0 {
+ cc.inflow.add(int32(refund))
cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(pad))
- cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+ cc.fr.WriteWindowUpdate(0, uint32(refund))
+ if !didReset {
+ cs.inflow.add(int32(refund))
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+ }
cc.bw.Flush()
cc.wmu.Unlock()
}
- didReset := cs.didReset
cc.mu.Unlock()
if len(data) > 0 && !didReset {
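The refund arithmetic above, reduced to its inputs: padding is always returned to the peer immediately, and the payload is returned too once the stream has been reset, since nothing will ever read it. At stream level the refund is skipped after a reset because the stream's accounting is already gone:

```go
package main

import "fmt"

// refund computes how many flow-control tokens to return immediately
// for a DATA frame: padding is always returned, and the payload too
// when the stream was already reset (its data will never be read).
func refund(frameLength uint32, dataLen int, didReset bool) int {
	r := 0
	if pad := int(frameLength) - dataLen; pad > 0 {
		r += pad // padding counts against flow control but carries no data
	}
	if didReset {
		r += dataLen
	}
	return r
}

func main() {
	fmt.Println(refund(100, 90, false)) // 10: padding only
	fmt.Println(refund(100, 90, true))  // 100: padding + unread payload
}
```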
@@ -7038,7 +8287,8 @@ func (rl *http2clientConnReadLoop) processData(f *http2DataFrame) error {
var http2errInvalidTrailers = errors.New("http2: invalid trailers")
func (rl *http2clientConnReadLoop) endStream(cs *http2clientStream) {
-
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
rl.endStreamError(cs, nil)
}
@@ -7074,7 +8324,7 @@ func (rl *http2clientConnReadLoop) processGoAway(f *http2GoAwayFrame) error {
cc := rl.cc
cc.t.connPool().MarkDead(cc)
if f.ErrCode != 0 {
-
+ // TODO: deal with GOAWAY more, particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
}
cc.setGoAway(f)
@@ -7101,11 +8351,17 @@ func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error
case http2SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case http2SettingInitialWindowSize:
-
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
if s.Val > math.MaxInt32 {
return http2ConnectionError(http2ErrCodeFlowControl)
}
+ // Adjust flow control of currently-open
+ // frames by the difference of the old initial
+ // window size and this one.
delta := int32(s.Val) - int32(cc.initialWindowSize)
for _, cs := range cc.streams {
cs.flow.add(delta)
@@ -7114,7 +8370,7 @@ func (rl *http2clientConnReadLoop) processSettings(f *http2SettingsFrame) error
cc.initialWindowSize = s.Val
default:
-
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
cc.vlogf("Unhandled Setting: %v", s)
}
return nil
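RFC 7540 section 6.9.2 makes a SETTINGS_INITIAL_WINDOW_SIZE change retroactive: every open stream's send window moves by the delta, possibly below zero. A worked example of the adjustment loop above:

```go
package main

import "fmt"

func main() {
	oldInitial := int32(65535)
	newInitial := int32(32768)
	delta := newInitial - oldInitial // -32767; may be negative

	// Per-stream send windows, keyed by stream ID.
	windows := map[uint32]int32{1: 1000, 3: 70000}
	for id := range windows {
		windows[id] += delta // a window may legally go negative here
	}
	fmt.Println(windows[1], windows[3]) // -31767 37233
}
```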
@@ -7155,18 +8411,21 @@ func (rl *http2clientConnReadLoop) processWindowUpdate(f *http2WindowUpdateFrame
func (rl *http2clientConnReadLoop) processResetStream(f *http2RSTStreamFrame) error {
cs := rl.cc.streamByID(f.StreamID, true)
if cs == nil {
-
+ // TODO: return error if server tries to RST_STREAM an idle stream
return nil
}
select {
case <-cs.peerReset:
-
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
default:
err := http2streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
- cs.cc.cond.Broadcast()
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
delete(rl.activeRes, cs.ID)
return nil
@@ -7183,7 +8442,7 @@ func (cc *http2ClientConn) ping(ctx http2contextContext) error {
return err
}
cc.mu.Lock()
-
+ // check for dup before insert
if _, found := cc.pings[p]; !found {
cc.pings[p] = c
cc.mu.Unlock()
@@ -7207,7 +8466,7 @@ func (cc *http2ClientConn) ping(ctx http2contextContext) error {
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
-
+ // connection closed
return cc.readerErr
}
}
@@ -7217,7 +8476,7 @@ func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
-
+ // If ack, notify listener if any
if c, ok := cc.pings[f.Data]; ok {
close(c)
delete(cc.pings, f.Data)
@@ -7234,12 +8493,21 @@ func (rl *http2clientConnReadLoop) processPing(f *http2PingFrame) error {
}
func (rl *http2clientConnReadLoop) processPushPromise(f *http2PushPromiseFrame) error {
-
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
return http2ConnectionError(http2ErrCodeProtocol)
}
func (cc *http2ClientConn) writeStreamReset(streamID uint32, code http2ErrCode, err error) {
-
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
cc.bw.Flush()
@@ -7368,7 +8636,8 @@ func (s http2bodyWriterState) cancel() {
func (s http2bodyWriterState) on100() {
if s.timer == nil {
-
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
return
}
s.timer.Stop()
@@ -7380,7 +8649,9 @@ func (s http2bodyWriterState) on100() {
// called until after the headers have been written.
func (s http2bodyWriterState) scheduleBodyWrite() {
if s.timer == nil {
-
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
go s.fn()
return
}
@@ -7435,7 +8706,9 @@ func http2writeEndsStream(w http2writeFramer) bool {
case *http2writeResHeaders:
return v.endStream
case nil:
-
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
panic("writeEndsStream called on nil writeFramer")
}
return false
@@ -7469,14 +8742,14 @@ type http2writeGoAway struct {
func (p *http2writeGoAway) writeFrame(ctx http2writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
if p.code != 0 {
- ctx.Flush()
+ ctx.Flush() // ignore error: we're hanging up on them anyway
time.Sleep(50 * time.Millisecond)
ctx.CloseConn()
}
return err
}
-func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false }
+func (*http2writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
type http2writeData struct {
streamID uint32
@@ -7581,7 +8854,13 @@ func http2encKV(enc *hpack.Encoder, k, v string) {
}
func (w *http2writeResHeaders) staysWithinBuffer(max int) bool {
-
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
return false
}
@@ -7640,7 +8919,7 @@ type http2writePushPromise struct {
}
func (w *http2writePushPromise) staysWithinBuffer(max int) bool {
-
+ // TODO: see writeResHeaders.staysWithinBuffer
return false
}
@@ -7692,7 +8971,7 @@ func (w http2write100ContinueHeadersFrame) writeFrame(ctx http2writeContext) err
}
func (w http2write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
-
+ // Sloppy but conservative:
return 9+2*(len(":status")+len("100")) <= max
}
@@ -7712,7 +8991,9 @@ func (wu http2writeWindowUpdate) writeFrame(ctx http2writeContext) error {
func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
if keys == nil {
sorter := http2sorterPool.Get().(*http2sorter)
-
+ // Using defer here, since the keys returned by the
+ // sorter.Keys method are only valid until the sorter
+ // is returned:
defer http2sorterPool.Put(sorter)
keys = sorter.Keys(h)
}
@@ -7720,16 +9001,19 @@ func http2encodeHeaders(enc *hpack.Encoder, h Header, keys []string) {
vv := h[k]
k = http2lowerHeader(k)
if !http2validWireHeaderFieldName(k) {
-
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
continue
}
isTE := k == "transfer-encoding"
for _, v := range vv {
if !httplex.ValidHeaderFieldValue(v) {
-
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
continue
}
-
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
if isTE && v != "trailers" {
continue
}
@@ -7797,7 +9081,10 @@ type http2FrameWriteRequest struct {
func (wr http2FrameWriteRequest) StreamID() uint32 {
if wr.stream == nil {
if se, ok := wr.write.(http2StreamError); ok {
-
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
return se.StreamID
}
return 0
@@ -7827,11 +9114,13 @@ func (wr http2FrameWriteRequest) DataSize() int {
func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2FrameWriteRequest, int) {
var empty http2FrameWriteRequest
+ // Non-DATA frames are always consumed whole.
wd, ok := wr.write.(*http2writeData)
if !ok || len(wd.p) == 0 {
return wr, empty, 1
}
+ // Might need to split after applying limits.
allowed := wr.stream.flow.available()
if n < allowed {
allowed = n
@@ -7849,10 +9138,13 @@ func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2
write: &http2writeData{
streamID: wd.streamID,
p: wd.p[:allowed],
-
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
endStream: false,
},
-
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
done: nil,
}
rest := http2FrameWriteRequest{
@@ -7867,6 +9159,8 @@ func (wr http2FrameWriteRequest) Consume(n int32) (http2FrameWriteRequest, http2
return consumed, rest, 2
}
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
wr.stream.flow.take(int32(len(wd.p)))
return wr, empty, 1
}
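Consume's DATA-splitting behavior, sketched without the flow-control plumbing: when only part of the payload fits within the allowed tokens, the frame sent now must clear END_STREAM and the remainder stays queued. `splitData` is a hypothetical reduction, not the real API:

```go
package main

import "fmt"

// splitData mimics FrameWriteRequest.Consume for DATA frames: take up
// to n bytes now and leave the rest (with endStream deferred) queued.
func splitData(p []byte, endStream bool, n int) (now []byte, nowEnd bool, rest []byte) {
	if n >= len(p) {
		return p, endStream, nil
	}
	// Bytes remain, so the frame we send now cannot carry END_STREAM.
	return p[:n], false, p[n:]
}

func main() {
	now, end, rest := splitData([]byte("hello world"), true, 5)
	fmt.Printf("%q end=%v rest=%q\n", now, end, rest) // "hello" end=false rest=" world"
}
```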
@@ -7893,7 +9187,7 @@ func (wr *http2FrameWriteRequest) replyToWriter(err error) {
default:
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
- wr.write = nil
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
}
// writeQueue is used by implementations of WriteScheduler.
@@ -7912,7 +9206,7 @@ func (q *http2writeQueue) shift() http2FrameWriteRequest {
panic("invalid use of queue")
}
wr := q.s[0]
-
+ // TODO: less copy-happy queue.
copy(q.s, q.s[1:])
q.s[len(q.s)-1] = http2FrameWriteRequest{}
q.s = q.s[:len(q.s)-1]
@@ -7941,6 +9235,8 @@ func (q *http2writeQueue) consume(n int32) (http2FrameWriteRequest, bool) {
type http2writeQueuePool []*http2writeQueue
// put inserts an unused writeQueue into the pool.
func (p *http2writeQueuePool) put(q *http2writeQueue) {
for i := range q.s {
@@ -8006,11 +9302,12 @@ type http2PriorityWriteSchedulerConfig struct {
}
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3.
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
// If cfg is nil, default options are used.
func http2NewPriorityWriteScheduler(cfg *http2PriorityWriteSchedulerConfig) http2WriteScheduler {
if cfg == nil {
-
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
cfg = &http2PriorityWriteSchedulerConfig{
MaxClosedNodesInTree: 10,
MaxIdleNodesInTree: 10,
@@ -8065,7 +9362,7 @@ func (n *http2priorityNode) setParent(parent *http2priorityNode) {
if n.parent == parent {
return
}
-
+ // Unlink from current parent.
if parent := n.parent; parent != nil {
if n.prev == nil {
parent.kids = n.next
@@ -8076,7 +9373,9 @@ func (n *http2priorityNode) setParent(parent *http2priorityNode) {
n.next.prev = n.prev
}
}
-
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
n.parent = parent
if parent == nil {
n.next = nil
@@ -8112,10 +9411,15 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior
return false
}
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == http2priorityNodeOpen)
}
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
w := n.kids.weight
needSort := false
for k := n.kids.next; k != nil; k = k.next {
@@ -8133,6 +9437,8 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior
return false
}
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
*tmp = (*tmp)[:0]
for n.kids != nil {
*tmp = append(*tmp, n.kids)
@@ -8140,7 +9446,7 @@ func (n *http2priorityNode) walkReadyInOrder(openParent bool, tmp *[]*http2prior
}
sort.Sort(http2sortPriorityNodeSiblings(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
- (*tmp)[i].setParent(n)
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
for k := n.kids; k != nil; k = k.next {
if k.walkReadyInOrder(openParent, tmp, f) {
@@ -8157,7 +9463,8 @@ func (z http2sortPriorityNodeSiblings) Len() int { return len(z) }
func (z http2sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z http2sortPriorityNodeSiblings) Less(i, k int) bool {
-
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
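The comparison continues past this hunk; reconstructed as a standalone function (assuming the rest of the method matches the upstream x/net/http2 scheduler), the rule is: subtrees that have sent no bytes are ordered by weight, heavier first, and otherwise by bytes sent relative to weight:

```go
package main

import "fmt"

// less reconstructs sortPriorityNodeSiblings.Less: idle subtrees are
// ordered by weight (heavier first); otherwise the subtree with fewer
// bytes sent relative to its weight comes first.
func less(wi, bi, wk, bk float64) bool {
	if bi == 0 && bk == 0 {
		return wi >= wk
	}
	if bk == 0 {
		return false
	}
	return bi/bk <= wi/wk
}

func main() {
	fmt.Println(less(16, 0, 8, 0))       // true: both idle, heavier first
	fmt.Println(less(16, 100, 16, 1000)) // true: fewer bytes per weight
}
```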
@@ -8199,7 +9506,7 @@ type http2priorityWriteScheduler struct {
}
func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
-
+ // The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != http2priorityNodeIdle {
panic(fmt.Sprintf("stream %d already opened", streamID))
@@ -8208,6 +9515,10 @@ func (ws *http2priorityWriteScheduler) OpenStream(streamID uint32, options http2
return
}
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
parent := ws.nodes[options.PusherID]
if parent == nil {
parent = &ws.root
@@ -8255,6 +9566,9 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht
panic("adjustPriority on root")
}
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
n := ws.nodes[streamID]
if n == nil {
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
@@ -8272,6 +9586,8 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
}
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
@@ -8279,10 +9595,18 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht
return
}
+ // Ignore if the client tries to make a node its own parent.
if n == parent {
return
}
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
for x := parent.parent; x != nil; x = x.parent {
if x == n {
parent.setParent(n.parent)
@@ -8290,6 +9614,9 @@ func (ws *http2priorityWriteScheduler) AdjustStream(streamID uint32, priority ht
}
}
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
if priority.Exclusive {
k := parent.kids
for k != nil {
@@ -8312,7 +9639,11 @@ func (ws *http2priorityWriteScheduler) Push(wr http2FrameWriteRequest) {
} else {
n = ws.nodes[id]
if n == nil {
-
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. However, wr can be a RST_STREAM. In this case, we
+ // push wr onto the root, rather than creating a new priorityNode,
+ // since RST_STREAM is tiny and the stream's priority is unknown
+ // anyway. See issue #17919.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
@@ -8333,7 +9664,9 @@ func (ws *http2priorityWriteScheduler) Pop() (wr http2FrameWriteRequest, ok bool
return false
}
n.addBytes(int64(wr.DataSize()))
-
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
if openParent {
ws.writeThrottleLimit += 1024
if ws.writeThrottleLimit < 0 {
@@ -8352,7 +9685,7 @@ func (ws *http2priorityWriteScheduler) addClosedOrIdleNode(list *[]*http2priorit
return
}
if len(*list) == maxSize {
-
+ // Remove the oldest node, then shift left.
ws.removeNode((*list)[0])
x := (*list)[1:]
copy(*list, x)
@@ -8390,7 +9723,7 @@ type http2randomWriteScheduler struct {
}
func (ws *http2randomWriteScheduler) OpenStream(streamID uint32, options http2OpenStreamOptions) {
-
+ // no-op: idle streams are not tracked
}
func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
@@ -8403,7 +9736,7 @@ func (ws *http2randomWriteScheduler) CloseStream(streamID uint32) {
}
func (ws *http2randomWriteScheduler) AdjustStream(streamID uint32, priority http2PriorityParam) {
-
+ // no-op: priorities are ignored
}
func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
@@ -8421,11 +9754,11 @@ func (ws *http2randomWriteScheduler) Push(wr http2FrameWriteRequest) {
}
func (ws *http2randomWriteScheduler) Pop() (http2FrameWriteRequest, bool) {
-
+ // Control frames first.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
-
+ // Iterate over all non-idle streams until finding one that can be consumed.
for _, q := range ws.sq {
if wr, ok := q.consume(math.MaxInt32); ok {
return wr, true
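
The Section 5.3.3 reparenting rule restored in the AdjustStream comments above is easiest to see on a toy tree. A minimal sketch, using a simplified node type rather than the scheduler's real http2priorityNode:

package main

import "fmt"

type node struct {
	id     int
	parent *node
}

// reparent makes n depend on parent, applying the RFC 7540 Section 5.3.3
// rule: if parent is currently a descendant of n, parent is first moved
// up to n's previous parent.
func reparent(n, parent *node) {
	for x := parent.parent; x != nil; x = x.parent {
		if x == n {
			parent.parent = n.parent
			break
		}
	}
	n.parent = parent
}

func main() {
	root := &node{id: 0}
	a := &node{id: 1, parent: root}
	b := &node{id: 3, parent: a} // b depends on a
	reparent(a, b)               // make a depend on its own dependency b
	fmt.Println(b.parent.id)     // 0: b was first moved under root
	fmt.Println(a.parent.id)     // 3: then a was placed under b
}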
diff --git a/libgo/go/net/http/httptest/recorder.go b/libgo/go/net/http/httptest/recorder.go
index 5f1aa6af479..741f076b363 100644
--- a/libgo/go/net/http/httptest/recorder.go
+++ b/libgo/go/net/http/httptest/recorder.go
@@ -6,6 +6,7 @@ package httptest
import (
"bytes"
+ "fmt"
"io/ioutil"
"net/http"
"strconv"
@@ -176,7 +177,7 @@ func (rw *ResponseRecorder) Result() *http.Response {
if res.StatusCode == 0 {
res.StatusCode = 200
}
- res.Status = http.StatusText(res.StatusCode)
+ res.Status = fmt.Sprintf("%03d %s", res.StatusCode, http.StatusText(res.StatusCode))
if rw.Body != nil {
res.Body = ioutil.NopCloser(bytes.NewReader(rw.Body.Bytes()))
}
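
A short sketch of the effect of the Result change above, assuming only the exported httptest API: Status now carries the three-digit code followed by the canonical text, matching the wire format.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	rec := httptest.NewRecorder()
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	})
	handler.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))

	res := rec.Result()
	fmt.Println(res.StatusCode) // 404
	fmt.Println(res.Status)     // "404 Not Found" after this change
}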
diff --git a/libgo/go/net/http/httptest/recorder_test.go b/libgo/go/net/http/httptest/recorder_test.go
index 9afba4e556a..a6259ebac71 100644
--- a/libgo/go/net/http/httptest/recorder_test.go
+++ b/libgo/go/net/http/httptest/recorder_test.go
@@ -23,7 +23,15 @@ func TestRecorder(t *testing.T) {
return nil
}
}
- hasResultStatus := func(wantCode int) checkFunc {
+ hasResultStatus := func(want string) checkFunc {
+ return func(rec *ResponseRecorder) error {
+ if rec.Result().Status != want {
+ return fmt.Errorf("Result().Status = %q; want %q", rec.Result().Status, want)
+ }
+ return nil
+ }
+ }
+ hasResultStatusCode := func(wantCode int) checkFunc {
return func(rec *ResponseRecorder) error {
if rec.Result().StatusCode != wantCode {
return fmt.Errorf("Result().StatusCode = %d; want %d", rec.Result().StatusCode, wantCode)
@@ -235,7 +243,8 @@ func TestRecorder(t *testing.T) {
hasOldHeader("X-Foo", "1"),
hasStatus(0),
hasHeader("X-Foo", "1"),
- hasResultStatus(200),
+ hasResultStatus("200 OK"),
+ hasResultStatusCode(200),
),
},
{
diff --git a/libgo/go/net/http/httptest/server.go b/libgo/go/net/http/httptest/server.go
index 5e9ace591f3..e543672b1e8 100644
--- a/libgo/go/net/http/httptest/server.go
+++ b/libgo/go/net/http/httptest/server.go
@@ -9,6 +9,7 @@ package httptest
import (
"bytes"
"crypto/tls"
+ "crypto/x509"
"flag"
"fmt"
"log"
@@ -35,6 +36,9 @@ type Server struct {
// before Start or StartTLS.
Config *http.Server
+ // certificate is a parsed version of the TLS config certificate, if present.
+ certificate *x509.Certificate
+
// wg counts the number of outstanding HTTP requests on this server.
// Close blocks until all requests are finished.
wg sync.WaitGroup
@@ -42,6 +46,10 @@ type Server struct {
mu sync.Mutex // guards closed and conns
closed bool
conns map[net.Conn]http.ConnState // except terminal states
+
+ // client is configured for use with the server.
+ // Its transport is automatically closed when Close is called.
+ client *http.Client
}
func newLocalListener() net.Listener {
@@ -93,6 +101,9 @@ func (s *Server) Start() {
if s.URL != "" {
panic("Server already started")
}
+ if s.client == nil {
+ s.client = &http.Client{Transport: &http.Transport{}}
+ }
s.URL = "http://" + s.Listener.Addr().String()
s.wrap()
s.goServe()
@@ -107,6 +118,9 @@ func (s *Server) StartTLS() {
if s.URL != "" {
panic("Server already started")
}
+ if s.client == nil {
+ s.client = &http.Client{Transport: &http.Transport{}}
+ }
cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey)
if err != nil {
panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
@@ -124,6 +138,17 @@ func (s *Server) StartTLS() {
if len(s.TLS.Certificates) == 0 {
s.TLS.Certificates = []tls.Certificate{cert}
}
+ s.certificate, err = x509.ParseCertificate(s.TLS.Certificates[0].Certificate[0])
+ if err != nil {
+ panic(fmt.Sprintf("httptest: NewTLSServer: %v", err))
+ }
+ certpool := x509.NewCertPool()
+ certpool.AddCert(s.certificate)
+ s.client.Transport = &http.Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: certpool,
+ },
+ }
s.Listener = tls.NewListener(s.Listener, s.TLS)
s.URL = "https://" + s.Listener.Addr().String()
s.wrap()
@@ -186,6 +211,13 @@ func (s *Server) Close() {
t.CloseIdleConnections()
}
+ // Also close the client idle connections.
+ if s.client != nil {
+ if t, ok := s.client.Transport.(closeIdleTransport); ok {
+ t.CloseIdleConnections()
+ }
+ }
+
s.wg.Wait()
}
@@ -206,7 +238,7 @@ func (s *Server) CloseClientConnections() {
nconn := len(s.conns)
ch := make(chan struct{}, nconn)
for c := range s.conns {
- s.closeConnChan(c, ch)
+ go s.closeConnChan(c, ch)
}
s.mu.Unlock()
@@ -228,6 +260,19 @@ func (s *Server) CloseClientConnections() {
}
}
+// Certificate returns the certificate used by the server, or nil if
+// the server doesn't use TLS.
+func (s *Server) Certificate() *x509.Certificate {
+ return s.certificate
+}
+
+// Client returns an HTTP client configured for making requests to the server.
+// It is configured to trust the server's TLS test certificate and will
+// close its idle connections on Server.Close.
+func (s *Server) Client() *http.Client {
+ return s.client
+}
+
func (s *Server) goServe() {
s.wg.Add(1)
go func() {
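
A usage sketch for the client and certificate plumbing added above (hedged; it assumes only the exported httptest API introduced here): the returned client already trusts the server's test certificate, so TLS tests no longer need InsecureSkipVerify or a hand-rolled cert pool.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "hello")
	}))
	defer ts.Close()

	fmt.Println(ts.Certificate().Subject) // the parsed test certificate

	res, err := ts.Client().Get(ts.URL) // no certificate warnings
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(res.Body)
	res.Body.Close()
	fmt.Println(string(body)) // hello
}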
diff --git a/libgo/go/net/http/httptest/server_test.go b/libgo/go/net/http/httptest/server_test.go
index d032c5983b7..8ab50cdb0ab 100644
--- a/libgo/go/net/http/httptest/server_test.go
+++ b/libgo/go/net/http/httptest/server_test.go
@@ -12,8 +12,48 @@ import (
"testing"
)
+type newServerFunc func(http.Handler) *Server
+
+var newServers = map[string]newServerFunc{
+ "NewServer": NewServer,
+ "NewTLSServer": NewTLSServer,
+
+ // The manual variants construct the Server directly, filling in only the
+ // exported fields of Server.
+ "NewServerManual": func(h http.Handler) *Server {
+ ts := &Server{Listener: newLocalListener(), Config: &http.Server{Handler: h}}
+ ts.Start()
+ return ts
+ },
+ "NewTLSServerManual": func(h http.Handler) *Server {
+ ts := &Server{Listener: newLocalListener(), Config: &http.Server{Handler: h}}
+ ts.StartTLS()
+ return ts
+ },
+}
+
func TestServer(t *testing.T) {
- ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ for _, name := range []string{"NewServer", "NewServerManual"} {
+ t.Run(name, func(t *testing.T) {
+ newServer := newServers[name]
+ t.Run("Server", func(t *testing.T) { testServer(t, newServer) })
+ t.Run("GetAfterClose", func(t *testing.T) { testGetAfterClose(t, newServer) })
+ t.Run("ServerCloseBlocking", func(t *testing.T) { testServerCloseBlocking(t, newServer) })
+ t.Run("ServerCloseClientConnections", func(t *testing.T) { testServerCloseClientConnections(t, newServer) })
+ t.Run("ServerClientTransportType", func(t *testing.T) { testServerClientTransportType(t, newServer) })
+ })
+ }
+ for _, name := range []string{"NewTLSServer", "NewTLSServerManual"} {
+ t.Run(name, func(t *testing.T) {
+ newServer := newServers[name]
+ t.Run("ServerClient", func(t *testing.T) { testServerClient(t, newServer) })
+ t.Run("TLSServerClientTransportType", func(t *testing.T) { testTLSServerClientTransportType(t, newServer) })
+ })
+ }
+}
+
+func testServer(t *testing.T, newServer newServerFunc) {
+ ts := newServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hello"))
}))
defer ts.Close()
@@ -22,6 +62,7 @@ func TestServer(t *testing.T) {
t.Fatal(err)
}
got, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
if err != nil {
t.Fatal(err)
}
@@ -31,8 +72,8 @@ func TestServer(t *testing.T) {
}
// Issue 12781
-func TestGetAfterClose(t *testing.T) {
- ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+func testGetAfterClose(t *testing.T, newServer newServerFunc) {
+ ts := newServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hello"))
}))
@@ -57,8 +98,8 @@ func TestGetAfterClose(t *testing.T) {
}
}
-func TestServerCloseBlocking(t *testing.T) {
- ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+func testServerCloseBlocking(t *testing.T, newServer newServerFunc) {
+ ts := newServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hello"))
}))
dial := func() net.Conn {
@@ -86,9 +127,9 @@ func TestServerCloseBlocking(t *testing.T) {
}
// Issue 14290
-func TestServerCloseClientConnections(t *testing.T) {
+func testServerCloseClientConnections(t *testing.T, newServer newServerFunc) {
var s *Server
- s = NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ s = newServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
s.CloseClientConnections()
}))
defer s.Close()
@@ -98,3 +139,66 @@ func TestServerCloseClientConnections(t *testing.T) {
t.Fatalf("Unexpected response: %#v", res)
}
}
+
+// Tests that Server.Client returns an http.Client that can make requests
+// to a NewTLSServer-created server without certificate verification errors.
+func testServerClient(t *testing.T, newTLSServer newServerFunc) {
+ ts := newTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+ defer ts.Close()
+ client := ts.Client()
+ res, err := client.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(got) != "hello" {
+ t.Errorf("got %q, want hello", string(got))
+ }
+}
+
+// Tests that Server.Client().Transport is of concrete type *http.Transport.
+func testServerClientTransportType(t *testing.T, newServer newServerFunc) {
+ ts := newServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ }))
+ defer ts.Close()
+ client := ts.Client()
+ if _, ok := client.Transport.(*http.Transport); !ok {
+ t.Errorf("got %T, want *http.Transport", client.Transport)
+ }
+}
+
+// Tests that a TLS server's Client().Transport is of concrete type
+// *http.Transport.
+func testTLSServerClientTransportType(t *testing.T, newTLSServer newServerFunc) {
+ ts := newTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ }))
+ defer ts.Close()
+ client := ts.Client()
+ if _, ok := client.Transport.(*http.Transport); !ok {
+ t.Errorf("got %T, want *http.Transport", client.Transport)
+ }
+}
+
+type onlyCloseListener struct {
+ net.Listener
+}
+
+func (onlyCloseListener) Close() error { return nil }
+
+// Issue 19729: panic in Server.Close for values created directly
+// without a constructor (so the unexported client field is nil).
+func TestServerZeroValueClose(t *testing.T) {
+ ts := &Server{
+ Listener: onlyCloseListener{},
+ Config: &http.Server{},
+ }
+
+ ts.Close() // tests that it doesn't panic
+}
diff --git a/libgo/go/net/http/httputil/reverseproxy.go b/libgo/go/net/http/httputil/reverseproxy.go
index 79c8fe27702..0d514f529ba 100644
--- a/libgo/go/net/http/httputil/reverseproxy.go
+++ b/libgo/go/net/http/httputil/reverseproxy.go
@@ -114,6 +114,16 @@ func copyHeader(dst, src http.Header) {
}
}
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
// Hop-by-hop headers. These are removed when sent to the backend.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
var hopHeaders = []string{
@@ -149,30 +159,21 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
}()
}
- outreq := new(http.Request)
- *outreq = *req // includes shallow copies of maps, but okay
+ outreq := req.WithContext(ctx) // includes shallow copies of maps, but okay
if req.ContentLength == 0 {
outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
}
- outreq = outreq.WithContext(ctx)
+
+ outreq.Header = cloneHeader(req.Header)
p.Director(outreq)
outreq.Close = false
- // We are modifying the same underlying map from req (shallow
- // copied above) so we only copy it if necessary.
- copiedHeaders := false
-
// Remove hop-by-hop headers listed in the "Connection" header.
// See RFC 2616, section 14.10.
if c := outreq.Header.Get("Connection"); c != "" {
for _, f := range strings.Split(c, ",") {
if f = strings.TrimSpace(f); f != "" {
- if !copiedHeaders {
- outreq.Header = make(http.Header)
- copyHeader(outreq.Header, req.Header)
- copiedHeaders = true
- }
outreq.Header.Del(f)
}
}
@@ -183,11 +184,6 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// connection, regardless of what the client sent to us.
for _, h := range hopHeaders {
if outreq.Header.Get(h) != "" {
- if !copiedHeaders {
- outreq.Header = make(http.Header)
- copyHeader(outreq.Header, req.Header)
- copiedHeaders = true
- }
outreq.Header.Del(h)
}
}
@@ -235,7 +231,8 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// The "Trailer" header isn't included in the Transport's response,
// at least for *http.Transport. Build it up from Trailer.
- if len(res.Trailer) > 0 {
+ announcedTrailers := len(res.Trailer)
+ if announcedTrailers > 0 {
trailerKeys := make([]string, 0, len(res.Trailer))
for k := range res.Trailer {
trailerKeys = append(trailerKeys, k)
@@ -254,7 +251,18 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
}
p.copyResponse(rw, res.Body)
res.Body.Close() // close now, instead of defer, to populate res.Trailer
- copyHeader(rw.Header(), res.Trailer)
+
+ if len(res.Trailer) == announcedTrailers {
+ copyHeader(rw.Header(), res.Trailer)
+ return
+ }
+
+ for k, vv := range res.Trailer {
+ k = http.TrailerPrefix + k
+ for _, v := range vv {
+ rw.Header().Add(k, v)
+ }
+ }
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
@@ -288,7 +296,7 @@ func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int
var written int64
for {
nr, rerr := src.Read(buf)
- if rerr != nil && rerr != io.EOF {
+ if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
p.logf("httputil: ReverseProxy read error during body copy: %v", rerr)
}
if nr > 0 {
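
For context on the unannounced-trailer loop above, a hedged sketch of how a backend handler produces such trailers: keys written after the body with the http.TrailerPrefix prefix become trailers without being pre-announced, which is exactly the case the proxy now forwards.

package main

import "net/http"

func handler(w http.ResponseWriter, r *http.Request) {
	// Announced trailer: declared before the first Write.
	w.Header().Set("Trailer", "X-Checksum")

	w.Write([]byte("body bytes"))

	// Set after writing the body:
	w.Header().Set("X-Checksum", "abc123")                         // announced
	w.Header().Set(http.TrailerPrefix+"X-Extra", "unannounced ok") // unannounced
}

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(handler))
}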
diff --git a/libgo/go/net/http/httputil/reverseproxy_test.go b/libgo/go/net/http/httputil/reverseproxy_test.go
index 20c4e16bcb8..37a9992375d 100644
--- a/libgo/go/net/http/httputil/reverseproxy_test.go
+++ b/libgo/go/net/http/httputil/reverseproxy_test.go
@@ -69,6 +69,7 @@ func TestReverseProxy(t *testing.T) {
w.WriteHeader(backendStatus)
w.Write([]byte(backendResponse))
w.Header().Set("X-Trailer", "trailer_value")
+ w.Header().Set(http.TrailerPrefix+"X-Unannounced-Trailer", "unannounced_trailer_value")
}))
defer backend.Close()
backendURL, err := url.Parse(backend.URL)
@@ -79,6 +80,7 @@ func TestReverseProxy(t *testing.T) {
proxyHandler.ErrorLog = log.New(ioutil.Discard, "", 0) // quiet for tests
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
+ frontendClient := frontend.Client()
getReq, _ := http.NewRequest("GET", frontend.URL, nil)
getReq.Host = "some-name"
@@ -86,7 +88,7 @@ func TestReverseProxy(t *testing.T) {
getReq.Header.Set("Proxy-Connection", "should be deleted")
getReq.Header.Set("Upgrade", "foo")
getReq.Close = true
- res, err := http.DefaultClient.Do(getReq)
+ res, err := frontendClient.Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -121,12 +123,15 @@ func TestReverseProxy(t *testing.T) {
if g, e := res.Trailer.Get("X-Trailer"), "trailer_value"; g != e {
t.Errorf("Trailer(X-Trailer) = %q ; want %q", g, e)
}
+ if g, e := res.Trailer.Get("X-Unannounced-Trailer"), "unannounced_trailer_value"; g != e {
+ t.Errorf("Trailer(X-Unannounced-Trailer) = %q ; want %q", g, e)
+ }
// Test that a backend failing to be reached or one which doesn't return
// a response results in a StatusBadGateway.
getReq, _ = http.NewRequest("GET", frontend.URL+"/?mode=hangup", nil)
getReq.Close = true
- res, err = http.DefaultClient.Do(getReq)
+ res, err = frontendClient.Do(getReq)
if err != nil {
t.Fatal(err)
}
@@ -172,7 +177,7 @@ func TestReverseProxyStripHeadersPresentInConnection(t *testing.T) {
getReq.Header.Set("Connection", "Upgrade, "+fakeConnectionToken)
getReq.Header.Set("Upgrade", "original value")
getReq.Header.Set(fakeConnectionToken, "should be deleted")
- res, err := http.DefaultClient.Do(getReq)
+ res, err := frontend.Client().Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -220,7 +225,7 @@ func TestXForwardedFor(t *testing.T) {
getReq.Header.Set("Connection", "close")
getReq.Header.Set("X-Forwarded-For", prevForwardedFor)
getReq.Close = true
- res, err := http.DefaultClient.Do(getReq)
+ res, err := frontend.Client().Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -259,7 +264,7 @@ func TestReverseProxyQuery(t *testing.T) {
frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL))
req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil)
req.Close = true
- res, err := http.DefaultClient.Do(req)
+ res, err := frontend.Client().Do(req)
if err != nil {
t.Fatalf("%d. Get: %v", i, err)
}
@@ -295,7 +300,7 @@ func TestReverseProxyFlushInterval(t *testing.T) {
req, _ := http.NewRequest("GET", frontend.URL, nil)
req.Close = true
- res, err := http.DefaultClient.Do(req)
+ res, err := frontend.Client().Do(req)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -349,13 +354,14 @@ func TestReverseProxyCancelation(t *testing.T) {
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
+ frontendClient := frontend.Client()
getReq, _ := http.NewRequest("GET", frontend.URL, nil)
go func() {
<-reqInFlight
- http.DefaultTransport.(*http.Transport).CancelRequest(getReq)
+ frontendClient.Transport.(*http.Transport).CancelRequest(getReq)
}()
- res, err := http.DefaultClient.Do(getReq)
+ res, err := frontendClient.Do(getReq)
if res != nil {
t.Errorf("got response %v; want nil", res.Status)
}
@@ -363,7 +369,7 @@ func TestReverseProxyCancelation(t *testing.T) {
// This should be an error like:
// Get http://127.0.0.1:58079: read tcp 127.0.0.1:58079:
// use of closed network connection
- t.Error("DefaultClient.Do() returned nil error; want non-nil error")
+ t.Error("Server.Client().Do() returned nil error; want non-nil error")
}
}
@@ -428,11 +434,12 @@ func TestUserAgentHeader(t *testing.T) {
proxyHandler.ErrorLog = log.New(ioutil.Discard, "", 0) // quiet for tests
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
+ frontendClient := frontend.Client()
getReq, _ := http.NewRequest("GET", frontend.URL, nil)
getReq.Header.Set("User-Agent", explicitUA)
getReq.Close = true
- res, err := http.DefaultClient.Do(getReq)
+ res, err := frontendClient.Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -441,7 +448,7 @@ func TestUserAgentHeader(t *testing.T) {
getReq, _ = http.NewRequest("GET", frontend.URL+"/noua", nil)
getReq.Header.Set("User-Agent", "")
getReq.Close = true
- res, err = http.DefaultClient.Do(getReq)
+ res, err = frontendClient.Do(getReq)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -493,7 +500,7 @@ func TestReverseProxyGetPutBuffer(t *testing.T) {
req, _ := http.NewRequest("GET", frontend.URL, nil)
req.Close = true
- res, err := http.DefaultClient.Do(req)
+ res, err := frontend.Client().Do(req)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -540,7 +547,7 @@ func TestReverseProxy_Post(t *testing.T) {
defer frontend.Close()
postReq, _ := http.NewRequest("POST", frontend.URL, bytes.NewReader(requestBody))
- res, err := http.DefaultClient.Do(postReq)
+ res, err := frontend.Client().Do(postReq)
if err != nil {
t.Fatalf("Do: %v", err)
}
@@ -573,7 +580,7 @@ func TestReverseProxy_NilBody(t *testing.T) {
frontend := httptest.NewServer(proxyHandler)
defer frontend.Close()
- res, err := http.DefaultClient.Get(frontend.URL)
+ res, err := frontend.Client().Get(frontend.URL)
if err != nil {
t.Fatal(err)
}
@@ -664,3 +671,101 @@ func TestReverseProxy_CopyBuffer(t *testing.T) {
}
}
}
+
+type staticTransport struct {
+ res *http.Response
+}
+
+func (t *staticTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ return t.res, nil
+}
+
+func BenchmarkServeHTTP(b *testing.B) {
+ res := &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(strings.NewReader("")),
+ }
+ proxy := &ReverseProxy{
+ Director: func(*http.Request) {},
+ Transport: &staticTransport{res},
+ }
+
+ w := httptest.NewRecorder()
+ r := httptest.NewRequest("GET", "/", nil)
+
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ proxy.ServeHTTP(w, r)
+ }
+}
+
+func TestServeHTTPDeepCopy(t *testing.T) {
+ backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello Gopher!"))
+ }))
+ defer backend.Close()
+ backendURL, err := url.Parse(backend.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ type result struct {
+ before, after string
+ }
+
+ resultChan := make(chan result, 1)
+ proxyHandler := NewSingleHostReverseProxy(backendURL)
+ frontend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ before := r.URL.String()
+ proxyHandler.ServeHTTP(w, r)
+ after := r.URL.String()
+ resultChan <- result{before: before, after: after}
+ }))
+ defer frontend.Close()
+
+ want := result{before: "/", after: "/"}
+
+ res, err := frontend.Client().Get(frontend.URL)
+ if err != nil {
+ t.Fatalf("Do: %v", err)
+ }
+ res.Body.Close()
+
+ got := <-resultChan
+ if got != want {
+ t.Errorf("got = %+v; want = %+v", got, want)
+ }
+}
+
+// Issue 18327: verify we always do a deep copy of the Request.Header map
+// before any mutations.
+func TestClonesRequestHeaders(t *testing.T) {
+ req, _ := http.NewRequest("GET", "http://foo.tld/", nil)
+ req.RemoteAddr = "1.2.3.4:56789"
+ rp := &ReverseProxy{
+ Director: func(req *http.Request) {
+ req.Header.Set("From-Director", "1")
+ },
+ Transport: roundTripperFunc(func(req *http.Request) (*http.Response, error) {
+ if v := req.Header.Get("From-Director"); v != "1" {
+ t.Errorf("From-Directory value = %q; want 1", v)
+ }
+ return nil, io.EOF
+ }),
+ }
+ rp.ServeHTTP(httptest.NewRecorder(), req)
+
+ if req.Header.Get("From-Director") == "1" {
+ t.Error("Director header mutation modified caller's request")
+ }
+ if req.Header.Get("X-Forwarded-For") != "" {
+ t.Error("X-Forward-For header mutation modified caller's request")
+ }
+}
+
+type roundTripperFunc func(req *http.Request) (*http.Response, error)
+
+func (fn roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return fn(req)
+}
diff --git a/libgo/go/net/http/main_test.go b/libgo/go/net/http/main_test.go
index 438bd2e58fd..21c850566cc 100644
--- a/libgo/go/net/http/main_test.go
+++ b/libgo/go/net/http/main_test.go
@@ -37,6 +37,8 @@ func interestingGoroutines() (gs []string) {
}
stack := strings.TrimSpace(sl[1])
if stack == "" ||
+ strings.Contains(stack, "testing.(*M).before.func1") ||
+ strings.Contains(stack, "os/signal.signal_recv") ||
strings.Contains(stack, "created by net.startServer") ||
strings.Contains(stack, "created by testing.RunTests") ||
strings.Contains(stack, "closeWriteAndWait") ||
@@ -56,8 +58,9 @@ func interestingGoroutines() (gs []string) {
// Verify the other tests didn't leave any goroutines running.
func goroutineLeaked() bool {
- if testing.Short() {
- // not counting goroutines for leakage in -short mode
+ if testing.Short() || runningBenchmarks() {
+ // Don't worry about goroutine leaks in -short mode or in
+ // benchmark mode. Too distracting when there are false positives.
return false
}
@@ -92,6 +95,18 @@ func setParallel(t *testing.T) {
}
}
+func runningBenchmarks() bool {
+ for i, arg := range os.Args {
+ if strings.HasPrefix(arg, "-test.bench=") && !strings.HasSuffix(arg, "=") {
+ return true
+ }
+ if arg == "-test.bench" && i < len(os.Args)-1 && os.Args[i+1] != "" {
+ return true
+ }
+ }
+ return false
+}
+
func afterTest(t testing.TB) {
http.DefaultTransport.(*http.Transport).CloseIdleConnections()
if testing.Short() {
@@ -151,7 +166,3 @@ func waitErrCondition(waitFor, checkEvery time.Duration, fn func() error) error
}
return err
}
-
-func closeClient(c *http.Client) {
- c.Transport.(*http.Transport).CloseIdleConnections()
-}
diff --git a/libgo/go/net/http/npn_test.go b/libgo/go/net/http/npn_test.go
index 4c1f6b573df..618bdbe54a6 100644
--- a/libgo/go/net/http/npn_test.go
+++ b/libgo/go/net/http/npn_test.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"crypto/tls"
+ "crypto/x509"
"fmt"
"io"
"io/ioutil"
@@ -43,10 +44,7 @@ func TestNextProtoUpgrade(t *testing.T) {
// Normal request, without NPN.
{
- tr := newTLSTransport(t, ts)
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
-
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -63,11 +61,18 @@ func TestNextProtoUpgrade(t *testing.T) {
// Request to an advertised but unhandled NPN protocol.
// Server will hang up.
{
- tr := newTLSTransport(t, ts)
- tr.TLSClientConfig.NextProtos = []string{"unhandled-proto"}
+ certPool := x509.NewCertPool()
+ certPool.AddCert(ts.Certificate())
+ tr := &Transport{
+ TLSClientConfig: &tls.Config{
+ RootCAs: certPool,
+ NextProtos: []string{"unhandled-proto"},
+ },
+ }
defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
-
+ c := &Client{
+ Transport: tr,
+ }
res, err := c.Get(ts.URL)
if err == nil {
defer res.Body.Close()
@@ -80,7 +85,8 @@ func TestNextProtoUpgrade(t *testing.T) {
// Request using the "tls-0.9" protocol, which we register here.
// It is HTTP/0.9 over TLS.
{
- tlsConfig := newTLSTransport(t, ts).TLSClientConfig
+ c := ts.Client()
+ tlsConfig := c.Transport.(*Transport).TLSClientConfig
tlsConfig.NextProtos = []string{"tls-0.9"}
conn, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig)
if err != nil {
diff --git a/libgo/go/net/http/pprof/pprof.go b/libgo/go/net/http/pprof/pprof.go
index 05d0890fdf3..12c7599ab0f 100644
--- a/libgo/go/net/http/pprof/pprof.go
+++ b/libgo/go/net/http/pprof/pprof.go
@@ -37,6 +37,11 @@
//
// wget http://localhost:6060/debug/pprof/trace?seconds=5
//
+// Or to look at the holders of contended mutexes, after calling
+// runtime.SetMutexProfileFraction in your program:
+//
+// go tool pprof http://localhost:6060/debug/pprof/mutex
+//
// To view all available profiles, open http://localhost:6060/debug/pprof/
// in your browser.
//
@@ -57,6 +62,7 @@ import (
"os"
"runtime"
"runtime/pprof"
+ "runtime/trace"
"strconv"
"strings"
"time"
@@ -89,6 +95,11 @@ func sleep(w http.ResponseWriter, d time.Duration) {
}
}
+func durationExceedsWriteTimeout(r *http.Request, seconds float64) bool {
+ srv, ok := r.Context().Value(http.ServerContextKey).(*http.Server)
+ return ok && srv.WriteTimeout != 0 && seconds >= srv.WriteTimeout.Seconds()
+}
+
// Profile responds with the pprof-formatted cpu profile.
// The package initialization registers it as /debug/pprof/profile.
func Profile(w http.ResponseWriter, r *http.Request) {
@@ -97,6 +108,14 @@ func Profile(w http.ResponseWriter, r *http.Request) {
sec = 30
}
+ if durationExceedsWriteTimeout(r, float64(sec)) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Go-Pprof", "1")
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+
// Set Content Type assuming StartCPUProfile will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
@@ -105,6 +124,7 @@ func Profile(w http.ResponseWriter, r *http.Request) {
// Can change header back to text content
// and send error code.
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Go-Pprof", "1")
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
return
@@ -122,20 +142,28 @@ func Trace(w http.ResponseWriter, r *http.Request) {
sec = 1
}
+ if durationExceedsWriteTimeout(r, sec) {
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Go-Pprof", "1")
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprintln(w, "profile duration exceeds server's WriteTimeout")
+ return
+ }
+
// Set Content Type assuming trace.Start will work,
// because if it does it starts writing.
w.Header().Set("Content-Type", "application/octet-stream")
- w.Write([]byte("tracing not yet supported with gccgo"))
- // if err := trace.Start(w); err != nil {
- // // trace.Start failed, so no writes yet.
- // // Can change header back to text content and send error code.
- // w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- // w.WriteHeader(http.StatusInternalServerError)
- // fmt.Fprintf(w, "Could not enable tracing: %s\n", err)
- // return
- // }
- // sleep(w, time.Duration(sec*float64(time.Second)))
- // trace.Stop()
+ if err := trace.Start(w); err != nil {
+ // trace.Start failed, so no writes yet.
+ // Can change header back to text content and send error code.
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Go-Pprof", "1")
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Could not enable tracing: %s\n", err)
+ return
+ }
+ sleep(w, time.Duration(sec*float64(time.Second)))
+ trace.Stop()
}
// Symbol looks up the program counters listed in the request,
@@ -207,7 +235,6 @@ func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
runtime.GC()
}
p.WriteTo(w, debug)
- return
}
// Index responds with the pprof-formatted profile named by the request.
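
A hedged sketch of the situation the new durationExceedsWriteTimeout check guards against: with a WriteTimeout set, a profile request that runs longer than the timeout would be cut off mid-write, so it is now rejected up front with a 400 and an X-Go-Pprof header.

package main

import (
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ on DefaultServeMux
	"time"
)

func main() {
	srv := &http.Server{
		Addr:         "localhost:6060",
		WriteTimeout: 10 * time.Second,
	}
	// With this change, requesting
	//   /debug/pprof/profile?seconds=30
	// against this server returns 400 (with an X-Go-Pprof header)
	// instead of producing a profile truncated by the write timeout.
	srv.ListenAndServe()
}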
diff --git a/libgo/go/net/http/proxy_test.go b/libgo/go/net/http/proxy_test.go
index 823d1447ee9..f59a551f0ac 100644
--- a/libgo/go/net/http/proxy_test.go
+++ b/libgo/go/net/http/proxy_test.go
@@ -75,7 +75,13 @@ func TestCacheKeys(t *testing.T) {
func ResetProxyEnv() {
for _, v := range []string{"HTTP_PROXY", "http_proxy", "NO_PROXY", "no_proxy"} {
- os.Setenv(v, "")
+ os.Unsetenv(v)
}
ResetCachedEnvironment()
}
+
+func TestInvalidNoProxy(t *testing.T) {
+ ResetProxyEnv()
+ os.Setenv("NO_PROXY", ":1")
+ useProxy("example.com:80") // should not panic
+}
diff --git a/libgo/go/net/http/request.go b/libgo/go/net/http/request.go
index fb6bb0aab58..13f367c1a8f 100644
--- a/libgo/go/net/http/request.go
+++ b/libgo/go/net/http/request.go
@@ -27,8 +27,6 @@ import (
"sync"
"golang_org/x/net/idna"
- "golang_org/x/text/unicode/norm"
- "golang_org/x/text/width"
)
const (
@@ -331,6 +329,16 @@ func (r *Request) WithContext(ctx context.Context) *Request {
r2 := new(Request)
*r2 = *r
r2.ctx = ctx
+
+ // Deep copy the URL because it isn't
+ // a map and the URL is mutable by users
+ // of WithContext.
+ if r.URL != nil {
+ r2URL := new(url.URL)
+ *r2URL = *r.URL
+ r2.URL = r2URL
+ }
+
return r2
}
@@ -341,18 +349,6 @@ func (r *Request) ProtoAtLeast(major, minor int) bool {
r.ProtoMajor == major && r.ProtoMinor >= minor
}
-// protoAtLeastOutgoing is like ProtoAtLeast, but is for outgoing
-// requests (see issue 18407) where these fields aren't supposed to
-// matter. As a minor fix for Go 1.8, at least treat (0, 0) as
-// matching HTTP/1.1 or HTTP/1.0. Only HTTP/1.1 is used.
-// TODO(bradfitz): ideally remove this whole method. It shouldn't be used.
-func (r *Request) protoAtLeastOutgoing(major, minor int) bool {
- if r.ProtoMajor == 0 && r.ProtoMinor == 0 && major == 1 && minor <= 1 {
- return true
- }
- return r.ProtoAtLeast(major, minor)
-}
-
// UserAgent returns the client's User-Agent, if sent in the request.
func (r *Request) UserAgent() string {
return r.Header.Get("User-Agent")
@@ -621,6 +617,9 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, wai
// Write body and trailer
err = tw.WriteBody(w)
if err != nil {
+ if tw.bodyReadError == err {
+ err = requestBodyReadError{err}
+ }
return err
}
@@ -630,17 +629,25 @@ func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header, wai
return nil
}
+// requestBodyReadError wraps an error from (*Request).write to indicate
+// that the error came from a Read call on the Request.Body.
+// This error type should not escape the net/http package to users.
+type requestBodyReadError struct{ error }
+
func idnaASCII(v string) (string, error) {
+ // TODO: Consider removing this check after verifying performance is okay.
+ // Right now punycode verification, length checks, context checks, and the
+ // permissible character tests are all omitted. It also prevents the ToASCII
+ // call from salvaging an invalid IDN, when possible. As a result it may be
+ // possible to have two IDNs that appear identical to the user where the
+ // ASCII-only version causes an error downstream whereas the non-ASCII
+ // version does not.
+ // Note that for correct ASCII IDNs, ToASCII would only do considerably
+ // more work; it would not cause an allocation.
if isASCII(v) {
return v, nil
}
- // The idna package doesn't do everything from
- // https://tools.ietf.org/html/rfc5895 so we do it here.
- // TODO(bradfitz): should the idna package do this instead?
- v = strings.ToLower(v)
- v = width.Fold.String(v)
- v = norm.NFC.String(v)
- return idna.ToASCII(v)
+ return idna.Lookup.ToASCII(v)
}
// cleanHost cleans up the host sent in request's Host header.
@@ -755,7 +762,7 @@ func validMethod(method string) bool {
// exact value (instead of -1), GetBody is populated (so 307 and 308
// redirects can replay the body), and Body is set to NoBody if the
// ContentLength is 0.
-func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
+func NewRequest(method, url string, body io.Reader) (*Request, error) {
if method == "" {
// We document that "" means "GET" for Request.Method, and people have
// relied on that from NewRequest, so keep that working.
@@ -765,7 +772,7 @@ func NewRequest(method, urlStr string, body io.Reader) (*Request, error) {
if !validMethod(method) {
return nil, fmt.Errorf("net/http: invalid method %q", method)
}
- u, err := url.Parse(urlStr)
+ u, err := parseURL(url) // Just url.Parse (url is shadowed for godoc).
if err != nil {
return nil, err
}
@@ -930,6 +937,9 @@ func readRequest(b *bufio.Reader, deleteHostHeader bool) (req *Request, err erro
if !ok {
return nil, &badStringError{"malformed HTTP request", s}
}
+ if !validMethod(req.Method) {
+ return nil, &badStringError{"invalid method", req.Method}
+ }
rawurl := req.RequestURI
if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok {
return nil, &badStringError{"malformed HTTP version", req.Proto}
@@ -1021,11 +1031,6 @@ type maxBytesReader struct {
err error // sticky error
}
-func (l *maxBytesReader) tooLarge() (n int, err error) {
- l.err = errors.New("http: request body too large")
- return 0, l.err
-}
-
func (l *maxBytesReader) Read(p []byte) (n int, err error) {
if l.err != nil {
return 0, l.err
@@ -1297,7 +1302,7 @@ func (r *Request) closeBody() {
}
func (r *Request) isReplayable() bool {
- if r.Body == nil {
+ if r.Body == nil || r.Body == NoBody || r.GetBody != nil {
switch valueOrDefault(r.Method, "GET") {
case "GET", "HEAD", "OPTIONS", "TRACE":
return true
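
The widened isReplayable condition above pairs with NewRequest's GetBody support: bodies that can be re-materialized are now safe to replay on a dead keep-alive connection. A small sketch, assuming only the documented net/http API:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	body := bytes.NewReader([]byte(`{"k":"v"}`))
	req, err := http.NewRequest("POST", "https://example.com/api", body)
	if err != nil {
		panic(err)
	}
	// GetBody was populated by NewRequest; each call yields a fresh body,
	// which is what makes the request replayable under the new check.
	fmt.Println(req.GetBody != nil) // true
	rc, _ := req.GetBody()
	buf := new(bytes.Buffer)
	buf.ReadFrom(rc)
	fmt.Println(buf.String()) // {"k":"v"}
}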
diff --git a/libgo/go/net/http/request_test.go b/libgo/go/net/http/request_test.go
index e6748375b58..967156bac9b 100644
--- a/libgo/go/net/http/request_test.go
+++ b/libgo/go/net/http/request_test.go
@@ -7,6 +7,7 @@ package http_test
import (
"bufio"
"bytes"
+ "context"
"encoding/base64"
"fmt"
"io"
@@ -785,6 +786,28 @@ func TestMaxBytesReaderStickyError(t *testing.T) {
}
}
+func TestWithContextDeepCopiesURL(t *testing.T) {
+ req, err := NewRequest("POST", "https://golang.org/", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reqCopy := req.WithContext(context.Background())
+ reqCopy.URL.Scheme = "http"
+
+ firstURL, secondURL := req.URL.String(), reqCopy.URL.String()
+ if firstURL == secondURL {
+ t.Errorf("unexpected change to original request's URL")
+ }
+
+ // And also check we don't crash on nil (Issue 20601)
+ req.URL = nil
+ reqCopy = req.WithContext(context.Background())
+ if reqCopy.URL != nil {
+ t.Error("expected nil URL in cloned request")
+ }
+}
+
// verify that NewRequest sets Request.GetBody and that it works
func TestNewRequestGetBody(t *testing.T) {
tests := []struct {
diff --git a/libgo/go/net/http/response.go b/libgo/go/net/http/response.go
index ae118fb386d..0357b605023 100644
--- a/libgo/go/net/http/response.go
+++ b/libgo/go/net/http/response.go
@@ -37,9 +37,10 @@ type Response struct {
// Header maps header keys to values. If the response had multiple
// headers with the same key, they may be concatenated, with comma
// delimiters. (Section 4.2 of RFC 2616 requires that multiple headers
- // be semantically equivalent to a comma-delimited sequence.) Values
- // duplicated by other fields in this struct (e.g., ContentLength) are
- // omitted from Header.
+ // be semantically equivalent to a comma-delimited sequence.) When
+ // Header values are duplicated by other fields in this struct (e.g.,
+ // ContentLength, TransferEncoding, Trailer), the field values are
+ // authoritative.
//
// Keys in the map are canonicalized (see CanonicalHeaderKey).
Header Header
@@ -152,23 +153,23 @@ func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) {
}
return nil, err
}
- f := strings.SplitN(line, " ", 3)
- if len(f) < 2 {
+ if i := strings.IndexByte(line, ' '); i == -1 {
return nil, &badStringError{"malformed HTTP response", line}
+ } else {
+ resp.Proto = line[:i]
+ resp.Status = strings.TrimLeft(line[i+1:], " ")
}
- reasonPhrase := ""
- if len(f) > 2 {
- reasonPhrase = f[2]
+ statusCode := resp.Status
+ if i := strings.IndexByte(resp.Status, ' '); i != -1 {
+ statusCode = resp.Status[:i]
}
- if len(f[1]) != 3 {
- return nil, &badStringError{"malformed HTTP status code", f[1]}
+ if len(statusCode) != 3 {
+ return nil, &badStringError{"malformed HTTP status code", statusCode}
}
- resp.StatusCode, err = strconv.Atoi(f[1])
+ resp.StatusCode, err = strconv.Atoi(statusCode)
if err != nil || resp.StatusCode < 0 {
- return nil, &badStringError{"malformed HTTP status code", f[1]}
+ return nil, &badStringError{"malformed HTTP status code", statusCode}
}
- resp.Status = f[1] + " " + reasonPhrase
- resp.Proto = f[0]
var ok bool
if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok {
return nil, &badStringError{"malformed HTTP version", resp.Proto}
@@ -320,3 +321,9 @@ func (r *Response) Write(w io.Writer) error {
// Success
return nil
}
+
+func (r *Response) closeBody() {
+ if r.Body != nil {
+ r.Body.Close()
+ }
+}
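
A hedged sketch of the status-line parsing behavior changed above: a status with no reason phrase no longer gains a trailing space, and extra spaces after the HTTP version are tolerated (issue 19989).

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	for _, raw := range []string{
		"HTTP/1.0 303\r\n\r\n",               // no reason phrase
		"HTTP/1.0  401 Unauthorized\r\n\r\n", // two spaces after version
	} {
		res, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
		if err != nil {
			fmt.Println("err:", err)
			continue
		}
		fmt.Printf("%q %d\n", res.Status, res.StatusCode)
		res.Body.Close()
		// Output after this change:
		// "303" 303
		// "401 Unauthorized" 401
	}
}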
diff --git a/libgo/go/net/http/response_test.go b/libgo/go/net/http/response_test.go
index 660d51791b7..f1a50bd5989 100644
--- a/libgo/go/net/http/response_test.go
+++ b/libgo/go/net/http/response_test.go
@@ -318,7 +318,7 @@ var respTests = []respTest{
{
"HTTP/1.0 303\r\n\r\n",
Response{
- Status: "303 ",
+ Status: "303",
StatusCode: 303,
Proto: "HTTP/1.0",
ProtoMajor: 1,
@@ -532,6 +532,29 @@ some body`,
},
"\x1f\x8b\b\x00\x00\x00\x00\x00\x00\x00s\xf3\xf7\a\x00\xab'\xd4\x1a\x03\x00\x00\x00",
},
+
+ // Issue 19989: two spaces between HTTP version and status.
+ {
+ "HTTP/1.0 401 Unauthorized\r\n" +
+ "Content-type: text/html\r\n" +
+ "WWW-Authenticate: Basic realm=\"\"\r\n\r\n" +
+ "Your Authentication failed.\r\n",
+ Response{
+ Status: "401 Unauthorized",
+ StatusCode: 401,
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ ProtoMinor: 0,
+ Request: dummyReq("GET"),
+ Header: Header{
+ "Content-Type": {"text/html"},
+ "Www-Authenticate": {`Basic realm=""`},
+ },
+ Close: true,
+ ContentLength: -1,
+ },
+ "Your Authentication failed.\r\n",
+ },
}
// tests successful calls to ReadResponse, and inspects the returned Response.
@@ -926,3 +949,29 @@ func TestNeedsSniff(t *testing.T) {
t.Errorf("needsSniff empty Content-Type = %t; want %t", got, want)
}
}
+
+// A response should write out only a single Connection: close header. Tests #19499.
+func TestResponseWritesOnlySingleConnectionClose(t *testing.T) {
+ const connectionCloseHeader = "Connection: close"
+
+ res, err := ReadResponse(bufio.NewReader(strings.NewReader("HTTP/1.0 200 OK\r\n\r\nAAAA")), nil)
+ if err != nil {
+ t.Fatalf("ReadResponse failed %v", err)
+ }
+
+ var buf1 bytes.Buffer
+ if err = res.Write(&buf1); err != nil {
+ t.Fatalf("Write failed %v", err)
+ }
+ if res, err = ReadResponse(bufio.NewReader(&buf1), nil); err != nil {
+ t.Fatalf("ReadResponse failed %v", err)
+ }
+
+ var buf2 bytes.Buffer
+ if err = res.Write(&buf2); err != nil {
+ t.Fatalf("Write failed %v", err)
+ }
+ if count := strings.Count(buf2.String(), connectionCloseHeader); count != 1 {
+ t.Errorf("Found %d %q header", count, connectionCloseHeader)
+ }
+}
diff --git a/libgo/go/net/http/serve_test.go b/libgo/go/net/http/serve_test.go
index 73dd56e8c42..7137599c42e 100644
--- a/libgo/go/net/http/serve_test.go
+++ b/libgo/go/net/http/serve_test.go
@@ -337,6 +337,7 @@ var serveMuxTests = []struct {
{"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"},
{"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"},
{"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"},
+ {"GET", "codesearch.google.com:443", "/", 203, "codesearch.google.com/"},
{"GET", "images.google.com", "/search", 201, "/search"},
{"GET", "images.google.com", "/search/", 404, ""},
{"GET", "images.google.com", "/search/foo", 404, ""},
@@ -460,31 +461,86 @@ func TestMuxRedirectLeadingSlashes(t *testing.T) {
}
}
+func BenchmarkServeMux(b *testing.B) {
+
+ type test struct {
+ path string
+ code int
+ req *Request
+ }
+
+ // Build example handlers and requests
+ var tests []test
+ endpoints := []string{"search", "dir", "file", "change", "count", "s"}
+ for _, e := range endpoints {
+ for i := 200; i < 230; i++ {
+ p := fmt.Sprintf("/%s/%d/", e, i)
+ tests = append(tests, test{
+ path: p,
+ code: i,
+ req: &Request{Method: "GET", Host: "localhost", URL: &url.URL{Path: p}},
+ })
+ }
+ }
+ mux := NewServeMux()
+ for _, tt := range tests {
+ mux.Handle(tt.path, serve(tt.code))
+ }
+
+ rw := httptest.NewRecorder()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, tt := range tests {
+ *rw = httptest.ResponseRecorder{}
+ h, pattern := mux.Handler(tt.req)
+ h.ServeHTTP(rw, tt.req)
+ if pattern != tt.path || rw.Code != tt.code {
+ b.Fatalf("got %d, %q, want %d, %q", rw.Code, pattern, tt.code, tt.path)
+ }
+ }
+ }
+}
+
func TestServerTimeouts(t *testing.T) {
setParallel(t)
defer afterTest(t)
+ // Try three times, with increasing timeouts.
+ tries := []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second}
+ for i, timeout := range tries {
+ err := testServerTimeouts(timeout)
+ if err == nil {
+ return
+ }
+ t.Logf("failed at %v: %v", timeout, err)
+ if i != len(tries)-1 {
+ t.Logf("retrying at %v ...", tries[i+1])
+ }
+ }
+ t.Fatal("all attempts failed")
+}
+
+func testServerTimeouts(timeout time.Duration) error {
reqNum := 0
ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
reqNum++
fmt.Fprintf(res, "req=%d", reqNum)
}))
- ts.Config.ReadTimeout = 250 * time.Millisecond
- ts.Config.WriteTimeout = 250 * time.Millisecond
+ ts.Config.ReadTimeout = timeout
+ ts.Config.WriteTimeout = timeout
ts.Start()
defer ts.Close()
// Hit the HTTP server successfully.
- tr := &Transport{DisableKeepAlives: true} // they interfere with this test
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
r, err := c.Get(ts.URL)
if err != nil {
- t.Fatalf("http Get #1: %v", err)
+ return fmt.Errorf("http Get #1: %v", err)
}
got, err := ioutil.ReadAll(r.Body)
expected := "req=1"
if string(got) != expected || err != nil {
- t.Errorf("Unexpected response for request #1; got %q ,%v; expected %q, nil",
+ return fmt.Errorf("Unexpected response for request #1; got %q ,%v; expected %q, nil",
string(got), err, expected)
}
@@ -492,17 +548,18 @@ func TestServerTimeouts(t *testing.T) {
t1 := time.Now()
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
- t.Fatalf("Dial: %v", err)
+ return fmt.Errorf("Dial: %v", err)
}
buf := make([]byte, 1)
n, err := conn.Read(buf)
conn.Close()
latency := time.Since(t1)
if n != 0 || err != io.EOF {
- t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
+ return fmt.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF)
}
- if latency < 200*time.Millisecond /* fudge from 250 ms above */ {
- t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond)
+ minLatency := timeout / 5 * 4
+ if latency < minLatency {
+ return fmt.Errorf("got EOF after %s, want >= %s", latency, minLatency)
}
// Hit the HTTP server successfully again, verifying that the
@@ -510,29 +567,31 @@ func TestServerTimeouts(t *testing.T) {
// get "req=2", not "req=3")
r, err = c.Get(ts.URL)
if err != nil {
- t.Fatalf("http Get #2: %v", err)
+ return fmt.Errorf("http Get #2: %v", err)
}
got, err = ioutil.ReadAll(r.Body)
+ r.Body.Close()
expected = "req=2"
if string(got) != expected || err != nil {
- t.Errorf("Get #2 got %q, %v, want %q, nil", string(got), err, expected)
+ return fmt.Errorf("Get #2 got %q, %v, want %q, nil", string(got), err, expected)
}
if !testing.Short() {
conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
- t.Fatalf("Dial: %v", err)
+ return fmt.Errorf("long Dial: %v", err)
}
defer conn.Close()
go io.Copy(ioutil.Discard, conn)
for i := 0; i < 5; i++ {
_, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
if err != nil {
- t.Fatalf("on write %d: %v", i, err)
+ return fmt.Errorf("on write %d: %v", i, err)
}
- time.Sleep(ts.Config.ReadTimeout / 2)
+ time.Sleep(timeout / 2)
}
}
+ return nil
}
// Test that the HTTP/2 server handles Server.WriteTimeout (Issue 18437)
@@ -548,12 +607,10 @@ func TestHTTP2WriteDeadlineExtendedOnNewRequest(t *testing.T) {
ts.StartTLS()
defer ts.Close()
- tr := newTLSTransport(t, ts)
- defer tr.CloseIdleConnections()
- if err := ExportHttp2ConfigureTransport(tr); err != nil {
+ c := ts.Client()
+ if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
t.Fatal(err)
}
- c := &Client{Transport: tr}
for i := 1; i <= 3; i++ {
req, err := NewRequest("GET", ts.URL, nil)
@@ -585,13 +642,139 @@ func TestHTTP2WriteDeadlineExtendedOnNewRequest(t *testing.T) {
}
}
+// tryTimeouts runs testFunc with increasing timeouts. Test passes on first success,
+// and fails if all timeouts fail.
+func tryTimeouts(t *testing.T, testFunc func(timeout time.Duration) error) {
+ tries := []time.Duration{250 * time.Millisecond, 500 * time.Millisecond, 1 * time.Second}
+ for i, timeout := range tries {
+ err := testFunc(timeout)
+ if err == nil {
+ return
+ }
+ t.Logf("failed at %v: %v", timeout, err)
+ if i != len(tries)-1 {
+ t.Logf("retrying at %v ...", tries[i+1])
+ }
+ }
+ t.Fatal("all attempts failed")
+}
+
+// Test that the HTTP/2 server RSTs stream on slow write.
+func TestHTTP2WriteDeadlineEnforcedPerStream(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ setParallel(t)
+ defer afterTest(t)
+ tryTimeouts(t, testHTTP2WriteDeadlineEnforcedPerStream)
+}
+
+func testHTTP2WriteDeadlineEnforcedPerStream(timeout time.Duration) error {
+ reqNum := 0
+ ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
+ reqNum++
+ if reqNum == 1 {
+ return // first request succeeds
+ }
+ time.Sleep(timeout) // second request times out
+ }))
+ ts.Config.WriteTimeout = timeout / 2
+ ts.TLS = &tls.Config{NextProtos: []string{"h2"}}
+ ts.StartTLS()
+ defer ts.Close()
+
+ c := ts.Client()
+ if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
+ return fmt.Errorf("ExportHttp2ConfigureTransport: %v", err)
+ }
+
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ return fmt.Errorf("NewRequest: %v", err)
+ }
+ r, err := c.Do(req)
+ if err != nil {
+ return fmt.Errorf("http2 Get #1: %v", err)
+ }
+ r.Body.Close()
+ if r.ProtoMajor != 2 {
+ return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
+ }
+
+ req, err = NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ return fmt.Errorf("NewRequest: %v", err)
+ }
+ r, err = c.Do(req)
+ if err == nil {
+ r.Body.Close()
+ if r.ProtoMajor != 2 {
+ return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
+ }
+ return fmt.Errorf("http2 Get #2 expected error, got nil")
+ }
+ expected := "stream ID 3; INTERNAL_ERROR" // client IDs are odd, second stream should be 3
+ if !strings.Contains(err.Error(), expected) {
+ return fmt.Errorf("http2 Get #2: expected error to contain %q, got %q", expected, err)
+ }
+ return nil
+}
+
+// Test that the HTTP/2 server does not send RST when WriteDeadline not set.
+func TestHTTP2NoWriteDeadline(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+ setParallel(t)
+ defer afterTest(t)
+ tryTimeouts(t, testHTTP2NoWriteDeadline)
+}
+
+func testHTTP2NoWriteDeadline(timeout time.Duration) error {
+ reqNum := 0
+ ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) {
+ reqNum++
+ if reqNum == 1 {
+ return // first request succeeds
+ }
+ time.Sleep(timeout) // second request times out
+ }))
+ ts.TLS = &tls.Config{NextProtos: []string{"h2"}}
+ ts.StartTLS()
+ defer ts.Close()
+
+ c := ts.Client()
+ if err := ExportHttp2ConfigureTransport(c.Transport.(*Transport)); err != nil {
+ return fmt.Errorf("ExportHttp2ConfigureTransport: %v", err)
+ }
+
+ for i := 0; i < 2; i++ {
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ return fmt.Errorf("NewRequest: %v", err)
+ }
+ r, err := c.Do(req)
+ if err != nil {
+ return fmt.Errorf("http2 Get #%d: %v", i, err)
+ }
+ r.Body.Close()
+ if r.ProtoMajor != 2 {
+ return fmt.Errorf("http2 Get expected HTTP/2.0, got %q", r.Proto)
+ }
+ }
+ return nil
+}
+
// golang.org/issue/4741 -- setting only a write timeout that triggers
// shouldn't cause a handler to block forever on reads (next HTTP
// request) that will never happen.
func TestOnlyWriteTimeout(t *testing.T) {
setParallel(t)
defer afterTest(t)
- var conn net.Conn
+ var (
+ mu sync.RWMutex
+ conn net.Conn
+ )
var afterTimeoutErrc = make(chan error, 1)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, req *Request) {
buf := make([]byte, 512<<10)
@@ -600,17 +783,21 @@ func TestOnlyWriteTimeout(t *testing.T) {
t.Errorf("handler Write error: %v", err)
return
}
+ mu.RLock()
+ defer mu.RUnlock()
+ if conn == nil {
+ t.Error("no established connection found")
+ return
+ }
conn.SetWriteDeadline(time.Now().Add(-30 * time.Second))
_, err = w.Write(buf)
afterTimeoutErrc <- err
}))
- ts.Listener = trackLastConnListener{ts.Listener, &conn}
+ ts.Listener = trackLastConnListener{ts.Listener, &mu, &conn}
ts.Start()
defer ts.Close()
- tr := &Transport{DisableKeepAlives: false}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
errc := make(chan error)
go func() {
@@ -620,6 +807,7 @@ func TestOnlyWriteTimeout(t *testing.T) {
return
}
_, err = io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
errc <- err
}()
select {
@@ -638,12 +826,18 @@ func TestOnlyWriteTimeout(t *testing.T) {
// trackLastConnListener tracks the last net.Conn that was accepted.
type trackLastConnListener struct {
net.Listener
+
+ mu *sync.RWMutex
last *net.Conn // destination
}
func (l trackLastConnListener) Accept() (c net.Conn, err error) {
c, err = l.Listener.Accept()
- *l.last = c
+ if err == nil {
+ l.mu.Lock()
+ *l.last = c
+ l.mu.Unlock()
+ }
return
}
@@ -671,8 +865,7 @@ func TestIdentityResponse(t *testing.T) {
ts := httptest.NewServer(handler)
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+ c := ts.Client()
// Note: this relies on the assumption (which is true) that
// Get sends HTTP/1.1 or greater requests. Otherwise the
@@ -936,7 +1129,6 @@ func (c *blockingRemoteAddrConn) RemoteAddr() net.Addr {
// Issue 12943
func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
- setParallel(t)
defer afterTest(t)
ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {
fmt.Fprintf(w, "RA:%s", r.RemoteAddr)
@@ -949,21 +1141,22 @@ func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
ts.Start()
defer ts.Close()
- tr := &Transport{DisableKeepAlives: true}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr, Timeout: time.Second}
+ c := ts.Client()
+ c.Timeout = time.Second
+ // Force separate connection for each:
+ c.Transport.(*Transport).DisableKeepAlives = true
- fetch := func(response chan string) {
+ fetch := func(num int, response chan<- string) {
resp, err := c.Get(ts.URL)
if err != nil {
- t.Error(err)
+ t.Errorf("Request %d: %v", num, err)
response <- ""
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- t.Error(err)
+ t.Errorf("Request %d: %v", num, err)
response <- ""
return
}
@@ -972,14 +1165,14 @@ func TestServerAllowsBlockingRemoteAddr(t *testing.T) {
// Start a request. The server will block on getting conn.RemoteAddr.
response1c := make(chan string, 1)
- go fetch(response1c)
+ go fetch(1, response1c)
// Wait for the server to accept it; grab the connection.
conn1 := <-conns
// Start another request and grab its connection
response2c := make(chan string, 1)
- go fetch(response2c)
+ go fetch(2, response2c)
var conn2 net.Conn
select {
@@ -1022,9 +1215,7 @@ func TestIdentityResponseHeaders(t *testing.T) {
}))
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
-
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatalf("Get error: %v", err)
@@ -1145,12 +1336,7 @@ func TestTLSServer(t *testing.T) {
t.Errorf("expected test TLS server to start with https://, got %q", ts.URL)
return
}
- noVerifyTransport := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- }
- client := &Client{Transport: noVerifyTransport}
+ client := ts.Client()
res, err := client.Get(ts.URL)
if err != nil {
t.Error(err)
@@ -1171,6 +1357,59 @@ func TestTLSServer(t *testing.T) {
})
}
+func TestServeTLS(t *testing.T) {
+ // Not parallel: uses global test hooks.
+ defer afterTest(t)
+ defer SetTestHookServerServe(nil)
+
+ cert, err := tls.X509KeyPair(internal.LocalhostCert, internal.LocalhostKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tlsConf := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ ln := newLocalListener(t)
+ defer ln.Close()
+ addr := ln.Addr().String()
+
+ serving := make(chan bool, 1)
+ SetTestHookServerServe(func(s *Server, ln net.Listener) {
+ serving <- true
+ })
+ handler := HandlerFunc(func(w ResponseWriter, r *Request) {})
+ s := &Server{
+ Addr: addr,
+ TLSConfig: tlsConf,
+ Handler: handler,
+ }
+ errc := make(chan error, 1)
+ go func() { errc <- s.ServeTLS(ln, "", "") }()
+ select {
+ case err := <-errc:
+ t.Fatalf("ServeTLS: %v", err)
+ case <-serving:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout")
+ }
+
+ c, err := tls.Dial("tcp", ln.Addr().String(), &tls.Config{
+ InsecureSkipVerify: true,
+ NextProtos: []string{"h2", "http/1.1"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ if got, want := c.ConnectionState().NegotiatedProtocol, "h2"; got != want {
+ t.Errorf("NegotiatedProtocol = %q; want %q", got, want)
+ }
+ if got, want := c.ConnectionState().NegotiatedProtocolIsMutual, true; got != want {
+ t.Errorf("NegotiatedProtocolIsMutual = %v; want %v", got, want)
+ }
+}
+
// Issue 15908
func TestAutomaticHTTP2_Serve_NoTLSConfig(t *testing.T) {
testAutomaticHTTP2_Serve(t, nil, true)
@@ -1967,8 +2206,7 @@ func TestTimeoutHandlerRace(t *testing.T) {
ts := httptest.NewServer(TimeoutHandler(delayHi, 20*time.Millisecond, ""))
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+ c := ts.Client()
var wg sync.WaitGroup
gate := make(chan bool, 10)
@@ -2011,8 +2249,8 @@ func TestTimeoutHandlerRaceHeader(t *testing.T) {
if testing.Short() {
n = 10
}
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+
+ c := ts.Client()
for i := 0; i < n; i++ {
gate <- true
wg.Add(1)
@@ -2099,8 +2337,7 @@ func TestTimeoutHandlerStartTimerWhenServing(t *testing.T) {
ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+ c := ts.Client()
// Issue was caused by the timeout handler starting the timer when it
// was created, not when the request started. So wait for more than the timeout
@@ -2127,8 +2364,7 @@ func TestTimeoutHandlerEmptyResponse(t *testing.T) {
ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
@@ -2364,9 +2600,7 @@ func TestServerWriteHijackZeroBytes(t *testing.T) {
ts.Start()
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -2411,8 +2645,7 @@ func TestStripPrefix(t *testing.T) {
ts := httptest.NewServer(StripPrefix("/foo", h))
defer ts.Close()
- c := &Client{Transport: new(Transport)}
- defer closeClient(c)
+ c := ts.Client()
res, err := c.Get(ts.URL + "/foo/bar")
if err != nil {
@@ -2433,6 +2666,16 @@ func TestStripPrefix(t *testing.T) {
res.Body.Close()
}
+// https://golang.org/issue/18952.
+func TestStripPrefix_notModifyRequest(t *testing.T) {
+ h := StripPrefix("/foo", NotFoundHandler())
+ req := httptest.NewRequest("GET", "/foo/bar", nil)
+ h.ServeHTTP(httptest.NewRecorder(), req)
+ if req.URL.Path != "/foo/bar" {
+ t.Errorf("StripPrefix should not modify the provided Request, but it did")
+ }
+}
+
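For context, the usual StripPrefix pattern this fix protects: because the handler now receives a cloned request, code that inspects r.URL.Path after ServeHTTP returns still sees the original path (a minimal sketch; the paths are placeholders):

    fs := http.FileServer(http.Dir("./static"))
    // A request for /static/app.css is served from ./static/app.css, and the
    // *Request seen upstream keeps its original URL.Path.
    http.Handle("/static/", http.StripPrefix("/static/", fs))
    log.Fatal(http.ListenAndServe(":8080", nil))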
func TestRequestLimit_h1(t *testing.T) { testRequestLimit(t, h1Mode) }
func TestRequestLimit_h2(t *testing.T) { testRequestLimit(t, h2Mode) }
func testRequestLimit(t *testing.T, h2 bool) {
@@ -3512,8 +3755,8 @@ func testTransportAndServerSharedBodyRace(t *testing.T, h2 bool) {
// Test that a hanging Request.Body.Read from another goroutine can't
// cause the Handler goroutine's Request.Body.Close to block.
+// See issue 7121.
func TestRequestBodyCloseDoesntBlock(t *testing.T) {
- t.Skipf("Skipping known issue; see golang.org/issue/7121")
if testing.Short() {
t.Skip("skipping in -short mode")
}
@@ -3644,9 +3887,7 @@ func TestServerConnState(t *testing.T) {
}
ts.Start()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
mustGet := func(url string, headers ...string) {
req, err := NewRequest("GET", url, nil)
@@ -4170,6 +4411,9 @@ func TestServerValidatesHostHeader(t *testing.T) {
// Make an exception for HTTP upgrade requests:
{"PRI * HTTP/2.0", "", 200},
+ // Also an exception for CONNECT requests: (Issue 18215)
+ {"CONNECT golang.org:443 HTTP/1.1", "", 200},
+
// But not other HTTP/2 stuff:
{"PRI / HTTP/2.0", "", 400},
{"GET / HTTP/2.0", "", 400},
@@ -4373,13 +4617,6 @@ func testServerContext_ServerContextKey(t *testing.T, h2 bool) {
if _, ok := got.(*Server); !ok {
t.Errorf("context value = %T; want *http.Server", got)
}
-
- got = ctx.Value(LocalAddrContextKey)
- if addr, ok := got.(net.Addr); !ok {
- t.Errorf("local addr value = %T; want net.Addr", got)
- } else if fmt.Sprint(addr) != r.Host {
- t.Errorf("local addr = %v; want %v", addr, r.Host)
- }
}))
defer cst.close()
res, err := cst.c.Get(cst.ts.URL)
@@ -4389,6 +4626,37 @@ func testServerContext_ServerContextKey(t *testing.T, h2 bool) {
res.Body.Close()
}
+func TestServerContext_LocalAddrContextKey_h1(t *testing.T) {
+ testServerContext_LocalAddrContextKey(t, h1Mode)
+}
+func TestServerContext_LocalAddrContextKey_h2(t *testing.T) {
+ testServerContext_LocalAddrContextKey(t, h2Mode)
+}
+func testServerContext_LocalAddrContextKey(t *testing.T, h2 bool) {
+ setParallel(t)
+ defer afterTest(t)
+ ch := make(chan interface{}, 1)
+ cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- r.Context().Value(LocalAddrContextKey)
+ }))
+ defer cst.close()
+ if _, err := cst.c.Head(cst.ts.URL); err != nil {
+ t.Fatal(err)
+ }
+
+ host := cst.ts.Listener.Addr().String()
+ select {
+ case got := <-ch:
+ if addr, ok := got.(net.Addr); !ok {
+ t.Errorf("local addr value = %T; want net.Addr", got)
+ } else if fmt.Sprint(addr) != host {
+ t.Errorf("local addr = %v; want %v", addr, host)
+ }
+ case <-time.After(5 * time.Second):
+ t.Error("timed out")
+ }
+}
+
// https://golang.org/issue/15960
func TestHandlerSetTransferEncodingChunked(t *testing.T) {
setParallel(t)
@@ -4481,15 +4749,9 @@ func benchmarkClientServerParallel(b *testing.B, parallelism int, useTLS bool) {
b.ResetTimer()
b.SetParallelism(parallelism)
b.RunParallel(func(pb *testing.PB) {
- noVerifyTransport := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- }
- defer noVerifyTransport.CloseIdleConnections()
- client := &Client{Transport: noVerifyTransport}
+ c := ts.Client()
for pb.Next() {
- res, err := client.Get(ts.URL)
+ res, err := c.Get(ts.URL)
if err != nil {
b.Logf("Get: %v", err)
continue
@@ -4924,10 +5186,7 @@ func TestServerIdleTimeout(t *testing.T) {
ts.Config.IdleTimeout = 2 * time.Second
ts.Start()
defer ts.Close()
-
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
get := func() string {
res, err := c.Get(ts.URL)
@@ -4988,9 +5247,8 @@ func TestServerSetKeepAlivesEnabledClosesConns(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
get := func() string { return get(t, c, ts.URL) }
@@ -5030,7 +5288,8 @@ func testServerShutdown(t *testing.T, h2 bool) {
defer afterTest(t)
var doShutdown func() // set later
var shutdownRes = make(chan error, 1)
- cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
+ var gotOnShutdown = make(chan struct{}, 1)
+ handler := HandlerFunc(func(w ResponseWriter, r *Request) {
go doShutdown()
// Shutdown is graceful, so it should not interrupt
// this in-flight response. Add a tiny sleep here to
@@ -5038,7 +5297,10 @@ func testServerShutdown(t *testing.T, h2 bool) {
// bugs.
time.Sleep(20 * time.Millisecond)
io.WriteString(w, r.RemoteAddr)
- }))
+ })
+ cst := newClientServerTest(t, h2, handler, func(srv *httptest.Server) {
+ srv.Config.RegisterOnShutdown(func() { gotOnShutdown <- struct{}{} })
+ })
defer cst.close()
doShutdown = func() {
@@ -5049,6 +5311,11 @@ func testServerShutdown(t *testing.T, h2 bool) {
if err := <-shutdownRes; err != nil {
t.Fatalf("Shutdown: %v", err)
}
+ select {
+ case <-gotOnShutdown:
+ case <-time.After(5 * time.Second):
+ t.Errorf("onShutdown callback not called, RegisterOnShutdown broken?")
+ }
res, err := cst.c.Get(cst.ts.URL)
if err == nil {
@@ -5109,9 +5376,7 @@ func TestServerCancelsReadTimeoutWhenIdle(t *testing.T) {
ts.Start()
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
@@ -5312,3 +5577,41 @@ func TestServerHijackGetsBackgroundByte_big(t *testing.T) {
t.Error("timeout")
}
}
+
+// Issue 18319: test that the Server validates the request method.
+func TestServerValidatesMethod(t *testing.T) {
+ tests := []struct {
+ method string
+ want int
+ }{
+ {"GET", 200},
+ {"GE(T", 400},
+ }
+ for _, tt := range tests {
+ conn := &testConn{closec: make(chan bool, 1)}
+ io.WriteString(&conn.readBuf, tt.method+" / HTTP/1.1\r\nHost: foo.example\r\n\r\n")
+
+ ln := &oneConnListener{conn}
+ go Serve(ln, serve(200))
+ <-conn.closec
+ res, err := ReadResponse(bufio.NewReader(&conn.writeBuf), nil)
+ if err != nil {
+ t.Errorf("For %s, ReadResponse: %v", tt.method, res)
+ continue
+ }
+ if res.StatusCode != tt.want {
+ t.Errorf("For %s, Status = %d; want %d", tt.method, res.StatusCode, tt.want)
+ }
+ }
+}
+
+func BenchmarkResponseStatusLine(b *testing.B) {
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ bw := bufio.NewWriter(ioutil.Discard)
+ var buf3 [3]byte
+ for pb.Next() {
+ Export_writeStatusLine(bw, true, 200, buf3[:])
+ }
+ })
+}
diff --git a/libgo/go/net/http/server.go b/libgo/go/net/http/server.go
index df70a15193b..2fa8ab23d8a 100644
--- a/libgo/go/net/http/server.go
+++ b/libgo/go/net/http/server.go
@@ -75,9 +75,10 @@ var (
// If ServeHTTP panics, the server (the caller of ServeHTTP) assumes
// that the effect of the panic was isolated to the active request.
// It recovers the panic, logs a stack trace to the server error log,
-// and hangs up the connection. To abort a handler so the client sees
-// an interrupted response but the server doesn't log an error, panic
-// with the value ErrAbortHandler.
+// and either closes the network connection or sends an HTTP/2
+// RST_STREAM, depending on the HTTP protocol. To abort a handler so
+// the client sees an interrupted response but the server doesn't log
+// an error, panic with the value ErrAbortHandler.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
@@ -177,6 +178,9 @@ type Hijacker interface {
//
// The returned bufio.Reader may contain unprocessed buffered
// data from the client.
+ //
+ // After a call to Hijack, the original Request.Body should
+ // not be used.
Hijack() (net.Conn, *bufio.ReadWriter, error)
}
@@ -439,9 +443,10 @@ type response struct {
handlerDone atomicBool // set true when the handler exits
- // Buffers for Date and Content-Length
- dateBuf [len(TimeFormat)]byte
- clenBuf [10]byte
+ // Buffers for Date, Content-Length, and status code
+ dateBuf [len(TimeFormat)]byte
+ clenBuf [10]byte
+ statusBuf [3]byte
// closeNotifyCh is the channel returned by CloseNotify.
// TODO(bradfitz): this is currently (for Go 1.8) always
@@ -622,7 +627,6 @@ type connReader struct {
mu sync.Mutex // guards following
hasByte bool
byteBuf [1]byte
- bgErr error // non-nil means error happened on background read
cond *sync.Cond
inRead bool
aborted bool // set true before conn.rwc deadline is set to past
@@ -731,11 +735,6 @@ func (cr *connReader) Read(p []byte) (n int, err error) {
cr.unlock()
return 0, io.EOF
}
- if cr.bgErr != nil {
- err = cr.bgErr
- cr.unlock()
- return 0, err
- }
if len(p) == 0 {
cr.unlock()
return 0, nil
@@ -839,7 +838,7 @@ func (srv *Server) initialReadLimitSize() int64 {
return int64(srv.maxHeaderBytes()) + 4096 // bufio slop
}
-// wrapper around io.ReaderCloser which on first read, sends an
+// wrapper around io.ReadCloser which on first read, sends an
// HTTP/1.1 100 Continue header
type expectContinueReader struct {
resp *response
@@ -948,7 +947,7 @@ func (c *conn) readRequest(ctx context.Context) (w *response, err error) {
hosts, haveHost := req.Header["Host"]
isH2Upgrade := req.isH2Upgrade()
- if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade {
+ if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" {
return nil, badRequestError("missing required Host header")
}
if len(hosts) > 1 {
@@ -1379,7 +1378,7 @@ func (cw *chunkWriter) writeHeader(p []byte) {
}
}
- w.conn.bufw.WriteString(statusLine(w.req, code))
+ writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:])
cw.header.WriteSubset(w.conn.bufw, excludeHeader)
setHeader.Write(w.conn.bufw)
w.conn.bufw.Write(crlf)
@@ -1403,49 +1402,25 @@ func foreachHeaderElement(v string, fn func(string)) {
}
}
-// statusLines is a cache of Status-Line strings, keyed by code (for
-// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
-// map keyed by struct of two fields. This map's max size is bounded
-// by 2*len(statusText), two protocol types for each known official
-// status code in the statusText map.
-var (
- statusMu sync.RWMutex
- statusLines = make(map[int]string)
-)
-
-// statusLine returns a response Status-Line (RFC 2616 Section 6.1)
-// for the given request and response status code.
-func statusLine(req *Request, code int) string {
- // Fast path:
- key := code
- proto11 := req.ProtoAtLeast(1, 1)
- if !proto11 {
- key = -key
- }
- statusMu.RLock()
- line, ok := statusLines[key]
- statusMu.RUnlock()
- if ok {
- return line
- }
-
- // Slow path:
- proto := "HTTP/1.0"
- if proto11 {
- proto = "HTTP/1.1"
- }
- codestring := fmt.Sprintf("%03d", code)
- text, ok := statusText[code]
- if !ok {
- text = "status code " + codestring
+// writeStatusLine writes an HTTP/1.x Status-Line (RFC 2616 Section 6.1)
+// to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0.
+// code is the response status code.
+// scratch is an optional scratch buffer. If it has at least capacity 3, it's used.
+func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
+ if is11 {
+ bw.WriteString("HTTP/1.1 ")
+ } else {
+ bw.WriteString("HTTP/1.0 ")
}
- line = proto + " " + codestring + " " + text + "\r\n"
- if ok {
- statusMu.Lock()
- defer statusMu.Unlock()
- statusLines[key] = line
+ if text, ok := statusText[code]; ok {
+ bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))
+ bw.WriteByte(' ')
+ bw.WriteString(text)
+ bw.WriteString("\r\n")
+ } else {
+ // don't worry about performance
+ fmt.Fprintf(bw, "%03d status code %d\r\n", code, code)
}
- return line
}
// bodyAllowed reports whether a Write is allowed for this response type.
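The replacement drops the statusLines cache and its lock in favor of formatting into a caller-provided scratch buffer. The zero-allocation trick is strconv.AppendInt writing into a reused array (a sketch; bw stands in for the *bufio.Writer that writeStatusLine receives):

    var scratch [3]byte
    // AppendInt writes the decimal digits of 200 into scratch's backing array
    // (via the zero-length slice scratch[:0]) and returns the filled slice,
    // so no allocation happens per response.
    b := strconv.AppendInt(scratch[:0], 200, 10) // b == []byte("200")
    bw.Write(b)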
@@ -1714,6 +1689,7 @@ func isCommonNetReadError(err error) bool {
// Serve a new connection.
func (c *conn) serve(ctx context.Context) {
c.remoteAddr = c.rwc.RemoteAddr().String()
+ ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr())
defer func() {
if err := recover(); err != nil && err != ErrAbortHandler {
const size = 64 << 10
@@ -1973,8 +1949,12 @@ func StripPrefix(prefix string, h Handler) Handler {
}
return HandlerFunc(func(w ResponseWriter, r *Request) {
if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
- r.URL.Path = p
- h.ServeHTTP(w, r)
+ r2 := new(Request)
+ *r2 = *r
+ r2.URL = new(url.URL)
+ *r2.URL = *r.URL
+ r2.URL.Path = p
+ h.ServeHTTP(w, r2)
} else {
NotFound(w, r)
}
@@ -1986,8 +1966,9 @@ func StripPrefix(prefix string, h Handler) Handler {
//
// The provided code should be in the 3xx range and is usually
// StatusMovedPermanently, StatusFound or StatusSeeOther.
-func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
- if u, err := url.Parse(urlStr); err == nil {
+func Redirect(w ResponseWriter, r *Request, url string, code int) {
+ // parseURL is just url.Parse (url is shadowed for godoc).
+ if u, err := parseURL(url); err == nil {
// If url was relative, make absolute by
// combining with request path.
// The browser would probably do this for us,
@@ -2011,39 +1992,43 @@ func Redirect(w ResponseWriter, r *Request, urlStr string, code int) {
}
// no leading http://server
- if urlStr == "" || urlStr[0] != '/' {
+ if url == "" || url[0] != '/' {
// make relative path absolute
olddir, _ := path.Split(oldpath)
- urlStr = olddir + urlStr
+ url = olddir + url
}
var query string
- if i := strings.Index(urlStr, "?"); i != -1 {
- urlStr, query = urlStr[:i], urlStr[i:]
+ if i := strings.Index(url, "?"); i != -1 {
+ url, query = url[:i], url[i:]
}
// clean up but preserve trailing slash
- trailing := strings.HasSuffix(urlStr, "/")
- urlStr = path.Clean(urlStr)
- if trailing && !strings.HasSuffix(urlStr, "/") {
- urlStr += "/"
+ trailing := strings.HasSuffix(url, "/")
+ url = path.Clean(url)
+ if trailing && !strings.HasSuffix(url, "/") {
+ url += "/"
}
- urlStr += query
+ url += query
}
}
- w.Header().Set("Location", hexEscapeNonASCII(urlStr))
+ w.Header().Set("Location", hexEscapeNonASCII(url))
w.WriteHeader(code)
// RFC 2616 recommends that a short note "SHOULD" be included in the
// response because older user agents may not understand 301/307.
// Shouldn't send the response for POST or HEAD; that leaves GET.
if r.Method == "GET" {
- note := "
" + statusText[code] + ".\n"
+ note := "
" + statusText[code] + ".\n"
fmt.Fprintln(w, note)
}
}
+// parseURL is just url.Parse. It exists only so that url.Parse can be called
+// in places where url is shadowed for godoc. See https://golang.org/cl/49930.
+var parseURL = url.Parse
+
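The parameter rename is cosmetic — urlStr becomes url so the godoc reads naturally — and call sites are unchanged (a sketch):

    http.HandleFunc("/old", func(w http.ResponseWriter, r *http.Request) {
        // A relative target is resolved against the request path before the
        // Location header is written.
        http.Redirect(w, r, "/new", http.StatusMovedPermanently)
    })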
var htmlReplacer = strings.NewReplacer(
"&", "&",
"<", "<",
@@ -2163,9 +2148,29 @@ func cleanPath(p string) string {
return np
}
-// Find a handler on a handler map given a path string
-// Most-specific (longest) pattern wins
+// stripHostPort returns h without any trailing ":<port>".
+func stripHostPort(h string) string {
+ // If no port on host, return unchanged
+ if strings.IndexByte(h, ':') == -1 {
+ return h
+ }
+ host, _, err := net.SplitHostPort(h)
+ if err != nil {
+ return h // on error, return unchanged
+ }
+ return host
+}
+
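stripHostPort is unexported, but its behavior is just net.SplitHostPort with an error fallback; a sketch of the cases it handles:

    host, _, err := net.SplitHostPort("example.com:8080")
    fmt.Println(host, err) // example.com <nil>

    // With no port present, SplitHostPort fails, so stripHostPort returns
    // the input unchanged instead of surfacing the error.
    _, _, err = net.SplitHostPort("example.com")
    fmt.Println(err) // address example.com: missing port in address

    host, _, _ = net.SplitHostPort("[::1]:8080")
    fmt.Println(host) // ::1 — IPv6 brackets are stripped too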
+// Find a handler on a handler map given a path string.
+// Most-specific (longest) pattern wins.
func (mux *ServeMux) match(path string) (h Handler, pattern string) {
+ // Check for exact match first.
+ v, ok := mux.m[path]
+ if ok {
+ return v.h, v.pattern
+ }
+
+ // Check for longest valid match.
var n = 0
for k, v := range mux.m {
if !pathMatch(k, path) {
@@ -2184,7 +2189,10 @@ func (mux *ServeMux) match(path string) (h Handler, pattern string) {
// consulting r.Method, r.Host, and r.URL.Path. It always returns
// a non-nil handler. If the path is not in its canonical form, the
// handler will be an internally-generated handler that redirects
-// to the canonical path.
+// to the canonical path. If the host contains a port, it is ignored
+// when matching handlers.
+//
+// The path and host are used unchanged for CONNECT requests.
//
// Handler also returns the registered pattern that matches the
// request or, in the case of internally-generated redirects,
@@ -2193,16 +2201,24 @@ func (mux *ServeMux) match(path string) (h Handler, pattern string) {
// If there is no registered handler that applies to the request,
// Handler returns a ``page not found'' handler and an empty pattern.
func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) {
- if r.Method != "CONNECT" {
- if p := cleanPath(r.URL.Path); p != r.URL.Path {
- _, pattern = mux.handler(r.Host, p)
- url := *r.URL
- url.Path = p
- return RedirectHandler(url.String(), StatusMovedPermanently), pattern
- }
+
+ // CONNECT requests are not canonicalized.
+ if r.Method == "CONNECT" {
+ return mux.handler(r.Host, r.URL.Path)
}
- return mux.handler(r.Host, r.URL.Path)
+ // All other requests have any port stripped and path cleaned
+ // before passing to mux.handler.
+ host := stripHostPort(r.Host)
+ path := cleanPath(r.URL.Path)
+ if path != r.URL.Path {
+ _, pattern = mux.handler(host, path)
+ url := *r.URL
+ url.Path = path
+ return RedirectHandler(url.String(), StatusMovedPermanently), pattern
+ }
+
+ return mux.handler(host, r.URL.Path)
}
// handler is the main implementation of Handler.
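The practical effect of the new stripHostPort call on host-specific patterns (a sketch):

    mux := http.NewServeMux()
    mux.HandleFunc("example.com/", func(w http.ResponseWriter, r *http.Request) {
        io.WriteString(w, "hello")
    })
    // A request carrying "Host: example.com:8080" previously missed this
    // pattern; the port is now stripped before matching. CONNECT targets
    // keep their port and path untouched.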
@@ -2307,12 +2323,27 @@ func Serve(l net.Listener, handler Handler) error {
return srv.Serve(l)
}
+// ServeTLS accepts incoming HTTPS connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+//
+// Handler is typically nil, in which case the DefaultServeMux is used.
+//
+// Additionally, files containing a certificate and matching private key
+// for the server must be provided. If the certificate is signed by a
+// certificate authority, the certFile should be the concatenation
+// of the server's certificate, any intermediates, and the CA's certificate.
+func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error {
+ srv := &Server{Handler: handler}
+ return srv.ServeTLS(l, certFile, keyFile)
+}
+
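A minimal sketch of the new package-level entry point on a caller-owned listener (the certificate file names are placeholders):

    package main

    import (
        "log"
        "net"
        "net/http"
    )

    func main() {
        ln, err := net.Listen("tcp", "127.0.0.1:8443")
        if err != nil {
            log.Fatal(err)
        }
        // ServeTLS wraps ln in a tls.Listener using cert.pem/key.pem and then
        // serves exactly like Serve; it always returns a non-nil error.
        log.Fatal(http.ServeTLS(ln, nil, "cert.pem", "key.pem"))
    }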
// A Server defines parameters for running an HTTP server.
// The zero value for Server is a valid configuration.
type Server struct {
Addr string // TCP address to listen on, ":http" if empty
Handler Handler // handler to invoke, http.DefaultServeMux if nil
- TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS
+ TLSConfig *tls.Config // optional TLS config, used by ServeTLS and ListenAndServeTLS
// ReadTimeout is the maximum duration for reading the entire
// request, including the body.
@@ -2338,7 +2369,7 @@ type Server struct {
// IdleTimeout is the maximum amount of time to wait for the
// next request when keep-alives are enabled. If IdleTimeout
// is zero, the value of ReadTimeout is used. If both are
- // zero, there is no timeout.
+ // zero, ReadHeaderTimeout is used.
IdleTimeout time.Duration
// MaxHeaderBytes controls the maximum number of bytes the
@@ -2379,6 +2410,7 @@ type Server struct {
listeners map[net.Listener]struct{}
activeConn map[*conn]struct{}
doneChan chan struct{}
+ onShutdown []func()
}
func (s *Server) getDoneChan() <-chan struct{} {
@@ -2441,7 +2473,12 @@ var shutdownPollInterval = 500 * time.Millisecond
// listeners, then closing all idle connections, and then waiting
// indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete,
-// then the context's error is returned.
+// Shutdown returns the context's error, otherwise it returns any
+// error returned from closing the Server's underlying Listener(s).
+//
+// When Shutdown is called, Serve, ListenAndServe, and
+// ListenAndServeTLS immediately return ErrServerClosed. Make sure the
+// program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not attempt to close nor wait for hijacked
// connections such as WebSockets. The caller of Shutdown should
@@ -2454,6 +2491,9 @@ func (srv *Server) Shutdown(ctx context.Context) error {
srv.mu.Lock()
lnerr := srv.closeListenersLocked()
srv.closeDoneChanLocked()
+ for _, f := range srv.onShutdown {
+ go f()
+ }
srv.mu.Unlock()
ticker := time.NewTicker(shutdownPollInterval)
@@ -2470,6 +2510,17 @@ func (srv *Server) Shutdown(ctx context.Context) error {
}
}
+// RegisterOnShutdown registers a function to call on Shutdown.
+// This can be used to gracefully shutdown connections that have
+// undergone NPN/ALPN protocol upgrade or that have been hijacked.
+// This function should start protocol-specific graceful shutdown,
+// but should not wait for shutdown to complete.
+func (srv *Server) RegisterOnShutdown(f func()) {
+ srv.mu.Lock()
+ srv.onShutdown = append(srv.onShutdown, f)
+ srv.mu.Unlock()
+}
+
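Taken together with the Shutdown doc change above, the intended pattern looks roughly like this (a sketch):

    srv := &http.Server{Addr: ":8080"}
    srv.RegisterOnShutdown(func() {
        // Start protocol-specific draining of hijacked or ALPN-upgraded
        // connections here; the hook must not block.
        log.Println("draining upgraded connections")
    })

    go func() {
        // ListenAndServe returns ErrServerClosed as soon as Shutdown is
        // called, so the program must keep running until Shutdown returns.
        if err := srv.ListenAndServe(); err != http.ErrServerClosed {
            log.Fatalf("ListenAndServe: %v", err)
        }
    }()

    // ... later, typically on a signal:
    if err := srv.Shutdown(context.Background()); err != nil {
        log.Printf("Shutdown: %v", err)
    }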
// closeIdleConns closes all idle connections and reports whether the
// server is quiescent.
func (s *Server) closeIdleConns() bool {
@@ -2609,6 +2660,8 @@ func (srv *Server) shouldConfigureHTTP2ForServe() bool {
return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS)
}
+// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe,
+// and ListenAndServeTLS methods after a call to Shutdown or Close.
var ErrServerClosed = errors.New("http: Server closed")
// Serve accepts incoming connections on the Listener l, creating a
@@ -2638,7 +2691,6 @@ func (srv *Server) Serve(l net.Listener) error {
baseCtx := context.Background() // base is always background, per Issue 16220
ctx := context.WithValue(baseCtx, ServerContextKey, srv)
- ctx = context.WithValue(ctx, LocalAddrContextKey, l.Addr())
for {
rw, e := l.Accept()
if e != nil {
@@ -2669,6 +2721,49 @@ func (srv *Server) Serve(l net.Listener) error {
}
}
+// ServeTLS accepts incoming connections on the Listener l, creating a
+// new service goroutine for each. The service goroutines read requests and
+// then call srv.Handler to reply to them.
+//
+// Additionally, files containing a certificate and matching private key for
+// the server must be provided if neither the Server's TLSConfig.Certificates
+// nor TLSConfig.GetCertificate are populated. If the certificate is signed by
+// a certificate authority, the certFile should be the concatenation of the
+// server's certificate, any intermediates, and the CA's certificate.
+//
+// For HTTP/2 support, srv.TLSConfig should be initialized to the
+// provided listener's TLS Config before calling Serve. If
+// srv.TLSConfig is non-nil and doesn't include the string "h2" in
+// Config.NextProtos, HTTP/2 support is not enabled.
+//
+// ServeTLS always returns a non-nil error. After Shutdown or Close, the
+// returned error is ErrServerClosed.
+func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error {
+ // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
+ // before we clone it and create the TLS Listener.
+ if err := srv.setupHTTP2_ServeTLS(); err != nil {
+ return err
+ }
+
+ config := cloneTLSConfig(srv.TLSConfig)
+ if !strSliceContains(config.NextProtos, "http/1.1") {
+ config.NextProtos = append(config.NextProtos, "http/1.1")
+ }
+
+ configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
+ if !configHasCert || certFile != "" || keyFile != "" {
+ var err error
+ config.Certificates = make([]tls.Certificate, 1)
+ config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ }
+
+ tlsListener := tls.NewListener(l, config)
+ return srv.Serve(tlsListener)
+}
+
func (s *Server) trackListener(ln net.Listener, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -2840,47 +2935,25 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
addr = ":https"
}
- // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig
- // before we clone it and create the TLS Listener.
- if err := srv.setupHTTP2_ListenAndServeTLS(); err != nil {
- return err
- }
-
- config := cloneTLSConfig(srv.TLSConfig)
- if !strSliceContains(config.NextProtos, "http/1.1") {
- config.NextProtos = append(config.NextProtos, "http/1.1")
- }
-
- configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil
- if !configHasCert || certFile != "" || keyFile != "" {
- var err error
- config.Certificates = make([]tls.Certificate, 1)
- config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return err
- }
- }
-
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
- tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
- return srv.Serve(tlsListener)
+ return srv.ServeTLS(tcpKeepAliveListener{ln.(*net.TCPListener)}, certFile, keyFile)
}
-// setupHTTP2_ListenAndServeTLS conditionally configures HTTP/2 on
+// setupHTTP2_ServeTLS conditionally configures HTTP/2 on
// srv and returns whether there was an error setting it up. If it is
// not configured for policy reasons, nil is returned.
-func (srv *Server) setupHTTP2_ListenAndServeTLS() error {
+func (srv *Server) setupHTTP2_ServeTLS() error {
srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults)
return srv.nextProtoErr
}
// setupHTTP2_Serve is called from (*Server).Serve and conditionally
// configures HTTP/2 on srv using a more conservative policy than
-// setupHTTP2_ListenAndServeTLS because Serve may be called
+// setupHTTP2_ServeTLS because Serve may be called
// concurrently.
//
// The tests named TestTransportAutomaticHTTP2* and
@@ -2907,7 +2980,10 @@ func (srv *Server) onceSetNextProtoDefaults() {
// Enable HTTP/2 by default if the user hasn't otherwise
// configured their TLSNextProto map.
if srv.TLSNextProto == nil {
- srv.nextProtoErr = http2ConfigureServer(srv, nil)
+ conf := &http2Server{
+ NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) },
+ }
+ srv.nextProtoErr = http2ConfigureServer(srv, conf)
}
}
diff --git a/libgo/go/net/http/sniff.go b/libgo/go/net/http/sniff.go
index 0d21b44a560..ecc65e4de64 100644
--- a/libgo/go/net/http/sniff.go
+++ b/libgo/go/net/http/sniff.go
@@ -107,8 +107,8 @@ var sniffSignatures = []sniffSig{
ct: "audio/basic",
},
&maskedSig{
- mask: []byte("OggS\x00"),
- pat: []byte("\x4F\x67\x67\x53\x00"),
+ mask: []byte("\xFF\xFF\xFF\xFF\xFF"),
+ pat: []byte("OggS\x00"),
ct: "application/ogg",
},
&maskedSig{
diff --git a/libgo/go/net/http/sniff_test.go b/libgo/go/net/http/sniff_test.go
index 38f3f8197e9..24f1298e5d9 100644
--- a/libgo/go/net/http/sniff_test.go
+++ b/libgo/go/net/http/sniff_test.go
@@ -45,7 +45,11 @@ var sniffTests = []struct {
{"WAV audio #1", []byte("RIFFb\xb8\x00\x00WAVEfmt \x12\x00\x00\x00\x06"), "audio/wave"},
{"WAV audio #2", []byte("RIFF,\x00\x00\x00WAVEfmt \x12\x00\x00\x00\x06"), "audio/wave"},
{"AIFF audio #1", []byte("FORM\x00\x00\x00\x00AIFFCOMM\x00\x00\x00\x12\x00\x01\x00\x00\x57\x55\x00\x10\x40\x0d\xf3\x34"), "audio/aiff"},
+
{"OGG audio", []byte("OggS\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x7e\x46\x00\x00\x00\x00\x00\x00\x1f\xf6\xb4\xfc\x01\x1e\x01\x76\x6f\x72"), "application/ogg"},
+ {"Must not match OGG", []byte("owow\x00"), "application/octet-stream"},
+ {"Must not match OGG", []byte("oooS\x00"), "application/octet-stream"},
+ {"Must not match OGG", []byte("oggS\x00"), "application/octet-stream"},
// Video types.
{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
diff --git a/libgo/go/net/http/transfer.go b/libgo/go/net/http/transfer.go
index 4f47637aa76..8faff2d74a6 100644
--- a/libgo/go/net/http/transfer.go
+++ b/libgo/go/net/http/transfer.go
@@ -51,6 +51,19 @@ func (br *byteReader) Read(p []byte) (n int, err error) {
return 1, io.EOF
}
+// transferBodyReader is an io.Reader that reads from tw.Body
+// and records any non-EOF error in tw.bodyReadError.
+// It is exactly 1 pointer wide to avoid allocations into interfaces.
+type transferBodyReader struct{ tw *transferWriter }
+
+func (br transferBodyReader) Read(p []byte) (n int, err error) {
+ n, err = br.tw.Body.Read(p)
+ if err != nil && err != io.EOF {
+ br.tw.bodyReadError = err
+ }
+ return
+}
+
// transferWriter inspects the fields of a user-supplied Request or Response,
// sanitizes them without changing the user object and provides methods for
// writing the respective header, body and trailer in wire format.
@@ -62,8 +75,10 @@ type transferWriter struct {
ContentLength int64 // -1 means unknown, 0 means exactly none
Close bool
TransferEncoding []string
+ Header Header
Trailer Header
IsResponse bool
+ bodyReadError error // any non-EOF error from reading Body
FlushHeaders bool // flush headers to network before body
ByteReadCh chan readResult // non-nil if probeRequestBody called
@@ -82,14 +97,15 @@ func newTransferWriter(r interface{}) (t *transferWriter, err error) {
t.Method = valueOrDefault(rr.Method, "GET")
t.Close = rr.Close
t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
t.Trailer = rr.Trailer
- atLeastHTTP11 = rr.protoAtLeastOutgoing(1, 1)
t.Body = rr.Body
t.BodyCloser = rr.Body
t.ContentLength = rr.outgoingLength()
- if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && atLeastHTTP11 && t.shouldSendChunkedRequestBody() {
+ if t.ContentLength < 0 && len(t.TransferEncoding) == 0 && t.shouldSendChunkedRequestBody() {
t.TransferEncoding = []string{"chunked"}
}
+ atLeastHTTP11 = true // Transport requests are always 1.1 or 2.0
case *Response:
t.IsResponse = true
if rr.Request != nil {
@@ -100,6 +116,7 @@ func newTransferWriter(r interface{}) (t *transferWriter, err error) {
t.ContentLength = rr.ContentLength
t.Close = rr.Close
t.TransferEncoding = rr.TransferEncoding
+ t.Header = rr.Header
t.Trailer = rr.Trailer
atLeastHTTP11 = rr.ProtoAtLeast(1, 1)
t.ResponseToHEAD = noResponseBodyExpected(t.Method)
@@ -252,7 +269,7 @@ func (t *transferWriter) shouldSendContentLength() bool {
}
func (t *transferWriter) WriteHeader(w io.Writer) error {
- if t.Close {
+ if t.Close && !hasToken(t.Header.get("Connection"), "close") {
if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil {
return err
}
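The hasToken guard exists for callers that set both Close and the header explicitly (a sketch of the previously duplicated case):

    req, _ := http.NewRequest("GET", "http://example.com/", nil)
    req.Close = true
    req.Header.Set("Connection", "close")
    // Before this change the wire request carried "Connection: close" twice;
    // WriteHeader now skips its own copy when the token is already present.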
@@ -304,24 +321,25 @@ func (t *transferWriter) WriteBody(w io.Writer) error {
// Write body
if t.Body != nil {
+ var body = transferBodyReader{t}
if chunked(t.TransferEncoding) {
if bw, ok := w.(*bufio.Writer); ok && !t.IsResponse {
w = &internal.FlushAfterChunkWriter{Writer: bw}
}
cw := internal.NewChunkedWriter(w)
- _, err = io.Copy(cw, t.Body)
+ _, err = io.Copy(cw, body)
if err == nil {
err = cw.Close()
}
} else if t.ContentLength == -1 {
- ncopy, err = io.Copy(w, t.Body)
+ ncopy, err = io.Copy(w, body)
} else {
- ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength))
+ ncopy, err = io.Copy(w, io.LimitReader(body, t.ContentLength))
if err != nil {
return err
}
var nextra int64
- nextra, err = io.Copy(ioutil.Discard, t.Body)
+ nextra, err = io.Copy(ioutil.Discard, body)
ncopy += nextra
}
if err != nil {
diff --git a/libgo/go/net/http/transport.go b/libgo/go/net/http/transport.go
index 571943d6e5c..6a89392a996 100644
--- a/libgo/go/net/http/transport.go
+++ b/libgo/go/net/http/transport.go
@@ -29,6 +29,7 @@ import (
"time"
"golang_org/x/net/lex/httplex"
+ "golang_org/x/net/proxy"
)
// DefaultTransport is the default implementation of Transport and is
@@ -88,6 +89,11 @@ type Transport struct {
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// request is aborted with the provided error.
+ //
+ // The proxy type is determined by the URL scheme. "http"
+ // and "socks5" are supported. If the scheme is empty,
+ // "http" is assumed.
+ //
// If Proxy is nil or returns a nil *URL, no proxy is used.
Proxy func(*Request) (*url.URL, error)
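With the scheme now documented, pointing a Transport at a SOCKS5 proxy is a one-liner (a sketch; the proxy address is a placeholder):

    proxyURL, err := url.Parse("socks5://127.0.0.1:1080")
    if err != nil {
        log.Fatal(err)
    }
    // The Transport dials the proxy, performs the SOCKS5 handshake, then
    // speaks plain HTTP (or TLS, for https targets) over that connection.
    tr := &http.Transport{Proxy: http.ProxyURL(proxyURL)}
    c := &http.Client{Transport: tr}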
@@ -275,13 +281,17 @@ func ProxyFromEnvironment(req *Request) (*url.URL, error) {
return nil, nil
}
proxyURL, err := url.Parse(proxy)
- if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
+ if err != nil ||
+ (proxyURL.Scheme != "http" &&
+ proxyURL.Scheme != "https" &&
+ proxyURL.Scheme != "socks5") {
// proxy was bogus. Try prepending "http://" to it and
// see if that parses correctly. If not, we fall
// through and complain about the original one.
if proxyURL, err := url.Parse("http://" + proxy); err == nil {
return proxyURL, nil
}
+
}
if err != nil {
return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
@@ -298,11 +308,15 @@ func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) {
}
// transportRequest is a wrapper around a *Request that adds
-// optional extra headers to write.
+// optional extra headers to write and stores any error to return
+// from roundTrip.
type transportRequest struct {
*Request // original request, not to be mutated
extra Header // extra headers to write, or nil
trace *httptrace.ClientTrace // optional
+
+ mu sync.Mutex // guards err
+ err error // first setError value for mapRoundTripError to consider
}
func (tr *transportRequest) extraHeaders() Header {
@@ -312,6 +326,14 @@ func (tr *transportRequest) extraHeaders() Header {
return tr.extra
}
+func (tr *transportRequest) setError(err error) {
+ tr.mu.Lock()
+ if tr.err == nil {
+ tr.err = err
+ }
+ tr.mu.Unlock()
+}
+
// RoundTrip implements the RoundTripper interface.
//
// For higher-level HTTP client support (such as handling of cookies
@@ -402,6 +424,18 @@ func (t *Transport) RoundTrip(req *Request) (*Response, error) {
return nil, err
}
testHookRoundTripRetried()
+
+ // Rewind the body if we're able to. (HTTP/2 does this itself so we only
+ // need to do it for HTTP/1.1 connections.)
+ if req.GetBody != nil && pconn.alt == nil {
+ newReq := *req
+ var err error
+ newReq.Body, err = req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ req = &newReq
+ }
}
}
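The rewind relies on Request.GetBody, which NewRequest fills in automatically for common in-memory body types (a sketch):

    req, _ := http.NewRequest("POST", "http://example.com/", strings.NewReader("payload"))
    // NewRequest populates GetBody for *bytes.Buffer, *bytes.Reader and
    // *strings.Reader bodies, so this request can be transparently retried
    // if the first write lands on a dying keep-alive connection.
    fmt.Println(req.GetBody != nil) // true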
@@ -433,8 +467,9 @@ func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool {
return false
}
if _, ok := err.(nothingWrittenError); ok {
- // We never wrote anything, so it's safe to retry.
- return true
+ // We never wrote anything, so it's safe to retry, if there's no body or we
+ // can "rewind" the body with GetBody.
+ return req.outgoingLength() == 0 || req.GetBody != nil
}
if !req.isReplayable() {
// Don't retry non-idempotent requests.
@@ -788,7 +823,7 @@ func (t *Transport) removeIdleConnLocked(pconn *persistConn) {
}
t.idleLRU.remove(pconn)
key := pconn.cacheKey
- pconns, _ := t.idleConn[key]
+ pconns := t.idleConn[key]
switch len(pconns) {
case 0:
// Nothing
@@ -964,6 +999,23 @@ func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (*persistC
}
}
+type oneConnDialer <-chan net.Conn
+
+func newOneConnDialer(c net.Conn) proxy.Dialer {
+ ch := make(chan net.Conn, 1)
+ ch <- c
+ return oneConnDialer(ch)
+}
+
+func (d oneConnDialer) Dial(network, addr string) (net.Conn, error) {
+ select {
+ case c := <-d:
+ return c, nil
+ default:
+ return nil, io.EOF
+ }
+}
+
func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (*persistConn, error) {
pconn := &persistConn{
t: t,
@@ -1020,6 +1072,23 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (*persistCon
switch {
case cm.proxyURL == nil:
// Do nothing. Not using a proxy.
+ case cm.proxyURL.Scheme == "socks5":
+ conn := pconn.conn
+ var auth *proxy.Auth
+ if u := cm.proxyURL.User; u != nil {
+ auth = &proxy.Auth{}
+ auth.User = u.Username()
+ auth.Password, _ = u.Password()
+ }
+ p, err := proxy.SOCKS5("", cm.addr(), auth, newOneConnDialer(conn))
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if _, err := p.Dial("tcp", cm.targetAddr); err != nil {
+ conn.Close()
+ return nil, err
+ }
case cm.targetScheme == "http":
pconn.isProxy = true
if pa := cm.proxyAuth(); pa != "" {
@@ -1176,6 +1245,10 @@ func useProxy(addr string) bool {
if addr == p {
return false
}
+ if len(p) == 0 {
+ // There is no host part, likely the entry is malformed; ignore.
+ continue
+ }
if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) {
// no_proxy ".foo.com" matches "bar.foo.com" or "foo.com"
return false
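The new guard matters because a doubled (or trailing) comma in NO_PROXY produces an empty entry (a sketch):

    for _, p := range strings.Split("foo.com,,bar.com", ",") {
        // p takes the values "foo.com", "" and "bar.com"; without the new
        // len(p) == 0 check, the p[0] index below would panic on "".
        _ = p
    }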
@@ -1193,19 +1266,21 @@ func useProxy(addr string) bool {
//
// A connect method may be of the following types:
//
-// Cache key form Description
-// ----------------- -------------------------
-// |http|foo.com http directly to server, no proxy
-// |https|foo.com https directly to server, no proxy
-// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
-// http://proxy.com|http http to proxy, http to anywhere after that
+// Cache key form Description
+// ----------------- -------------------------
+// |http|foo.com http directly to server, no proxy
+// |https|foo.com https directly to server, no proxy
+// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com
+// http://proxy.com|http http to proxy, http to anywhere after that
+// socks5://proxy.com|http|foo.com socks5 to proxy, then http to foo.com
+// socks5://proxy.com|https|foo.com socks5 to proxy, then https to foo.com
//
// Note: no support to https to the proxy yet.
//
type connectMethod struct {
proxyURL *url.URL // nil for no proxy, else full proxy URL
targetScheme string // "http" or "https"
- targetAddr string // Not used if proxy + http targetScheme (4th example in table)
+ targetAddr string // Not used if http proxy + http targetScheme (4th example in table)
}
func (cm *connectMethod) key() connectMethodKey {
@@ -1213,7 +1288,7 @@ func (cm *connectMethod) key() connectMethodKey {
targetAddr := cm.targetAddr
if cm.proxyURL != nil {
proxyStr = cm.proxyURL.String()
- if cm.targetScheme == "http" {
+ if strings.HasPrefix(cm.proxyURL.Scheme, "http") && cm.targetScheme == "http" {
targetAddr = ""
}
}
@@ -1379,63 +1454,53 @@ func (pc *persistConn) closeConnIfStillIdle() {
pc.close(errIdleConnTimeout)
}
-// mapRoundTripErrorFromReadLoop maps the provided readLoop error into
-// the error value that should be returned from persistConn.roundTrip.
+// mapRoundTripError returns the appropriate error value for
+// persistConn.roundTrip.
+//
+// The provided err is the first error that (*persistConn).roundTrip
+// happened to receive from its select statement.
//
// The startBytesWritten value should be the value of pc.nwrite before the roundTrip
// started writing the request.
-func (pc *persistConn) mapRoundTripErrorFromReadLoop(req *Request, startBytesWritten int64, err error) (out error) {
+func (pc *persistConn) mapRoundTripError(req *transportRequest, startBytesWritten int64, err error) error {
if err == nil {
return nil
}
- if err := pc.canceled(); err != nil {
- return err
+
+ // If the request was canceled, that's better than network
+ // failures that were likely the result of tearing down the
+ // connection.
+ if cerr := pc.canceled(); cerr != nil {
+ return cerr
+ }
+
+ // See if an error was set explicitly.
+ req.mu.Lock()
+ reqErr := req.err
+ req.mu.Unlock()
+ if reqErr != nil {
+ return reqErr
}
+
if err == errServerClosedIdle {
+ // Don't decorate
return err
}
+
if _, ok := err.(transportReadFromServerError); ok {
+ // Don't decorate
return err
}
if pc.isBroken() {
<-pc.writeLoopDone
- if pc.nwrite == startBytesWritten && req.outgoingLength() == 0 {
+ if pc.nwrite == startBytesWritten {
return nothingWrittenError{err}
}
+ return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
}
return err
}
-// mapRoundTripErrorAfterClosed returns the error value to be propagated
-// up to Transport.RoundTrip method when persistConn.roundTrip sees
-// its pc.closech channel close, indicating the persistConn is dead.
-// (after closech is closed, pc.closed is valid).
-func (pc *persistConn) mapRoundTripErrorAfterClosed(req *Request, startBytesWritten int64) error {
- if err := pc.canceled(); err != nil {
- return err
- }
- err := pc.closed
- if err == errServerClosedIdle {
- // Don't decorate
- return err
- }
- if _, ok := err.(transportReadFromServerError); ok {
- // Don't decorate
- return err
- }
-
- // Wait for the writeLoop goroutine to terminated, and then
- // see if we actually managed to write anything. If not, we
- // can retry the request.
- <-pc.writeLoopDone
- if pc.nwrite == startBytesWritten && req.outgoingLength() == 0 {
- return nothingWrittenError{err}
- }
-
- return fmt.Errorf("net/http: HTTP/1.x transport connection broken: %v", err)
-
-}
-
func (pc *persistConn) readLoop() {
closeErr := errReadLoopExiting // default value, if not changed below
defer func() {
@@ -1497,16 +1562,6 @@ func (pc *persistConn) readLoop() {
err = fmt.Errorf("net/http: server response headers exceeded %d bytes; aborted", pc.maxHeaderResponseSize())
}
- // If we won't be able to retry this request later (from the
- // roundTrip goroutine), mark it as done now.
- // BEFORE the send on rc.ch, as the client might re-use the
- // same *Request pointer, and we don't want to set call
- // t.setReqCanceler from this persistConn while the Transport
- // potentially spins up a different persistConn for the
- // caller's subsequent request.
- if !pc.shouldRetryRequest(rc.req, err) {
- pc.t.setReqCanceler(rc.req, nil)
- }
select {
case rc.ch <- responseAndError{err: err}:
case <-rc.callerGone:
@@ -1579,7 +1634,7 @@ func (pc *persistConn) readLoop() {
}
resp.Body = body
- if rc.addedGzip && resp.Header.Get("Content-Encoding") == "gzip" {
+ if rc.addedGzip && strings.EqualFold(resp.Header.Get("Content-Encoding"), "gzip") {
resp.Body = &gzipReader{body: body}
resp.Header.Del("Content-Encoding")
resp.Header.Del("Content-Length")
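Header values are case-insensitive, so a server replying "Content-Encoding: GZIP" is now transparently decompressed as well (a one-line sketch):

    fmt.Println(strings.EqualFold("GZIP", "gzip")) // true — a plain == compare is false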
@@ -1705,12 +1760,23 @@ func (pc *persistConn) writeLoop() {
case wr := <-pc.writech:
startBytesWritten := pc.nwrite
err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra, pc.waitForContinue(wr.continueCh))
+ if bre, ok := err.(requestBodyReadError); ok {
+ err = bre.error
+ // Errors reading from the user's
+ // Request.Body are high priority.
+ // Set it here before sending on the
+ // channels below or calling
+ // pc.close() which tears down
+ // connections and causes other
+ // errors.
+ wr.req.setError(err)
+ }
if err == nil {
err = pc.bw.Flush()
}
if err != nil {
wr.req.Request.closeBody()
- if pc.nwrite == startBytesWritten && wr.req.outgoingLength() == 0 {
+ if pc.nwrite == startBytesWritten {
err = nothingWrittenError{err}
}
}
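The point of requestBodyReadError and setError: when the user's own Body fails, that error should win over the connection-teardown noise it triggers (a sketch; srvURL and client are placeholders):

    type errReader struct{}

    func (errReader) Read(p []byte) (int, error) { return 0, errors.New("boom") }

    // Inside some client code:
    req, _ := http.NewRequest("POST", srvURL, errReader{})
    _, err := client.Do(req)
    // err now reports "boom" itself rather than a decorated
    // "transport connection broken" message, because the write loop records
    // body read errors via transportRequest.setError before closing the conn.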
@@ -1872,6 +1938,14 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
gone := make(chan struct{})
defer close(gone)
+ defer func() {
+ if err != nil {
+ pc.t.setReqCanceler(req.Request, nil)
+ }
+ }()
+
+ const debugRoundTrip = false
+
// Write the request concurrently with waiting for a response,
// in case the server decides to reply before reading our full
// request body.
@@ -1888,38 +1962,50 @@ func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err err
callerGone: gone,
}
- var re responseAndError
var respHeaderTimer <-chan time.Time
cancelChan := req.Request.Cancel
ctxDoneChan := req.Context().Done()
-WaitResponse:
for {
testHookWaitResLoop()
select {
case err := <-writeErrCh:
+ if debugRoundTrip {
+ req.logf("writeErrCh resv: %T/%#v", err, err)
+ }
if err != nil {
- if cerr := pc.canceled(); cerr != nil {
- err = cerr
- }
- re = responseAndError{err: err}
pc.close(fmt.Errorf("write error: %v", err))
- break WaitResponse
+ return nil, pc.mapRoundTripError(req, startBytesWritten, err)
}
if d := pc.t.ResponseHeaderTimeout; d > 0 {
+ if debugRoundTrip {
+ req.logf("starting timer for %v", d)
+ }
timer := time.NewTimer(d)
defer timer.Stop() // prevent leaks
respHeaderTimer = timer.C
}
case <-pc.closech:
- re = responseAndError{err: pc.mapRoundTripErrorAfterClosed(req.Request, startBytesWritten)}
- break WaitResponse
+ if debugRoundTrip {
+ req.logf("closech recv: %T %#v", pc.closed, pc.closed)
+ }
+ return nil, pc.mapRoundTripError(req, startBytesWritten, pc.closed)
case <-respHeaderTimer:
+ if debugRoundTrip {
+ req.logf("timeout waiting for response headers.")
+ }
pc.close(errTimeout)
- re = responseAndError{err: errTimeout}
- break WaitResponse
- case re = <-resc:
- re.err = pc.mapRoundTripErrorFromReadLoop(req.Request, startBytesWritten, re.err)
- break WaitResponse
+ return nil, errTimeout
+ case re := <-resc:
+ if (re.res == nil) == (re.err == nil) {
+ panic(fmt.Sprintf("internal error: exactly one of res or err should be set; nil=%v", re.res == nil))
+ }
+ if debugRoundTrip {
+ req.logf("resc recv: %p, %T/%#v", re.res, re.err, re.err)
+ }
+ if re.err != nil {
+ return nil, pc.mapRoundTripError(req, startBytesWritten, re.err)
+ }
+ return re.res, nil
case <-cancelChan:
pc.t.CancelRequest(req.Request)
cancelChan = nil
@@ -1929,14 +2015,16 @@ WaitResponse:
ctxDoneChan = nil
}
}
+}
- if re.err != nil {
- pc.t.setReqCanceler(req.Request, nil)
- }
- if (re.res == nil) == (re.err == nil) {
- panic("internal error: exactly one of res or err should be set")
+// tLogKey is a context WithValue key for test debugging contexts containing
+// a t.Logf func. See export_test.go's Request.WithT method.
+type tLogKey struct{}
+
+func (r *transportRequest) logf(format string, args ...interface{}) {
+ if logf, ok := r.Request.Context().Value(tLogKey{}).(func(string, ...interface{})); ok {
+ logf(time.Now().Format(time.RFC3339Nano)+": "+format, args...)
}
- return re.res, re.err
}
// markReused marks this connection as having been successfully used for a
@@ -1982,8 +2070,9 @@ func (pc *persistConn) closeLocked(err error) {
}
var portMap = map[string]string{
- "http": "80",
- "https": "443",
+ "http": "80",
+ "https": "443",
+ "socks5": "1080",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
diff --git a/libgo/go/net/http/transport_internal_test.go b/libgo/go/net/http/transport_internal_test.go
index 3d24fc127d4..594bf6e2c83 100644
--- a/libgo/go/net/http/transport_internal_test.go
+++ b/libgo/go/net/http/transport_internal_test.go
@@ -9,6 +9,7 @@ package http
import (
"errors"
"net"
+ "strings"
"testing"
)
@@ -30,6 +31,7 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) {
tr := new(Transport)
req, _ := NewRequest("GET", "http://"+ln.Addr().String(), nil)
+ req = req.WithT(t)
treq := &transportRequest{Request: req}
cm := connectMethod{targetScheme: "http", targetAddr: ln.Addr().String()}
pc, err := tr.getConn(treq, cm)
@@ -47,13 +49,13 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) {
_, err = pc.roundTrip(treq)
if !isTransportReadFromServerError(err) && err != errServerClosedIdle {
- t.Fatalf("roundTrip = %#v, %v; want errServerClosedConn or errServerClosedIdle", err, err)
+ t.Errorf("roundTrip = %#v, %v; want errServerClosedIdle or transportReadFromServerError", err, err)
}
<-pc.closech
err = pc.closed
if !isTransportReadFromServerError(err) && err != errServerClosedIdle {
- t.Fatalf("pc.closed = %#v, %v; want errServerClosedConn or errServerClosedIdle", err, err)
+ t.Errorf("pc.closed = %#v, %v; want errServerClosedIdle or transportReadFromServerError", err, err)
}
}
@@ -80,6 +82,19 @@ func dummyRequest(method string) *Request {
}
return req
}
+func dummyRequestWithBody(method string) *Request {
+ req, err := NewRequest(method, "http://fake.tld/", strings.NewReader("foo"))
+ if err != nil {
+ panic(err)
+ }
+ return req
+}
+
+func dummyRequestWithBodyNoGetBody(method string) *Request {
+ req := dummyRequestWithBody(method)
+ req.GetBody = nil
+ return req
+}
func TestTransportShouldRetryRequest(t *testing.T) {
tests := []struct {
@@ -131,6 +146,18 @@ func TestTransportShouldRetryRequest(t *testing.T) {
err: errServerClosedIdle,
want: true,
},
+ 7: {
+ pc: &persistConn{reused: true},
+ req: dummyRequestWithBody("POST"),
+ err: nothingWrittenError{},
+ want: true,
+ },
+ 8: {
+ pc: &persistConn{reused: true},
+ req: dummyRequestWithBodyNoGetBody("POST"),
+ err: nothingWrittenError{},
+ want: false,
+ },
}
for i, tt := range tests {
got := tt.pc.shouldRetryRequest(tt.req, tt.err)
diff --git a/libgo/go/net/http/transport_test.go b/libgo/go/net/http/transport_test.go
index a58b1839cc6..27b55dca2f3 100644
--- a/libgo/go/net/http/transport_test.go
+++ b/libgo/go/net/http/transport_test.go
@@ -16,6 +16,7 @@ import (
"context"
"crypto/rand"
"crypto/tls"
+ "encoding/binary"
"errors"
"fmt"
"internal/nettrace"
@@ -130,11 +131,9 @@ func TestTransportKeepAlives(t *testing.T) {
ts := httptest.NewServer(hostPortHandler)
defer ts.Close()
+ c := ts.Client()
for _, disableKeepAlive := range []bool{false, true} {
- tr := &Transport{DisableKeepAlives: disableKeepAlive}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
-
+ c.Transport.(*Transport).DisableKeepAlives = disableKeepAlive
fetch := func(n int) string {
res, err := c.Get(ts.URL)
if err != nil {
@@ -165,12 +164,11 @@ func TestTransportConnectionCloseOnResponse(t *testing.T) {
connSet, testDial := makeTestDial(t)
- for _, connectionClose := range []bool{false, true} {
- tr := &Transport{
- Dial: testDial,
- }
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
+ tr.Dial = testDial
+ for _, connectionClose := range []bool{false, true} {
fetch := func(n int) string {
req := new(Request)
var err error
@@ -216,12 +214,10 @@ func TestTransportConnectionCloseOnRequest(t *testing.T) {
connSet, testDial := makeTestDial(t)
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
+ tr.Dial = testDial
for _, connectionClose := range []bool{false, true} {
- tr := &Transport{
- Dial: testDial,
- }
- c := &Client{Transport: tr}
-
fetch := func(n int) string {
req := new(Request)
var err error
@@ -272,10 +268,9 @@ func TestTransportConnectionCloseOnRequestDisableKeepAlive(t *testing.T) {
ts := httptest.NewServer(hostPortHandler)
defer ts.Close()
- tr := &Transport{
- DisableKeepAlives: true,
- }
- c := &Client{Transport: tr}
+ c := ts.Client()
+ c.Transport.(*Transport).DisableKeepAlives = true
+
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -290,9 +285,8 @@ func TestTransportIdleCacheKeys(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(hostPortHandler)
defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g {
t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g)
@@ -384,9 +378,11 @@ func TestTransportMaxPerHostIdleConns(t *testing.T) {
}
}))
defer ts.Close()
+
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
maxIdleConnsPerHost := 2
- tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConnsPerHost}
- c := &Client{Transport: tr}
+ tr.MaxIdleConnsPerHost = maxIdleConnsPerHost
// Start 3 outstanding requests and wait for the server to get them.
// Their responses will hang until we write to resch, though.
@@ -449,9 +445,8 @@ func TestTransportRemovesDeadIdleConnections(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
doReq := func(name string) string {
// Do a POST instead of a GET to prevent the Transport's
@@ -495,9 +490,7 @@ func TestTransportServerClosingUnexpectedly(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(hostPortHandler)
defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
+ c := ts.Client()
fetch := func(n, retries int) string {
condFatalf := func(format string, arg ...interface{}) {
@@ -563,10 +556,7 @@ func TestStressSurpriseServerCloses(t *testing.T) {
conn.Close()
}))
defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
- defer tr.CloseIdleConnections()
+ c := ts.Client()
// Do a bunch of traffic from different goroutines. Send to activityc
// after each request completes, regardless of whether it failed.
@@ -619,9 +609,8 @@ func TestTransportHeadResponses(t *testing.T) {
w.WriteHeader(200)
}))
defer ts.Close()
+ c := ts.Client()
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
for i := 0; i < 2; i++ {
res, err := c.Head(ts.URL)
if err != nil {
@@ -655,10 +644,7 @@ func TestTransportHeadChunkedResponse(t *testing.T) {
w.WriteHeader(200)
}))
defer ts.Close()
-
- tr := &Transport{DisableKeepAlives: false}
- c := &Client{Transport: tr}
- defer tr.CloseIdleConnections()
+ c := ts.Client()
// Ensure that we wait for the readLoop to complete before
// calling Head again
@@ -719,6 +705,7 @@ func TestRoundTripGzip(t *testing.T) {
}
}))
defer ts.Close()
+ tr := ts.Client().Transport.(*Transport)
for i, test := range roundTripTests {
// Test basic request (no accept-encoding)
@@ -726,7 +713,7 @@ func TestRoundTripGzip(t *testing.T) {
if test.accept != "" {
req.Header.Set("Accept-Encoding", test.accept)
}
- res, err := DefaultTransport.RoundTrip(req)
+ res, err := tr.RoundTrip(req)
var body []byte
if test.compressed {
var r *gzip.Reader
@@ -791,10 +778,9 @@ func TestTransportGzip(t *testing.T) {
gz.Close()
}))
defer ts.Close()
+ c := ts.Client()
for _, chunked := range []string{"1", "0"} {
- c := &Client{Transport: &Transport{}}
-
// First fetch something large, but only read some of it.
res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked)
if err != nil {
@@ -844,7 +830,6 @@ func TestTransportGzip(t *testing.T) {
}
// And a HEAD request too, because they're always weird.
- c := &Client{Transport: &Transport{}}
res, err := c.Head(ts.URL)
if err != nil {
t.Fatalf("Head: %v", err)
@@ -914,11 +899,13 @@ func TestTransportExpect100Continue(t *testing.T) {
{path: "/timeout", body: []byte("hello"), sent: 5, status: 200}, // Timeout exceeded and entire body is sent.
}
+ c := ts.Client()
for i, v := range tests {
- tr := &Transport{ExpectContinueTimeout: 2 * time.Second}
+ tr := &Transport{
+ ExpectContinueTimeout: 2 * time.Second,
+ }
defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
-
+ c.Transport = tr
body := bytes.NewReader(v.body)
req, err := NewRequest("PUT", ts.URL+v.path, body)
if err != nil {
@@ -943,6 +930,99 @@ func TestTransportExpect100Continue(t *testing.T) {
}
}
+func TestSocks5Proxy(t *testing.T) {
+ defer afterTest(t)
+ ch := make(chan string, 1)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ch <- "real server"
+ }))
+ defer ts.Close()
+ l := newLocalListener(t)
+ defer l.Close()
+ go func() {
+ defer close(ch)
+ s, err := l.Accept()
+ if err != nil {
+ t.Errorf("socks5 proxy Accept(): %v", err)
+ return
+ }
+ defer s.Close()
+ var buf [22]byte
+ if _, err := io.ReadFull(s, buf[:3]); err != nil {
+ t.Errorf("socks5 proxy initial read: %v", err)
+ return
+ }
+ if want := []byte{5, 1, 0}; !bytes.Equal(buf[:3], want) {
+ t.Errorf("socks5 proxy initial read: got %v, want %v", buf[:3], want)
+ return
+ }
+ if _, err := s.Write([]byte{5, 0}); err != nil {
+ t.Errorf("socks5 proxy initial write: %v", err)
+ return
+ }
+ if _, err := io.ReadFull(s, buf[:4]); err != nil {
+ t.Errorf("socks5 proxy second read: %v", err)
+ return
+ }
+ if want := []byte{5, 1, 0}; !bytes.Equal(buf[:3], want) {
+ t.Errorf("socks5 proxy second read: got %v, want %v", buf[:3], want)
+ return
+ }
+ var ipLen int
+ switch buf[3] {
+ case 1:
+ ipLen = 4
+ case 4:
+ ipLen = 16
+ default:
+ t.Errorf("socks5 proxy second read: unexpected address type %v", buf[3])
+ return
+ }
+ if _, err := io.ReadFull(s, buf[4:ipLen+6]); err != nil {
+ t.Errorf("socks5 proxy address read: %v", err)
+ return
+ }
+ ip := net.IP(buf[4 : ipLen+4])
+ port := binary.BigEndian.Uint16(buf[ipLen+4 : ipLen+6])
+ copy(buf[:3], []byte{5, 0, 0})
+ if _, err := s.Write(buf[:ipLen+6]); err != nil {
+ t.Errorf("socks5 proxy connect write: %v", err)
+ return
+ }
+ done := make(chan struct{})
+ srv := &Server{Handler: HandlerFunc(func(w ResponseWriter, r *Request) {
+ done <- struct{}{}
+ })}
+ srv.Serve(&oneConnListener{conn: s})
+ <-done
+ srv.Shutdown(context.Background())
+ ch <- fmt.Sprintf("proxy for %s:%d", ip, port)
+ }()
+
+ pu, err := url.Parse("socks5://" + l.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := ts.Client()
+ c.Transport.(*Transport).Proxy = ProxyURL(pu)
+ if _, err := c.Head(ts.URL); err != nil {
+ t.Error(err)
+ }
+ var got string
+ select {
+ case got = <-ch:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout connecting to socks5 proxy")
+ }
+ tsu, err := url.Parse(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "proxy for " + tsu.Host
+ if got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
func TestTransportProxy(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
@@ -959,12 +1039,20 @@ func TestTransportProxy(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}}
- c.Head(ts.URL)
- got := <-ch
+ c := ts.Client()
+ c.Transport.(*Transport).Proxy = ProxyURL(pu)
+ if _, err := c.Head(ts.URL); err != nil {
+ t.Error(err)
+ }
+ var got string
+ select {
+ case got = <-ch:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout connecting to http proxy")
+ }
want := "proxy for " + ts.URL + "/"
if got != want {
- t.Errorf("want %q, got %q", want, got)
+ t.Errorf("got %q, want %q", got, want)
}
}
@@ -1022,9 +1110,7 @@ func TestTransportGzipRecursive(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -1052,9 +1138,7 @@ func TestTransportGzipShort(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -1095,9 +1179,8 @@ func TestTransportPersistConnLeak(t *testing.T) {
w.WriteHeader(204)
}))
defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
n0 := runtime.NumGoroutine()
@@ -1160,9 +1243,8 @@ func TestTransportPersistConnLeakShortBody(t *testing.T) {
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
}))
defer ts.Close()
-
- tr := &Transport{}
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
n0 := runtime.NumGoroutine()
body := []byte("Hello")
@@ -1194,8 +1276,7 @@ func TestTransportPersistConnLeakShortBody(t *testing.T) {
// This used to crash; https://golang.org/issue/3266
func TestTransportIdleConnCrash(t *testing.T) {
defer afterTest(t)
- tr := &Transport{}
- c := &Client{Transport: tr}
+ var tr *Transport
unblockCh := make(chan bool, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
@@ -1203,6 +1284,8 @@ func TestTransportIdleConnCrash(t *testing.T) {
tr.CloseIdleConnections()
}))
defer ts.Close()
+ c := ts.Client()
+ tr = c.Transport.(*Transport)
didreq := make(chan bool)
go func() {
@@ -1232,8 +1315,7 @@ func TestIssue3644(t *testing.T) {
}
}))
defer ts.Close()
- tr := &Transport{}
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
@@ -1258,8 +1340,7 @@ func TestIssue3595(t *testing.T) {
Error(w, deniedMsg, StatusUnauthorized)
}))
defer ts.Close()
- tr := &Transport{}
- c := &Client{Transport: tr}
+ c := ts.Client()
res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a'))
if err != nil {
t.Errorf("Post: %v", err)
@@ -1283,8 +1364,8 @@ func TestChunkedNoContent(t *testing.T) {
}))
defer ts.Close()
+ c := ts.Client()
for _, closeBody := range []bool{true, false} {
- c := &Client{Transport: &Transport{}}
const n = 4
for i := 1; i <= n; i++ {
res, err := c.Get(ts.URL)
@@ -1324,10 +1405,7 @@ func TestTransportConcurrency(t *testing.T) {
SetPendingDialHooks(func() { wg.Add(1) }, wg.Done)
defer SetPendingDialHooks(nil, nil)
- tr := &Transport{}
- defer tr.CloseIdleConnections()
-
- c := &Client{Transport: tr}
+ c := ts.Client()
reqs := make(chan string)
defer close(reqs)
@@ -1369,23 +1447,20 @@ func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
io.Copy(w, neverEnding('a'))
})
ts := httptest.NewServer(mux)
+ defer ts.Close()
timeout := 100 * time.Millisecond
- client := &Client{
- Transport: &Transport{
- Dial: func(n, addr string) (net.Conn, error) {
- conn, err := net.Dial(n, addr)
- if err != nil {
- return nil, err
- }
- conn.SetDeadline(time.Now().Add(timeout))
- if debug {
- conn = NewLoggingConn("client", conn)
- }
- return conn, nil
- },
- DisableKeepAlives: true,
- },
+ c := ts.Client()
+ c.Transport.(*Transport).Dial = func(n, addr string) (net.Conn, error) {
+ conn, err := net.Dial(n, addr)
+ if err != nil {
+ return nil, err
+ }
+ conn.SetDeadline(time.Now().Add(timeout))
+ if debug {
+ conn = NewLoggingConn("client", conn)
+ }
+ return conn, nil
}
getFailed := false
@@ -1397,7 +1472,7 @@ func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
if debug {
println("run", i+1, "of", nRuns)
}
- sres, err := client.Get(ts.URL + "/get")
+ sres, err := c.Get(ts.URL + "/get")
if err != nil {
if !getFailed {
// Make the timeout longer, once.
@@ -1419,7 +1494,6 @@ func TestIssue4191_InfiniteGetTimeout(t *testing.T) {
if debug {
println("tests complete; waiting for handlers to finish")
}
- ts.Close()
}
func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
@@ -1437,21 +1511,17 @@ func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
ts := httptest.NewServer(mux)
timeout := 100 * time.Millisecond
- client := &Client{
- Transport: &Transport{
- Dial: func(n, addr string) (net.Conn, error) {
- conn, err := net.Dial(n, addr)
- if err != nil {
- return nil, err
- }
- conn.SetDeadline(time.Now().Add(timeout))
- if debug {
- conn = NewLoggingConn("client", conn)
- }
- return conn, nil
- },
- DisableKeepAlives: true,
- },
+ c := ts.Client()
+ c.Transport.(*Transport).Dial = func(n, addr string) (net.Conn, error) {
+ conn, err := net.Dial(n, addr)
+ if err != nil {
+ return nil, err
+ }
+ conn.SetDeadline(time.Now().Add(timeout))
+ if debug {
+ conn = NewLoggingConn("client", conn)
+ }
+ return conn, nil
}
getFailed := false
@@ -1463,7 +1533,7 @@ func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
if debug {
println("run", i+1, "of", nRuns)
}
- sres, err := client.Get(ts.URL + "/get")
+ sres, err := c.Get(ts.URL + "/get")
if err != nil {
if !getFailed {
// Make the timeout longer, once.
@@ -1477,7 +1547,7 @@ func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) {
break
}
req, _ := NewRequest("PUT", ts.URL+"/put", sres.Body)
- _, err = client.Do(req)
+ _, err = c.Do(req)
if err == nil {
sres.Body.Close()
t.Errorf("Unexpected successful PUT")
@@ -1509,11 +1579,8 @@ func TestTransportResponseHeaderTimeout(t *testing.T) {
ts := httptest.NewServer(mux)
defer ts.Close()
- tr := &Transport{
- ResponseHeaderTimeout: 500 * time.Millisecond,
- }
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ c.Transport.(*Transport).ResponseHeaderTimeout = 500 * time.Millisecond
tests := []struct {
path string
@@ -1525,7 +1592,9 @@ func TestTransportResponseHeaderTimeout(t *testing.T) {
{path: "/fast", want: 200},
}
for i, tt := range tests {
- res, err := c.Get(ts.URL + tt.path)
+ req, _ := NewRequest("GET", ts.URL+tt.path, nil)
+ req = req.WithT(t)
+ res, err := c.Do(req)
select {
case <-inHandler:
case <-time.After(5 * time.Second):
@@ -1578,9 +1647,8 @@ func TestTransportCancelRequest(t *testing.T) {
defer ts.Close()
defer close(unblockc)
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
req, _ := NewRequest("GET", ts.URL, nil)
res, err := c.Do(req)
@@ -1688,9 +1756,8 @@ func TestCancelRequestWithChannel(t *testing.T) {
defer ts.Close()
defer close(unblockc)
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
req, _ := NewRequest("GET", ts.URL, nil)
ch := make(chan struct{})
@@ -1747,9 +1814,7 @@ func testCancelRequestWithChannelBeforeDo(t *testing.T, withCtx bool) {
defer ts.Close()
defer close(unblockc)
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
req, _ := NewRequest("GET", ts.URL, nil)
if withCtx {
@@ -1837,9 +1902,8 @@ func TestTransportCloseResponseBody(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
req, _ := NewRequest("GET", ts.URL, nil)
defer tr.CancelRequest(req)
@@ -1959,18 +2023,12 @@ func TestTransportSocketLateBinding(t *testing.T) {
defer ts.Close()
dialGate := make(chan bool, 1)
- tr := &Transport{
- Dial: func(n, addr string) (net.Conn, error) {
- if <-dialGate {
- return net.Dial(n, addr)
- }
- return nil, errors.New("manually closed")
- },
- DisableKeepAlives: false,
- }
- defer tr.CloseIdleConnections()
- c := &Client{
- Transport: tr,
+ c := ts.Client()
+ c.Transport.(*Transport).Dial = func(n, addr string) (net.Conn, error) {
+ if <-dialGate {
+ return net.Dial(n, addr)
+ }
+ return nil, errors.New("manually closed")
}
dialGate <- true // only allow one dial
@@ -2160,6 +2218,7 @@ var proxyFromEnvTests = []proxyFromEnvTest{
{env: "https://cache.corp.example.com", want: "https://cache.corp.example.com"},
{env: "http://127.0.0.1:8080", want: "http://127.0.0.1:8080"},
{env: "https://127.0.0.1:8080", want: "https://127.0.0.1:8080"},
+ {env: "socks5://127.0.0.1", want: "socks5://127.0.0.1"},
// Don't use secure for http
{req: "http://insecure.tld/", env: "http.proxy.tld", httpsenv: "secure.proxy.tld", want: "http://http.proxy.tld"},
@@ -2184,6 +2243,7 @@ var proxyFromEnvTests = []proxyFromEnvTest{
func TestProxyFromEnvironment(t *testing.T) {
ResetProxyEnv()
+ defer ResetProxyEnv()
for _, tt := range proxyFromEnvTests {
os.Setenv("HTTP_PROXY", tt.env)
os.Setenv("HTTPS_PROXY", tt.httpsenv)
@@ -2223,14 +2283,11 @@ func TestIdleConnChannelLeak(t *testing.T) {
SetReadLoopBeforeNextReadHook(func() { didRead <- true })
defer SetReadLoopBeforeNextReadHook(nil)
- tr := &Transport{
- Dial: func(netw, addr string) (net.Conn, error) {
- return net.Dial(netw, ts.Listener.Addr().String())
- },
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
+ tr.Dial = func(netw, addr string) (net.Conn, error) {
+ return net.Dial(netw, ts.Listener.Addr().String())
}
- defer tr.CloseIdleConnections()
-
- c := &Client{Transport: tr}
// First, without keep-alives.
for _, disableKeep := range []bool{true, false} {
@@ -2273,13 +2330,11 @@ func TestTransportClosesRequestBody(t *testing.T) {
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- cl := &Client{Transport: tr}
+ c := ts.Client()
closes := 0
- res, err := cl.Post(ts.URL, "text/plain", countCloseReader{&closes, strings.NewReader("hello")})
+ res, err := c.Post(ts.URL, "text/plain", countCloseReader{&closes, strings.NewReader("hello")})
if err != nil {
t.Fatal(err)
}
@@ -2365,20 +2420,16 @@ func TestTLSServerClosesConnection(t *testing.T) {
fmt.Fprintf(w, "hello")
}))
defer ts.Close()
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- }
- defer tr.CloseIdleConnections()
- client := &Client{Transport: tr}
+
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
var nSuccess = 0
var errs []error
const trials = 20
for i := 0; i < trials; i++ {
tr.CloseIdleConnections()
- res, err := client.Get(ts.URL + "/keep-alive-then-die")
+ res, err := c.Get(ts.URL + "/keep-alive-then-die")
if err != nil {
t.Fatal(err)
}
@@ -2393,7 +2444,7 @@ func TestTLSServerClosesConnection(t *testing.T) {
// Now try again and see if we successfully
// pick a new connection.
- res, err = client.Get(ts.URL + "/")
+ res, err = c.Get(ts.URL + "/")
if err != nil {
errs = append(errs, err)
continue
@@ -2472,22 +2523,20 @@ func TestTransportNoReuseAfterEarlyResponse(t *testing.T) {
go io.Copy(ioutil.Discard, conn)
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- client := &Client{Transport: tr}
+ c := ts.Client()
const bodySize = 256 << 10
finalBit := make(byteFromChanReader, 1)
req, _ := NewRequest("POST", ts.URL, io.MultiReader(io.LimitReader(neverEnding('x'), bodySize-1), finalBit))
req.ContentLength = bodySize
- res, err := client.Do(req)
+ res, err := c.Do(req)
if err := wantBody(res, err, "foo"); err != nil {
t.Errorf("POST response: %v", err)
}
donec := make(chan bool)
go func() {
defer close(donec)
- res, err = client.Get(ts.URL)
+ res, err = c.Get(ts.URL)
if err := wantBody(res, err, "bar"); err != nil {
t.Errorf("GET response: %v", err)
return
@@ -2519,10 +2568,9 @@ func TestTransportIssue10457(t *testing.T) {
conn.Close()
}))
defer ts.Close()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- cl := &Client{Transport: tr}
- res, err := cl.Get(ts.URL)
+ c := ts.Client()
+
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
@@ -2553,89 +2601,160 @@ type writerFuncConn struct {
func (c writerFuncConn) Write(p []byte) (n int, err error) { return c.write(p) }
-// Issue 4677. If we try to reuse a connection that the server is in the
-// process of closing, we may end up successfully writing out our request (or a
-// portion of our request) only to find a connection error when we try to read
-// from (or finish writing to) the socket.
+// Issues 4677, 18241, and 17844. If we try to reuse a connection that the
+// server is in the process of closing, we may end up successfully writing out
+// our request (or a portion of our request) only to find a connection error
+// when we try to read from (or finish writing to) the socket.
//
-// NOTE: we resend a request only if the request is idempotent, we reused a
-// keep-alive connection, and we haven't yet received any header data. This
-// automatically prevents an infinite resend loop because we'll run out of the
-// cached keep-alive connections eventually.
-func TestRetryIdempotentRequestsOnError(t *testing.T) {
- defer afterTest(t)
+// NOTE: we resend a request only if:
+// - we reused a keep-alive connection
+// - we haven't yet received any header data
+// - either we wrote no bytes to the server, or the request is idempotent
+// This automatically prevents an infinite resend loop because we'll run out of
+// the cached keep-alive connections eventually.
+func TestRetryRequestsOnError(t *testing.T) {
+ newRequest := func(method, urlStr string, body io.Reader) *Request {
+ req, err := NewRequest(method, urlStr, body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return req
+ }
- var (
- mu sync.Mutex
- logbuf bytes.Buffer
- )
- logf := func(format string, args ...interface{}) {
- mu.Lock()
- defer mu.Unlock()
- fmt.Fprintf(&logbuf, format, args...)
- logbuf.WriteByte('\n')
+ testCases := []struct {
+ name string
+ failureN int
+ failureErr error
+ // Note that we can't just re-use the Request object across calls to c.Do
+ // because we need to rewind Body between calls. (GetBody is only used to
+ // rewind Body on failure and redirects, not simply because the body has been consumed.)
+ req func() *Request
+ reqString string
+ }{
+ {
+ name: "IdempotentNoBodySomeWritten",
+ // Pretend that we've written some bytes to the server, so we know we're
+ // not just in the "retry when no bytes sent" case.
+ failureN: 1,
+ // Use the specific error that shouldRetryRequest looks for with idempotent requests.
+ failureErr: ExportErrServerClosedIdle,
+ req: func() *Request {
+ return newRequest("GET", "http://fake.golang", nil)
+ },
+ reqString: `GET / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nAccept-Encoding: gzip\r\n\r\n`,
+ },
+ {
+ name: "IdempotentGetBodySomeWritten",
+ // Pretend that we've written some bytes to the server, so we know we're
+ // not just in the "retry when no bytes sent" case.
+ failureN: 1,
+ // Use the specific error that shouldRetryRequest looks for with idempotent requests.
+ failureErr: ExportErrServerClosedIdle,
+ req: func() *Request {
+ return newRequest("GET", "http://fake.golang", strings.NewReader("foo\n"))
+ },
+ reqString: `GET / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nContent-Length: 4\r\nAccept-Encoding: gzip\r\n\r\nfoo\n`,
+ },
+ {
+ name: "NothingWrittenNoBody",
+ // It's key that we return 0 here -- that's what enables Transport to know
+ // that nothing was written, even though this is a non-idempotent request.
+ failureN: 0,
+ failureErr: errors.New("second write fails"),
+ req: func() *Request {
+ return newRequest("DELETE", "http://fake.golang", nil)
+ },
+ reqString: `DELETE / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nAccept-Encoding: gzip\r\n\r\n`,
+ },
+ {
+ name: "NothingWrittenGetBody",
+ // It's key that we return 0 here -- that's what enables Transport to know
+ // that nothing was written, even though this is a non-idempotent request.
+ failureN: 0,
+ failureErr: errors.New("second write fails"),
+ // Note that NewRequest will set up GetBody for strings.Reader, which is
+ // required for the retry to occur
+ req: func() *Request {
+ return newRequest("POST", "http://fake.golang", strings.NewReader("foo\n"))
+ },
+ reqString: `POST / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nContent-Length: 4\r\nAccept-Encoding: gzip\r\n\r\nfoo\n`,
+ },
}
- ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
- logf("Handler")
- w.Header().Set("X-Status", "ok")
- }))
- defer ts.Close()
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ defer afterTest(t)
- var writeNumAtomic int32
- tr := &Transport{
- Dial: func(network, addr string) (net.Conn, error) {
- logf("Dial")
- c, err := net.Dial(network, ts.Listener.Addr().String())
- if err != nil {
- logf("Dial error: %v", err)
- return nil, err
+ var (
+ mu sync.Mutex
+ logbuf bytes.Buffer
+ )
+ logf := func(format string, args ...interface{}) {
+ mu.Lock()
+ defer mu.Unlock()
+ fmt.Fprintf(&logbuf, format, args...)
+ logbuf.WriteByte('\n')
}
- return &writerFuncConn{
- Conn: c,
- write: func(p []byte) (n int, err error) {
- if atomic.AddInt32(&writeNumAtomic, 1) == 2 {
- logf("intentional write failure")
- return 0, errors.New("second write fails")
- }
- logf("Write(%q)", p)
- return c.Write(p)
- },
- }, nil
- },
- }
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
- SetRoundTripRetried(func() {
- logf("Retried.")
- })
- defer SetRoundTripRetried(nil)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ logf("Handler")
+ w.Header().Set("X-Status", "ok")
+ }))
+ defer ts.Close()
+
+ var writeNumAtomic int32
+ c := ts.Client()
+ c.Transport.(*Transport).Dial = func(network, addr string) (net.Conn, error) {
+ logf("Dial")
+ c, err := net.Dial(network, ts.Listener.Addr().String())
+ if err != nil {
+ logf("Dial error: %v", err)
+ return nil, err
+ }
+ return &writerFuncConn{
+ Conn: c,
+ write: func(p []byte) (n int, err error) {
+ if atomic.AddInt32(&writeNumAtomic, 1) == 2 {
+ logf("intentional write failure")
+ return tc.failureN, tc.failureErr
+ }
+ logf("Write(%q)", p)
+ return c.Write(p)
+ },
+ }, nil
+ }
- for i := 0; i < 3; i++ {
- res, err := c.Get("http://fake.golang/")
- if err != nil {
- t.Fatalf("i=%d: Get = %v", i, err)
- }
- res.Body.Close()
- }
+ SetRoundTripRetried(func() {
+ logf("Retried.")
+ })
+ defer SetRoundTripRetried(nil)
- mu.Lock()
- got := logbuf.String()
- mu.Unlock()
- const want = `Dial
-Write("GET / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nAccept-Encoding: gzip\r\n\r\n")
+ for i := 0; i < 3; i++ {
+ res, err := c.Do(tc.req())
+ if err != nil {
+ t.Fatalf("i=%d: Do = %v", i, err)
+ }
+ res.Body.Close()
+ }
+
+ mu.Lock()
+ got := logbuf.String()
+ mu.Unlock()
+ want := fmt.Sprintf(`Dial
+Write("%s")
Handler
intentional write failure
Retried.
Dial
-Write("GET / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nAccept-Encoding: gzip\r\n\r\n")
+Write("%s")
Handler
-Write("GET / HTTP/1.1\r\nHost: fake.golang\r\nUser-Agent: Go-http-client/1.1\r\nAccept-Encoding: gzip\r\n\r\n")
+Write("%s")
Handler
-`
- if got != want {
- t.Errorf("Log of events differs. Got:\n%s\nWant:\n%s", got, want)
+`, tc.reqString, tc.reqString, tc.reqString)
+ if got != want {
+ t.Errorf("Log of events differs. Got:\n%s\nWant:\n%s", got, want)
+ }
+ })
}
}
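The NOTE above can be restated as a small predicate. This is a
simplified sketch of the policy, not the unexported net/http
implementation (which keys off errServerClosedIdle and Request
internals such as GetBody):

    package main

    import "fmt"

    // shouldRetry restates the retry policy: only on a reused keep-alive
    // connection, only before any response header bytes arrived, and only
    // if nothing was written at all or the method is idempotent.
    func shouldRetry(reusedConn, gotHeader bool, bytesWritten int64, method string) bool {
    	if !reusedConn || gotHeader {
    		return false
    	}
    	if bytesWritten == 0 {
    		return true // safe: the server saw nothing of this request
    	}
    	switch method {
    	case "GET", "HEAD", "OPTIONS", "TRACE":
    		return true // read-only methods, matching net/http's conservative choice
    	}
    	return false
    }

    func main() {
    	fmt.Println(shouldRetry(true, false, 0, "POST")) // true: nothing written
    	fmt.Println(shouldRetry(true, false, 42, "POST")) // false: POST, bytes sent
    	fmt.Println(shouldRetry(true, false, 42, "GET")) // true: idempotent
    }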
@@ -2649,6 +2768,7 @@ func TestTransportClosesBodyOnError(t *testing.T) {
readBody <- err
}))
defer ts.Close()
+ c := ts.Client()
fakeErr := errors.New("fake error")
didClose := make(chan bool, 1)
req, _ := NewRequest("POST", ts.URL, struct {
@@ -2664,7 +2784,7 @@ func TestTransportClosesBodyOnError(t *testing.T) {
return nil
}),
})
- res, err := DefaultClient.Do(req)
+ res, err := c.Do(req)
if res != nil {
defer res.Body.Close()
}
@@ -2698,23 +2818,19 @@ func TestTransportDialTLS(t *testing.T) {
mu.Unlock()
}))
defer ts.Close()
- tr := &Transport{
- DialTLS: func(netw, addr string) (net.Conn, error) {
- mu.Lock()
- didDial = true
- mu.Unlock()
- c, err := tls.Dial(netw, addr, &tls.Config{
- InsecureSkipVerify: true,
- })
- if err != nil {
- return nil, err
- }
- return c, c.Handshake()
- },
+ c := ts.Client()
+ c.Transport.(*Transport).DialTLS = func(netw, addr string) (net.Conn, error) {
+ mu.Lock()
+ didDial = true
+ mu.Unlock()
+ c, err := tls.Dial(netw, addr, c.Transport.(*Transport).TLSClientConfig)
+ if err != nil {
+ return nil, err
+ }
+ return c, c.Handshake()
}
- defer tr.CloseIdleConnections()
- client := &Client{Transport: tr}
- res, err := client.Get(ts.URL)
+
+ res, err := c.Get(ts.URL)
if err != nil {
t.Fatal(err)
}
@@ -2796,10 +2912,11 @@ func TestTransportRangeAndGzip(t *testing.T) {
reqc <- r
}))
defer ts.Close()
+ c := ts.Client()
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("Range", "bytes=7-11")
- res, err := DefaultClient.Do(req)
+ res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
@@ -2828,9 +2945,7 @@ func TestTransportResponseCancelRace(t *testing.T) {
w.Write(b[:])
}))
defer ts.Close()
-
- tr := &Transport{}
- defer tr.CloseIdleConnections()
+ tr := ts.Client().Transport.(*Transport)
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
@@ -2859,14 +2974,46 @@ func TestTransportResponseCancelRace(t *testing.T) {
res.Body.Close()
}
+// Test for issue 19248: Content-Encoding's value is case insensitive.
+func TestTransportContentEncodingCaseInsensitive(t *testing.T) {
+ setParallel(t)
+ defer afterTest(t)
+ for _, ce := range []string{"gzip", "GZIP"} {
+ ce := ce
+ t.Run(ce, func(t *testing.T) {
+ const encodedString = "Hello Gopher"
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Encoding", ce)
+ gz := gzip.NewWriter(w)
+ gz.Write([]byte(encodedString))
+ gz.Close()
+ }))
+ defer ts.Close()
+
+ res, err := ts.Client().Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ body, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(body) != encodedString {
+ t.Fatalf("Expected body %q, got: %q\n", encodedString, string(body))
+ }
+ })
+ }
+}
+
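The fix this test exercises amounts to comparing the header value
case-insensitively; a one-line sketch of the comparison (the assumed
shape of the check, not the patched internals):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Issue 19248: "GZIP" must be treated like "gzip".
    	for _, ce := range []string{"gzip", "GZIP", "gZip", "deflate"} {
    		fmt.Println(ce, strings.EqualFold(ce, "gzip")) // true, true, true, false
    	}
    }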
func TestTransportDialCancelRace(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
defer ts.Close()
-
- tr := &Transport{}
- defer tr.CloseIdleConnections()
+ tr := ts.Client().Transport.(*Transport)
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
@@ -2993,6 +3140,7 @@ func TestTransportPrefersResponseOverWriteError(t *testing.T) {
w.WriteHeader(StatusOK)
}))
defer ts.Close()
+ c := ts.Client()
fail := 0
count := 100
@@ -3002,10 +3150,7 @@ func TestTransportPrefersResponseOverWriteError(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- tr := new(Transport)
- defer tr.CloseIdleConnections()
- client := &Client{Transport: tr}
- resp, err := client.Do(req)
+ resp, err := c.Do(req)
if err != nil {
fail++
t.Logf("%d = %#v", i, err)
@@ -3218,10 +3363,8 @@ func testTransportReuseConnection_Gzip(t *testing.T, chunked bool) {
w.Write(rgz) // arbitrary gzip response
}))
defer ts.Close()
+ c := ts.Client()
- tr := &Transport{}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
for i := 0; i < 2; i++ {
res, err := c.Get(ts.URL)
if err != nil {
@@ -3250,12 +3393,9 @@ func TestTransportResponseHeaderLength(t *testing.T) {
}
}))
defer ts.Close()
+ c := ts.Client()
+ c.Transport.(*Transport).MaxResponseHeaderBytes = 512 << 10
- tr := &Transport{
- MaxResponseHeaderBytes: 512 << 10,
- }
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
if res, err := c.Get(ts.URL); err != nil {
t.Fatal(err)
} else {
@@ -3426,16 +3566,26 @@ func testTransportEventTrace(t *testing.T, h2 bool, noHooks bool) {
}
}
-func TestTransportEventTraceRealDNS(t *testing.T) {
- if testing.Short() && testenv.Builder() == "" {
- // Skip this test in short mode (the default for
- // all.bash), in case the user is using a shady/ISP
- // DNS server hijacking queries.
- // See issues 16732, 16716.
- // Our builders use 8.8.8.8, though, which correctly
- // returns NXDOMAIN, so still run this test there.
- t.Skip("skipping in short mode")
+var (
+ isDNSHijackedOnce sync.Once
+ isDNSHijacked bool
+)
+
+func skipIfDNSHijacked(t *testing.T) {
+ // Skip this test if the user is using a shady/ISP
+ // DNS server hijacking queries.
+ // See issues 16732, 16716.
+ isDNSHijackedOnce.Do(func() {
+ addrs, _ := net.LookupHost("dns-should-not-resolve.golang")
+ isDNSHijacked = len(addrs) != 0
+ })
+ if isDNSHijacked {
+ t.Skip("skipping; test requires non-hijacking DNS server")
}
+}
+
+func TestTransportEventTraceRealDNS(t *testing.T) {
+ skipIfDNSHijacked(t)
defer afterTest(t)
tr := &Transport{}
defer tr.CloseIdleConnections()
@@ -3506,8 +3656,8 @@ func TestTransportRejectsAlphaPort(t *testing.T) {
// connections. The http2 test is done in TestTransportEventTrace_h2
func TestTLSHandshakeTrace(t *testing.T) {
defer afterTest(t)
- s := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
- defer s.Close()
+ ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+ defer ts.Close()
var mu sync.Mutex
var start, done bool
@@ -3527,10 +3677,8 @@ func TestTLSHandshakeTrace(t *testing.T) {
},
}
- tr := &Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
- req, err := NewRequest("GET", s.URL, nil)
+ c := ts.Client()
+ req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal("Unable to construct test request:", err)
}
@@ -3557,16 +3705,14 @@ func TestTransportMaxIdleConns(t *testing.T) {
// No body for convenience.
}))
defer ts.Close()
- tr := &Transport{
- MaxIdleConns: 4,
- }
- defer tr.CloseIdleConnections()
+ c := ts.Client()
+ tr := c.Transport.(*Transport)
+ tr.MaxIdleConns = 4
ip, port, err := net.SplitHostPort(ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
- c := &Client{Transport: tr}
ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) {
return []net.IPAddr{{IP: net.ParseIP(ip)}}, nil
})
@@ -3862,17 +4008,16 @@ func TestTransportProxyConnectHeader(t *testing.T) {
c.Close()
}))
defer ts.Close()
- tr := &Transport{
- ProxyConnectHeader: Header{
- "User-Agent": {"foo"},
- "Other": {"bar"},
- },
- Proxy: func(r *Request) (*url.URL, error) {
- return url.Parse(ts.URL)
- },
+
+ c := ts.Client()
+ c.Transport.(*Transport).Proxy = func(r *Request) (*url.URL, error) {
+ return url.Parse(ts.URL)
}
- defer tr.CloseIdleConnections()
- c := &Client{Transport: tr}
+ c.Transport.(*Transport).ProxyConnectHeader = Header{
+ "User-Agent": {"foo"},
+ "Other": {"bar"},
+ }
+
res, err := c.Get("https://dummy.tld/") // https to force a CONNECT
if err == nil {
res.Body.Close()
diff --git a/libgo/go/net/interface.go b/libgo/go/net/interface.go
index b3297f249d6..4036a7f4750 100644
--- a/libgo/go/net/interface.go
+++ b/libgo/go/net/interface.go
@@ -211,30 +211,30 @@ func (zc *ipv6ZoneCache) update(ift []Interface) {
}
}
-func zoneToString(zone int) string {
- if zone == 0 {
+func (zc *ipv6ZoneCache) name(index int) string {
+ if index == 0 {
return ""
}
zoneCache.update(nil)
zoneCache.RLock()
defer zoneCache.RUnlock()
- name, ok := zoneCache.toName[zone]
+ name, ok := zoneCache.toName[index]
if !ok {
- name = uitoa(uint(zone))
+ name = uitoa(uint(index))
}
return name
}
-func zoneToInt(zone string) int {
- if zone == "" {
+func (zc *ipv6ZoneCache) index(name string) int {
+ if name == "" {
return 0
}
zoneCache.update(nil)
zoneCache.RLock()
defer zoneCache.RUnlock()
- index, ok := zoneCache.toIndex[zone]
+ index, ok := zoneCache.toIndex[name]
if !ok {
- index, _, _ = dtoi(zone)
+ index, _, _ = dtoi(name)
}
return index
}
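The renamed methods make the cache read like the public API it shadows:
an IPv6 zone is just an interface name (or, as a fallback, its decimal
index). The same name/index mapping is reachable from user code; a small
hedged sketch:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// The zone in "fe80::1%eth0" names an interface; resolving it by
    	// hand uses the same name<->index mapping the cache above provides.
    	ifaces, err := net.Interfaces()
    	if err != nil || len(ifaces) == 0 {
    		return
    	}
    	ifi := ifaces[0]
    	fmt.Printf("zone %q has index %d\n", ifi.Name, ifi.Index)
    	back, err := net.InterfaceByName(ifi.Name)
    	if err == nil {
    		fmt.Println(back.Index == ifi.Index) // true
    	}
    }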
diff --git a/libgo/go/net/interface_linux.go b/libgo/go/net/interface_linux.go
index 5e391b28b0f..441ab2f8805 100644
--- a/libgo/go/net/interface_linux.go
+++ b/libgo/go/net/interface_linux.go
@@ -162,7 +162,7 @@ loop:
if err != nil {
return nil, os.NewSyscallError("parsenetlinkrouteattr", err)
}
- ifa := newAddr(ifi, ifam, attrs)
+ ifa := newAddr(ifam, attrs)
if ifa != nil {
ifat = append(ifat, ifa)
}
@@ -172,7 +172,7 @@ loop:
return ifat, nil
}
-func newAddr(ifi *Interface, ifam *syscall.IfAddrmsg, attrs []syscall.NetlinkRouteAttr) Addr {
+func newAddr(ifam *syscall.IfAddrmsg, attrs []syscall.NetlinkRouteAttr) Addr {
var ipPointToPoint bool
// Seems like we need to make sure whether the IP interface
// stack consists of IP point-to-point numbered or unnumbered
diff --git a/libgo/go/net/interface_test.go b/libgo/go/net/interface_test.go
index 38a2ca46565..534137a9133 100644
--- a/libgo/go/net/interface_test.go
+++ b/libgo/go/net/interface_test.go
@@ -262,13 +262,13 @@ func validateInterfaceMulticastAddrs(ifat []Addr) (*routeStats, error) {
func checkUnicastStats(ifStats *ifStats, uniStats *routeStats) error {
// Test the existence of connected unicast routes for IPv4.
- if supportsIPv4 && ifStats.loop+ifStats.other > 0 && uniStats.ipv4 == 0 {
+ if supportsIPv4() && ifStats.loop+ifStats.other > 0 && uniStats.ipv4 == 0 {
return fmt.Errorf("num IPv4 unicast routes = 0; want >0; summary: %+v, %+v", ifStats, uniStats)
}
// Test the existence of connected unicast routes for IPv6.
// We can assume the existence of ::1/128 when at least one
// loopback interface is installed.
- if supportsIPv6 && ifStats.loop > 0 && uniStats.ipv6 == 0 {
+ if supportsIPv6() && ifStats.loop > 0 && uniStats.ipv6 == 0 {
return fmt.Errorf("num IPv6 unicast routes = 0; want >0; summary: %+v, %+v", ifStats, uniStats)
}
return nil
@@ -290,7 +290,7 @@ func checkMulticastStats(ifStats *ifStats, uniStats, multiStats *routeStats) err
// We can assume the existence of connected multicast
// route clones when at least two connected unicast
// routes, ::1/128 and other, are installed.
- if supportsIPv6 && ifStats.loop > 0 && uniStats.ipv6 > 1 && multiStats.ipv6 == 0 {
+ if supportsIPv6() && ifStats.loop > 0 && uniStats.ipv6 > 1 && multiStats.ipv6 == 0 {
return fmt.Errorf("num IPv6 multicast route clones = 0; want >0; summary: %+v, %+v, %+v", ifStats, uniStats, multiStats)
}
}
diff --git a/libgo/go/net/interface_windows.go b/libgo/go/net/interface_windows.go
index 8b976e585f3..b08d1582d8a 100644
--- a/libgo/go/net/interface_windows.go
+++ b/libgo/go/net/interface_windows.go
@@ -24,10 +24,7 @@ func probeWindowsIPStack() (supportsVistaIP bool) {
if err != nil {
return true // Windows 10 and above will deprecate this API
}
- if byte(v) < 6 { // major version of Windows Vista is 6
- return false
- }
- return true
+ return byte(v) >= 6 // major version of Windows Vista is 6
}
// adapterAddresses returns a list of IP adapter and address
diff --git a/libgo/go/net/internal/socktest/sys_cloexec.go b/libgo/go/net/internal/socktest/sys_cloexec.go
index 340ff071e7e..d1b8f4f3749 100644
--- a/libgo/go/net/internal/socktest/sys_cloexec.go
+++ b/libgo/go/net/internal/socktest/sys_cloexec.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build freebsd linux
+// +build dragonfly freebsd linux
package socktest
@@ -15,7 +15,7 @@ func (sw *Switch) Accept4(s, flags int) (ns int, sa syscall.Sockaddr, err error)
return syscall.Accept4(s, flags)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterAccept]
+ f := sw.fltab[FilterAccept]
sw.fmu.RUnlock()
af, err := f.apply(so)
diff --git a/libgo/go/net/internal/socktest/sys_unix.go b/libgo/go/net/internal/socktest/sys_unix.go
index a3d1282c2a4..397c524fb83 100644
--- a/libgo/go/net/internal/socktest/sys_unix.go
+++ b/libgo/go/net/internal/socktest/sys_unix.go
@@ -14,7 +14,7 @@ func (sw *Switch) Socket(family, sotype, proto int) (s int, err error) {
so := &Status{Cookie: cookie(family, sotype, proto)}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterSocket]
+ f := sw.fltab[FilterSocket]
sw.fmu.RUnlock()
af, err := f.apply(so)
@@ -47,7 +47,7 @@ func (sw *Switch) Close(s int) (err error) {
return syscall.Close(s)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterClose]
+ f := sw.fltab[FilterClose]
sw.fmu.RUnlock()
af, err := f.apply(so)
@@ -77,7 +77,7 @@ func (sw *Switch) Connect(s int, sa syscall.Sockaddr) (err error) {
return syscall.Connect(s, sa)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterConnect]
+ f := sw.fltab[FilterConnect]
sw.fmu.RUnlock()
af, err := f.apply(so)
@@ -106,7 +106,7 @@ func (sw *Switch) Listen(s, backlog int) (err error) {
return syscall.Listen(s, backlog)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterListen]
+ f := sw.fltab[FilterListen]
sw.fmu.RUnlock()
af, err := f.apply(so)
@@ -135,7 +135,7 @@ func (sw *Switch) Accept(s int) (ns int, sa syscall.Sockaddr, err error) {
return syscall.Accept(s)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterAccept]
+ f := sw.fltab[FilterAccept]
sw.fmu.RUnlock()
af, err := f.apply(so)
@@ -168,7 +168,7 @@ func (sw *Switch) GetsockoptInt(s, level, opt int) (soerr int, err error) {
return syscall.GetsockoptInt(s, level, opt)
}
sw.fmu.RLock()
- f, _ := sw.fltab[FilterGetsockoptInt]
+ f := sw.fltab[FilterGetsockoptInt]
sw.fmu.RUnlock()
af, err := f.apply(so)
diff --git a/libgo/go/net/ip.go b/libgo/go/net/ip.go
index db3364c1b31..6b7ba4c23ea 100644
--- a/libgo/go/net/ip.go
+++ b/libgo/go/net/ip.go
@@ -12,6 +12,8 @@
package net
+import _ "unsafe" // for go:linkname
+
// IP address lengths (bytes).
const (
IPv4len = 4
@@ -106,7 +108,8 @@ var (
IPv6linklocalallrouters = IP{0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02}
)
-// IsUnspecified reports whether ip is an unspecified address.
+// IsUnspecified reports whether ip is an unspecified address, either
+// the IPv4 address "0.0.0.0" or the IPv6 address "::".
func (ip IP) IsUnspecified() bool {
return ip.Equal(IPv4zero) || ip.Equal(IPv6unspecified)
}
@@ -338,7 +341,8 @@ func ipEmptyString(ip IP) string {
}
// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by String.
+// The encoding is the same as returned by String, with one exception:
+// When len(ip) is zero, it returns an empty slice.
func (ip IP) MarshalText() ([]byte, error) {
if len(ip) == 0 {
return []byte(""), nil
@@ -381,17 +385,9 @@ func (ip IP) Equal(x IP) bool {
return false
}
-func bytesEqual(x, y []byte) bool {
- if len(x) != len(y) {
- return false
- }
- for i, b := range x {
- if y[i] != b {
- return false
- }
- }
- return true
-}
+// bytes.Equal is implemented in runtime/asm_$goarch.s
+//go:linkname bytesEqual bytes.Equal
+func bytesEqual(x, y []byte) bool
func (ip IP) matchAddrFamily(x IP) bool {
return ip.To4() != nil && x.To4() != nil || ip.To16() != nil && ip.To4() == nil && x.To16() != nil && x.To4() == nil
@@ -667,7 +663,7 @@ func ParseIP(s string) IP {
// It returns the IP address and the network implied by the IP and
// prefix length.
// For example, ParseCIDR("192.0.2.1/24") returns the IP address
-// 198.0.2.1 and the network 198.0.2.0/24.
+// 192.0.2.1 and the network 192.0.2.0/24.
func ParseCIDR(s string) (IP, *IPNet, error) {
i := byteIndex(s, '/')
if i < 0 {
diff --git a/libgo/go/net/ip_test.go b/libgo/go/net/ip_test.go
index 46551633ce2..ad13388dd27 100644
--- a/libgo/go/net/ip_test.go
+++ b/libgo/go/net/ip_test.go
@@ -6,6 +6,7 @@ package net
import (
"bytes"
+ "math/rand"
"reflect"
"runtime"
"testing"
@@ -468,61 +469,77 @@ func TestNetworkNumberAndMask(t *testing.T) {
}
}
-var splitJoinTests = []struct {
- host string
- port string
- join string
-}{
- {"www.google.com", "80", "www.google.com:80"},
- {"127.0.0.1", "1234", "127.0.0.1:1234"},
- {"::1", "80", "[::1]:80"},
- {"fe80::1%lo0", "80", "[fe80::1%lo0]:80"},
- {"localhost%lo0", "80", "[localhost%lo0]:80"},
- {"", "0", ":0"},
-
- {"google.com", "https%foo", "google.com:https%foo"}, // Go 1.0 behavior
- {"127.0.0.1", "", "127.0.0.1:"}, // Go 1.0 behavior
- {"www.google.com", "", "www.google.com:"}, // Go 1.0 behavior
-}
-
-var splitFailureTests = []struct {
- hostPort string
- err string
-}{
- {"www.google.com", "missing port in address"},
- {"127.0.0.1", "missing port in address"},
- {"[::1]", "missing port in address"},
- {"[fe80::1%lo0]", "missing port in address"},
- {"[localhost%lo0]", "missing port in address"},
- {"localhost%lo0", "missing port in address"},
-
- {"::1", "too many colons in address"},
- {"fe80::1%lo0", "too many colons in address"},
- {"fe80::1%lo0:80", "too many colons in address"},
+func TestSplitHostPort(t *testing.T) {
+ for _, tt := range []struct {
+ hostPort string
+ host string
+ port string
+ }{
+ // Host name
+ {"localhost:http", "localhost", "http"},
+ {"localhost:80", "localhost", "80"},
+
+ // Go-specific host name with zone identifier
+ {"localhost%lo0:http", "localhost%lo0", "http"},
+ {"localhost%lo0:80", "localhost%lo0", "80"},
+ {"[localhost%lo0]:http", "localhost%lo0", "http"}, // Go 1 behavior
+ {"[localhost%lo0]:80", "localhost%lo0", "80"}, // Go 1 behavior
+
+ // IP literal
+ {"127.0.0.1:http", "127.0.0.1", "http"},
+ {"127.0.0.1:80", "127.0.0.1", "80"},
+ {"[::1]:http", "::1", "http"},
+ {"[::1]:80", "::1", "80"},
+
+ // IP literal with zone identifier
+ {"[::1%lo0]:http", "::1%lo0", "http"},
+ {"[::1%lo0]:80", "::1%lo0", "80"},
+
+ // Go-specific wildcard for host name
+ {":http", "", "http"}, // Go 1 behavior
+ {":80", "", "80"}, // Go 1 behavior
+
+ // Go-specific wildcard for service name or transport port number
+ {"golang.org:", "golang.org", ""}, // Go 1 behavior
+ {"127.0.0.1:", "127.0.0.1", ""}, // Go 1 behavior
+ {"[::1]:", "::1", ""}, // Go 1 behavior
+
+ // Opaque service name
+ {"golang.org:https%foo", "golang.org", "https%foo"}, // Go 1 behavior
+ } {
+ if host, port, err := SplitHostPort(tt.hostPort); host != tt.host || port != tt.port || err != nil {
+ t.Errorf("SplitHostPort(%q) = %q, %q, %v; want %q, %q, nil", tt.hostPort, host, port, err, tt.host, tt.port)
+ }
+ }
- {"localhost%lo0:80", "missing brackets in address"},
+ for _, tt := range []struct {
+ hostPort string
+ err string
+ }{
+ {"golang.org", "missing port in address"},
+ {"127.0.0.1", "missing port in address"},
+ {"[::1]", "missing port in address"},
+ {"[fe80::1%lo0]", "missing port in address"},
+ {"[localhost%lo0]", "missing port in address"},
+ {"localhost%lo0", "missing port in address"},
- // Test cases that didn't fail in Go 1.0
+ {"::1", "too many colons in address"},
+ {"fe80::1%lo0", "too many colons in address"},
+ {"fe80::1%lo0:80", "too many colons in address"},
- {"[foo:bar]", "missing port in address"},
- {"[foo:bar]baz", "missing port in address"},
- {"[foo]bar:baz", "missing port in address"},
+ // Test cases that didn't fail in Go 1
- {"[foo]:[bar]:baz", "too many colons in address"},
+ {"[foo:bar]", "missing port in address"},
+ {"[foo:bar]baz", "missing port in address"},
+ {"[foo]bar:baz", "missing port in address"},
- {"[foo]:[bar]baz", "unexpected '[' in address"},
- {"foo[bar]:baz", "unexpected '[' in address"},
+ {"[foo]:[bar]:baz", "too many colons in address"},
- {"foo]bar:baz", "unexpected ']' in address"},
-}
+ {"[foo]:[bar]baz", "unexpected '[' in address"},
+ {"foo[bar]:baz", "unexpected '[' in address"},
-func TestSplitHostPort(t *testing.T) {
- for _, tt := range splitJoinTests {
- if host, port, err := SplitHostPort(tt.join); host != tt.host || port != tt.port || err != nil {
- t.Errorf("SplitHostPort(%q) = %q, %q, %v; want %q, %q, nil", tt.join, host, port, err, tt.host, tt.port)
- }
- }
- for _, tt := range splitFailureTests {
+ {"foo]bar:baz", "unexpected ']' in address"},
+ } {
if host, port, err := SplitHostPort(tt.hostPort); err == nil {
t.Errorf("SplitHostPort(%q) should have failed", tt.hostPort)
} else {
@@ -538,9 +555,43 @@ func TestSplitHostPort(t *testing.T) {
}
func TestJoinHostPort(t *testing.T) {
- for _, tt := range splitJoinTests {
- if join := JoinHostPort(tt.host, tt.port); join != tt.join {
- t.Errorf("JoinHostPort(%q, %q) = %q; want %q", tt.host, tt.port, join, tt.join)
+ for _, tt := range []struct {
+ host string
+ port string
+ hostPort string
+ }{
+ // Host name
+ {"localhost", "http", "localhost:http"},
+ {"localhost", "80", "localhost:80"},
+
+ // Go-specific host name with zone identifier
+ {"localhost%lo0", "http", "localhost%lo0:http"},
+ {"localhost%lo0", "80", "localhost%lo0:80"},
+
+ // IP literal
+ {"127.0.0.1", "http", "127.0.0.1:http"},
+ {"127.0.0.1", "80", "127.0.0.1:80"},
+ {"::1", "http", "[::1]:http"},
+ {"::1", "80", "[::1]:80"},
+
+ // IP literal with zone identifier
+ {"::1%lo0", "http", "[::1%lo0]:http"},
+ {"::1%lo0", "80", "[::1%lo0]:80"},
+
+ // Go-specific wildcard for host name
+ {"", "http", ":http"}, // Go 1 behavior
+ {"", "80", ":80"}, // Go 1 behavior
+
+ // Go-specific wildcard for service name or transport port number
+ {"golang.org", "", "golang.org:"}, // Go 1 behavior
+ {"127.0.0.1", "", "127.0.0.1:"}, // Go 1 behavior
+ {"::1", "", "[::1]:"}, // Go 1 behavior
+
+ // Opaque service name
+ {"golang.org", "https%foo", "golang.org:https%foo"}, // Go 1 behavior
+ } {
+ if hostPort := JoinHostPort(tt.host, tt.port); hostPort != tt.hostPort {
+ t.Errorf("JoinHostPort(%q, %q) = %q; want %q", tt.host, tt.port, hostPort, tt.hostPort)
}
}
}
@@ -645,3 +696,32 @@ func TestIPAddrScope(t *testing.T) {
}
}
}
+
+func BenchmarkIPEqual(b *testing.B) {
+ b.Run("IPv4", func(b *testing.B) {
+ benchmarkIPEqual(b, IPv4len)
+ })
+ b.Run("IPv6", func(b *testing.B) {
+ benchmarkIPEqual(b, IPv6len)
+ })
+}
+
+func benchmarkIPEqual(b *testing.B, size int) {
+ ips := make([]IP, 1000)
+ for i := range ips {
+ ips[i] = make(IP, size)
+ rand.Read(ips[i])
+ }
+ // Half of the N are equal.
+ for i := 0; i < b.N/2; i++ {
+ x := ips[i%len(ips)]
+ y := ips[i%len(ips)]
+ x.Equal(y)
+ }
+ // The other half are not equal.
+ for i := 0; i < b.N/2; i++ {
+ x := ips[i%len(ips)]
+ y := ips[(i+1)%len(ips)]
+ x.Equal(y)
+ }
+}
diff --git a/libgo/go/net/iprawsock.go b/libgo/go/net/iprawsock.go
index d994fc67c65..c4b54f00c4e 100644
--- a/libgo/go/net/iprawsock.go
+++ b/libgo/go/net/iprawsock.go
@@ -61,30 +61,37 @@ func (a *IPAddr) opAddr() Addr {
return a
}
-// ResolveIPAddr parses addr as an IP address of the form "host" or
-// "ipv6-host%zone" and resolves the domain name on the network net,
-// which must be "ip", "ip4" or "ip6".
+// ResolveIPAddr returns the address of an IP end point.
//
-// Resolving a hostname is not recommended because this returns at most
-// one of its IP addresses.
-func ResolveIPAddr(net, addr string) (*IPAddr, error) {
- if net == "" { // a hint wildcard for Go 1.0 undocumented behavior
- net = "ip"
+// The network must be an IP network name.
+//
+// If the host in the address parameter is not a literal IP address,
+// ResolveIPAddr resolves the address to the address of an IP end point.
+// Otherwise, it parses the address as a literal IP address.
+// The address parameter can use a host name, but this is not
+// recommended, because it will return at most one of the host name's
+// IP addresses.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ResolveIPAddr(network, address string) (*IPAddr, error) {
+ if network == "" { // a hint wildcard for Go 1.0 undocumented behavior
+ network = "ip"
}
- afnet, _, err := parseNetwork(context.Background(), net)
+ afnet, _, err := parseNetwork(context.Background(), network, false)
if err != nil {
return nil, err
}
switch afnet {
case "ip", "ip4", "ip6":
default:
- return nil, UnknownNetworkError(net)
+ return nil, UnknownNetworkError(network)
}
- addrs, err := DefaultResolver.internetAddrList(context.Background(), afnet, addr)
+ addrs, err := DefaultResolver.internetAddrList(context.Background(), afnet, address)
if err != nil {
return nil, err
}
- return addrs.first(isIPv4).(*IPAddr), nil
+ return addrs.forResolve(network, address).(*IPAddr), nil
}
// IPConn is the implementation of the Conn and PacketConn interfaces
@@ -93,13 +100,16 @@ type IPConn struct {
conn
}
-// ReadFromIP reads an IP packet from c, copying the payload into b.
-// It returns the number of bytes copied into b and the return address
-// that was on the packet.
-//
-// ReadFromIP can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetReadDeadline.
+// SyscallConn returns a raw network connection.
+// This implements the syscall.Conn interface.
+func (c *IPConn) SyscallConn() (syscall.RawConn, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ return newRawConn(c.fd)
+}
+
+// ReadFromIP acts like ReadFrom but returns an IPAddr.
func (c *IPConn) ReadFromIP(b []byte) (int, *IPAddr, error) {
if !c.ok() {
return 0, nil, syscall.EINVAL
@@ -126,10 +136,13 @@ func (c *IPConn) ReadFrom(b []byte) (int, Addr, error) {
return n, addr, err
}
-// ReadMsgIP reads a packet from c, copying the payload into b and the
-// associated out-of-band data into oob. It returns the number of
+// ReadMsgIP reads a message from c, copying the payload into b and
+// the associated out-of-band data into oob. It returns the number of
// bytes copied into b, the number of bytes copied into oob, the flags
-// that were set on the packet and the source address of the packet.
+// that were set on the message and the source address of the message.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
func (c *IPConn) ReadMsgIP(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err error) {
if !c.ok() {
return 0, 0, 0, nil, syscall.EINVAL
@@ -141,13 +154,7 @@ func (c *IPConn) ReadMsgIP(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err
return
}
-// WriteToIP writes an IP packet to addr via c, copying the payload
-// from b.
-//
-// WriteToIP can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetWriteDeadline. On packet-oriented connections, write timeouts
-// are rare.
+// WriteToIP acts like WriteTo but takes an IPAddr.
func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (int, error) {
if !c.ok() {
return 0, syscall.EINVAL
@@ -175,9 +182,12 @@ func (c *IPConn) WriteTo(b []byte, addr Addr) (int, error) {
return n, err
}
-// WriteMsgIP writes a packet to addr via c, copying the payload from
+// WriteMsgIP writes a message to addr via c, copying the payload from
// b and the associated out-of-band data from oob. It returns the
// number of payload and out-of-band bytes written.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
func (c *IPConn) WriteMsgIP(b, oob []byte, addr *IPAddr) (n, oobn int, err error) {
if !c.ok() {
return 0, 0, syscall.EINVAL
@@ -191,25 +201,32 @@ func (c *IPConn) WriteMsgIP(b, oob []byte, addr *IPAddr) (n, oobn int, err error
func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} }
-// DialIP connects to the remote address raddr on the network protocol
-// netProto, which must be "ip", "ip4", or "ip6" followed by a colon
-// and a protocol number or name.
-func DialIP(netProto string, laddr, raddr *IPAddr) (*IPConn, error) {
- c, err := dialIP(context.Background(), netProto, laddr, raddr)
+// DialIP acts like Dial for IP networks.
+//
+// The network must be an IP network name; see func Dial for details.
+//
+// If laddr is nil, a local address is automatically chosen.
+// If the IP field of raddr is nil or an unspecified IP address, the
+// local system is assumed.
+func DialIP(network string, laddr, raddr *IPAddr) (*IPConn, error) {
+ c, err := dialIP(context.Background(), network, laddr, raddr)
if err != nil {
- return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
return c, nil
}
-// ListenIP listens for incoming IP packets addressed to the local
-// address laddr. The returned connection's ReadFrom and WriteTo
-// methods can be used to receive and send IP packets with per-packet
-// addressing.
-func ListenIP(netProto string, laddr *IPAddr) (*IPConn, error) {
- c, err := listenIP(context.Background(), netProto, laddr)
+// ListenIP acts like ListenPacket for IP networks.
+//
+// The network must be an IP network name; see func Dial for details.
+//
+// If the IP field of laddr is nil or an unspecified IP address,
+// ListenIP listens on all available IP addresses of the local system
+// except multicast IP addresses.
+func ListenIP(network string, laddr *IPAddr) (*IPConn, error) {
+ c, err := listenIP(context.Background(), network, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: netProto, Source: nil, Addr: laddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
}
return c, nil
}
diff --git a/libgo/go/net/iprawsock_posix.go b/libgo/go/net/iprawsock_posix.go
index 16e65dcc2a8..d613e6fb8e5 100644
--- a/libgo/go/net/iprawsock_posix.go
+++ b/libgo/go/net/iprawsock_posix.go
@@ -16,7 +16,7 @@ func sockaddrToIP(sa syscall.Sockaddr) Addr {
case *syscall.SockaddrInet4:
return &IPAddr{IP: sa.Addr[0:]}
case *syscall.SockaddrInet6:
- return &IPAddr{IP: sa.Addr[0:], Zone: zoneToString(int(sa.ZoneId))}
+ return &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
}
return nil
}
@@ -52,7 +52,7 @@ func (c *IPConn) readFrom(b []byte) (int, *IPAddr, error) {
addr = &IPAddr{IP: sa.Addr[0:]}
n = stripIPv4Header(n, b)
case *syscall.SockaddrInet6:
- addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneToString(int(sa.ZoneId))}
+ addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
}
return n, addr, err
}
@@ -79,7 +79,7 @@ func (c *IPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err e
case *syscall.SockaddrInet4:
addr = &IPAddr{IP: sa.Addr[0:]}
case *syscall.SockaddrInet6:
- addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneToString(int(sa.ZoneId))}
+ addr = &IPAddr{IP: sa.Addr[0:], Zone: zoneCache.name(int(sa.ZoneId))}
}
return
}
@@ -113,7 +113,7 @@ func (c *IPConn) writeMsg(b, oob []byte, addr *IPAddr) (n, oobn int, err error)
}
func dialIP(ctx context.Context, netProto string, laddr, raddr *IPAddr) (*IPConn, error) {
- network, proto, err := parseNetwork(ctx, netProto)
+ network, proto, err := parseNetwork(ctx, netProto, true)
if err != nil {
return nil, err
}
@@ -133,7 +133,7 @@ func dialIP(ctx context.Context, netProto string, laddr, raddr *IPAddr) (*IPConn
}
func listenIP(ctx context.Context, netProto string, laddr *IPAddr) (*IPConn, error) {
- network, proto, err := parseNetwork(ctx, netProto)
+ network, proto, err := parseNetwork(ctx, netProto, true)
if err != nil {
return nil, err
}
diff --git a/libgo/go/net/iprawsock_test.go b/libgo/go/net/iprawsock_test.go
index 5d33b26a91e..8972051f5d5 100644
--- a/libgo/go/net/iprawsock_test.go
+++ b/libgo/go/net/iprawsock_test.go
@@ -117,3 +117,75 @@ func TestIPConnRemoteName(t *testing.T) {
t.Fatalf("got %#v; want %#v", c.RemoteAddr(), raddr)
}
}
+
+func TestDialListenIPArgs(t *testing.T) {
+ type test struct {
+ argLists [][2]string
+ shouldFail bool
+ }
+ tests := []test{
+ {
+ argLists: [][2]string{
+ {"ip", "127.0.0.1"},
+ {"ip:", "127.0.0.1"},
+ {"ip::", "127.0.0.1"},
+ {"ip", "::1"},
+ {"ip:", "::1"},
+ {"ip::", "::1"},
+ {"ip4", "127.0.0.1"},
+ {"ip4:", "127.0.0.1"},
+ {"ip4::", "127.0.0.1"},
+ {"ip6", "::1"},
+ {"ip6:", "::1"},
+ {"ip6::", "::1"},
+ },
+ shouldFail: true,
+ },
+ }
+ if testableNetwork("ip") {
+ priv := test{shouldFail: false}
+ for _, tt := range []struct {
+ network, address string
+ args [2]string
+ }{
+ {"ip4:47", "127.0.0.1", [2]string{"ip4:47", "127.0.0.1"}},
+ {"ip6:47", "::1", [2]string{"ip6:47", "::1"}},
+ } {
+ c, err := ListenPacket(tt.network, tt.address)
+ if err != nil {
+ continue
+ }
+ c.Close()
+ priv.argLists = append(priv.argLists, tt.args)
+ }
+ if len(priv.argLists) > 0 {
+ tests = append(tests, priv)
+ }
+ }
+
+ for _, tt := range tests {
+ for _, args := range tt.argLists {
+ _, err := Dial(args[0], args[1])
+ if tt.shouldFail != (err != nil) {
+ t.Errorf("Dial(%q, %q) = %v; want (err != nil) is %t", args[0], args[1], err, tt.shouldFail)
+ }
+ _, err = ListenPacket(args[0], args[1])
+ if tt.shouldFail != (err != nil) {
+ t.Errorf("ListenPacket(%q, %q) = %v; want (err != nil) is %t", args[0], args[1], err, tt.shouldFail)
+ }
+ a, err := ResolveIPAddr("ip", args[1])
+ if err != nil {
+ t.Errorf("ResolveIPAddr(\"ip\", %q) = %v", args[1], err)
+ continue
+ }
+ _, err = DialIP(args[0], nil, a)
+ if tt.shouldFail != (err != nil) {
+ t.Errorf("DialIP(%q, %v) = %v; want (err != nil) is %t", args[0], a, err, tt.shouldFail)
+ }
+ _, err = ListenIP(args[0], a)
+ if tt.shouldFail != (err != nil) {
+ t.Errorf("ListenIP(%q, %v) = %v; want (err != nil) is %t", args[0], a, err, tt.shouldFail)
+ }
+ }
+ }
+}
diff --git a/libgo/go/net/ipsock.go b/libgo/go/net/ipsock.go
index f1394a7ed87..947bdf34897 100644
--- a/libgo/go/net/ipsock.go
+++ b/libgo/go/net/ipsock.go
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Internet protocol family sockets
-
package net
import (
"context"
+ "sync"
)
// BUG(rsc,mikio): On DragonFly BSD and OpenBSD, listening on the
@@ -17,25 +16,41 @@ import (
// both address families are to be supported.
// See inet6(4) for details.
-var (
- // supportsIPv4 reports whether the platform supports IPv4
- // networking functionality.
- supportsIPv4 bool
+type ipStackCapabilities struct {
+ sync.Once // guards following
+ ipv4Enabled bool
+ ipv6Enabled bool
+ ipv4MappedIPv6Enabled bool
+}
- // supportsIPv6 reports whether the platform supports IPv6
- // networking functionality.
- supportsIPv6 bool
+var ipStackCaps ipStackCapabilities
- // supportsIPv4map reports whether the platform supports
- // mapping an IPv4 address inside an IPv6 address at transport
- // layer protocols. See RFC 4291, RFC 4038 and RFC 3493.
- supportsIPv4map bool
-)
+// supportsIPv4 reports whether the platform supports IPv4 networking
+// functionality.
+func supportsIPv4() bool {
+ ipStackCaps.Once.Do(ipStackCaps.probe)
+ return ipStackCaps.ipv4Enabled
+}
+
+// supportsIPv6 reports whether the platform supports IPv6 networking
+// functionality.
+func supportsIPv6() bool {
+ ipStackCaps.Once.Do(ipStackCaps.probe)
+ return ipStackCaps.ipv6Enabled
+}
+
+// supportsIPv4map reports whether the platform supports mapping an
+// IPv4 address inside an IPv6 address at transport layer
+// protocols. See RFC 4291, RFC 4038 and RFC 3493.
+func supportsIPv4map() bool {
+ ipStackCaps.Once.Do(ipStackCaps.probe)
+ return ipStackCaps.ipv4MappedIPv6Enabled
+}
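The variables became functions so that the socket-opening probe runs
lazily, at most once, on first use instead of at package init time.
The sync.Once pattern in isolation (a sketch mirroring
ipStackCapabilities, not the real probe):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // caps mirrors ipStackCapabilities: a sync.Once guarding probed flags.
    type caps struct {
    	sync.Once
    	ipv6 bool
    }

    var stackCaps caps

    func (c *caps) probe() {
    	// Stand-in for the real probe, which opens test sockets.
    	c.ipv6 = true
    }

    func supportsIPv6() bool {
    	stackCaps.Once.Do(stackCaps.probe)
    	return stackCaps.ipv6
    }

    func main() {
    	fmt.Println(supportsIPv6()) // probe runs here, exactly once
    	fmt.Println(supportsIPv6()) // cached
    }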
// An addrList represents a list of network endpoint addresses.
type addrList []Addr
-// isIPv4 returns true if the Addr contains an IPv4 address.
+// isIPv4 reports whether addr contains an IPv4 address.
func isIPv4(addr Addr) bool {
switch addr := addr.(type) {
case *TCPAddr:
@@ -48,6 +63,28 @@ func isIPv4(addr Addr) bool {
return false
}
+// isNotIPv4 reports whether addr does not contain an IPv4 address.
+func isNotIPv4(addr Addr) bool { return !isIPv4(addr) }
+
+// forResolve returns the most appropriate address in address for
+// a call to ResolveTCPAddr, ResolveUDPAddr, or ResolveIPAddr.
+// IPv4 is preferred, unless addr contains an IPv6 literal.
+func (addrs addrList) forResolve(network, addr string) Addr {
+ var want6 bool
+ switch network {
+ case "ip":
+ // IPv6 literal (addr does NOT contain a port)
+ want6 = count(addr, ':') > 0
+ case "tcp", "udp":
+ // IPv6 literal (addr contains a port, so look for '[')
+ want6 = count(addr, '[') > 0
+ }
+ if want6 {
+ return addrs.first(isNotIPv4)
+ }
+ return addrs.first(isIPv4)
+}
+
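forResolve's heuristic is visible through the public resolvers: a ':'
(for "ip") or a '[' (for "tcp"/"udp") in the address marks an IPv6
literal and flips the preference from IPv4-first to IPv6-first. For
example:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// '[' in the address signals an IPv6 literal, so the v6 candidate wins.
    	a6, err := net.ResolveTCPAddr("tcp", "[::1]:80")
    	if err == nil {
    		fmt.Println(a6) // [::1]:80
    	}
    	// A plain name still prefers the first IPv4 address, when one exists.
    	a4, err := net.ResolveTCPAddr("tcp", "localhost:80")
    	if err == nil {
    		fmt.Println(a4) // typically 127.0.0.1:80
    	}
    }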
// first returns the first address which satisfies strategy, or if
// none do, then the first address of any kind.
func (addrs addrList) first(strategy func(Addr) bool) Addr {
@@ -107,10 +144,14 @@ func ipv6only(addr IPAddr) bool {
}
// SplitHostPort splits a network address of the form "host:port",
-// "[host]:port" or "[ipv6-host%zone]:port" into host or
-// ipv6-host%zone and port. A literal address or host name for IPv6
-// must be enclosed in square brackets, as in "[::1]:80",
-// "[ipv6-host]:http" or "[ipv6-host%zone]:80".
+// "host%zone:port", "[host]:port" or "[host%zone]:port" into host or
+// host%zone and port.
+//
+// A literal IPv6 address in hostport must be enclosed in square
+// brackets, as in "[::1]:80", "[::1%lo0]:80".
+//
+// See func Dial for a description of the hostport parameter, and host
+// and port results.
func SplitHostPort(hostport string) (host, port string, err error) {
const (
missingPort = "missing port in address"
@@ -154,9 +195,6 @@ func SplitHostPort(hostport string) (host, port string, err error) {
if byteIndex(host, ':') >= 0 {
return addrErr(hostport, tooManyColons)
}
- if byteIndex(host, '%') >= 0 {
- return addrErr(hostport, "missing brackets in address")
- }
}
if byteIndex(hostport[j:], '[') >= 0 {
return addrErr(hostport, "unexpected '[' in address")
@@ -181,11 +219,14 @@ func splitHostZone(s string) (host, zone string) {
}
// JoinHostPort combines host and port into a network address of the
-// form "host:port" or, if host contains a colon or a percent sign,
-// "[host]:port".
+// form "host:port". If host contains a colon, as found in literal
+// IPv6 addresses, then JoinHostPort returns "[host]:port".
+//
+// See func Dial for a description of the host and port parameters.
func JoinHostPort(host, port string) string {
- // If host has colons or a percent sign, have to bracket it.
- if byteIndex(host, ':') >= 0 || byteIndex(host, '%') >= 0 {
+ // We assume that host is a literal IPv6 address if host has
+ // colons.
+ if byteIndex(host, ':') >= 0 {
return "[" + host + "]:" + port
}
return host + ":" + port
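
Taken together, the two doc changes describe symmetric behavior that is easy to check from the public API: SplitHostPort no longer rejects a percent sign outside brackets, and JoinHostPort brackets only hosts containing a colon. A small usage sketch:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, in := range []string{"[::1]:80", "[fe80::1%lo0]:80", "example.com:https"} {
		host, port, err := net.SplitHostPort(in)
		fmt.Printf("%q -> host=%q port=%q err=%v\n", in, host, port, err)
	}
	fmt.Println(net.JoinHostPort("::1", "80"))         // [::1]:80
	fmt.Println(net.JoinHostPort("example.com", "80")) // example.com:80
}
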
@@ -240,6 +281,13 @@ func (r *Resolver) internetAddrList(ctx context.Context, net, addr string) (addr
ips = []IPAddr{{IP: ip}}
} else if ip, zone := parseIPv6(host, true); ip != nil {
ips = []IPAddr{{IP: ip, Zone: zone}}
+ // Issue 18806: if the machine has halfway configured
+ // IPv6 such that it can bind on "::" (IPv6unspecified)
+ // but not connect back to that same address, fall
+ // back to dialing 0.0.0.0.
+ if ip.Equal(IPv6unspecified) {
+ ips = append(ips, IPAddr{IP: IPv4zero})
+ }
} else {
// Try as a DNS name.
ips, err = r.LookupIPAddr(ctx, host)
diff --git a/libgo/go/net/ipsock_plan9.go b/libgo/go/net/ipsock_plan9.go
index b7fd344c8ad..312e4adb47d 100644
--- a/libgo/go/net/ipsock_plan9.go
+++ b/libgo/go/net/ipsock_plan9.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Internet protocol family sockets for Plan 9
-
package net
import (
@@ -12,12 +10,25 @@ import (
"syscall"
)
+// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
+// capabilities.
+//
+// Plan 9 uses IPv6 natively, see ip(3).
+func (p *ipStackCapabilities) probe() {
+ p.ipv4Enabled = probe(netdir+"/iproute", "4i")
+ p.ipv6Enabled = probe(netdir+"/iproute", "6i")
+ if p.ipv4Enabled && p.ipv6Enabled {
+ p.ipv4MappedIPv6Enabled = true
+ }
+}
+
func probe(filename, query string) bool {
var file *file
var err error
if file, err = open(filename); err != nil {
return false
}
+ defer file.close()
r := false
for line, ok := file.readLine(); ok && !r; line, ok = file.readLine() {
@@ -32,27 +43,9 @@ func probe(filename, query string) bool {
}
}
}
- file.close()
return r
}
-func probeIPv4Stack() bool {
- return probe(netdir+"/iproute", "4i")
-}
-
-// probeIPv6Stack returns two boolean values. If the first boolean
-// value is true, kernel supports basic IPv6 functionality. If the
-// second boolean value is true, kernel supports IPv6 IPv4-mapping.
-func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
- // Plan 9 uses IPv6 natively, see ip(3).
- r := probe(netdir+"/iproute", "6i")
- v := false
- if r {
- v = probe(netdir+"/iproute", "4i")
- }
- return r, v
-}
-
// parsePlan9Addr parses address of the form [ip!]port (e.g. 127.0.0.1!80).
func parsePlan9Addr(s string) (ip IP, iport int, err error) {
addr := IPv4zero // address contains port only
@@ -249,10 +242,10 @@ func (fd *netFD) netFD() (*netFD, error) {
func (fd *netFD) acceptPlan9() (nfd *netFD, err error) {
defer func() { fixErr(err) }()
- if err := fd.readLock(); err != nil {
+ if err := fd.pfd.ReadLock(); err != nil {
return nil, err
}
- defer fd.readUnlock()
+ defer fd.pfd.ReadUnlock()
listen, err := os.Open(fd.dir + "/listen")
if err != nil {
return nil, err
diff --git a/libgo/go/net/ipsock_posix.go b/libgo/go/net/ipsock_posix.go
index 05bf93956bf..4b4363a47a1 100644
--- a/libgo/go/net/ipsock_posix.go
+++ b/libgo/go/net/ipsock_posix.go
@@ -8,35 +8,29 @@ package net
import (
"context"
+ "internal/poll"
"runtime"
"syscall"
)
-func probeIPv4Stack() bool {
+// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
+// capabilities which are controlled by the IPV6_V6ONLY socket option
+// and kernel configuration.
+//
+// Should we try to use the IPv4 socket interface if we're only
+// dealing with IPv4 sockets? As long as the host system understands
+// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to
+// the IPv6 interface. That simplifies our code and is most
+// general. Unfortunately, we need to run on kernels built without
+// IPv6 support too. So probe the kernel to figure it out.
+func (p *ipStackCapabilities) probe() {
s, err := socketFunc(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
switch err {
case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
- return false
case nil:
- closeFunc(s)
+ poll.CloseFunc(s)
+ p.ipv4Enabled = true
}
- return true
-}
-
-// Should we try to use the IPv4 socket interface if we're
-// only dealing with IPv4 sockets? As long as the host system
-// understands IPv6, it's okay to pass IPv4 addresses to the IPv6
-// interface. That simplifies our code and is most general.
-// Unfortunately, we need to run on kernels built without IPv6
-// support too. So probe the kernel to figure it out.
-//
-// probeIPv6Stack probes both basic IPv6 capability and IPv6 IPv4-
-// mapping capability which is controlled by IPV6_V6ONLY socket
-// option and/or kernel state "net.inet6.ip6.v6only".
-// It returns two boolean values. If the first boolean value is
-// true, kernel supports basic IPv6 functionality. If the second
-// boolean value is true, kernel supports IPv6 IPv4-mapping.
-func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
var probes = []struct {
laddr TCPAddr
value int
@@ -46,29 +40,19 @@ func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
// IPv4-mapped IPv6 address communication capability
{laddr: TCPAddr{IP: IPv4(127, 0, 0, 1)}, value: 0},
}
- var supps [2]bool
switch runtime.GOOS {
case "dragonfly", "openbsd":
- // Some released versions of DragonFly BSD pretend to
- // accept IPV6_V6ONLY=0 successfully, but the state
- // still stays IPV6_V6ONLY=1. Eventually DragonFly BSD
- // stops pretending, but the transition period would
- // cause unpredictable behavior and we need to avoid
- // it.
- //
- // OpenBSD also doesn't support IPV6_V6ONLY=0 but it
- // never pretends to accept IPV6_V6OLY=0. It always
- // returns an error and we don't need to probe the
- // capability.
+ // The latest DragonFly BSD and OpenBSD kernels don't
+ // support IPV6_V6ONLY=0. They always return an error
+ // and we don't need to probe the capability.
probes = probes[:1]
}
-
for i := range probes {
s, err := socketFunc(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
if err != nil {
continue
}
- defer closeFunc(s)
+ defer poll.CloseFunc(s)
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value)
sa, err := probes[i].laddr.sockaddr(syscall.AF_INET6)
if err != nil {
@@ -77,51 +61,55 @@ func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
if err := syscall.Bind(s, sa); err != nil {
continue
}
- supps[i] = true
+ if i == 0 {
+ p.ipv6Enabled = true
+ } else {
+ p.ipv4MappedIPv6Enabled = true
+ }
}
-
- return supps[0], supps[1]
}
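
A simplified, Unix-only sketch of the second probe entry above: create an AF_INET6 socket, request IPV6_V6ONLY=0, and see whether an IPv4-mapped IPv6 address binds. Error handling is reduced to the bare minimum:

// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package main

import (
	"fmt"
	"syscall"
)

func probeIPv4MappedIPv6() bool {
	s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
	if err != nil {
		return false
	}
	defer syscall.Close(s)
	// As in the patch, the setsockopt result is ignored; the bind decides.
	syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
	sa := &syscall.SockaddrInet6{} // port 0: the kernel picks one
	// ::ffff:127.0.0.1, the IPv4-mapped form of the IPv4 loopback.
	copy(sa.Addr[:], []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 127, 0, 0, 1})
	return syscall.Bind(s, sa) == nil
}

func main() {
	fmt.Println("IPv4-mapped IPv6:", probeIPv4MappedIPv6())
}
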
-// favoriteAddrFamily returns the appropriate address family to
-// the given net, laddr, raddr and mode. At first it figures
-// address family out from the net. If mode indicates "listen"
-// and laddr is a wildcard, it assumes that the user wants to
-// make a passive connection with a wildcard address family, both
-// AF_INET and AF_INET6, and a wildcard address like following:
+// favoriteAddrFamily returns the appropriate address family for the
+// given network, laddr, raddr and mode.
+//
+// If mode indicates "listen" and laddr is a wildcard, we assume that
+// the user wants to make a passive-open connection with a wildcard
+// address family, both AF_INET and AF_INET6, and a wildcard address
+// like the following:
//
-// 1. A wild-wild listen, "tcp" + ""
-// If the platform supports both IPv6 and IPv6 IPv4-mapping
-// capabilities, or does not support IPv4, we assume that
-// the user wants to listen on both IPv4 and IPv6 wildcard
-// addresses over an AF_INET6 socket with IPV6_V6ONLY=0.
-// Otherwise we prefer an IPv4 wildcard address listen over
-// an AF_INET socket.
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with a wildcard address: If the platform supports
+// both IPv6 and IPv4-mapped IPv6 communication capabilities,
+// or does not support IPv4, we use a dual stack, AF_INET6 and
+// IPV6_V6ONLY=0, wildcard address listen. The dual stack
+// wildcard address listen may fall back to an IPv6-only,
+// AF_INET6 and IPV6_V6ONLY=1, wildcard address listen.
+// Otherwise we prefer an IPv4-only, AF_INET, wildcard address
+// listen.
//
-// 2. A wild-ipv4wild listen, "tcp" + "0.0.0.0"
-// Same as 1.
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with an IPv4 wildcard address: same as above.
//
-// 3. A wild-ipv6wild listen, "tcp" + "[::]"
-// Almost same as 1 but we prefer an IPv6 wildcard address
-// listen over an AF_INET6 socket with IPV6_V6ONLY=0 when
-// the platform supports IPv6 capability but not IPv6 IPv4-
-// mapping capability.
+// - A listen for a wildcard communication domain, "tcp" or
+// "udp", with an IPv6 wildcard address: same as above.
//
-// 4. A ipv4-ipv4wild listen, "tcp4" + "" or "0.0.0.0"
-// We use an IPv4 (AF_INET) wildcard address listen.
+// - A listen for an IPv4 communication domain, "tcp4" or "udp4",
+// with an IPv4 wildcard address: We use an IPv4-only, AF_INET,
+// wildcard address listen.
//
-// 5. A ipv6-ipv6wild listen, "tcp6" + "" or "[::]"
-// We use an IPv6 (AF_INET6, IPV6_V6ONLY=1) wildcard address
-// listen.
+// - A listen for an IPv6 communication domain, "tcp6" or "udp6",
+// with an IPv6 wildcard address: We use an IPv6-only, AF_INET6
+// and IPV6_V6ONLY=1, wildcard address listen.
//
-// Otherwise guess: if the addresses are IPv4 then returns AF_INET,
-// or else returns AF_INET6. It also returns a boolean value that
+// Otherwise guess: If the addresses are IPv4 then returns AF_INET,
+// or else returns AF_INET6. It also returns a boolean value that
// designates the IPV6_V6ONLY option.
//
-// Note that OpenBSD allows neither "net.inet6.ip6.v6only=1" change
-// nor IPPROTO_IPV6 level IPV6_V6ONLY socket option setting.
-func favoriteAddrFamily(net string, laddr, raddr sockaddr, mode string) (family int, ipv6only bool) {
- switch net[len(net)-1] {
+// Note that the latest DragonFly BSD and OpenBSD kernels allow
+// neither "net.inet6.ip6.v6only=1" change nor IPPROTO_IPV6 level
+// IPV6_V6ONLY socket option setting.
+func favoriteAddrFamily(network string, laddr, raddr sockaddr, mode string) (family int, ipv6only bool) {
+ switch network[len(network)-1] {
case '4':
return syscall.AF_INET, false
case '6':
@@ -129,7 +117,7 @@ func favoriteAddrFamily(net string, laddr, raddr sockaddr, mode string) (family
}
if mode == "listen" && (laddr == nil || laddr.isWildcard()) {
- if supportsIPv4map || !supportsIPv4 {
+ if supportsIPv4map() || !supportsIPv4() {
return syscall.AF_INET6, false
}
if laddr == nil {
@@ -145,7 +133,6 @@ func favoriteAddrFamily(net string, laddr, raddr sockaddr, mode string) (family
return syscall.AF_INET6, false
}
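
In practice these rules mean that on a dual-stack host a wildcard Listen yields one AF_INET6 socket reachable over both protocols, while a "tcp4" or "tcp6" network pins the family. A hedged illustration; which listens succeed depends on the platform:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Wildcard network and address: dual stack (AF_INET6, IPV6_V6ONLY=0)
	// where supported, otherwise IPv4-only or IPv6-only.
	if ln, err := net.Listen("tcp", ":0"); err == nil {
		fmt.Println("tcp ->", ln.Addr()) // e.g. [::]:54321
		ln.Close()
	}
	if ln, err := net.Listen("tcp4", ":0"); err == nil {
		fmt.Println("tcp4 ->", ln.Addr()) // e.g. 0.0.0.0:54322
		ln.Close()
	}
	if ln, err := net.Listen("tcp6", ":0"); err == nil {
		fmt.Println("tcp6 ->", ln.Addr()) // IPv6-only (IPV6_V6ONLY=1)
		ln.Close()
	}
}
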
-// Internet sockets (TCP, UDP, IP)
func internetSocket(ctx context.Context, net string, laddr, raddr sockaddr, sotype, proto int, mode string) (fd *netFD, err error) {
if (runtime.GOOS == "windows" || runtime.GOOS == "openbsd" || runtime.GOOS == "nacl") && mode == "dial" && raddr.isWildcard() {
raddr = raddr.toLocal(net)
@@ -187,7 +174,7 @@ func ipToSockaddr(family int, ip IP, port int, zone string) (syscall.Sockaddr, e
if ip6 == nil {
return nil, &AddrError{Err: "non-IPv6 address", Addr: ip.String()}
}
- sa := &syscall.SockaddrInet6{Port: port, ZoneId: uint32(zoneToInt(zone))}
+ sa := &syscall.SockaddrInet6{Port: port, ZoneId: uint32(zoneCache.index(zone))}
copy(sa.Addr[:], ip6)
return sa, nil
}
diff --git a/libgo/go/net/ipsock_test.go b/libgo/go/net/ipsock_test.go
index 1d0f00ff5ee..aede3548447 100644
--- a/libgo/go/net/ipsock_test.go
+++ b/libgo/go/net/ipsock_test.go
@@ -215,7 +215,7 @@ var addrListTests = []struct {
}
func TestAddrList(t *testing.T) {
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
diff --git a/libgo/go/net/listen_test.go b/libgo/go/net/listen_test.go
index 6037f3600df..21ad4462f68 100644
--- a/libgo/go/net/listen_test.go
+++ b/libgo/go/net/listen_test.go
@@ -225,7 +225,7 @@ func TestDualStackTCPListener(t *testing.T) {
case "nacl", "plan9":
t.Skipf("not supported on %s", runtime.GOOS)
}
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -235,7 +235,7 @@ func TestDualStackTCPListener(t *testing.T) {
continue
}
- if !supportsIPv4map && differentWildcardAddr(tt.address1, tt.address2) {
+ if !supportsIPv4map() && differentWildcardAddr(tt.address1, tt.address2) {
tt.xerr = nil
}
var firstErr, secondErr error
@@ -315,7 +315,7 @@ func TestDualStackUDPListener(t *testing.T) {
case "nacl", "plan9":
t.Skipf("not supported on %s", runtime.GOOS)
}
- if !supportsIPv4 || !supportsIPv6 {
+ if !supportsIPv4() || !supportsIPv6() {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -325,7 +325,7 @@ func TestDualStackUDPListener(t *testing.T) {
continue
}
- if !supportsIPv4map && differentWildcardAddr(tt.address1, tt.address2) {
+ if !supportsIPv4map() && differentWildcardAddr(tt.address1, tt.address2) {
tt.xerr = nil
}
var firstErr, secondErr error
@@ -454,7 +454,7 @@ func checkDualStackAddrFamily(fd *netFD) error {
// and IPv6 IPv4-mapping capability, we can assume
// that the node listens on a wildcard address with an
// AF_INET6 socket.
- if supportsIPv4map && fd.laddr.(*TCPAddr).isWildcard() {
+ if supportsIPv4map() && fd.laddr.(*TCPAddr).isWildcard() {
if fd.family != syscall.AF_INET6 {
return fmt.Errorf("Listen(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, syscall.AF_INET6)
}
@@ -468,7 +468,7 @@ func checkDualStackAddrFamily(fd *netFD) error {
// and IPv6 IPv4-mapping capability, we can assume
// that the node listens on a wildcard address with an
// AF_INET6 socket.
- if supportsIPv4map && fd.laddr.(*UDPAddr).isWildcard() {
+ if supportsIPv4map() && fd.laddr.(*UDPAddr).isWildcard() {
if fd.family != syscall.AF_INET6 {
return fmt.Errorf("ListenPacket(%s, %v) returns %v; want %v", fd.net, fd.laddr, fd.family, syscall.AF_INET6)
}
@@ -535,7 +535,7 @@ func TestIPv4MulticastListener(t *testing.T) {
case "solaris":
t.Skipf("not supported on solaris, see golang.org/issue/7399")
}
- if !supportsIPv4 {
+ if !supportsIPv4() {
t.Skip("IPv4 is not supported")
}
@@ -610,7 +610,7 @@ func TestIPv6MulticastListener(t *testing.T) {
case "solaris":
t.Skipf("not supported on solaris, see issue 7399")
}
- if !supportsIPv6 {
+ if !supportsIPv6() {
t.Skip("IPv6 is not supported")
}
if os.Getuid() != 0 {
diff --git a/libgo/go/net/lookup.go b/libgo/go/net/lookup.go
index cc2013e4325..c9f327050af 100644
--- a/libgo/go/net/lookup.go
+++ b/libgo/go/net/lookup.go
@@ -28,6 +28,9 @@ var protocols = map[string]int{
// services contains minimal mappings between services names and port
// numbers for platforms that don't have a complete list of port numbers
// (some Solaris distros, nacl, etc).
+//
+// See https://www.iana.org/assignments/service-names-port-numbers
+//
// On Unix, this map is augmented by readServices via goLookupPort.
var services = map[string]map[string]int{
"udp": {
@@ -63,7 +66,12 @@ func lookupProtocolMap(name string) (int, error) {
return proto, nil
}
-const maxServiceLength = len("mobility-header") + 10 // with room to grow
+// maxPortBufSize is the longest reasonable name of a service
+// (non-numeric port).
+// Currently the longest known IANA-unregistered name is
+// "mobility-header", so we use that length, plus some slop in case
+// something longer is added in the future.
+const maxPortBufSize = len("mobility-header") + 10
func lookupPortMap(network, service string) (port int, error error) {
switch network {
@@ -74,7 +82,7 @@ func lookupPortMap(network, service string) (port int, error error) {
}
if m, ok := services[network]; ok {
- var lowerService [maxServiceLength]byte
+ var lowerService [maxPortBufSize]byte
n := copy(lowerService[:], service)
lowerASCIIBytes(lowerService[:n])
if port, ok := m[string(lowerService[:n])]; ok && n == len(service) {
@@ -97,6 +105,29 @@ type Resolver struct {
// GODEBUG=netdns=go, but scoped to just this resolver.
PreferGo bool
+ // StrictErrors controls the behavior of temporary errors
+ // (including timeout, socket errors, and SERVFAIL) when using
+ // Go's built-in resolver. For a query composed of multiple
+ // sub-queries (such as an A+AAAA address lookup, or walking the
+ // DNS search list), this option causes such errors to abort the
+ // whole query instead of returning a partial result. This is
+ // not enabled by default because it may affect compatibility
+ // with resolvers that process AAAA queries incorrectly.
+ StrictErrors bool
+
+ // Dial optionally specifies an alternate dialer for use by
+ // Go's built-in DNS resolver to make TCP and UDP connections
+ // to DNS services. The host in the address parameter will
+ // always be a literal IP address and not a host name, and the
+ // port in the address parameter will be a literal port number
+ // and not a service name.
+ // If the Conn returned is also a PacketConn, sent and received DNS
+ // messages must adhere to RFC 1035 section 4.2.1, "UDP usage".
+ // Otherwise, DNS messages transmitted over Conn must adhere
+ // to RFC 7766 section 5, "Transport Protocol Selection".
+ // If nil, the default dialer is used.
+ Dial func(ctx context.Context, network, address string) (Conn, error)
+
// TODO(bradfitz): optional interface impl override hook
// TODO(bradfitz): Timeout time.Duration?
}
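
A minimal sketch of the new Dial hook, pinning the built-in resolver to one DNS server; the server address 8.8.8.8:53 is only an example:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	r := &net.Resolver{
		PreferGo:     true, // use the Go resolver so Dial is honored
		StrictErrors: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			// address is always a literal IP:port; override it here
			// to send every query to one server (example choice).
			d := net.Dialer{Timeout: 5 * time.Second}
			return d.DialContext(ctx, network, "8.8.8.8:53")
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	addrs, err := r.LookupIPAddr(ctx, "golang.org")
	fmt.Println(addrs, err)
}
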
@@ -164,12 +195,15 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]IPAddr, err
select {
case <-ctx.Done():
- // The DNS lookup timed out for some reason. Force
+ // If the DNS lookup timed out for some reason, force
// future requests to start the DNS lookup again
// rather than waiting for the current lookup to
// complete. See issue 8602.
- err := mapErr(ctx.Err())
- lookupGroup.Forget(host)
+ ctxErr := ctx.Err()
+ if ctxErr == context.DeadlineExceeded {
+ lookupGroup.Forget(host)
+ }
+ err := mapErr(ctxErr)
if trace != nil && trace.DNSDone != nil {
trace.DNSDone(nil, false, err)
}
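
The same Forget-on-deadline pattern can be reproduced with the exported singleflight package (golang.org/x/sync/singleflight); the slow lookup body below is a stand-in for the real DNS query:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/singleflight"
)

var group singleflight.Group

func lookup(ctx context.Context, host string) (interface{}, error) {
	ch := group.DoChan(host, func() (interface{}, error) {
		time.Sleep(2 * time.Second) // stand-in for a slow resolver
		return []string{"192.0.2.1"}, nil
	})
	select {
	case res := <-ch:
		return res.Val, res.Err
	case <-ctx.Done():
		// As in the patch: drop the in-flight key only on deadline,
		// so future callers retry instead of joining a stuck lookup.
		if ctx.Err() == context.DeadlineExceeded {
			group.Forget(host)
		}
		return nil, ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(lookup(ctx, "example.com"))
}
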
diff --git a/libgo/go/net/lookup_test.go b/libgo/go/net/lookup_test.go
index 36db56acd03..68a7abe95df 100644
--- a/libgo/go/net/lookup_test.go
+++ b/libgo/go/net/lookup_test.go
@@ -63,7 +63,7 @@ func TestLookupGoogleSRV(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -99,7 +99,7 @@ func TestLookupGmailMX(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -131,7 +131,7 @@ func TestLookupGmailNS(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -164,7 +164,7 @@ func TestLookupGmailTXT(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -199,7 +199,7 @@ func TestLookupGooglePublicDNSAddr(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !supportsIPv6 || !*testIPv4 || !*testIPv6 {
+ if !supportsIPv4() || !supportsIPv6() || !*testIPv4 || !*testIPv6 {
t.Skip("both IPv4 and IPv6 are required")
}
@@ -220,7 +220,7 @@ func TestLookupGooglePublicDNSAddr(t *testing.T) {
}
func TestLookupIPv6LinkLocalAddr(t *testing.T) {
- if !supportsIPv6 || !*testIPv6 {
+ if !supportsIPv6() || !*testIPv6 {
t.Skip("IPv6 is required")
}
@@ -256,7 +256,7 @@ func TestLookupCNAME(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -283,7 +283,7 @@ func TestLookupGoogleHost(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -315,7 +315,7 @@ func TestLookupGoogleIP(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -450,7 +450,7 @@ func TestDNSFlood(t *testing.T) {
}
func TestLookupDotsWithLocalSource(t *testing.T) {
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
@@ -499,7 +499,7 @@ func TestLookupDotsWithRemoteSource(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
}
- if !supportsIPv4 || !*testIPv4 {
+ if !supportsIPv4() || !*testIPv4 {
t.Skip("IPv4 is required")
}
diff --git a/libgo/go/net/lookup_unix.go b/libgo/go/net/lookup_unix.go
index f96c8beb614..2813f14be74 100644
--- a/libgo/go/net/lookup_unix.go
+++ b/libgo/go/net/lookup_unix.go
@@ -16,28 +16,31 @@ var onceReadProtocols sync.Once
// readProtocols loads contents of /etc/protocols into protocols map
// for quick access.
func readProtocols() {
- if file, err := open("/etc/protocols"); err == nil {
- for line, ok := file.readLine(); ok; line, ok = file.readLine() {
- // tcp 6 TCP # transmission control protocol
- if i := byteIndex(line, '#'); i >= 0 {
- line = line[0:i]
- }
- f := getFields(line)
- if len(f) < 2 {
- continue
+ file, err := open("/etc/protocols")
+ if err != nil {
+ return
+ }
+ defer file.close()
+
+ for line, ok := file.readLine(); ok; line, ok = file.readLine() {
+ // tcp 6 TCP # transmission control protocol
+ if i := byteIndex(line, '#'); i >= 0 {
+ line = line[0:i]
+ }
+ f := getFields(line)
+ if len(f) < 2 {
+ continue
+ }
+ if proto, _, ok := dtoi(f[1]); ok {
+ if _, ok := protocols[f[0]]; !ok {
+ protocols[f[0]] = proto
}
- if proto, _, ok := dtoi(f[1]); ok {
- if _, ok := protocols[f[0]]; !ok {
- protocols[f[0]] = proto
- }
- for _, alias := range f[2:] {
- if _, ok := protocols[alias]; !ok {
- protocols[alias] = proto
- }
+ for _, alias := range f[2:] {
+ if _, ok := protocols[alias]; !ok {
+ protocols[alias] = proto
}
}
}
- file.close()
}
}
@@ -48,6 +51,29 @@ func lookupProtocol(_ context.Context, name string) (int, error) {
return lookupProtocolMap(name)
}
+func (r *Resolver) dial(ctx context.Context, network, server string) (dnsConn, error) {
+ // Calling Dial here is scary -- we have to be sure not to
+ // dial a name that will require a DNS lookup, or Dial will
+ // call back here to translate it. The DNS config parser has
+ // already checked that all the cfg.servers are IP
+ // addresses, which Dial will use without a DNS lookup.
+ var c Conn
+ var err error
+ if r.Dial != nil {
+ c, err = r.Dial(ctx, network, server)
+ } else {
+ var d Dialer
+ c, err = d.DialContext(ctx, network, server)
+ }
+ if err != nil {
+ return nil, mapErr(err)
+ }
+ if _, ok := c.(PacketConn); ok {
+ return &dnsPacketConn{c}, nil
+ }
+ return &dnsStreamConn{c}, nil
+}
+
func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string, err error) {
order := systemConf().hostLookupOrder(host)
if !r.PreferGo && order == hostLookupCgo {
@@ -57,12 +83,12 @@ func (r *Resolver) lookupHost(ctx context.Context, host string) (addrs []string,
// cgo not available (or netgo); fall back to Go's DNS resolver
order = hostLookupFilesDNS
}
- return goLookupHostOrder(ctx, host, order)
+ return r.goLookupHostOrder(ctx, host, order)
}
func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, err error) {
if r.PreferGo {
- return goLookupIP(ctx, host)
+ return r.goLookupIP(ctx, host)
}
order := systemConf().hostLookupOrder(host)
if order == hostLookupCgo {
@@ -72,7 +98,7 @@ func (r *Resolver) lookupIP(ctx context.Context, host string) (addrs []IPAddr, e
// cgo not available (or netgo); fall back to Go's DNS resolver
order = hostLookupFilesDNS
}
- addrs, _, err = goLookupIPCNAMEOrder(ctx, host, order)
+ addrs, _, err = r.goLookupIPCNAMEOrder(ctx, host, order)
return
}
@@ -98,17 +124,17 @@ func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error)
return cname, err
}
}
- return goLookupCNAME(ctx, name)
+ return r.goLookupCNAME(ctx, name)
}
-func (*Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
+func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) (string, []*SRV, error) {
var target string
if service == "" && proto == "" {
target = name
} else {
target = "_" + service + "._" + proto + "." + name
}
- cname, rrs, err := lookup(ctx, target, dnsTypeSRV)
+ cname, rrs, err := r.lookup(ctx, target, dnsTypeSRV)
if err != nil {
return "", nil, err
}
@@ -121,8 +147,8 @@ func (*Resolver) lookupSRV(ctx context.Context, service, proto, name string) (st
return cname, srvs, nil
}
-func (*Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
- _, rrs, err := lookup(ctx, name, dnsTypeMX)
+func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
+ _, rrs, err := r.lookup(ctx, name, dnsTypeMX)
if err != nil {
return nil, err
}
@@ -135,8 +161,8 @@ func (*Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) {
return mxs, nil
}
-func (*Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
- _, rrs, err := lookup(ctx, name, dnsTypeNS)
+func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
+ _, rrs, err := r.lookup(ctx, name, dnsTypeNS)
if err != nil {
return nil, err
}
@@ -148,7 +174,7 @@ func (*Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) {
}
func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) {
- _, rrs, err := lookup(ctx, name, dnsTypeTXT)
+ _, rrs, err := r.lookup(ctx, name, dnsTypeTXT)
if err != nil {
return nil, err
}
@@ -165,5 +191,5 @@ func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error
return ptrs, err
}
}
- return goLookupPTR(ctx, addr)
+ return r.goLookupPTR(ctx, addr)
}
diff --git a/libgo/go/net/lookup_windows.go b/libgo/go/net/lookup_windows.go
index 5808293058a..0036d89d150 100644
--- a/libgo/go/net/lookup_windows.go
+++ b/libgo/go/net/lookup_windows.go
@@ -107,7 +107,7 @@ func (r *Resolver) lookupIP(ctx context.Context, name string) ([]IPAddr, error)
addrs = append(addrs, IPAddr{IP: IPv4(a[0], a[1], a[2], a[3])})
case syscall.AF_INET6:
a := (*syscall.RawSockaddrInet6)(addr).Addr
- zone := zoneToString(int((*syscall.RawSockaddrInet6)(addr).Scope_id))
+ zone := zoneCache.name(int((*syscall.RawSockaddrInet6)(addr).Scope_id))
addrs = append(addrs, IPAddr{IP: IP{a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14], a[15]}, Zone: zone})
default:
ch <- ret{err: &DNSError{Err: syscall.EWINDOWS.Error(), Name: name}}
diff --git a/libgo/go/net/mail/message.go b/libgo/go/net/mail/message.go
index 702b765c349..45a995ec720 100644
--- a/libgo/go/net/mail/message.go
+++ b/libgo/go/net/mail/message.go
@@ -49,7 +49,7 @@ type Message struct {
// ReadMessage reads a message from r.
// The headers are parsed, and the body of the message will be available
-// for reading from r.
+// for reading from msg.Body.
func ReadMessage(r io.Reader) (msg *Message, err error) {
tp := textproto.NewReader(bufio.NewReader(r))
@@ -387,13 +387,15 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
debug.Printf("consumePhrase: [%s]", p.s)
// phrase = 1*word
var words []string
+ var isPrevEncoded bool
for {
// word = atom / quoted-string
var word string
p.skipSpace()
if p.empty() {
- return "", errors.New("mail: missing phrase")
+ break
}
+ isEncoded := false
if p.peek() == '"' {
// quoted-string
word, err = p.consumeQuotedString()
@@ -403,7 +405,7 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
// than what RFC 5322 specifies.
word, err = p.consumeAtom(true, true)
if err == nil {
- word, err = p.decodeRFC2047Word(word)
+ word, isEncoded, err = p.decodeRFC2047Word(word)
}
}
@@ -411,7 +413,12 @@ func (p *addrParser) consumePhrase() (phrase string, err error) {
break
}
debug.Printf("consumePhrase: consumed %q", word)
- words = append(words, word)
+ if isPrevEncoded && isEncoded {
+ words[len(words)-1] += word
+ } else {
+ words = append(words, word)
+ }
+ isPrevEncoded = isEncoded
}
// Ignore any error if we got at least one word.
if err != nil && len(words) == 0 {
@@ -540,22 +547,23 @@ func (p *addrParser) len() int {
return len(p.s)
}
-func (p *addrParser) decodeRFC2047Word(s string) (string, error) {
+func (p *addrParser) decodeRFC2047Word(s string) (word string, isEncoded bool, err error) {
if p.dec != nil {
- return p.dec.DecodeHeader(s)
+ word, err = p.dec.Decode(s)
+ } else {
+ word, err = rfc2047Decoder.Decode(s)
}
- dec, err := rfc2047Decoder.Decode(s)
if err == nil {
- return dec, nil
+ return word, true, nil
}
if _, ok := err.(charsetError); ok {
- return s, err
+ return s, true, err
}
// Ignore invalid RFC 2047 encoded-word errors.
- return s, nil
+ return s, false, nil
}
var rfc2047Decoder = mime.WordDecoder{
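
The effect is visible through the public parser: whitespace between two adjacent encoded-words is dropped, per RFC 2047 section 6.2, while a space next to an unencoded word is kept. A quick check:

package main

import (
	"fmt"
	"net/mail"
)

func main() {
	// Two adjacent encoded-words: the separating space is not part
	// of the display name.
	a, err := mail.ParseAddress(`=?utf-8?q?J=C3=B6rg?= =?utf-8?q?Doe?= <joerg@example.com>`)
	if err != nil {
		panic(err)
	}
	fmt.Println(a.Name, a.Address) // JörgDoe joerg@example.com
}
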
diff --git a/libgo/go/net/mail/message_test.go b/libgo/go/net/mail/message_test.go
index f0761ab09fb..2106a0b97d6 100644
--- a/libgo/go/net/mail/message_test.go
+++ b/libgo/go/net/mail/message_test.go
@@ -136,6 +136,7 @@ func TestAddressParsingError(t *testing.T) {
4: {"\"\\" + string([]byte{0x80}) + "\" ", "invalid utf-8 in quoted-string"},
5: {"\"\x00\" ", "bad character in quoted-string"},
6: {"\"\\\x00\" ", "bad character in quoted-string"},
+ 7: {"John Doe", "no angle-addr"},
}
for i, tc := range mustErrTestCases {
@@ -235,6 +236,16 @@ func TestAddressParsing(t *testing.T) {
},
},
},
+ // RFC 2047 "Q"-encoded UTF-8 address with multiple encoded-words.
+ {
+ `=?utf-8?q?J=C3=B6rg?= =?utf-8?q?Doe?= <joerg@example.com>`,
+ []*Address{
+ {
+ Name: `JörgDoe`,
+ Address: "joerg@example.com",
+ },
+ },
+ },
// RFC 2047, Section 8.
{
`=?ISO-8859-1?Q?Andr=E9?= Pirard <PIRARD@vm1.ulg.ac.be>`,
diff --git a/libgo/go/net/main_cloexec_test.go b/libgo/go/net/main_cloexec_test.go
index 79038195859..fa1ed020572 100644
--- a/libgo/go/net/main_cloexec_test.go
+++ b/libgo/go/net/main_cloexec_test.go
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build freebsd linux
+// +build dragonfly freebsd linux
package net
+import "internal/poll"
+
func init() {
extraTestHookInstallers = append(extraTestHookInstallers, installAccept4TestHook)
extraTestHookUninstallers = append(extraTestHookUninstallers, uninstallAccept4TestHook)
@@ -13,13 +15,13 @@ func init() {
var (
// Placeholders for saving original socket system calls.
- origAccept4 = accept4Func
+ origAccept4 = poll.Accept4Func
)
func installAccept4TestHook() {
- accept4Func = sw.Accept4
+ poll.Accept4Func = sw.Accept4
}
func uninstallAccept4TestHook() {
- accept4Func = origAccept4
+ poll.Accept4Func = origAccept4
}
diff --git a/libgo/go/net/main_test.go b/libgo/go/net/main_test.go
index 28a8ff66d65..3e7a85ad2d9 100644
--- a/libgo/go/net/main_test.go
+++ b/libgo/go/net/main_test.go
@@ -70,7 +70,7 @@ var (
)
func setupTestData() {
- if supportsIPv4 {
+ if supportsIPv4() {
resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{
{"tcp", "localhost:1", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 1}, nil},
{"tcp4", "localhost:2", &TCPAddr{IP: IPv4(127, 0, 0, 1), Port: 2}, nil},
@@ -85,25 +85,31 @@ func setupTestData() {
}...)
}
- if supportsIPv6 {
+ if supportsIPv6() {
resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp6", "localhost:3", &TCPAddr{IP: IPv6loopback, Port: 3}, nil})
resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp6", "localhost:3", &UDPAddr{IP: IPv6loopback, Port: 3}, nil})
resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip6", "localhost", &IPAddr{IP: IPv6loopback}, nil})
+
+ // Issue 20911: don't return IPv4 addresses for
+ // Resolve*Addr calls of the IPv6 unspecified address.
+ resolveTCPAddrTests = append(resolveTCPAddrTests, resolveTCPAddrTest{"tcp", "[::]:4", &TCPAddr{IP: IPv6unspecified, Port: 4}, nil})
+ resolveUDPAddrTests = append(resolveUDPAddrTests, resolveUDPAddrTest{"udp", "[::]:4", &UDPAddr{IP: IPv6unspecified, Port: 4}, nil})
+ resolveIPAddrTests = append(resolveIPAddrTests, resolveIPAddrTest{"ip", "::", &IPAddr{IP: IPv6unspecified}, nil})
}
ifi := loopbackInterface()
if ifi != nil {
index := fmt.Sprintf("%v", ifi.Index)
resolveTCPAddrTests = append(resolveTCPAddrTests, []resolveTCPAddrTest{
- {"tcp6", "[fe80::1%" + ifi.Name + "]:1", &TCPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneToString(ifi.Index)}, nil},
+ {"tcp6", "[fe80::1%" + ifi.Name + "]:1", &TCPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil},
{"tcp6", "[fe80::1%" + index + "]:2", &TCPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil},
}...)
resolveUDPAddrTests = append(resolveUDPAddrTests, []resolveUDPAddrTest{
- {"udp6", "[fe80::1%" + ifi.Name + "]:1", &UDPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneToString(ifi.Index)}, nil},
+ {"udp6", "[fe80::1%" + ifi.Name + "]:1", &UDPAddr{IP: ParseIP("fe80::1"), Port: 1, Zone: zoneCache.name(ifi.Index)}, nil},
{"udp6", "[fe80::1%" + index + "]:2", &UDPAddr{IP: ParseIP("fe80::1"), Port: 2, Zone: index}, nil},
}...)
resolveIPAddrTests = append(resolveIPAddrTests, []resolveIPAddrTest{
- {"ip6", "fe80::1%" + ifi.Name, &IPAddr{IP: ParseIP("fe80::1"), Zone: zoneToString(ifi.Index)}, nil},
+ {"ip6", "fe80::1%" + ifi.Name, &IPAddr{IP: ParseIP("fe80::1"), Zone: zoneCache.name(ifi.Index)}, nil},
{"ip6", "fe80::1%" + index, &IPAddr{IP: ParseIP("fe80::1"), Zone: index}, nil},
}...)
}
diff --git a/libgo/go/net/main_unix_test.go b/libgo/go/net/main_unix_test.go
index 8c8f94479de..34a8a104e82 100644
--- a/libgo/go/net/main_unix_test.go
+++ b/libgo/go/net/main_unix_test.go
@@ -6,13 +6,15 @@
package net
+import "internal/poll"
+
var (
// Placeholders for saving original socket system calls.
origSocket = socketFunc
- origClose = closeFunc
+ origClose = poll.CloseFunc
origConnect = connectFunc
origListen = listenFunc
- origAccept = acceptFunc
+ origAccept = poll.AcceptFunc
origGetsockoptInt = getsockoptIntFunc
extraTestHookInstallers []func()
@@ -21,10 +23,10 @@ var (
func installTestHooks() {
socketFunc = sw.Socket
- closeFunc = sw.Close
+ poll.CloseFunc = sw.Close
connectFunc = sw.Connect
listenFunc = sw.Listen
- acceptFunc = sw.Accept
+ poll.AcceptFunc = sw.Accept
getsockoptIntFunc = sw.GetsockoptInt
for _, fn := range extraTestHookInstallers {
@@ -34,10 +36,10 @@ func installTestHooks() {
func uninstallTestHooks() {
socketFunc = origSocket
- closeFunc = origClose
+ poll.CloseFunc = origClose
connectFunc = origConnect
listenFunc = origListen
- acceptFunc = origAccept
+ poll.AcceptFunc = origAccept
getsockoptIntFunc = origGetsockoptInt
for _, fn := range extraTestHookUninstallers {
@@ -48,6 +50,6 @@ func uninstallTestHooks() {
// forceCloseSockets must be called only from TestMain.
func forceCloseSockets() {
for s := range sw.Sockets() {
- closeFunc(s)
+ poll.CloseFunc(s)
}
}
diff --git a/libgo/go/net/main_windows_test.go b/libgo/go/net/main_windows_test.go
index 6ea318c2a5f..f38a3a0d668 100644
--- a/libgo/go/net/main_windows_test.go
+++ b/libgo/go/net/main_windows_test.go
@@ -4,37 +4,39 @@
package net
+import "internal/poll"
+
var (
// Placeholders for saving original socket system calls.
origSocket = socketFunc
- origClosesocket = closeFunc
+ origClosesocket = poll.CloseFunc
origConnect = connectFunc
- origConnectEx = connectExFunc
+ origConnectEx = poll.ConnectExFunc
origListen = listenFunc
- origAccept = acceptFunc
+ origAccept = poll.AcceptFunc
)
func installTestHooks() {
socketFunc = sw.Socket
- closeFunc = sw.Closesocket
+ poll.CloseFunc = sw.Closesocket
connectFunc = sw.Connect
- connectExFunc = sw.ConnectEx
+ poll.ConnectExFunc = sw.ConnectEx
listenFunc = sw.Listen
- acceptFunc = sw.AcceptEx
+ poll.AcceptFunc = sw.AcceptEx
}
func uninstallTestHooks() {
socketFunc = origSocket
- closeFunc = origClosesocket
+ poll.CloseFunc = origClosesocket
connectFunc = origConnect
- connectExFunc = origConnectEx
+ poll.ConnectExFunc = origConnectEx
listenFunc = origListen
- acceptFunc = origAccept
+ poll.AcceptFunc = origAccept
}
// forceCloseSockets must be called only from TestMain.
func forceCloseSockets() {
for s := range sw.Sockets() {
- closeFunc(s)
+ poll.CloseFunc(s)
}
}
diff --git a/libgo/go/net/mockserver_test.go b/libgo/go/net/mockserver_test.go
index 766de6a815b..44581d90498 100644
--- a/libgo/go/net/mockserver_test.go
+++ b/libgo/go/net/mockserver_test.go
@@ -31,20 +31,20 @@ func testUnixAddr() string {
func newLocalListener(network string) (Listener, error) {
switch network {
case "tcp":
- if supportsIPv4 {
+ if supportsIPv4() {
if ln, err := Listen("tcp4", "127.0.0.1:0"); err == nil {
return ln, nil
}
}
- if supportsIPv6 {
+ if supportsIPv6() {
return Listen("tcp6", "[::1]:0")
}
case "tcp4":
- if supportsIPv4 {
+ if supportsIPv4() {
return Listen("tcp4", "127.0.0.1:0")
}
case "tcp6":
- if supportsIPv6 {
+ if supportsIPv6() {
return Listen("tcp6", "[::1]:0")
}
case "unix", "unixpacket":
@@ -333,18 +333,18 @@ func timeoutTransmitter(c Conn, d, min, max time.Duration, ch chan<- error) {
func newLocalPacketListener(network string) (PacketConn, error) {
switch network {
case "udp":
- if supportsIPv4 {
+ if supportsIPv4() {
return ListenPacket("udp4", "127.0.0.1:0")
}
- if supportsIPv6 {
+ if supportsIPv6() {
return ListenPacket("udp6", "[::1]:0")
}
case "udp4":
- if supportsIPv4 {
+ if supportsIPv4() {
return ListenPacket("udp4", "127.0.0.1:0")
}
case "udp6":
- if supportsIPv6 {
+ if supportsIPv6() {
return ListenPacket("udp6", "[::1]:0")
}
case "unixgram":
diff --git a/libgo/go/net/net.go b/libgo/go/net/net.go
index a8b57361e6c..91ec048e0be 100644
--- a/libgo/go/net/net.go
+++ b/libgo/go/net/net.go
@@ -81,6 +81,7 @@ package net
import (
"context"
"errors"
+ "internal/poll"
"io"
"os"
"syscall"
@@ -95,12 +96,6 @@ var (
netCgo bool // set true in conf_netcgo.go for build tag "netcgo"
)
-func init() {
- sysInit()
- supportsIPv4 = probeIPv4Stack()
- supportsIPv6, supportsIPv4map = probeIPv6Stack()
-}
-
// Addr represents a network end point address.
//
// The two methods Network and String conventionally return strings
@@ -234,7 +229,7 @@ func (c *conn) SetDeadline(t time.Time) error {
if !c.ok() {
return syscall.EINVAL
}
- if err := c.fd.setDeadline(t); err != nil {
+ if err := c.fd.pfd.SetDeadline(t); err != nil {
return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
}
return nil
@@ -245,7 +240,7 @@ func (c *conn) SetReadDeadline(t time.Time) error {
if !c.ok() {
return syscall.EINVAL
}
- if err := c.fd.setReadDeadline(t); err != nil {
+ if err := c.fd.pfd.SetReadDeadline(t); err != nil {
return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
}
return nil
@@ -256,7 +251,7 @@ func (c *conn) SetWriteDeadline(t time.Time) error {
if !c.ok() {
return syscall.EINVAL
}
- if err := c.fd.setWriteDeadline(t); err != nil {
+ if err := c.fd.pfd.SetWriteDeadline(t); err != nil {
return &OpError{Op: "set", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
}
return nil
@@ -391,10 +386,8 @@ var (
errMissingAddress = errors.New("missing address")
// For both read and write operations.
- errTimeout error = &timeoutError{}
- errCanceled = errors.New("operation was canceled")
- errClosing = errors.New("use of closed network connection")
- ErrWriteToConnected = errors.New("use of WriteTo with pre-connected connection")
+ errCanceled = errors.New("operation was canceled")
+ ErrWriteToConnected = errors.New("use of WriteTo with pre-connected connection")
)
// mapErr maps from the context errors to the historical internal net
@@ -407,7 +400,7 @@ func mapErr(err error) error {
case context.Canceled:
return errCanceled
case context.DeadlineExceeded:
- return errTimeout
+ return poll.ErrTimeout
default:
return err
}
@@ -502,12 +495,6 @@ func (e *OpError) Temporary() bool {
return ok && t.Temporary()
}
-type timeoutError struct{}
-
-func (e *timeoutError) Error() string { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool { return true }
-func (e *timeoutError) Temporary() bool { return true }
-
// A ParseError is the error type of literal network address parsers.
type ParseError struct {
// Type is the type of string that was expected, such as
@@ -632,8 +619,6 @@ type buffersWriter interface {
writeBuffers(*Buffers) (int64, error)
}
-var testHookDidWritev = func(wrote int) {}
-
// Buffers contains zero or more runs of bytes to write.
//
// On certain machines, for certain types of connections, this is
diff --git a/libgo/go/net/net_test.go b/libgo/go/net/net_test.go
index 9a9a7e552c4..024505e7c62 100644
--- a/libgo/go/net/net_test.go
+++ b/libgo/go/net/net_test.go
@@ -54,7 +54,7 @@ func TestCloseRead(t *testing.T) {
err = c.CloseRead()
}
if err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, true); perr != nil {
t.Error(perr)
}
t.Fatal(err)
@@ -94,7 +94,7 @@ func TestCloseWrite(t *testing.T) {
err = c.CloseWrite()
}
if err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, true); perr != nil {
t.Error(perr)
}
t.Error(err)
@@ -139,7 +139,7 @@ func TestCloseWrite(t *testing.T) {
err = c.CloseWrite()
}
if err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, true); perr != nil {
t.Error(perr)
}
t.Fatal(err)
@@ -184,7 +184,7 @@ func TestConnClose(t *testing.T) {
defer c.Close()
if err := c.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Fatal(err)
@@ -215,7 +215,7 @@ func TestListenerClose(t *testing.T) {
dst := ln.Addr().String()
if err := ln.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Fatal(err)
@@ -269,7 +269,7 @@ func TestPacketConnClose(t *testing.T) {
defer c.Close()
if err := c.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Fatal(err)
@@ -292,7 +292,7 @@ func TestListenCloseListen(t *testing.T) {
}
addr := ln.Addr().String()
if err := ln.Close(); err != nil {
- if perr := parseCloseError(err); perr != nil {
+ if perr := parseCloseError(err, false); perr != nil {
t.Error(perr)
}
t.Fatal(err)
diff --git a/libgo/go/net/platform_test.go b/libgo/go/net/platform_test.go
index 2a14095cc28..5841ca35a00 100644
--- a/libgo/go/net/platform_test.go
+++ b/libgo/go/net/platform_test.go
@@ -50,11 +50,11 @@ func testableNetwork(network string) bool {
}
switch ss[0] {
case "tcp4", "udp4", "ip4":
- if !supportsIPv4 {
+ if !supportsIPv4() {
return false
}
case "tcp6", "udp6", "ip6":
- if !supportsIPv6 {
+ if !supportsIPv6() {
return false
}
}
@@ -117,25 +117,25 @@ func testableListenArgs(network, address, client string) bool {
// Test functionality of IPv4 communication using AF_INET and
// IPv6 communication using AF_INET6 sockets.
- if !supportsIPv4 && ip.To4() != nil {
+ if !supportsIPv4() && ip.To4() != nil {
return false
}
- if !supportsIPv6 && ip.To16() != nil && ip.To4() == nil {
+ if !supportsIPv6() && ip.To16() != nil && ip.To4() == nil {
return false
}
cip := ParseIP(client)
if cip != nil {
- if !supportsIPv4 && cip.To4() != nil {
+ if !supportsIPv4() && cip.To4() != nil {
return false
}
- if !supportsIPv6 && cip.To16() != nil && cip.To4() == nil {
+ if !supportsIPv6() && cip.To16() != nil && cip.To4() == nil {
return false
}
}
// Test functionality of IPv4 communication using AF_INET6
// sockets.
- if !supportsIPv4map && supportsIPv4 && (network == "tcp" || network == "udp" || network == "ip") && wildcard {
+ if !supportsIPv4map() && supportsIPv4() && (network == "tcp" || network == "udp" || network == "ip") && wildcard {
// At this point, we prefer IPv4 when ip is nil.
// See favoriteAddrFamily for further information.
if ip.To16() != nil && ip.To4() == nil && cip.To4() != nil { // a pair of IPv6 server and IPv4 client
diff --git a/libgo/go/net/port_unix.go b/libgo/go/net/port_unix.go
index 3120ba1c7e2..8dd1c32f95d 100644
--- a/libgo/go/net/port_unix.go
+++ b/libgo/go/net/port_unix.go
@@ -17,6 +17,8 @@ func readServices() {
if err != nil {
return
}
+ defer file.close()
+
for line, ok := file.readLine(); ok; line, ok = file.readLine() {
// "http 80/tcp www www-http # World Wide Web HTTP"
if i := byteIndex(line, '#'); i >= 0 {
@@ -43,7 +45,6 @@ func readServices() {
}
}
}
- file.close()
}
// goLookupPort is the native Go implementation of LookupPort.
diff --git a/libgo/go/net/rawconn.go b/libgo/go/net/rawconn.go
new file mode 100644
index 00000000000..d67be644a34
--- /dev/null
+++ b/libgo/go/net/rawconn.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "runtime"
+ "syscall"
+)
+
+// BUG(mikio): On Windows, the Read and Write methods of
+// syscall.RawConn are not implemented.
+
+// BUG(mikio): On NaCl and Plan 9, the Control, Read and Write methods
+// of syscall.RawConn are not implemented.
+
+type rawConn struct {
+ fd *netFD
+}
+
+func (c *rawConn) ok() bool { return c != nil && c.fd != nil }
+
+func (c *rawConn) Control(f func(uintptr)) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ err := c.fd.pfd.RawControl(f)
+ runtime.KeepAlive(c.fd)
+ if err != nil {
+ err = &OpError{Op: "raw-control", Net: c.fd.net, Source: nil, Addr: c.fd.laddr, Err: err}
+ }
+ return err
+}
+
+func (c *rawConn) Read(f func(uintptr) bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ err := c.fd.pfd.RawRead(f)
+ runtime.KeepAlive(c.fd)
+ if err != nil {
+ err = &OpError{Op: "raw-read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return err
+}
+
+func (c *rawConn) Write(f func(uintptr) bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ err := c.fd.pfd.RawWrite(f)
+ runtime.KeepAlive(c.fd)
+ if err != nil {
+ err = &OpError{Op: "raw-write", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return err
+}
+
+func newRawConn(fd *netFD) (*rawConn, error) {
+ return &rawConn{fd: fd}, nil
+}
diff --git a/libgo/go/net/rawconn_unix_test.go b/libgo/go/net/rawconn_unix_test.go
new file mode 100644
index 00000000000..294249ba5d1
--- /dev/null
+++ b/libgo/go/net/rawconn_unix_test.go
@@ -0,0 +1,94 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package net
+
+import (
+ "bytes"
+ "syscall"
+ "testing"
+)
+
+func TestRawConn(t *testing.T) {
+ handler := func(ls *localServer, ln Listener) {
+ c, err := ln.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer c.Close()
+ var b [32]byte
+ n, err := c.Read(b[:])
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if _, err := c.Write(b[:n]); err != nil {
+ t.Error(err)
+ return
+ }
+ }
+ ls, err := newLocalServer("tcp")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ls.teardown()
+ if err := ls.buildup(handler); err != nil {
+ t.Fatal(err)
+ }
+
+ c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ cc, err := c.(*TCPConn).SyscallConn()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var operr error
+ data := []byte("HELLO-R-U-THERE")
+ err = cc.Write(func(s uintptr) bool {
+ _, operr = syscall.Write(int(s), data)
+ if operr == syscall.EAGAIN {
+ return false
+ }
+ return true
+ })
+ if err != nil || operr != nil {
+ t.Fatal(err, operr)
+ }
+
+ var nr int
+ var b [32]byte
+ err = cc.Read(func(s uintptr) bool {
+ nr, operr = syscall.Read(int(s), b[:])
+ if operr == syscall.EAGAIN {
+ return false
+ }
+ return true
+ })
+ if err != nil || operr != nil {
+ t.Fatal(err, operr)
+ }
+ if bytes.Compare(b[:nr], data) != 0 {
+ t.Fatalf("got %#v; want %#v", b[:nr], data)
+ }
+
+ fn := func(s uintptr) {
+ operr = syscall.SetsockoptInt(int(s), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ }
+ err = cc.Control(fn)
+ if err != nil || operr != nil {
+ t.Fatal(err, operr)
+ }
+ c.Close()
+ err = cc.Control(fn)
+ if err == nil {
+ t.Fatal("should fail")
+ }
+}
diff --git a/libgo/go/net/rawconn_windows_test.go b/libgo/go/net/rawconn_windows_test.go
new file mode 100644
index 00000000000..5fb6de75393
--- /dev/null
+++ b/libgo/go/net/rawconn_windows_test.go
@@ -0,0 +1,36 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "syscall"
+ "testing"
+)
+
+func TestRawConn(t *testing.T) {
+ c, err := newLocalPacketListener("udp")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ cc, err := c.(*UDPConn).SyscallConn()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var operr error
+ fn := func(s uintptr) {
+ operr = syscall.SetsockoptInt(syscall.Handle(s), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)
+ }
+ err = cc.Control(fn)
+ if err != nil || operr != nil {
+ t.Fatal(err, operr)
+ }
+ c.Close()
+ err = cc.Control(fn)
+ if err == nil {
+ t.Fatal("should fail")
+ }
+}
diff --git a/libgo/go/net/rpc/debug.go b/libgo/go/net/rpc/debug.go
index 98b2c1c6c4a..a1d799ff19a 100644
--- a/libgo/go/net/rpc/debug.go
+++ b/libgo/go/net/rpc/debug.go
@@ -71,20 +71,17 @@ type debugHTTP struct {
// Runs at /debug/rpc
func (server debugHTTP) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// Build a sorted version of the data.
- var services = make(serviceArray, len(server.serviceMap))
- i := 0
- server.mu.Lock()
- for sname, service := range server.serviceMap {
- services[i] = debugService{service, sname, make(methodArray, len(service.method))}
- j := 0
- for mname, method := range service.method {
- services[i].Method[j] = debugMethod{method, mname}
- j++
+ var services serviceArray
+ server.serviceMap.Range(func(snamei, svci interface{}) bool {
+ svc := svci.(*service)
+ ds := debugService{svc, snamei.(string), make(methodArray, 0, len(svc.method))}
+ for mname, method := range svc.method {
+ ds.Method = append(ds.Method, debugMethod{method, mname})
}
- sort.Sort(services[i].Method)
- i++
- }
- server.mu.Unlock()
+ sort.Sort(ds.Method)
+ services = append(services, ds)
+ return true
+ })
sort.Sort(services)
err := debug.Execute(w, services)
if err != nil {
diff --git a/libgo/go/net/rpc/jsonrpc/all_test.go b/libgo/go/net/rpc/jsonrpc/all_test.go
index b811d3c0c7c..bbb8eb02918 100644
--- a/libgo/go/net/rpc/jsonrpc/all_test.go
+++ b/libgo/go/net/rpc/jsonrpc/all_test.go
@@ -13,6 +13,7 @@ import (
"io/ioutil"
"net"
"net/rpc"
+ "reflect"
"strings"
"testing"
)
@@ -55,8 +56,26 @@ func (t *Arith) Error(args *Args, reply *Reply) error {
panic("ERROR")
}
+type BuiltinTypes struct{}
+
+func (BuiltinTypes) Map(i int, reply *map[int]int) error {
+ (*reply)[i] = i
+ return nil
+}
+
+func (BuiltinTypes) Slice(i int, reply *[]int) error {
+ *reply = append(*reply, i)
+ return nil
+}
+
+func (BuiltinTypes) Array(i int, reply *[1]int) error {
+ (*reply)[0] = i
+ return nil
+}
+
func init() {
rpc.Register(new(Arith))
+ rpc.Register(BuiltinTypes{})
}
func TestServerNoParams(t *testing.T) {
@@ -182,6 +201,45 @@ func TestClient(t *testing.T) {
}
}
+func TestBuiltinTypes(t *testing.T) {
+ cli, srv := net.Pipe()
+ go ServeConn(srv)
+
+ client := NewClient(cli)
+ defer client.Close()
+
+ // Map
+ arg := 7
+ replyMap := map[int]int{}
+ err := client.Call("BuiltinTypes.Map", arg, &replyMap)
+ if err != nil {
+ t.Errorf("Map: expected no error but got string %q", err.Error())
+ }
+ if replyMap[arg] != arg {
+ t.Errorf("Map: expected %d got %d", arg, replyMap[arg])
+ }
+
+ // Slice
+ replySlice := []int{}
+ err = client.Call("BuiltinTypes.Slice", arg, &replySlice)
+ if err != nil {
+ t.Errorf("Slice: expected no error but got string %q", err.Error())
+ }
+ if e := []int{arg}; !reflect.DeepEqual(replySlice, e) {
+ t.Errorf("Slice: expected %v got %v", e, replySlice)
+ }
+
+ // Array
+ replyArray := [1]int{}
+ err = client.Call("BuiltinTypes.Array", arg, &replyArray)
+ if err != nil {
+ t.Errorf("Array: expected no error but got string %q", err.Error())
+ }
+ if e := [1]int{arg}; !reflect.DeepEqual(replyArray, e) {
+ t.Errorf("Array: expected %v got %v", e, replyArray)
+ }
+}
+
func TestMalformedInput(t *testing.T) {
cli, srv := net.Pipe()
go cli.Write([]byte(`{id:1}`)) // invalid json
diff --git a/libgo/go/net/rpc/jsonrpc/client.go b/libgo/go/net/rpc/jsonrpc/client.go
index da1b8165fc7..e6359bed598 100644
--- a/libgo/go/net/rpc/jsonrpc/client.go
+++ b/libgo/go/net/rpc/jsonrpc/client.go
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec
+// Package jsonrpc implements a JSON-RPC 1.0 ClientCodec and ServerCodec
// for the rpc package.
+// For JSON-RPC 2.0 support, see https://godoc.org/?q=json-rpc+2.0
package jsonrpc
import (
diff --git a/libgo/go/net/rpc/server.go b/libgo/go/net/rpc/server.go
index 18ea629b0d6..29aae7ee7ff 100644
--- a/libgo/go/net/rpc/server.go
+++ b/libgo/go/net/rpc/server.go
@@ -187,8 +187,7 @@ type Response struct {
// Server represents an RPC Server.
type Server struct {
- mu sync.RWMutex // protects the serviceMap
- serviceMap map[string]*service
+ serviceMap sync.Map // map[string]*service
reqLock sync.Mutex // protects freeReq
freeReq *Request
respLock sync.Mutex // protects freeResp
@@ -197,7 +196,7 @@ type Server struct {
// NewServer returns a new Server.
func NewServer() *Server {
- return &Server{serviceMap: make(map[string]*service)}
+ return &Server{}
}
// DefaultServer is the default instance of *Server.
@@ -240,11 +239,6 @@ func (server *Server) RegisterName(name string, rcvr interface{}) error {
}
func (server *Server) register(rcvr interface{}, name string, useName bool) error {
- server.mu.Lock()
- defer server.mu.Unlock()
- if server.serviceMap == nil {
- server.serviceMap = make(map[string]*service)
- }
s := new(service)
s.typ = reflect.TypeOf(rcvr)
s.rcvr = reflect.ValueOf(rcvr)
@@ -262,9 +256,6 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
log.Print(s)
return errors.New(s)
}
- if _, present := server.serviceMap[sname]; present {
- return errors.New("rpc: service already defined: " + sname)
- }
s.name = sname
// Install the methods
@@ -283,7 +274,10 @@ func (server *Server) register(rcvr interface{}, name string, useName bool) erro
log.Print(str)
return errors.New(str)
}
- server.serviceMap[s.name] = s
+
+ if _, dup := server.serviceMap.LoadOrStore(sname, s); dup {
+ return errors.New("rpc: service already defined: " + sname)
+ }
return nil
}
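
sync.Map's LoadOrStore combines the duplicate check and the insert into one atomic step, which is why the separate mutex and the earlier "already defined" lookup could go. A standalone sketch of the idiom:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type service struct{ name string }

var serviceMap sync.Map // map[string]*service

func register(name string, s *service) error {
	// Atomic check-and-insert: no lock needed around the map.
	if _, dup := serviceMap.LoadOrStore(name, s); dup {
		return errors.New("rpc: service already defined: " + name)
	}
	return nil
}

func main() {
	fmt.Println(register("Arith", &service{"Arith"})) // <nil>
	fmt.Println(register("Arith", &service{"Arith"})) // already defined
}
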
@@ -571,10 +565,17 @@ func (server *Server) readRequest(codec ServerCodec) (service *service, mtype *m
}
replyv = reflect.New(mtype.ReplyType.Elem())
+
+ switch mtype.ReplyType.Elem().Kind() {
+ case reflect.Map:
+ replyv.Elem().Set(reflect.MakeMap(mtype.ReplyType.Elem()))
+ case reflect.Slice:
+ replyv.Elem().Set(reflect.MakeSlice(mtype.ReplyType.Elem(), 0, 0))
+ }
return
}
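
Without this switch a method taking a *map or *slice reply would receive a pointer to a nil map or slice, and writes to the nil map would panic; the BuiltinTypes tests below exercise exactly this. A small sketch of the allocation, using a hypothetical newReply helper:

package main

import (
	"fmt"
	"reflect"
)

// newReply mirrors the readRequest logic: allocate *T, and if T is a
// map or slice, make it non-nil so the service method can write to it.
func newReply(t reflect.Type) reflect.Value {
	replyv := reflect.New(t.Elem())
	switch t.Elem().Kind() {
	case reflect.Map:
		replyv.Elem().Set(reflect.MakeMap(t.Elem()))
	case reflect.Slice:
		replyv.Elem().Set(reflect.MakeSlice(t.Elem(), 0, 0))
	}
	return replyv
}

func main() {
	m := newReply(reflect.TypeOf((*map[int]int)(nil))).Interface().(*map[int]int)
	(*m)[7] = 8 // safe: the map was allocated above
	fmt.Println(*m)
}
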
-func (server *Server) readRequestHeader(codec ServerCodec) (service *service, mtype *methodType, req *Request, keepReading bool, err error) {
+func (server *Server) readRequestHeader(codec ServerCodec) (svc *service, mtype *methodType, req *Request, keepReading bool, err error) {
// Grab the request header.
req = server.getRequest()
err = codec.ReadRequestHeader(req)
@@ -600,14 +601,13 @@ func (server *Server) readRequestHeader(codec ServerCodec) (service *service, mt
methodName := req.ServiceMethod[dot+1:]
// Look up the request.
- server.mu.RLock()
- service = server.serviceMap[serviceName]
- server.mu.RUnlock()
- if service == nil {
+ svci, ok := server.serviceMap.Load(serviceName)
+ if !ok {
err = errors.New("rpc: can't find service " + req.ServiceMethod)
return
}
- mtype = service.method[methodName]
+ svc = svci.(*service)
+ mtype = svc.method[methodName]
if mtype == nil {
err = errors.New("rpc: can't find method " + req.ServiceMethod)
}
diff --git a/libgo/go/net/rpc/server_test.go b/libgo/go/net/rpc/server_test.go
index 8369c9dec7b..fb97f82a2f7 100644
--- a/libgo/go/net/rpc/server_test.go
+++ b/libgo/go/net/rpc/server_test.go
@@ -11,6 +11,7 @@ import (
"log"
"net"
"net/http/httptest"
+ "reflect"
"runtime"
"strings"
"sync"
@@ -85,6 +86,24 @@ type Embed struct {
hidden
}
+type BuiltinTypes struct{}
+
+func (BuiltinTypes) Map(args *Args, reply *map[int]int) error {
+ (*reply)[args.A] = args.B
+ return nil
+}
+
+func (BuiltinTypes) Slice(args *Args, reply *[]int) error {
+ *reply = append(*reply, args.A, args.B)
+ return nil
+}
+
+func (BuiltinTypes) Array(args *Args, reply *[2]int) error {
+ (*reply)[0] = args.A
+ (*reply)[1] = args.B
+ return nil
+}
+
func listenTCP() (net.Listener, string) {
l, e := net.Listen("tcp", "127.0.0.1:0") // any available address
if e != nil {
@@ -97,6 +116,7 @@ func startServer() {
Register(new(Arith))
Register(new(Embed))
RegisterName("net.rpc.Arith", new(Arith))
+ Register(BuiltinTypes{})
var l net.Listener
l, serverAddr = listenTCP()
@@ -326,6 +346,49 @@ func testHTTPRPC(t *testing.T, path string) {
}
}
+func TestBuiltinTypes(t *testing.T) {
+ once.Do(startServer)
+
+ client, err := DialHTTP("tcp", httpServerAddr)
+ if err != nil {
+ t.Fatal("dialing", err)
+ }
+ defer client.Close()
+
+ // Map
+ args := &Args{7, 8}
+ replyMap := map[int]int{}
+ err = client.Call("BuiltinTypes.Map", args, &replyMap)
+ if err != nil {
+ t.Errorf("Map: expected no error but got string %q", err.Error())
+ }
+ if replyMap[args.A] != args.B {
+ t.Errorf("Map: expected %d got %d", args.B, replyMap[args.A])
+ }
+
+ // Slice
+ args = &Args{7, 8}
+ replySlice := []int{}
+ err = client.Call("BuiltinTypes.Slice", args, &replySlice)
+ if err != nil {
+ t.Errorf("Slice: expected no error but got string %q", err.Error())
+ }
+ if e := []int{args.A, args.B}; !reflect.DeepEqual(replySlice, e) {
+ t.Errorf("Slice: expected %v got %v", e, replySlice)
+ }
+
+ // Array
+ args = &Args{7, 8}
+ replyArray := [2]int{}
+ err = client.Call("BuiltinTypes.Array", args, &replyArray)
+ if err != nil {
+ t.Errorf("Array: expected no error but got string %q", err.Error())
+ }
+ if e := [2]int{args.A, args.B}; !reflect.DeepEqual(replyArray, e) {
+ t.Errorf("Array: expected %v got %v", e, replyArray)
+ }
+}
+
// CodecEmulator provides a client-like api and a ServerCodec interface.
// Can be used to test ServeRequest.
type CodecEmulator struct {
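The BuiltinTypes tests above pass only because of the replyv switch added to readRequest earlier: a *map reply arrives as a pointer to a nil map, and assignment into a nil map panics, so the server must allocate the container before invoking the method. A standalone sketch of that allocation (plain reflection, no rpc involved):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        var reply map[int]int // nil map, as a client's new(map[int]int) would hold

        // Writing reply[7] = 8 here would panic:
        // "assignment to entry in nil map".

        // This mirrors what readRequest now does for map reply types.
        rv := reflect.New(reflect.TypeOf(reply)).Elem()
        rv.Set(reflect.MakeMap(rv.Type()))
        rv.SetMapIndex(reflect.ValueOf(7), reflect.ValueOf(8))
        fmt.Println(rv.Interface()) // map[7:8]
    }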
@@ -619,13 +682,13 @@ func TestErrorAfterClientClose(t *testing.T) {
// Tests the fix to issue 11221. Without the fix, this loops forever or crashes.
func TestAcceptExitAfterListenerClose(t *testing.T) {
- newServer = NewServer()
+ newServer := NewServer()
newServer.Register(new(Arith))
newServer.RegisterName("net.rpc.Arith", new(Arith))
newServer.RegisterName("newServer.Arith", new(Arith))
var l net.Listener
- l, newServerAddr = listenTCP()
+ l, _ = listenTCP()
l.Close()
newServer.Accept(l)
}
diff --git a/libgo/go/net/sendfile_bsd.go b/libgo/go/net/sendfile_bsd.go
new file mode 100644
index 00000000000..7a2b48c6cfa
--- /dev/null
+++ b/libgo/go/net/sendfile_bsd.go
@@ -0,0 +1,67 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly freebsd
+
+package net
+
+import (
+ "internal/poll"
+ "io"
+ "os"
+)
+
+// sendFile copies the contents of r to c using the sendfile
+// system call to minimize copies.
+//
+// if handled == true, sendFile returns the number of bytes copied and any
+// non-EOF error.
+//
+// if handled == false, sendFile performed no work.
+func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
+ // FreeBSD and DragonFly use 0 as the "until EOF" value.
+ // If you pass in more bytes than the file contains, it will
+ // loop back to the beginning ad nauseam until it's sent
+ // exactly the number of bytes told to. As such, we need to
+ // know exactly how many bytes to send.
+ var remain int64 = 0
+
+ lr, ok := r.(*io.LimitedReader)
+ if ok {
+ remain, r = lr.N, lr.R
+ if remain <= 0 {
+ return 0, nil, true
+ }
+ }
+ f, ok := r.(*os.File)
+ if !ok {
+ return 0, nil, false
+ }
+
+ if remain == 0 {
+ fi, err := f.Stat()
+ if err != nil {
+ return 0, err, false
+ }
+
+ remain = fi.Size()
+ }
+
+ // The other quirk with FreeBSD/DragonFly's sendfile
+ // implementation is that it doesn't use the current position
+ // of the file -- if you pass it offset 0, it starts from
+ // offset 0. There's no way to tell it "start from current
+ // position", so we have to manage that explicitly.
+ pos, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return 0, err, false
+ }
+
+ written, err = poll.SendFile(&c.pfd, int(f.Fd()), pos, remain)
+
+ if lr != nil {
+ lr.N = remain - written
+ }
+ return written, wrapSyscallError("sendfile", err), written > 0
+}
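For context, sendFile is not called by users directly: TCPConn.ReadFrom tries it when the source is an *os.File (optionally wrapped in an io.LimitedReader), and io.Copy prefers ReadFrom when the destination implements io.ReaderFrom. A sketch of code that can take this fast path (address and file name are hypothetical):

    package main

    import (
        "io"
        "log"
        "net"
        "os"
    )

    func main() {
        conn, err := net.Dial("tcp", "127.0.0.1:8080")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        f, err := os.Open("payload.bin")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        // io.Copy notices that *net.TCPConn implements io.ReaderFrom and
        // calls conn.ReadFrom(f); ReadFrom recognizes the *os.File and
        // attempts the platform sendfile. Wrapping f in io.LimitReader
        // bounds the transfer while keeping the fast path.
        if _, err := io.Copy(conn, f); err != nil {
            log.Fatal(err)
        }
    }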
diff --git a/libgo/go/net/sendfile_dragonfly.go b/libgo/go/net/sendfile_dragonfly.go
deleted file mode 100644
index d4b825c3705..00000000000
--- a/libgo/go/net/sendfile_dragonfly.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// maxSendfileSize is the largest chunk size we ask the kernel to copy
-// at a time.
-const maxSendfileSize int = 4 << 20
-
-// sendFile copies the contents of r to c using the sendfile
-// system call to minimize copies.
-//
-// if handled == true, sendFile returns the number of bytes copied and any
-// non-EOF error.
-//
-// if handled == false, sendFile performed no work.
-func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
- // DragonFly uses 0 as the "until EOF" value. If you pass in more bytes than the
- // file contains, it will loop back to the beginning ad nauseam until it's sent
- // exactly the number of bytes told to. As such, we need to know exactly how many
- // bytes to send.
- var remain int64 = 0
-
- lr, ok := r.(*io.LimitedReader)
- if ok {
- remain, r = lr.N, lr.R
- if remain <= 0 {
- return 0, nil, true
- }
- }
- f, ok := r.(*os.File)
- if !ok {
- return 0, nil, false
- }
-
- if remain == 0 {
- fi, err := f.Stat()
- if err != nil {
- return 0, err, false
- }
-
- remain = fi.Size()
- }
-
- // The other quirk with DragonFly's sendfile implementation is that it doesn't
- // use the current position of the file -- if you pass it offset 0, it starts
- // from offset 0. There's no way to tell it "start from current position", so
- // we have to manage that explicitly.
- pos, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return 0, err, false
- }
-
- if err := c.writeLock(); err != nil {
- return 0, err, true
- }
- defer c.writeUnlock()
-
- dst := c.sysfd
- src := int(f.Fd())
- for remain > 0 {
- n := maxSendfileSize
- if int64(n) > remain {
- n = int(remain)
- }
- pos1 := pos
- n, err1 := syscall.Sendfile(dst, src, &pos1, n)
- if n > 0 {
- pos += int64(n)
- written += int64(n)
- remain -= int64(n)
- }
- if n == 0 && err1 == nil {
- break
- }
- if err1 == syscall.EAGAIN {
- if err1 = c.pd.waitWrite(); err1 == nil {
- continue
- }
- }
- if err1 == syscall.EINTR {
- continue
- }
- if err1 != nil {
- // This includes syscall.ENOSYS (no kernel
- // support) and syscall.EINVAL (fd types which
- // don't implement sendfile)
- err = err1
- break
- }
- }
- if lr != nil {
- lr.N = remain
- }
- if err != nil {
- err = os.NewSyscallError("sendfile", err)
- }
- return written, err, written > 0
-}
diff --git a/libgo/go/net/sendfile_freebsd.go b/libgo/go/net/sendfile_freebsd.go
deleted file mode 100644
index 18cbb27b533..00000000000
--- a/libgo/go/net/sendfile_freebsd.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package net
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// maxSendfileSize is the largest chunk size we ask the kernel to copy
-// at a time.
-const maxSendfileSize int = 4 << 20
-
-// sendFile copies the contents of r to c using the sendfile
-// system call to minimize copies.
-//
-// if handled == true, sendFile returns the number of bytes copied and any
-// non-EOF error.
-//
-// if handled == false, sendFile performed no work.
-func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
- // FreeBSD uses 0 as the "until EOF" value. If you pass in more bytes than the
- // file contains, it will loop back to the beginning ad nauseam until it's sent
- // exactly the number of bytes told to. As such, we need to know exactly how many
- // bytes to send.
- var remain int64 = 0
-
- lr, ok := r.(*io.LimitedReader)
- if ok {
- remain, r = lr.N, lr.R
- if remain <= 0 {
- return 0, nil, true
- }
- }
- f, ok := r.(*os.File)
- if !ok {
- return 0, nil, false
- }
-
- if remain == 0 {
- fi, err := f.Stat()
- if err != nil {
- return 0, err, false
- }
-
- remain = fi.Size()
- }
-
- // The other quirk with FreeBSD's sendfile implementation is that it doesn't
- // use the current position of the file -- if you pass it offset 0, it starts
- // from offset 0. There's no way to tell it "start from current position", so
- // we have to manage that explicitly.
- pos, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return 0, err, false
- }
-
- if err := c.writeLock(); err != nil {
- return 0, err, true
- }
- defer c.writeUnlock()
-
- dst := c.sysfd
- src := int(f.Fd())
- for remain > 0 {
- n := maxSendfileSize
- if int64(n) > remain {
- n = int(remain)
- }
- pos1 := pos
- n, err1 := syscall.Sendfile(dst, src, &pos1, n)
- if n > 0 {
- pos += int64(n)
- written += int64(n)
- remain -= int64(n)
- }
- if n == 0 && err1 == nil {
- break
- }
- if err1 == syscall.EAGAIN {
- if err1 = c.pd.waitWrite(); err1 == nil {
- continue
- }
- }
- if err1 == syscall.EINTR {
- continue
- }
- if err1 != nil {
- // This includes syscall.ENOSYS (no kernel
- // support) and syscall.EINVAL (fd types which
- // don't implement sendfile)
- err = err1
- break
- }
- }
- if lr != nil {
- lr.N = remain
- }
- if err != nil {
- err = os.NewSyscallError("sendfile", err)
- }
- return written, err, written > 0
-}
diff --git a/libgo/go/net/sendfile_linux.go b/libgo/go/net/sendfile_linux.go
index 7e741f97941..c537ea68b2b 100644
--- a/libgo/go/net/sendfile_linux.go
+++ b/libgo/go/net/sendfile_linux.go
@@ -5,15 +5,11 @@
package net
import (
+ "internal/poll"
"io"
"os"
- "syscall"
)
-// maxSendfileSize is the largest chunk size we ask the kernel to copy
-// at a time.
-const maxSendfileSize int = 4 << 20
-
// sendFile copies the contents of r to c using the sendfile
// system call to minimize copies.
//
@@ -36,44 +32,10 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
return 0, nil, false
}
- if err := c.writeLock(); err != nil {
- return 0, err, true
- }
- defer c.writeUnlock()
+ written, err = poll.SendFile(&c.pfd, int(f.Fd()), remain)
- dst := c.sysfd
- src := int(f.Fd())
- for remain > 0 {
- n := maxSendfileSize
- if int64(n) > remain {
- n = int(remain)
- }
- n, err1 := syscall.Sendfile(dst, src, nil, n)
- if n > 0 {
- written += int64(n)
- remain -= int64(n)
- }
- if n == 0 && err1 == nil {
- break
- }
- if err1 == syscall.EAGAIN {
- if err1 = c.pd.waitWrite(); err1 == nil {
- continue
- }
- }
- if err1 != nil {
- // This includes syscall.ENOSYS (no kernel
- // support) and syscall.EINVAL (fd types which
- // don't implement sendfile)
- err = err1
- break
- }
- }
if lr != nil {
- lr.N = remain
- }
- if err != nil {
- err = os.NewSyscallError("sendfile", err)
+ lr.N = remain - written
}
- return written, err, written > 0
+ return written, wrapSyscallError("sendfile", err), written > 0
}
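The LimitedReader bookkeeping changes shape here: the old loop decremented remain in place, so lr.N = remain was already the leftover budget; with the loop moved into internal/poll, the caller knows only the starting budget and the bytes written, hence lr.N = remain - written. A toy illustration of that accounting:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func main() {
        lr := &io.LimitedReader{R: strings.NewReader("0123456789"), N: 100}

        remain := lr.N          // budget captured up front, as sendFile does
        written := int64(6)     // pretend the kernel moved 6 bytes

        lr.N = remain - written // the accounting the new code performs
        fmt.Println(lr.N)       // 94
    }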
diff --git a/libgo/go/net/sendfile_solaris.go b/libgo/go/net/sendfile_solaris.go
index add70c3147e..63ca9d47b8a 100644
--- a/libgo/go/net/sendfile_solaris.go
+++ b/libgo/go/net/sendfile_solaris.go
@@ -5,19 +5,11 @@
package net
import (
+ "internal/poll"
"io"
"os"
- "syscall"
)
-// Not strictly needed, but very helpful for debugging, see issue #10221.
-//go:cgo_import_dynamic _ _ "libsendfile.so"
-//go:cgo_import_dynamic _ _ "libsocket.so"
-
-// maxSendfileSize is the largest chunk size we ask the kernel to copy
-// at a time.
-const maxSendfileSize int = 4 << 20
-
// sendFile copies the contents of r to c using the sendfile
// system call to minimize copies.
//
@@ -62,56 +54,10 @@ func sendFile(c *netFD, r io.Reader) (written int64, err error, handled bool) {
return 0, err, false
}
- if err := c.writeLock(); err != nil {
- return 0, err, true
- }
- defer c.writeUnlock()
+ written, err = poll.SendFile(&c.pfd, int(f.Fd()), pos, remain)
- dst := c.sysfd
- src := int(f.Fd())
- for remain > 0 {
- n := maxSendfileSize
- if int64(n) > remain {
- n = int(remain)
- }
- pos1 := pos
- n, err1 := syscall.Sendfile(dst, src, &pos1, n)
- if err1 == syscall.EAGAIN || err1 == syscall.EINTR {
- // partial write may have occurred
- if n = int(pos1 - pos); n == 0 {
- // nothing more to write
- err1 = nil
- }
- }
- if n > 0 {
- pos += int64(n)
- written += int64(n)
- remain -= int64(n)
- }
- if n == 0 && err1 == nil {
- break
- }
- if err1 == syscall.EAGAIN {
- if err1 = c.pd.waitWrite(); err1 == nil {
- continue
- }
- }
- if err1 == syscall.EINTR {
- continue
- }
- if err1 != nil {
- // This includes syscall.ENOSYS (no kernel
- // support) and syscall.EINVAL (fd types which
- // don't implement sendfile)
- err = err1
- break
- }
- }
if lr != nil {
- lr.N = remain
- }
- if err != nil {
- err = os.NewSyscallError("sendfile", err)
+ lr.N = remain - written
}
- return written, err, written > 0
+ return written, wrapSyscallError("sendfile", err), written > 0
}
diff --git a/libgo/go/net/sendfile_windows.go b/libgo/go/net/sendfile_windows.go
index bc0b7fb5b25..bccd8b149f7 100644
--- a/libgo/go/net/sendfile_windows.go
+++ b/libgo/go/net/sendfile_windows.go
@@ -5,6 +5,7 @@
package net
import (
+ "internal/poll"
"io"
"os"
"syscall"
@@ -34,19 +35,10 @@ func sendFile(fd *netFD, r io.Reader) (written int64, err error, handled bool) {
return 0, nil, false
}
- if err := fd.writeLock(); err != nil {
- return 0, err, true
- }
- defer fd.writeUnlock()
+ done, err := poll.SendFile(&fd.pfd, syscall.Handle(f.Fd()), n)
- o := &fd.wop
- o.qty = uint32(n)
- o.handle = syscall.Handle(f.Fd())
- done, err := wsrv.ExecIO(o, "TransmitFile", func(o *operation) error {
- return syscall.TransmitFile(o.fd.sysfd, o.handle, o.qty, 0, &o.o, nil, syscall.TF_WRITE_BEHIND)
- })
if err != nil {
- return 0, os.NewSyscallError("transmitfile", err), false
+ return 0, wrapSyscallError("transmitfile", err), false
}
if lr != nil {
lr.N -= int64(done)
diff --git a/libgo/go/net/smtp/smtp.go b/libgo/go/net/smtp/smtp.go
index a408fa53363..28472e447b5 100644
--- a/libgo/go/net/smtp/smtp.go
+++ b/libgo/go/net/smtp/smtp.go
@@ -298,7 +298,7 @@ var testHookStartTLS func(*tls.Config) // nil, except for tests
// messages is accomplished by including an email address in the to
// parameter but not including it in the msg headers.
//
-// The SendMail function and the the net/smtp package are low-level
+// The SendMail function and the net/smtp package are low-level
// mechanisms and provide no support for DKIM signing, MIME
// attachments (see the mime/multipart package), or other mail
// functionality. Higher-level packages exist outside of the standard
diff --git a/libgo/go/net/smtp/smtp_test.go b/libgo/go/net/smtp/smtp_test.go
index c48fae6d5ac..9dbe3eb9ecb 100644
--- a/libgo/go/net/smtp/smtp_test.go
+++ b/libgo/go/net/smtp/smtp_test.go
@@ -9,9 +9,11 @@ import (
"bytes"
"crypto/tls"
"crypto/x509"
+ "internal/testenv"
"io"
"net"
"net/textproto"
+ "runtime"
"strings"
"testing"
"time"
@@ -592,6 +594,9 @@ QUIT
`
func TestTLSClient(t *testing.T) {
+ if runtime.GOOS == "freebsd" && runtime.GOARCH == "amd64" {
+ testenv.SkipFlaky(t, 19229)
+ }
ln := newLocalListener(t)
defer ln.Close()
errc := make(chan error)
diff --git a/libgo/go/net/sock_cloexec.go b/libgo/go/net/sock_cloexec.go
index 616a101eacb..06ff10d834a 100644
--- a/libgo/go/net/sock_cloexec.go
+++ b/libgo/go/net/sock_cloexec.go
@@ -5,11 +5,12 @@
// This file implements sysSocket and accept for platforms that
// provide a fast path for setting SetNonblock and CloseOnExec.
-// +build freebsd linux
+// +build dragonfly freebsd linux
package net
import (
+ "internal/poll"
"os"
"syscall"
)
@@ -42,46 +43,8 @@ func sysSocket(family, sotype, proto int) (int, error) {
return -1, os.NewSyscallError("socket", err)
}
if err = syscall.SetNonblock(s, true); err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return -1, os.NewSyscallError("setnonblock", err)
}
return s, nil
}
-
-// Wrapper around the accept system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func accept(s int) (int, syscall.Sockaddr, error) {
- ns, sa, err := accept4Func(s, syscall.SOCK_NONBLOCK|syscall.SOCK_CLOEXEC)
- // On Linux the accept4 system call was introduced in 2.6.28
- // kernel and on FreeBSD it was introduced in 10 kernel. If we
- // get an ENOSYS error on both Linux and FreeBSD, or EINVAL
- // error on Linux, fall back to using accept.
- switch err {
- case nil:
- return ns, sa, nil
- default: // errors other than the ones listed
- return -1, sa, os.NewSyscallError("accept4", err)
- case syscall.ENOSYS: // syscall missing
- case syscall.EINVAL: // some Linux use this instead of ENOSYS
- case syscall.EACCES: // some Linux use this instead of ENOSYS
- case syscall.EFAULT: // some Linux use this instead of ENOSYS
- }
-
- // See ../syscall/exec_unix.go for description of ForkLock.
- // It is probably okay to hold the lock across syscall.Accept
- // because we have put fd.sysfd into non-blocking mode.
- // However, a call to the File method will put it back into
- // blocking mode. We can't take that risk, so no use of ForkLock here.
- ns, sa, err = acceptFunc(s)
- if err == nil {
- syscall.CloseOnExec(ns)
- }
- if err != nil {
- return -1, nil, os.NewSyscallError("accept", err)
- }
- if err = syscall.SetNonblock(ns, true); err != nil {
- closeFunc(ns)
- return -1, nil, os.NewSyscallError("setnonblock", err)
- }
- return ns, sa, nil
-}
diff --git a/libgo/go/net/sock_posix.go b/libgo/go/net/sock_posix.go
index 6bbfd1208ee..a30efe2336a 100644
--- a/libgo/go/net/sock_posix.go
+++ b/libgo/go/net/sock_posix.go
@@ -8,6 +8,7 @@ package net
import (
"context"
+ "internal/poll"
"os"
"syscall"
)
@@ -43,11 +44,11 @@ func socket(ctx context.Context, net string, family, sotype, proto int, ipv6only
return nil, err
}
if err = setDefaultSockopts(s, family, sotype, ipv6only); err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, err
}
if fd, err = newFD(s, family, sotype, net); err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return nil, err
}
@@ -127,17 +128,18 @@ func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr) error {
if lsa, err = laddr.sockaddr(fd.family); err != nil {
return err
} else if lsa != nil {
- if err := syscall.Bind(fd.sysfd, lsa); err != nil {
+ if err := syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
return os.NewSyscallError("bind", err)
}
}
}
- var rsa syscall.Sockaddr
+ var rsa syscall.Sockaddr // remote address from the user
+ var crsa syscall.Sockaddr // remote address we actually connected to
if raddr != nil {
if rsa, err = raddr.sockaddr(fd.family); err != nil {
return err
}
- if err := fd.connect(ctx, lsa, rsa); err != nil {
+ if crsa, err = fd.connect(ctx, lsa, rsa); err != nil {
return err
}
fd.isConnected = true
@@ -146,8 +148,16 @@ func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr) error {
return err
}
}
- lsa, _ = syscall.Getsockname(fd.sysfd)
- if rsa, _ = syscall.Getpeername(fd.sysfd); rsa != nil {
+ // Record the local and remote addresses from the actual socket.
+ // Get the local address by calling Getsockname.
+ // For the remote address, use
+ // 1) the one returned by the connect method, if any; or
+ // 2) the one from Getpeername, if it succeeds; or
+ // 3) the one passed to us as the raddr parameter.
+ lsa, _ = syscall.Getsockname(fd.pfd.Sysfd)
+ if crsa != nil {
+ fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(crsa))
+ } else if rsa, _ = syscall.Getpeername(fd.pfd.Sysfd); rsa != nil {
fd.setAddr(fd.addrFunc()(lsa), fd.addrFunc()(rsa))
} else {
fd.setAddr(fd.addrFunc()(lsa), raddr)
@@ -156,23 +166,23 @@ func (fd *netFD) dial(ctx context.Context, laddr, raddr sockaddr) error {
}
func (fd *netFD) listenStream(laddr sockaddr, backlog int) error {
- if err := setDefaultListenerSockopts(fd.sysfd); err != nil {
+ if err := setDefaultListenerSockopts(fd.pfd.Sysfd); err != nil {
return err
}
if lsa, err := laddr.sockaddr(fd.family); err != nil {
return err
} else if lsa != nil {
- if err := syscall.Bind(fd.sysfd, lsa); err != nil {
+ if err := syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
return os.NewSyscallError("bind", err)
}
}
- if err := listenFunc(fd.sysfd, backlog); err != nil {
+ if err := listenFunc(fd.pfd.Sysfd, backlog); err != nil {
return os.NewSyscallError("listen", err)
}
if err := fd.init(); err != nil {
return err
}
- lsa, _ := syscall.Getsockname(fd.sysfd)
+ lsa, _ := syscall.Getsockname(fd.pfd.Sysfd)
fd.setAddr(fd.addrFunc()(lsa), nil)
return nil
}
@@ -188,7 +198,7 @@ func (fd *netFD) listenDatagram(laddr sockaddr) error {
// multiple UDP listeners that listen on the same UDP
// port to join the same group address.
if addr.IP != nil && addr.IP.IsMulticast() {
- if err := setDefaultMulticastSockopts(fd.sysfd); err != nil {
+ if err := setDefaultMulticastSockopts(fd.pfd.Sysfd); err != nil {
return err
}
addr := *addr
@@ -204,14 +214,14 @@ func (fd *netFD) listenDatagram(laddr sockaddr) error {
if lsa, err := laddr.sockaddr(fd.family); err != nil {
return err
} else if lsa != nil {
- if err := syscall.Bind(fd.sysfd, lsa); err != nil {
+ if err := syscall.Bind(fd.pfd.Sysfd, lsa); err != nil {
return os.NewSyscallError("bind", err)
}
}
if err := fd.init(); err != nil {
return err
}
- lsa, _ := syscall.Getsockname(fd.sysfd)
+ lsa, _ := syscall.Getsockname(fd.pfd.Sysfd)
fd.setAddr(fd.addrFunc()(lsa), nil)
return nil
}
diff --git a/libgo/go/net/sockopt_bsd.go b/libgo/go/net/sockopt_bsd.go
index 734a1093d68..1aae88a3e8d 100644
--- a/libgo/go/net/sockopt_bsd.go
+++ b/libgo/go/net/sockopt_bsd.go
@@ -25,7 +25,7 @@ func setDefaultSockopts(s, family, sotype int, ipv6only bool) error {
syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_PORTRANGE, syscall.IPV6_PORTRANGE_HIGH)
}
}
- if supportsIPv4map && family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW {
+ if supportsIPv4map() && family == syscall.AF_INET6 && sotype != syscall.SOCK_RAW {
// Allow both IP versions even if the OS default
// is otherwise. Note that some operating systems
// never admit this option.
diff --git a/libgo/go/net/sockopt_posix.go b/libgo/go/net/sockopt_posix.go
index cacd04889d9..29edddbf374 100644
--- a/libgo/go/net/sockopt_posix.go
+++ b/libgo/go/net/sockopt_posix.go
@@ -7,7 +7,7 @@
package net
import (
- "os"
+ "runtime"
"syscall"
)
@@ -101,27 +101,21 @@ done:
}
func setReadBuffer(fd *netFD, bytes int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes))
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setWriteBuffer(fd *netFD, bytes int) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes))
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setKeepAlive(fd *netFD, keepalive bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, boolint(keepalive)))
+ err := fd.pfd.SetsockoptInt(syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, boolint(keepalive))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setLinger(fd *netFD, sec int) error {
@@ -133,9 +127,7 @@ func setLinger(fd *netFD, sec int) error {
l.Onoff = 0
l.Linger = 0
}
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptLinger(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_LINGER, &l))
+ err := fd.pfd.SetsockoptLinger(syscall.SOL_SOCKET, syscall.SO_LINGER, &l)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
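Throughout these files the incref/decref pairs disappear because reference counting now lives inside fd.pfd; what this layer adds instead is runtime.KeepAlive(fd), which keeps the collector from finalizing fd (and closing its descriptor) while only the extracted fd.pfd is still in use. The same idiom in isolation, with a made-up resource type:

    package main

    import (
        "fmt"
        "runtime"
    )

    // res stands in for a type whose finalizer releases an OS handle.
    type res struct{ fd int }

    func syscallOn(fd int) { fmt.Println("syscall on fd", fd) }

    func use(r *res) {
        // Only r.fd escapes into the call below; without the KeepAlive the
        // collector may treat r as dead before syscallOn returns and run a
        // finalizer that closes the descriptor out from under it.
        syscallOn(r.fd)
        runtime.KeepAlive(r)
    }

    func main() {
        r := &res{fd: 3}
        runtime.SetFinalizer(r, func(r *res) { fmt.Println("closing fd", r.fd) })
        use(r)
    }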
diff --git a/libgo/go/net/sockoptip_bsd.go b/libgo/go/net/sockoptip_bsd.go
index b15c6396ba1..b11f3a4edbe 100644
--- a/libgo/go/net/sockoptip_bsd.go
+++ b/libgo/go/net/sockoptip_bsd.go
@@ -7,28 +7,24 @@
package net
import (
- "os"
+ "runtime"
"syscall"
)
func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
ip, err := interfaceToIPv4Addr(ifi)
if err != nil {
- return os.NewSyscallError("setsockopt", err)
+ return wrapSyscallError("setsockopt", err)
}
var a [4]byte
copy(a[:], ip.To4())
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInet4Addr(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, a))
+ err = fd.pfd.SetsockoptInet4Addr(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, a)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setIPv4MulticastLoopback(fd *netFD, v bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptByte(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, byte(boolint(v))))
+ err := fd.pfd.SetsockoptByte(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, byte(boolint(v)))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/sockoptip_linux.go b/libgo/go/net/sockoptip_linux.go
index c1dcc911c73..bd7d8344258 100644
--- a/libgo/go/net/sockoptip_linux.go
+++ b/libgo/go/net/sockoptip_linux.go
@@ -5,7 +5,7 @@
package net
import (
- "os"
+ "runtime"
"syscall"
)
@@ -15,17 +15,13 @@ func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
v = int32(ifi.Index)
}
mreq := &syscall.IPMreqn{Ifindex: v}
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptIPMreqn(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, mreq))
+ err := fd.pfd.SetsockoptIPMreqn(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, mreq)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setIPv4MulticastLoopback(fd *netFD, v bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v)))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/sockoptip_posix.go b/libgo/go/net/sockoptip_posix.go
index 4afd4c8ea3f..92af7646ef9 100644
--- a/libgo/go/net/sockoptip_posix.go
+++ b/libgo/go/net/sockoptip_posix.go
@@ -7,7 +7,7 @@
package net
import (
- "os"
+ "runtime"
"syscall"
)
@@ -16,11 +16,9 @@ func joinIPv4Group(fd *netFD, ifi *Interface, ip IP) error {
if err := setIPv4MreqToInterface(mreq, ifi); err != nil {
return err
}
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptIPMreq(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_ADD_MEMBERSHIP, mreq))
+ err := fd.pfd.SetsockoptIPMreq(syscall.IPPROTO_IP, syscall.IP_ADD_MEMBERSHIP, mreq)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error {
@@ -28,19 +26,15 @@ func setIPv6MulticastInterface(fd *netFD, ifi *Interface) error {
if ifi != nil {
v = ifi.Index
}
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_IF, v))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_IF, v)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setIPv6MulticastLoopback(fd *netFD, v bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_LOOP, boolint(v)))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IPV6, syscall.IPV6_MULTICAST_LOOP, boolint(v))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error {
@@ -49,9 +43,7 @@ func joinIPv6Group(fd *netFD, ifi *Interface, ip IP) error {
if ifi != nil {
mreq.Interface = uint32(ifi.Index)
}
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptIPv6Mreq(fd.sysfd, syscall.IPPROTO_IPV6, syscall.IPV6_JOIN_GROUP, mreq))
+ err := fd.pfd.SetsockoptIPv6Mreq(syscall.IPPROTO_IPV6, syscall.IPV6_JOIN_GROUP, mreq)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/sockoptip_windows.go b/libgo/go/net/sockoptip_windows.go
index 916debebc6f..62676039a3b 100644
--- a/libgo/go/net/sockoptip_windows.go
+++ b/libgo/go/net/sockoptip_windows.go
@@ -6,6 +6,7 @@ package net
import (
"os"
+ "runtime"
"syscall"
"unsafe"
)
@@ -17,17 +18,13 @@ func setIPv4MulticastInterface(fd *netFD, ifi *Interface) error {
}
var a [4]byte
copy(a[:], ip.To4())
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.Setsockopt(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, (*byte)(unsafe.Pointer(&a[0])), 4))
+ err = fd.pfd.Setsockopt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_IF, (*byte)(unsafe.Pointer(&a[0])), 4)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
func setIPv4MulticastLoopback(fd *netFD, v bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v)))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_IP, syscall.IP_MULTICAST_LOOP, boolint(v))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/sys_cloexec.go b/libgo/go/net/sys_cloexec.go
index f2ea8425493..def05cb5a7d 100644
--- a/libgo/go/net/sys_cloexec.go
+++ b/libgo/go/net/sys_cloexec.go
@@ -5,11 +5,12 @@
// This file implements sysSocket and accept for platforms that do not
// provide a fast path for setting SetNonblock and CloseOnExec.
-// +build aix darwin dragonfly nacl netbsd openbsd solaris
+// +build aix darwin nacl netbsd openbsd solaris
package net
import (
+ "internal/poll"
"os"
"syscall"
)
@@ -28,30 +29,8 @@ func sysSocket(family, sotype, proto int) (int, error) {
return -1, os.NewSyscallError("socket", err)
}
if err = syscall.SetNonblock(s, true); err != nil {
- closeFunc(s)
+ poll.CloseFunc(s)
return -1, os.NewSyscallError("setnonblock", err)
}
return s, nil
}
-
-// Wrapper around the accept system call that marks the returned file
-// descriptor as nonblocking and close-on-exec.
-func accept(s int) (int, syscall.Sockaddr, error) {
- // See ../syscall/exec_unix.go for description of ForkLock.
- // It is probably okay to hold the lock across syscall.Accept
- // because we have put fd.sysfd into non-blocking mode.
- // However, a call to the File method will put it back into
- // blocking mode. We can't take that risk, so no use of ForkLock here.
- ns, sa, err := acceptFunc(s)
- if err == nil {
- syscall.CloseOnExec(ns)
- }
- if err != nil {
- return -1, nil, os.NewSyscallError("accept", err)
- }
- if err = syscall.SetNonblock(ns, true); err != nil {
- closeFunc(ns)
- return -1, nil, os.NewSyscallError("setnonblock", err)
- }
- return ns, sa, nil
-}
diff --git a/libgo/go/net/tcpsock.go b/libgo/go/net/tcpsock.go
index 69731ebc914..e957aa3005a 100644
--- a/libgo/go/net/tcpsock.go
+++ b/libgo/go/net/tcpsock.go
@@ -50,28 +50,34 @@ func (a *TCPAddr) opAddr() Addr {
return a
}
-// ResolveTCPAddr parses addr as a TCP address of the form "host:port"
-// or "[ipv6-host%zone]:port" and resolves a pair of domain name and
-// port name on the network net, which must be "tcp", "tcp4" or
-// "tcp6". A literal address or host name for IPv6 must be enclosed
-// in square brackets, as in "[::1]:80", "[ipv6-host]:http" or
-// "[ipv6-host%zone]:80".
+// ResolveTCPAddr returns an address of TCP end point.
//
-// Resolving a hostname is not recommended because this returns at most
-// one of its IP addresses.
-func ResolveTCPAddr(net, addr string) (*TCPAddr, error) {
- switch net {
+// The network must be a TCP network name.
+//
+// If the host in the address parameter is not a literal IP address or
+// the port is not a literal port number, ResolveTCPAddr resolves the
+// address to an address of TCP end point.
+// Otherwise, it parses the address as a pair of literal IP address
+// and port number.
+// The address parameter can use a host name, but this is not
+// recommended, because it will return at most one of the host name's
+// IP addresses.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ResolveTCPAddr(network, address string) (*TCPAddr, error) {
+ switch network {
case "tcp", "tcp4", "tcp6":
case "": // a hint wildcard for Go 1.0 undocumented behavior
- net = "tcp"
+ network = "tcp"
default:
- return nil, UnknownNetworkError(net)
+ return nil, UnknownNetworkError(network)
}
- addrs, err := DefaultResolver.internetAddrList(context.Background(), net, addr)
+ addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address)
if err != nil {
return nil, err
}
- return addrs.first(isIPv4).(*TCPAddr), nil
+ return addrs.forResolve(network, address).(*TCPAddr), nil
}
// TCPConn is an implementation of the Conn interface for TCP network
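The rewritten doc comment is easier to follow next to a concrete call; a short usage sketch:

    package main

    import (
        "fmt"
        "log"
        "net"
    )

    func main() {
        // Literal IP and port: parsed directly, no resolver involved.
        a, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(a.IP, a.Port) // 127.0.0.1 8080

        // Host and service names: resolved, but only one address comes
        // back, which is why the comment discourages host names here.
        b, err := net.ResolveTCPAddr("tcp", "localhost:http")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(b.IP, b.Port) // e.g. 127.0.0.1 80
    }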
@@ -80,6 +86,15 @@ type TCPConn struct {
conn
}
+// SyscallConn returns a raw network connection.
+// This implements the syscall.Conn interface.
+func (c *TCPConn) SyscallConn() (syscall.RawConn, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ return newRawConn(c.fd)
+}
+
// ReadFrom implements the io.ReaderFrom ReadFrom method.
func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
if !c.ok() {
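SyscallConn exposes the descriptor without giving up ownership: the returned syscall.RawConn runs a callback with the fd and guarantees it stays valid for the callback's duration. A Unix-flavored sketch that sets an option the net package has no wrapper for (address and option chosen only for illustration):

    package main

    import (
        "log"
        "net"
        "syscall"
    )

    func main() {
        conn, err := net.Dial("tcp", "127.0.0.1:8080")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        raw, err := conn.(*net.TCPConn).SyscallConn()
        if err != nil {
            log.Fatal(err)
        }

        // Control runs the callback with the real descriptor; the runtime
        // keeps the fd open and valid until the callback returns.
        var serr error
        if err := raw.Control(func(fd uintptr) {
            serr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET,
                syscall.SO_RCVBUF, 1<<16)
        }); err != nil {
            log.Fatal(err)
        }
        if serr != nil {
            log.Fatal(serr)
        }
    }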
@@ -181,21 +196,25 @@ func newTCPConn(fd *netFD) *TCPConn {
return c
}
-// DialTCP connects to the remote address raddr on the network net,
-// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is
-// used as the local address for the connection.
-func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
- switch net {
+// DialTCP acts like Dial for TCP networks.
+//
+// The network must be a TCP network name; see func Dial for details.
+//
+// If laddr is nil, a local address is automatically chosen.
+// If the IP field of raddr is nil or an unspecified IP address, the
+// local system is assumed.
+func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ switch network {
case "tcp", "tcp4", "tcp6":
default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
}
if raddr == nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
- c, err := dialTCP(context.Background(), net, laddr, raddr)
+ c, err := dialTCP(context.Background(), network, laddr, raddr)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
return c, nil
}
@@ -255,7 +274,7 @@ func (l *TCPListener) SetDeadline(t time.Time) error {
if !l.ok() {
return syscall.EINVAL
}
- if err := l.fd.setDeadline(t); err != nil {
+ if err := l.fd.pfd.SetDeadline(t); err != nil {
return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
}
return nil
@@ -279,22 +298,27 @@ func (l *TCPListener) File() (f *os.File, err error) {
return
}
-// ListenTCP announces on the TCP address laddr and returns a TCP
-// listener. Net must be "tcp", "tcp4", or "tcp6". If laddr has a
-// port of 0, ListenTCP will choose an available port. The caller can
-// use the Addr method of TCPListener to retrieve the chosen address.
-func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
- switch net {
+// ListenTCP acts like Listen for TCP networks.
+//
+// The network must be a TCP network name; see func Dial for details.
+//
+// If the IP field of laddr is nil or an unspecified IP address,
+// ListenTCP listens on all available unicast and anycast IP addresses
+// of the local system.
+// If the Port field of laddr is 0, a port number is automatically
+// chosen.
+func ListenTCP(network string, laddr *TCPAddr) (*TCPListener, error) {
+ switch network {
case "tcp", "tcp4", "tcp6":
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
}
if laddr == nil {
laddr = &TCPAddr{}
}
- ln, err := listenTCP(context.Background(), net, laddr)
+ ln, err := listenTCP(context.Background(), network, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
}
return ln, nil
}
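A short sketch exercising the two behaviors the new comments call out: a zero Port makes ListenTCP pick a free port, and a nil laddr makes DialTCP choose the local address:

    package main

    import (
        "fmt"
        "log"
        "net"
    )

    func main() {
        // Port left at 0: the system chooses an available port.
        ln, err := net.ListenTCP("tcp", &net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)})
        if err != nil {
            log.Fatal(err)
        }
        defer ln.Close()
        fmt.Println("listening on", ln.Addr())

        // laddr nil: a local address is chosen automatically.
        conn, err := net.DialTCP("tcp", nil, ln.Addr().(*net.TCPAddr))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        fmt.Println("dialed from", conn.LocalAddr())
    }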
diff --git a/libgo/go/net/tcpsock_plan9.go b/libgo/go/net/tcpsock_plan9.go
index d2860607f8b..e37f0657c07 100644
--- a/libgo/go/net/tcpsock_plan9.go
+++ b/libgo/go/net/tcpsock_plan9.go
@@ -48,6 +48,9 @@ func (ln *TCPListener) accept() (*TCPConn, error) {
}
func (ln *TCPListener) close() error {
+ if err := ln.fd.pfd.Close(); err != nil {
+ return err
+ }
if _, err := ln.fd.ctl.WriteString("hangup"); err != nil {
ln.fd.ctl.Close()
return err
diff --git a/libgo/go/net/tcpsock_posix.go b/libgo/go/net/tcpsock_posix.go
index 7533c24d122..9ba199dfb02 100644
--- a/libgo/go/net/tcpsock_posix.go
+++ b/libgo/go/net/tcpsock_posix.go
@@ -18,7 +18,7 @@ func sockaddrToTCP(sa syscall.Sockaddr) Addr {
case *syscall.SockaddrInet4:
return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port}
case *syscall.SockaddrInet6:
- return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneToString(int(sa.ZoneId))}
+ return &TCPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
}
return nil
}
diff --git a/libgo/go/net/tcpsock_test.go b/libgo/go/net/tcpsock_test.go
index 51154221d0a..660f4249d40 100644
--- a/libgo/go/net/tcpsock_test.go
+++ b/libgo/go/net/tcpsock_test.go
@@ -32,28 +32,28 @@ func BenchmarkTCP4PersistentTimeout(b *testing.B) {
}
func BenchmarkTCP6OneShot(b *testing.B) {
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("ipv6 is not supported")
}
benchmarkTCP(b, false, false, "[::1]:0")
}
func BenchmarkTCP6OneShotTimeout(b *testing.B) {
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("ipv6 is not supported")
}
benchmarkTCP(b, false, true, "[::1]:0")
}
func BenchmarkTCP6Persistent(b *testing.B) {
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("ipv6 is not supported")
}
benchmarkTCP(b, true, false, "[::1]:0")
}
func BenchmarkTCP6PersistentTimeout(b *testing.B) {
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("ipv6 is not supported")
}
benchmarkTCP(b, true, true, "[::1]:0")
@@ -163,7 +163,7 @@ func BenchmarkTCP4ConcurrentReadWrite(b *testing.B) {
}
func BenchmarkTCP6ConcurrentReadWrite(b *testing.B) {
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("ipv6 is not supported")
}
benchmarkTCPConcurrentReadWrite(b, "[::1]:0")
@@ -372,7 +372,7 @@ func TestTCPListenerName(t *testing.T) {
func TestIPv6LinkLocalUnicastTCP(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv6 {
+ if !supportsIPv6() {
t.Skip("IPv6 is not supported")
}
diff --git a/libgo/go/net/tcpsock_unix_test.go b/libgo/go/net/tcpsock_unix_test.go
index 2375fe24dc4..3af1834455b 100644
--- a/libgo/go/net/tcpsock_unix_test.go
+++ b/libgo/go/net/tcpsock_unix_test.go
@@ -2,11 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin
+// +build !plan9,!windows
package net
import (
+ "context"
+ "internal/testenv"
+ "math/rand"
"runtime"
"sync"
"syscall"
@@ -77,3 +80,37 @@ func TestTCPSpuriousConnSetupCompletion(t *testing.T) {
ln.Close()
wg.Wait()
}
+
+// Issue 19289.
+// Test that a canceled Dial does not cause a subsequent Dial to succeed.
+func TestTCPSpuriousConnSetupCompletionWithCancel(t *testing.T) {
+ if testenv.Builder() == "" {
+ testenv.MustHaveExternalNetwork(t)
+ }
+ t.Parallel()
+ const tries = 10000
+ var wg sync.WaitGroup
+ wg.Add(tries * 2)
+ sem := make(chan bool, 5)
+ for i := 0; i < tries; i++ {
+ sem <- true
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ defer wg.Done()
+ time.Sleep(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))
+ cancel()
+ }()
+ go func(i int) {
+ defer wg.Done()
+ var dialer Dialer
+ // Try to connect to a real host on a port
+ // that it is not listening on.
+ _, err := dialer.DialContext(ctx, "tcp", "golang.org:3")
+ if err == nil {
+ t.Errorf("Dial to unbound port succeeded on attempt %d", i)
+ }
+ <-sem
+ }(i)
+ }
+ wg.Wait()
+}
diff --git a/libgo/go/net/tcpsockopt_darwin.go b/libgo/go/net/tcpsockopt_darwin.go
index 0d1310eaf9e..7415c763c50 100644
--- a/libgo/go/net/tcpsockopt_darwin.go
+++ b/libgo/go/net/tcpsockopt_darwin.go
@@ -5,7 +5,7 @@
package net
import (
- "os"
+ "runtime"
"syscall"
"time"
)
@@ -13,17 +13,15 @@ import (
const sysTCP_KEEPINTVL = 0x101
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
// The kernel expects seconds so round to next highest second.
d += (time.Second - time.Nanosecond)
secs := int(d.Seconds())
- switch err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs); err {
+ switch err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs); err {
case nil, syscall.ENOPROTOOPT: // OS X 10.7 and earlier don't support this option
default:
- return os.NewSyscallError("setsockopt", err)
+ return wrapSyscallError("setsockopt", err)
}
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE, secs))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE, secs)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
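The rounding line here (and in the other keep-alive files) is easy to misread: adding time.Second - time.Nanosecond before truncating is a ceiling, so fractional seconds round up while exact multiples are unchanged. For example:

    package main

    import (
        "fmt"
        "time"
    )

    // ceilSeconds rounds d up to whole seconds, as setKeepAlivePeriod does.
    func ceilSeconds(d time.Duration) int {
        d += time.Second - time.Nanosecond
        return int(d.Seconds())
    }

    func main() {
        fmt.Println(ceilSeconds(1500 * time.Millisecond)) // 2
        fmt.Println(ceilSeconds(2 * time.Second))         // 2
        fmt.Println(ceilSeconds(time.Nanosecond))         // 1
    }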
diff --git a/libgo/go/net/tcpsockopt_dragonfly.go b/libgo/go/net/tcpsockopt_dragonfly.go
index 7cc716bad10..2b018f2bb2b 100644
--- a/libgo/go/net/tcpsockopt_dragonfly.go
+++ b/libgo/go/net/tcpsockopt_dragonfly.go
@@ -5,22 +5,20 @@
package net
import (
- "os"
+ "runtime"
"syscall"
"time"
)
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
// The kernel expects milliseconds so round to next highest
// millisecond.
d += (time.Millisecond - time.Nanosecond)
msecs := int(d / time.Millisecond)
- if err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs); err != nil {
- return os.NewSyscallError("setsockopt", err)
+ if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs); err != nil {
+ return wrapSyscallError("setsockopt", err)
}
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, msecs))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, msecs)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/tcpsockopt_posix.go b/libgo/go/net/tcpsockopt_posix.go
index 36866ac0418..5e00ba15647 100644
--- a/libgo/go/net/tcpsockopt_posix.go
+++ b/libgo/go/net/tcpsockopt_posix.go
@@ -7,14 +7,12 @@
package net
import (
- "os"
+ "runtime"
"syscall"
)
func setNoDelay(fd *netFD, noDelay bool) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_NODELAY, boolint(noDelay)))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_NODELAY, boolint(noDelay))
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/tcpsockopt_solaris.go b/libgo/go/net/tcpsockopt_solaris.go
index 347c17d7e27..aa86a29063e 100644
--- a/libgo/go/net/tcpsockopt_solaris.go
+++ b/libgo/go/net/tcpsockopt_solaris.go
@@ -2,26 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// TCP socket options for solaris
-
package net
import (
- "os"
+ "runtime"
"syscall"
"time"
)
-// Set keep alive period.
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
-
// The kernel expects seconds so round to next highest second.
d += (time.Second - time.Nanosecond)
secs := int(d.Seconds())
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.SO_KEEPALIVE, secs))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.SO_KEEPALIVE, secs)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/tcpsockopt_unix.go b/libgo/go/net/tcpsockopt_unix.go
index 46e5e6d23fe..d5892588fea 100644
--- a/libgo/go/net/tcpsockopt_unix.go
+++ b/libgo/go/net/tcpsockopt_unix.go
@@ -7,21 +7,19 @@
package net
import (
- "os"
+ "runtime"
"syscall"
"time"
)
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
// The kernel expects seconds so round to next highest second.
d += (time.Second - time.Nanosecond)
secs := int(d.Seconds())
- if err := syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs); err != nil {
- return os.NewSyscallError("setsockopt", err)
+ if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs); err != nil {
+ return wrapSyscallError("setsockopt", err)
}
- return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs))
+ err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs)
+ runtime.KeepAlive(fd)
+ return wrapSyscallError("setsockopt", err)
}
diff --git a/libgo/go/net/tcpsockopt_windows.go b/libgo/go/net/tcpsockopt_windows.go
index 45a4dca5257..73dead11d00 100644
--- a/libgo/go/net/tcpsockopt_windows.go
+++ b/libgo/go/net/tcpsockopt_windows.go
@@ -6,16 +6,13 @@ package net
import (
"os"
+ "runtime"
"syscall"
"time"
"unsafe"
)
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
- if err := fd.incref(); err != nil {
- return err
- }
- defer fd.decref()
// The kernel expects milliseconds so round to next highest
// millisecond.
d += (time.Millisecond - time.Nanosecond)
@@ -27,6 +24,7 @@ func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
}
ret := uint32(0)
size := uint32(unsafe.Sizeof(ka))
- err := syscall.WSAIoctl(fd.sysfd, syscall.SIO_KEEPALIVE_VALS, (*byte)(unsafe.Pointer(&ka)), size, nil, 0, &ret, nil, 0)
+ err := fd.pfd.WSAIoctl(syscall.SIO_KEEPALIVE_VALS, (*byte)(unsafe.Pointer(&ka)), size, nil, 0, &ret, nil, 0)
+ runtime.KeepAlive(fd)
return os.NewSyscallError("wsaioctl", err)
}
diff --git a/libgo/go/net/timeout_test.go b/libgo/go/net/timeout_test.go
index 55bbf4402d9..9de7801ad10 100644
--- a/libgo/go/net/timeout_test.go
+++ b/libgo/go/net/timeout_test.go
@@ -6,6 +6,7 @@ package net
import (
"fmt"
+ "internal/poll"
"internal/testenv"
"io"
"io/ioutil"
@@ -145,9 +146,9 @@ var acceptTimeoutTests = []struct {
}{
// Tests that accept deadlines in the past work, even if
// there's incoming connections available.
- {-5 * time.Second, [2]error{errTimeout, errTimeout}},
+ {-5 * time.Second, [2]error{poll.ErrTimeout, poll.ErrTimeout}},
- {50 * time.Millisecond, [2]error{nil, errTimeout}},
+ {50 * time.Millisecond, [2]error{nil, poll.ErrTimeout}},
}
func TestAcceptTimeout(t *testing.T) {
@@ -299,9 +300,9 @@ var readTimeoutTests = []struct {
}{
// Tests that read deadlines work, even if there's data ready
// to be read.
- {-5 * time.Second, [2]error{errTimeout, errTimeout}},
+ {-5 * time.Second, [2]error{poll.ErrTimeout, poll.ErrTimeout}},
- {50 * time.Millisecond, [2]error{nil, errTimeout}},
+ {50 * time.Millisecond, [2]error{nil, poll.ErrTimeout}},
}
func TestReadTimeout(t *testing.T) {
@@ -423,9 +424,9 @@ var readFromTimeoutTests = []struct {
}{
// Tests that read deadlines work, even if there's data ready
// to be read.
- {-5 * time.Second, [2]error{errTimeout, errTimeout}},
+ {-5 * time.Second, [2]error{poll.ErrTimeout, poll.ErrTimeout}},
- {50 * time.Millisecond, [2]error{nil, errTimeout}},
+ {50 * time.Millisecond, [2]error{nil, poll.ErrTimeout}},
}
func TestReadFromTimeout(t *testing.T) {
@@ -496,9 +497,9 @@ var writeTimeoutTests = []struct {
}{
// Tests that write deadlines work, even if there's buffer
// space available to write.
- {-5 * time.Second, [2]error{errTimeout, errTimeout}},
+ {-5 * time.Second, [2]error{poll.ErrTimeout, poll.ErrTimeout}},
- {10 * time.Millisecond, [2]error{nil, errTimeout}},
+ {10 * time.Millisecond, [2]error{nil, poll.ErrTimeout}},
}
func TestWriteTimeout(t *testing.T) {
@@ -610,9 +611,9 @@ var writeToTimeoutTests = []struct {
}{
// Tests that write deadlines work, even if there's buffer
// space available to write.
- {-5 * time.Second, [2]error{errTimeout, errTimeout}},
+ {-5 * time.Second, [2]error{poll.ErrTimeout, poll.ErrTimeout}},
- {10 * time.Millisecond, [2]error{nil, errTimeout}},
+ {10 * time.Millisecond, [2]error{nil, poll.ErrTimeout}},
}
func TestWriteToTimeout(t *testing.T) {
diff --git a/libgo/go/net/udpsock.go b/libgo/go/net/udpsock.go
index 841ef533590..2c0f74fdabd 100644
--- a/libgo/go/net/udpsock.go
+++ b/libgo/go/net/udpsock.go
@@ -53,28 +53,34 @@ func (a *UDPAddr) opAddr() Addr {
return a
}
-// ResolveUDPAddr parses addr as a UDP address of the form "host:port"
-// or "[ipv6-host%zone]:port" and resolves a pair of domain name and
-// port name on the network net, which must be "udp", "udp4" or
-// "udp6". A literal address or host name for IPv6 must be enclosed
-// in square brackets, as in "[::1]:80", "[ipv6-host]:http" or
-// "[ipv6-host%zone]:80".
+// ResolveUDPAddr returns an address of UDP end point.
//
-// Resolving a hostname is not recommended because this returns at most
-// one of its IP addresses.
-func ResolveUDPAddr(net, addr string) (*UDPAddr, error) {
- switch net {
+// The network must be a UDP network name.
+//
+// If the host in the address parameter is not a literal IP address or
+// the port is not a literal port number, ResolveUDPAddr resolves the
+// address to an address of UDP end point.
+// Otherwise, it parses the address as a pair of literal IP address
+// and port number.
+// The address parameter can use a host name, but this is not
+// recommended, because it will return at most one of the host name's
+// IP addresses.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ResolveUDPAddr(network, address string) (*UDPAddr, error) {
+ switch network {
case "udp", "udp4", "udp6":
case "": // a hint wildcard for Go 1.0 undocumented behavior
- net = "udp"
+ network = "udp"
default:
- return nil, UnknownNetworkError(net)
+ return nil, UnknownNetworkError(network)
}
- addrs, err := DefaultResolver.internetAddrList(context.Background(), net, addr)
+ addrs, err := DefaultResolver.internetAddrList(context.Background(), network, address)
if err != nil {
return nil, err
}
- return addrs.first(isIPv4).(*UDPAddr), nil
+ return addrs.forResolve(network, address).(*UDPAddr), nil
}
// UDPConn is the implementation of the Conn and PacketConn interfaces
@@ -83,13 +89,16 @@ type UDPConn struct {
conn
}
-// ReadFromUDP reads a UDP packet from c, copying the payload into b.
-// It returns the number of bytes copied into b and the return address
-// that was on the packet.
-//
-// ReadFromUDP can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetReadDeadline.
+// SyscallConn returns a raw network connection.
+// This implements the syscall.Conn interface.
+func (c *UDPConn) SyscallConn() (syscall.RawConn, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ return newRawConn(c.fd)
+}
+
+// ReadFromUDP acts like ReadFrom but returns a UDPAddr.
func (c *UDPConn) ReadFromUDP(b []byte) (int, *UDPAddr, error) {
if !c.ok() {
return 0, nil, syscall.EINVAL
@@ -116,11 +125,13 @@ func (c *UDPConn) ReadFrom(b []byte) (int, Addr, error) {
return n, addr, err
}
-// ReadMsgUDP reads a packet from c, copying the payload into b and
-// the associated out-of-band data into oob. It returns the number
-// of bytes copied into b, the number of bytes copied into oob, the
-// flags that were set on the packet and the source address of the
-// packet.
+// ReadMsgUDP reads a message from c, copying the payload into b and
+// the associated out-of-band data into oob. It returns the number of
+// bytes copied into b, the number of bytes copied into oob, the flags
+// that were set on the message and the source address of the message.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
func (c *UDPConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *UDPAddr, err error) {
if !c.ok() {
return 0, 0, 0, nil, syscall.EINVAL
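The new cross-reference to golang.org/x/net/ipv4 and ipv6 is the practical way to consume oob; few callers parse it by hand. A sketch (outside the standard library, port number hypothetical) in which the wrapper drives ReadMsgUDP underneath to recover each packet's destination address:

    package main

    import (
        "log"
        "net"

        "golang.org/x/net/ipv4"
    )

    func main() {
        c, err := net.ListenPacket("udp4", "0.0.0.0:9999")
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        // ipv4.PacketConn calls ReadMsgUDP and decodes the oob data for us.
        p := ipv4.NewPacketConn(c)
        if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
            log.Fatal(err) // not supported on every platform
        }

        b := make([]byte, 1500)
        n, cm, src, err := p.ReadFrom(b)
        if err != nil {
            log.Fatal(err)
        }
        if cm != nil { // control data availability is platform-dependent
            log.Printf("%d bytes from %v, addressed to %v", n, src, cm.Dst)
        }
    }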
@@ -132,13 +143,7 @@ func (c *UDPConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *UDPAddr,
return
}
-// WriteToUDP writes a UDP packet to addr via c, copying the payload
-// from b.
-//
-// WriteToUDP can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetWriteDeadline. On packet-oriented connections, write timeouts
-// are rare.
+// WriteToUDP acts like WriteTo but takes a UDPAddr.
func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) {
if !c.ok() {
return 0, syscall.EINVAL
@@ -166,11 +171,14 @@ func (c *UDPConn) WriteTo(b []byte, addr Addr) (int, error) {
return n, err
}
-// WriteMsgUDP writes a packet to addr via c if c isn't connected, or
-// to c's remote destination address if c is connected (in which case
-// addr must be nil). The payload is copied from b and the associated
-// out-of-band data is copied from oob. It returns the number of
-// payload and out-of-band bytes written.
+// WriteMsgUDP writes a message to addr via c if c isn't connected, or
+// to c's remote address if c is connected (in which case addr must be
+// nil). The payload is copied from b and the associated out-of-band
+// data is copied from oob. It returns the number of payload and
+// out-of-band bytes written.
+//
+// The packages golang.org/x/net/ipv4 and golang.org/x/net/ipv6 can be
+// used to manipulate IP-level socket options in oob.
func (c *UDPConn) WriteMsgUDP(b, oob []byte, addr *UDPAddr) (n, oobn int, err error) {
if !c.ok() {
return 0, 0, syscall.EINVAL
@@ -184,55 +192,67 @@ func (c *UDPConn) WriteMsgUDP(b, oob []byte, addr *UDPAddr) (n, oobn int, err er
func newUDPConn(fd *netFD) *UDPConn { return &UDPConn{conn{fd}} }
-// DialUDP connects to the remote address raddr on the network net,
-// which must be "udp", "udp4", or "udp6". If laddr is not nil, it is
-// used as the local address for the connection.
-func DialUDP(net string, laddr, raddr *UDPAddr) (*UDPConn, error) {
- switch net {
+// DialUDP acts like Dial for UDP networks.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// If laddr is nil, a local address is automatically chosen.
+// If the IP field of raddr is nil or an unspecified IP address, the
+// local system is assumed.
+func DialUDP(network string, laddr, raddr *UDPAddr) (*UDPConn, error) {
+ switch network {
case "udp", "udp4", "udp6":
default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
}
if raddr == nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
- c, err := dialUDP(context.Background(), net, laddr, raddr)
+ c, err := dialUDP(context.Background(), network, laddr, raddr)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
return c, nil
}
-// ListenUDP listens for incoming UDP packets addressed to the local
-// address laddr. Net must be "udp", "udp4", or "udp6". If laddr has
-// a port of 0, ListenUDP will choose an available port.
-// The LocalAddr method of the returned UDPConn can be used to
-// discover the port. The returned connection's ReadFrom and WriteTo
-// methods can be used to receive and send UDP packets with per-packet
-// addressing.
-func ListenUDP(net string, laddr *UDPAddr) (*UDPConn, error) {
- switch net {
+// ListenUDP acts like ListenPacket for UDP networks.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// If the IP field of laddr is nil or an unspecified IP address,
+// ListenUDP listens on all available IP addresses of the local system
+// except multicast IP addresses.
+// If the Port field of laddr is 0, a port number is automatically
+// chosen.
+func ListenUDP(network string, laddr *UDPAddr) (*UDPConn, error) {
+ switch network {
case "udp", "udp4", "udp6":
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
}
if laddr == nil {
laddr = &UDPAddr{}
}
- c, err := listenUDP(context.Background(), net, laddr)
+ c, err := listenUDP(context.Background(), network, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
}
return c, nil
}
-// ListenMulticastUDP listens for incoming multicast UDP packets
-// addressed to the group address gaddr on the interface ifi.
-// Network must be "udp", "udp4" or "udp6".
-// ListenMulticastUDP uses the system-assigned multicast interface
-// when ifi is nil, although this is not recommended because the
+// ListenMulticastUDP acts like ListenPacket for UDP networks but
+// takes a group address on a specific network interface.
+//
+// The network must be a UDP network name; see func Dial for details.
+//
+// ListenMulticastUDP listens on all available IP addresses of the
+// local system, including the group (multicast) IP address.
+// If ifi is nil, ListenMulticastUDP uses the system-assigned
+// multicast interface, although this is not recommended because the
// assignment depends on platforms and sometimes it might require
// routing configuration.
+// If the Port field of gaddr is 0, a port number is automatically
+// chosen.
//
// ListenMulticastUDP is just for convenience of simple, small
// applications. There are golang.org/x/net/ipv4 and
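
Note: the rewritten ListenUDP comment promises an automatically chosen
port when laddr.Port is 0. A minimal sketch of that behavior (the
loopback address here is just for illustration):

	package main

	import (
		"fmt"
		"log"
		"net"
	)

	func main() {
		// Port 0 asks the kernel for any free port; LocalAddr
		// reports the one that was chosen.
		c, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()
		fmt.Println("listening on", c.LocalAddr())
	}
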
diff --git a/libgo/go/net/udpsock_posix.go b/libgo/go/net/udpsock_posix.go
index 0c905afafcc..fe552ba929f 100644
--- a/libgo/go/net/udpsock_posix.go
+++ b/libgo/go/net/udpsock_posix.go
@@ -16,7 +16,7 @@ func sockaddrToUDP(sa syscall.Sockaddr) Addr {
case *syscall.SockaddrInet4:
return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port}
case *syscall.SockaddrInet6:
- return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneToString(int(sa.ZoneId))}
+ return &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
}
return nil
}
@@ -49,7 +49,7 @@ func (c *UDPConn) readFrom(b []byte) (int, *UDPAddr, error) {
case *syscall.SockaddrInet4:
addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port}
case *syscall.SockaddrInet6:
- addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneToString(int(sa.ZoneId))}
+ addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
}
return n, addr, err
}
@@ -61,7 +61,7 @@ func (c *UDPConn) readMsg(b, oob []byte) (n, oobn, flags int, addr *UDPAddr, err
case *syscall.SockaddrInet4:
addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port}
case *syscall.SockaddrInet6:
- addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneToString(int(sa.ZoneId))}
+ addr = &UDPAddr{IP: sa.Addr[0:], Port: sa.Port, Zone: zoneCache.name(int(sa.ZoneId))}
}
return
}
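
Note: these hunks replace zoneToString with zoneCache.name, an internal
cache mapping an IPv6 zone (interface) index to its name. A rough
sketch of such a cache, assuming net.InterfaceByIndex as the slow path
(imports net, strconv, sync); this is illustrative only and differs
from the net package's actual zoneCache:

	// zoneNames memoizes interface-index -> name lookups so that
	// per-packet paths like readFrom avoid repeated interface queries.
	type zoneNames struct {
		mu      sync.RWMutex
		byIndex map[int]string
	}

	func (z *zoneNames) name(index int) string {
		if index == 0 {
			return ""
		}
		z.mu.RLock()
		name, ok := z.byIndex[index]
		z.mu.RUnlock()
		if ok {
			return name
		}
		ifi, err := net.InterfaceByIndex(index)
		if err != nil {
			return strconv.Itoa(index) // numeric zone as a fallback
		}
		z.mu.Lock()
		if z.byIndex == nil {
			z.byIndex = make(map[int]string)
		}
		z.byIndex[index] = ifi.Name
		z.mu.Unlock()
		return ifi.Name
	}
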
diff --git a/libgo/go/net/udpsock_test.go b/libgo/go/net/udpsock_test.go
index 708cc101209..6d4974e3e49 100644
--- a/libgo/go/net/udpsock_test.go
+++ b/libgo/go/net/udpsock_test.go
@@ -15,7 +15,7 @@ import (
func BenchmarkUDP6LinkLocalUnicast(b *testing.B) {
testHookUninstaller.Do(uninstallTestHooks)
- if !supportsIPv6 {
+ if !supportsIPv6() {
b.Skip("IPv6 is not supported")
}
ifi := loopbackInterface()
@@ -279,7 +279,7 @@ func TestUDPConnLocalAndRemoteNames(t *testing.T) {
func TestIPv6LinkLocalUnicastUDP(t *testing.T) {
testenv.MustHaveExternalNetwork(t)
- if !supportsIPv6 {
+ if !supportsIPv6() {
t.Skip("IPv6 is not supported")
}
diff --git a/libgo/go/net/unixsock.go b/libgo/go/net/unixsock.go
index b25d492f591..057940acf65 100644
--- a/libgo/go/net/unixsock.go
+++ b/libgo/go/net/unixsock.go
@@ -42,15 +42,18 @@ func (a *UnixAddr) opAddr() Addr {
return a
}
-// ResolveUnixAddr parses addr as a Unix domain socket address.
-// The string net gives the network name, "unix", "unixgram" or
-// "unixpacket".
-func ResolveUnixAddr(net, addr string) (*UnixAddr, error) {
- switch net {
+// ResolveUnixAddr returns an address of a Unix domain socket end point.
+//
+// The network must be a Unix network name.
+//
+// See func Dial for a description of the network and address
+// parameters.
+func ResolveUnixAddr(network, address string) (*UnixAddr, error) {
+ switch network {
case "unix", "unixgram", "unixpacket":
- return &UnixAddr{Name: addr, Net: net}, nil
+ return &UnixAddr{Name: address, Net: network}, nil
default:
- return nil, UnknownNetworkError(net)
+ return nil, UnknownNetworkError(network)
}
}
@@ -60,6 +63,15 @@ type UnixConn struct {
conn
}
+// SyscallConn returns a raw network connection.
+// This implements the syscall.Conn interface.
+func (c *UnixConn) SyscallConn() (syscall.RawConn, error) {
+ if !c.ok() {
+ return nil, syscall.EINVAL
+ }
+ return newRawConn(c.fd)
+}
+
// CloseRead shuts down the reading side of the Unix domain connection.
// Most callers should just use Close.
func (c *UnixConn) CloseRead() error {
@@ -84,13 +96,7 @@ func (c *UnixConn) CloseWrite() error {
return nil
}
-// ReadFromUnix reads a packet from c, copying the payload into b. It
-// returns the number of bytes copied into b and the source address of
-// the packet.
-//
-// ReadFromUnix can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetReadDeadline.
+// ReadFromUnix acts like ReadFrom but returns a UnixAddr.
func (c *UnixConn) ReadFromUnix(b []byte) (int, *UnixAddr, error) {
if !c.ok() {
return 0, nil, syscall.EINVAL
@@ -117,10 +123,10 @@ func (c *UnixConn) ReadFrom(b []byte) (int, Addr, error) {
return n, addr, err
}
-// ReadMsgUnix reads a packet from c, copying the payload into b and
+// ReadMsgUnix reads a message from c, copying the payload into b and
// the associated out-of-band data into oob. It returns the number of
// bytes copied into b, the number of bytes copied into oob, the flags
-// that were set on the packet, and the source address of the packet.
+// that were set on the message and the source address of the message.
//
// Note that if len(b) == 0 and len(oob) > 0, this function will still
// read (and discard) 1 byte from the connection.
@@ -135,12 +141,7 @@ func (c *UnixConn) ReadMsgUnix(b, oob []byte) (n, oobn, flags int, addr *UnixAdd
return
}
-// WriteToUnix writes a packet to addr via c, copying the payload from b.
-//
-// WriteToUnix can be made to time out and return an error with
-// Timeout() == true after a fixed time limit; see SetDeadline and
-// SetWriteDeadline. On packet-oriented connections, write timeouts
-// are rare.
+// WriteToUnix acts like WriteTo but takes a UnixAddr.
func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (int, error) {
if !c.ok() {
return 0, syscall.EINVAL
@@ -168,9 +169,9 @@ func (c *UnixConn) WriteTo(b []byte, addr Addr) (int, error) {
return n, err
}
-// WriteMsgUnix writes a packet to addr via c, copying the payload
-// from b and the associated out-of-band data from oob. It returns
-// the number of payload and out-of-band bytes written.
+// WriteMsgUnix writes a message to addr via c, copying the payload
+// from b and the associated out-of-band data from oob. It returns the
+// number of payload and out-of-band bytes written.
//
// Note that if len(b) == 0 and len(oob) > 0, this function will still
// write 1 byte to the connection.
@@ -187,18 +188,21 @@ func (c *UnixConn) WriteMsgUnix(b, oob []byte, addr *UnixAddr) (n, oobn int, err
func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} }
-// DialUnix connects to the remote address raddr on the network net,
-// which must be "unix", "unixgram" or "unixpacket". If laddr is not
-// nil, it is used as the local address for the connection.
-func DialUnix(net string, laddr, raddr *UnixAddr) (*UnixConn, error) {
- switch net {
+// DialUnix acts like Dial for Unix networks.
+//
+// The network must be a Unix network name; see func Dial for details.
+//
+// If laddr is non-nil, it is used as the local address for the
+// connection.
+func DialUnix(network string, laddr, raddr *UnixAddr) (*UnixConn, error) {
+ switch network {
case "unix", "unixgram", "unixpacket":
default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(network)}
}
- c, err := dialUnix(context.Background(), net, laddr, raddr)
+ c, err := dialUnix(context.Background(), network, laddr, raddr)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "dial", Net: network, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
}
return c, nil
}
@@ -264,7 +268,7 @@ func (l *UnixListener) SetDeadline(t time.Time) error {
if !l.ok() {
return syscall.EINVAL
}
- if err := l.fd.setDeadline(t); err != nil {
+ if err := l.fd.pfd.SetDeadline(t); err != nil {
return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
}
return nil
@@ -288,40 +292,40 @@ func (l *UnixListener) File() (f *os.File, err error) {
return
}
-// ListenUnix announces on the Unix domain socket laddr and returns a
-// Unix listener. The network net must be "unix" or "unixpacket".
-func ListenUnix(net string, laddr *UnixAddr) (*UnixListener, error) {
- switch net {
+// ListenUnix acts like Listen for Unix networks.
+//
+// The network must be "unix" or "unixpacket".
+func ListenUnix(network string, laddr *UnixAddr) (*UnixListener, error) {
+ switch network {
case "unix", "unixpacket":
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
}
if laddr == nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: errMissingAddress}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: errMissingAddress}
}
- ln, err := listenUnix(context.Background(), net, laddr)
+ ln, err := listenUnix(context.Background(), network, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
}
return ln, nil
}
-// ListenUnixgram listens for incoming Unix datagram packets addressed
-// to the local address laddr. The network net must be "unixgram".
-// The returned connection's ReadFrom and WriteTo methods can be used
-// to receive and send packets with per-packet addressing.
-func ListenUnixgram(net string, laddr *UnixAddr) (*UnixConn, error) {
- switch net {
+// ListenUnixgram acts like ListenPacket for Unix networks.
+//
+// The network must be "unixgram".
+func ListenUnixgram(network string, laddr *UnixAddr) (*UnixConn, error) {
+ switch network {
case "unixgram":
default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(network)}
}
if laddr == nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: nil, Err: errMissingAddress}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: nil, Err: errMissingAddress}
}
- c, err := listenUnixgram(context.Background(), net, laddr)
+ c, err := listenUnixgram(context.Background(), network, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: err}
+ return nil, &OpError{Op: "listen", Net: network, Source: nil, Addr: laddr.opAddr(), Err: err}
}
return c, nil
}
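
Note: the new SyscallConn method returns a syscall.RawConn whose
Control callback runs with the raw descriptor while the runtime keeps
it valid. A hedged, Unix-flavored sketch of reading a socket option
through it (the SO_RCVBUF choice is arbitrary):

	import (
		"net"
		"syscall"
	)

	func rcvbufSize(c *net.UnixConn) (int, error) {
		raw, err := c.SyscallConn()
		if err != nil {
			return 0, err
		}
		var size int
		var serr error
		// Control serializes access to the descriptor for the
		// duration of the callback.
		if err := raw.Control(func(fd uintptr) {
			size, serr = syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
		}); err != nil {
			return 0, err
		}
		return size, serr
	}
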
diff --git a/libgo/go/net/url/url.go b/libgo/go/net/url/url.go
index 42a514bbc12..2ac24725692 100644
--- a/libgo/go/net/url/url.go
+++ b/libgo/go/net/url/url.go
@@ -309,9 +309,10 @@ func escape(s string, mode encoding) string {
}
// A URL represents a parsed URL (technically, a URI reference).
+//
// The general form represented is:
//
-// scheme://[userinfo@]host/path[?query][#fragment]
+// [scheme:][//[userinfo@]host][/]path[?query][#fragment]
//
// URLs that do not start with a slash after the scheme are interpreted as:
//
@@ -321,26 +322,19 @@ func escape(s string, mode encoding) string {
// A consequence is that it is impossible to tell which slashes in the Path were
// slashes in the raw URL and which were %2f. This distinction is rarely important,
// but when it is, code must not use Path directly.
-//
-// Go 1.5 introduced the RawPath field to hold the encoded form of Path.
// The Parse function sets both Path and RawPath in the URL it returns,
// and URL's String method uses RawPath if it is a valid encoding of Path,
// by calling the EscapedPath method.
-//
-// In earlier versions of Go, the more indirect workarounds were that an
-// HTTP server could consult req.RequestURI and an HTTP client could
-// construct a URL struct directly and set the Opaque field instead of Path.
-// These still work as well.
type URL struct {
Scheme string
Opaque string // encoded opaque data
User *Userinfo // username and password information
Host string // host or host:port
- Path string
- RawPath string // encoded path hint (Go 1.5 and later only; see EscapedPath method)
- ForceQuery bool // append a query ('?') even if RawQuery is empty
- RawQuery string // encoded query values, without '?'
- Fragment string // fragment for references, without '#'
+ Path string // path (relative paths may omit leading slash)
+ RawPath string // encoded path hint (see EscapedPath method)
+ ForceQuery bool // append a query ('?') even if RawQuery is empty
+ RawQuery string // encoded query values, without '?'
+ Fragment string // fragment for references, without '#'
}
// User returns a Userinfo containing the provided username
@@ -351,6 +345,7 @@ func User(username string) *Userinfo {
// UserPassword returns a Userinfo containing the provided username
// and password.
+//
// This functionality should only be used with legacy web sites.
// RFC 2396 warns that interpreting Userinfo this way
// ``is NOT RECOMMENDED, because the passing of authentication
@@ -974,6 +969,8 @@ func (u *URL) ResolveReference(ref *URL) *URL {
}
// Query parses RawQuery and returns the corresponding values.
+// It silently discards malformed value pairs.
+// To check errors use ParseQuery.
func (u *URL) Query() Values {
v, _ := ParseQuery(u.RawQuery)
return v
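
Note: the added sentence means Query never reports parse failures. A
small sketch of the difference (the malformed %zz pair is contrived):

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		u, _ := url.Parse("http://example.com/?a=1&bad=%zz&b=2")

		// Query drops the malformed pair silently.
		fmt.Println(u.Query()["a"], u.Query()["b"]) // [1] [2]

		// ParseQuery surfaces the error while still returning
		// whatever did parse.
		if _, err := url.ParseQuery(u.RawQuery); err != nil {
			fmt.Println("malformed query:", err)
		}
	}
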
diff --git a/libgo/go/net/writev_test.go b/libgo/go/net/writev_test.go
index 7160d28c3a0..4c05be473d9 100644
--- a/libgo/go/net/writev_test.go
+++ b/libgo/go/net/writev_test.go
@@ -7,6 +7,7 @@ package net
import (
"bytes"
"fmt"
+ "internal/poll"
"io"
"io/ioutil"
"reflect"
@@ -99,13 +100,13 @@ func TestBuffers_WriteTo(t *testing.T) {
}
func testBuffer_writeTo(t *testing.T, chunks int, useCopy bool) {
- oldHook := testHookDidWritev
- defer func() { testHookDidWritev = oldHook }()
+ oldHook := poll.TestHookDidWritev
+ defer func() { poll.TestHookDidWritev = oldHook }()
var writeLog struct {
sync.Mutex
log []int
}
- testHookDidWritev = func(size int) {
+ poll.TestHookDidWritev = func(size int) {
writeLog.Lock()
writeLog.log = append(writeLog.log, size)
writeLog.Unlock()
diff --git a/libgo/go/net/writev_unix.go b/libgo/go/net/writev_unix.go
index 174e6bc51e3..bf0fbf8a136 100644
--- a/libgo/go/net/writev_unix.go
+++ b/libgo/go/net/writev_unix.go
@@ -7,10 +7,8 @@
package net
import (
- "io"
- "os"
+ "runtime"
"syscall"
- "unsafe"
)
func (c *conn) writeBuffers(v *Buffers) (int64, error) {
@@ -25,71 +23,7 @@ func (c *conn) writeBuffers(v *Buffers) (int64, error) {
}
func (fd *netFD) writeBuffers(v *Buffers) (n int64, err error) {
- if err := fd.writeLock(); err != nil {
- return 0, err
- }
- defer fd.writeUnlock()
- if err := fd.pd.prepareWrite(); err != nil {
- return 0, err
- }
-
- var iovecs []syscall.Iovec
- if fd.iovecs != nil {
- iovecs = *fd.iovecs
- }
- // TODO: read from sysconf(_SC_IOV_MAX)? The Linux default is
- // 1024 and this seems conservative enough for now. Darwin's
- // UIO_MAXIOV also seems to be 1024.
- maxVec := 1024
-
- for len(*v) > 0 {
- iovecs = iovecs[:0]
- for _, chunk := range *v {
- if len(chunk) == 0 {
- continue
- }
- iovecs = append(iovecs, syscall.Iovec{Base: &chunk[0]})
- if fd.isStream && len(chunk) > 1<<30 {
- iovecs[len(iovecs)-1].SetLen(1 << 30)
- break // continue chunk on next writev
- }
- iovecs[len(iovecs)-1].SetLen(len(chunk))
- if len(iovecs) == maxVec {
- break
- }
- }
- if len(iovecs) == 0 {
- break
- }
- fd.iovecs = &iovecs // cache
-
- wrote, _, e0 := syscall.Syscall(syscall.SYS_WRITEV,
- uintptr(fd.sysfd),
- uintptr(unsafe.Pointer(&iovecs[0])),
- uintptr(len(iovecs)))
- if wrote == ^uintptr(0) {
- wrote = 0
- }
- testHookDidWritev(int(wrote))
- n += int64(wrote)
- v.consume(int64(wrote))
- if e0 == syscall.EAGAIN {
- if err = fd.pd.waitWrite(); err == nil {
- continue
- }
- } else if e0 != 0 {
- err = syscall.Errno(e0)
- }
- if err != nil {
- break
- }
- if n == 0 {
- err = io.ErrUnexpectedEOF
- break
- }
- }
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("writev", err)
- }
- return n, err
+ n, err = fd.pfd.Writev((*[][]byte)(v))
+ runtime.KeepAlive(fd)
+ return n, wrapSyscallError("writev", err)
}
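
Note: after this change the vectored-write loop lives in internal/poll
and net merely forwards the buffers. From the caller's side the entry
point is net.Buffers, whose WriteTo can hand several chunks to one
writev call on supported platforms. A minimal usage sketch:

	import "net"

	func writeFramed(c net.Conn, header, body []byte) (int64, error) {
		// One writev instead of concatenating or writing twice.
		bufs := net.Buffers{header, body}
		return bufs.WriteTo(c)
	}
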
diff --git a/libgo/go/os/error_posix.go b/libgo/go/os/error_posix.go
new file mode 100644
index 00000000000..2049e448e8f
--- /dev/null
+++ b/libgo/go/os/error_posix.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
+
+package os
+
+import "syscall"
+
+// wrapSyscallError takes an error and a syscall name. If the error is
+// a syscall.Errno, it wraps it in an os.SyscallError using the syscall name.
+func wrapSyscallError(name string, err error) error {
+ if _, ok := err.(syscall.Errno); ok {
+ err = NewSyscallError(name, err)
+ }
+ return err
+}
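
Note: a quick, self-contained sketch of what the helper does with the
two error shapes it can see (the EIO value is arbitrary):

	package main

	import (
		"errors"
		"fmt"
		"os"
		"syscall"
	)

	func wrapSyscallError(name string, err error) error {
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError(name, err)
		}
		return err
	}

	func main() {
		fmt.Println(wrapSyscallError("writev", syscall.EIO))         // writev: input/output error
		fmt.Println(wrapSyscallError("writev", errors.New("other"))) // passed through unchanged
	}
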
diff --git a/libgo/go/os/example_test.go b/libgo/go/os/example_test.go
index 07f9c769590..5749194871b 100644
--- a/libgo/go/os/example_test.go
+++ b/libgo/go/os/example_test.go
@@ -21,6 +21,20 @@ func ExampleOpenFile() {
}
}
+func ExampleOpenFile_append() {
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile("access.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if _, err := f.Write([]byte("appended some data\n")); err != nil {
+ log.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+}
+
func ExampleChmod() {
if err := os.Chmod("some-filename", 0644); err != nil {
log.Fatal(err)
@@ -36,7 +50,7 @@ func ExampleChtimes() {
}
func ExampleFileMode() {
- fi, err := os.Stat("some-filename")
+ fi, err := os.Lstat("some-filename")
if err != nil {
log.Fatal(err)
}
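
Note: the example switches from Stat to Lstat because Stat follows
symbolic links, so the returned mode could never include ModeSymlink.
A short sketch of the distinction (the path is hypothetical):

	import (
		"fmt"
		"log"
		"os"
	)

	func describe(path string) {
		fi, err := os.Lstat(path) // does not follow symlinks
		if err != nil {
			log.Fatal(err)
		}
		if fi.Mode()&os.ModeSymlink != 0 {
			fmt.Println(path, "is a symbolic link")
			return
		}
		fmt.Println(path, "mode:", fi.Mode())
	}
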
diff --git a/libgo/go/os/exec/env_test.go b/libgo/go/os/exec/env_test.go
new file mode 100644
index 00000000000..b5ac398c274
--- /dev/null
+++ b/libgo/go/os/exec/env_test.go
@@ -0,0 +1,39 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestDedupEnv(t *testing.T) {
+ tests := []struct {
+ noCase bool
+ in []string
+ want []string
+ }{
+ {
+ noCase: true,
+ in: []string{"k1=v1", "k2=v2", "K1=v3"},
+ want: []string{"K1=v3", "k2=v2"},
+ },
+ {
+ noCase: false,
+ in: []string{"k1=v1", "K1=V2", "k1=v3"},
+ want: []string{"k1=v3", "K1=V2"},
+ },
+ {
+ in: []string{"=a", "=b", "foo", "bar"},
+ want: []string{"=b", "foo", "bar"},
+ },
+ }
+ for _, tt := range tests {
+ got := dedupEnvCase(tt.noCase, tt.in)
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Dedup(%v, %q) = %q; want %q", tt.noCase, tt.in, got, tt.want)
+ }
+ }
+}
diff --git a/libgo/go/os/exec/exec.go b/libgo/go/os/exec/exec.go
index c4c5168b980..893d8ee99a8 100644
--- a/libgo/go/os/exec/exec.go
+++ b/libgo/go/os/exec/exec.go
@@ -6,6 +6,15 @@
// easier to remap stdin and stdout, connect I/O with pipes, and do other
// adjustments.
//
+// Unlike the "system" library call from C and other languages, the
+// os/exec package intentionally does not invoke the system shell and
+// does not expand any glob patterns or handle other expansions,
+// pipelines, or redirections typically done by shells. The package
+// behaves more like C's "exec" family of functions. To expand glob
+// patterns, either call the shell directly, taking care to escape any
+// dangerous input, or use the path/filepath package's Glob function.
+// To expand environment variables, use package os's ExpandEnv.
+//
// Note that the examples in this package assume a Unix system.
// They may not run on Windows, and they do not run in the Go Playground
// used by golang.org and godoc.org.
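
Note: the new package comment draws the system(3)-versus-exec(3)
distinction; globs and variable references in arguments are passed to
the child literally. A hedged sketch of the two workarounds the doc
names (paths and patterns are illustrative):

	import (
		"os"
		"os/exec"
		"path/filepath"
	)

	func listGoFiles() error {
		// Expand the pattern ourselves with filepath.Glob...
		matches, err := filepath.Glob("*.go")
		if err != nil {
			return err
		}
		cmd := exec.Command("ls", append([]string{"-l"}, matches...)...)
		cmd.Stdout = os.Stdout
		return cmd.Run()
		// ...or delegate expansion to a shell, taking care with
		// untrusted input:
		//   exec.Command("/bin/sh", "-c", "ls -l *.go")
	}
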
@@ -55,7 +64,11 @@ type Cmd struct {
Args []string
// Env specifies the environment of the process.
- // If Env is nil, Run uses the current process's environment.
+ // Each entry is of the form "key=value".
+ // If Env is nil, the new process uses the current process's
+ // environment.
+ // If Env contains duplicate environment keys, only the last
+ // value in the slice for each duplicate key is used.
Env []string
// Dir specifies the working directory of the command.
@@ -79,17 +92,14 @@ type Cmd struct {
// If either is nil, Run connects the corresponding file descriptor
// to the null device (os.DevNull).
//
- // If Stdout and Stderr are the same writer, at most one
- // goroutine at a time will call Write.
+ // If Stdout and Stderr are the same writer, and have a type that can be compared with ==,
+ // at most one goroutine at a time will call Write.
Stdout io.Writer
Stderr io.Writer
// ExtraFiles specifies additional open files to be inherited by the
// new process. It does not include standard input, standard output, or
// standard error. If non-nil, entry i becomes file descriptor 3+i.
- //
- // BUG(rsc): On OS X 10.6, child processes may sometimes inherit unwanted fds.
- // https://golang.org/issue/2603
ExtraFiles []*os.File
// SysProcAttr holds optional, operating system-specific attributes.
@@ -270,9 +280,8 @@ func (c *Cmd) closeDescriptors(closers []io.Closer) {
// copying stdin, stdout, and stderr, and exits with a zero exit
// status.
//
-// If the command fails to run or doesn't complete successfully, the
-// error is of type *ExitError. Other error types may be
-// returned for I/O problems.
+// If the command starts but does not complete successfully, the error is of
+// type *ExitError. Other error types may be returned for other situations.
func (c *Cmd) Run() error {
if err := c.Start(); err != nil {
return err
@@ -354,7 +363,7 @@ func (c *Cmd) Start() error {
c.Process, err = os.StartProcess(c.Path, c.argv(), &os.ProcAttr{
Dir: c.Dir,
Files: c.childFiles,
- Env: c.envv(),
+ Env: dedupEnv(c.envv()),
Sys: c.SysProcAttr,
})
if err != nil {
@@ -407,8 +416,10 @@ func (e *ExitError) Error() string {
return e.ProcessState.String()
}
-// Wait waits for the command to exit.
-// It must have been started by Start.
+// Wait waits for the command to exit and waits for any copying to
+// stdin or copying from stdout or stderr to complete.
+//
+// The command must have been started by Start.
//
// The returned error is nil if the command runs, has no problems
// copying stdin, stdout, and stderr, and exits with a zero exit
@@ -712,3 +723,35 @@ func minInt(a, b int) int {
}
return b
}
+
+// dedupEnv returns a copy of env with any duplicates removed, in favor of
+// later values.
+// Items not of the normal environment "key=value" form are preserved unchanged.
+func dedupEnv(env []string) []string {
+ return dedupEnvCase(runtime.GOOS == "windows", env)
+}
+
+// dedupEnvCase is dedupEnv with a case option for testing.
+// If caseInsensitive is true, the case of keys is ignored.
+func dedupEnvCase(caseInsensitive bool, env []string) []string {
+ out := make([]string, 0, len(env))
+ saw := map[string]int{} // key => index into out
+ for _, kv := range env {
+ eq := strings.Index(kv, "=")
+ if eq < 0 {
+ out = append(out, kv)
+ continue
+ }
+ k := kv[:eq]
+ if caseInsensitive {
+ k = strings.ToLower(k)
+ }
+ if dupIdx, isDup := saw[k]; isDup {
+ out[dupIdx] = kv
+ continue
+ }
+ saw[k] = len(out)
+ out = append(out, kv)
+ }
+ return out
+}
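
Note: with dedupEnv in place, overriding a variable is just an append;
the last entry for a key wins (case-insensitively on Windows), as
TestDedupEnvEcho later in this patch verifies end to end. A usage
sketch assuming a Unix printenv binary:

	package main

	import (
		"fmt"
		"log"
		"os"
		"os/exec"
	)

	func main() {
		cmd := exec.Command("printenv", "FOO")
		// Start now passes dedupEnv(c.envv()) to os.StartProcess,
		// so only the later FOO entry survives.
		cmd.Env = append(os.Environ(), "FOO=first", "FOO=second")
		out, err := cmd.Output()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s", out) // second
	}
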
diff --git a/libgo/go/os/exec/exec_posix.go b/libgo/go/os/exec/exec_posix.go
deleted file mode 100644
index 5e1113748cd..00000000000
--- a/libgo/go/os/exec/exec_posix.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !plan9
-
-package exec
-
-import (
- "os"
- "syscall"
-)
-
-func init() {
- skipStdinCopyError = func(err error) bool {
- // Ignore EPIPE errors copying to stdin if the program
- // completed successfully otherwise.
- // See Issue 9173.
- pe, ok := err.(*os.PathError)
- return ok &&
- pe.Op == "write" && pe.Path == "|1" &&
- pe.Err == syscall.EPIPE
- }
-}
diff --git a/libgo/go/os/exec/exec_posix_test.go b/libgo/go/os/exec/exec_posix_test.go
new file mode 100644
index 00000000000..865b6c3ced2
--- /dev/null
+++ b/libgo/go/os/exec/exec_posix_test.go
@@ -0,0 +1,83 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package exec_test
+
+import (
+ "os/user"
+ "strconv"
+ "syscall"
+ "testing"
+ "time"
+)
+
+func TestCredentialNoSetGroups(t *testing.T) {
+ u, err := user.Current()
+ if err != nil {
+ t.Fatalf("error getting current user: %v", err)
+ }
+
+ uid, err := strconv.Atoi(u.Uid)
+ if err != nil {
+ t.Fatalf("error converting Uid=%s to integer: %v", u.Uid, err)
+ }
+
+ gid, err := strconv.Atoi(u.Gid)
+ if err != nil {
+ t.Fatalf("error converting Gid=%s to integer: %v", u.Gid, err)
+ }
+
+ // If NoSetGroups is true, setgroups isn't called and cmd.Run should succeed
+ cmd := helperCommand(t, "echo", "foo")
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Credential: &syscall.Credential{
+ Uid: uint32(uid),
+ Gid: uint32(gid),
+ NoSetGroups: true,
+ },
+ }
+
+ if err = cmd.Run(); err != nil {
+ t.Errorf("Failed to run command: %v", err)
+ }
+}
+
+// For issue #19314: make sure that SIGSTOP does not cause the process
+// to appear done.
+func TestWaitid(t *testing.T) {
+ t.Parallel()
+
+ cmd := helperCommand(t, "sleep")
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ // The sleeps here are unnecessary in the sense that the test
+ // should still pass, but they are useful to make it more
+ // likely that we are testing the expected state of the child.
+ time.Sleep(100 * time.Millisecond)
+
+ if err := cmd.Process.Signal(syscall.SIGSTOP); err != nil {
+ cmd.Process.Kill()
+ t.Fatal(err)
+ }
+
+ ch := make(chan error)
+ go func() {
+ ch <- cmd.Wait()
+ }()
+
+ time.Sleep(100 * time.Millisecond)
+
+ if err := cmd.Process.Signal(syscall.SIGCONT); err != nil {
+ t.Error(err)
+ syscall.Kill(cmd.Process.Pid, syscall.SIGCONT)
+ }
+
+ cmd.Process.Kill()
+
+ <-ch
+}
diff --git a/libgo/go/os/exec/exec_test.go b/libgo/go/os/exec/exec_test.go
index f13635138a1..a877d8ae232 100644
--- a/libgo/go/os/exec/exec_test.go
+++ b/libgo/go/os/exec/exec_test.go
@@ -12,6 +12,7 @@ import (
"bytes"
"context"
"fmt"
+ "internal/poll"
"internal/testenv"
"io"
"io/ioutil"
@@ -292,8 +293,51 @@ func TestStdinCloseRace(t *testing.T) {
// Issue 5071
func TestPipeLookPathLeak(t *testing.T) {
- fd0, lsof0 := numOpenFDS(t)
- for i := 0; i < 4; i++ {
+ // If we are reading from /proc/self/fd we (should) get an exact result.
+ tolerance := 0
+
+ // Reading /proc/self/fd is more reliable than calling lsof, so try that
+ // first.
+ numOpenFDs := func() (int, []byte, error) {
+ fds, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return 0, nil, err
+ }
+ return len(fds), nil, nil
+ }
+ want, before, err := numOpenFDs()
+ if err != nil {
+ // We encountered a problem reading /proc/self/fd (we might be on
+ // a platform that doesn't have it). Fall back onto lsof.
+ t.Logf("using lsof because: %v", err)
+ numOpenFDs = func() (int, []byte, error) {
+ // Android's stock lsof does not obey the -p option,
+ // so extra filtering is needed.
+ // https://golang.org/issue/10206
+ if runtime.GOOS == "android" {
+ // numOpenFDsAndroid handles errors itself and
+ // might skip or fail the test.
+ n, lsof := numOpenFDsAndroid(t)
+ return n, lsof, nil
+ }
+ lsof, err := exec.Command("lsof", "-b", "-n", "-p", strconv.Itoa(os.Getpid())).Output()
+ return bytes.Count(lsof, []byte("\n")), lsof, err
+ }
+
+ // lsof may see file descriptors associated with the fork itself,
+ // so we allow some extra margin if we have to use it.
+ // https://golang.org/issue/19243
+ tolerance = 5
+
+ // Retry reading the number of open file descriptors.
+ want, before, err = numOpenFDs()
+ if err != nil {
+ t.Log(err)
+ t.Skipf("skipping test; error finding or running lsof")
+ }
+ }
+
+ for i := 0; i < 6; i++ {
cmd := exec.Command("something-that-does-not-exist-binary")
cmd.StdoutPipe()
cmd.StderrPipe()
@@ -302,36 +346,20 @@ func TestPipeLookPathLeak(t *testing.T) {
t.Fatal("unexpected success")
}
}
- for triesLeft := 3; triesLeft >= 0; triesLeft-- {
- open, lsof := numOpenFDS(t)
- fdGrowth := open - fd0
- if fdGrowth > 2 {
- if triesLeft > 0 {
- // Work around what appears to be a race with Linux's
- // proc filesystem (as used by lsof). It seems to only
- // be eventually consistent. Give it awhile to settle.
- // See golang.org/issue/7808
- time.Sleep(100 * time.Millisecond)
- continue
- }
- t.Errorf("leaked %d fds; want ~0; have:\n%s\noriginally:\n%s", fdGrowth, lsof, lsof0)
- }
- break
- }
-}
-
-func numOpenFDS(t *testing.T) (n int, lsof []byte) {
- if runtime.GOOS == "android" {
- // Android's stock lsof does not obey the -p option,
- // so extra filtering is needed. (golang.org/issue/10206)
- return numOpenFDsAndroid(t)
- }
-
- lsof, err := exec.Command("lsof", "-b", "-n", "-p", strconv.Itoa(os.Getpid())).Output()
+ got, after, err := numOpenFDs()
if err != nil {
- t.Skip("skipping test; error finding or running lsof")
+ // numOpenFDs has already succeeded once; it should work here.
+ t.Errorf("unexpected failure: %v", err)
+ }
+ if got-want > tolerance {
+ t.Errorf("number of open file descriptors changed: got %v, want %v", got, want)
+ if before != nil {
+ t.Errorf("before:\n%v\n", before)
+ }
+ if after != nil {
+ t.Errorf("after:\n%v\n", after)
+ }
}
- return bytes.Count(lsof, []byte("\n")), lsof
}
func numOpenFDsAndroid(t *testing.T) (n int, lsof []byte) {
@@ -377,12 +405,16 @@ var testedAlreadyLeaked = false
// basefds returns the number of expected file descriptors
// to be present in a process at start.
+// stdin, stdout, stderr, epoll/kqueue
func basefds() uintptr {
return os.Stderr.Fd() + 1
}
func closeUnexpectedFds(t *testing.T, m string) {
for fd := basefds(); fd <= 101; fd++ {
+ if fd == poll.PollDescriptor() {
+ continue
+ }
err := os.NewFile(fd, "").Close()
if err == nil {
t.Logf("%s: Something already leaked - closed fd %d", m, fd)
@@ -665,6 +697,11 @@ func TestHelperProcess(*testing.T) {
iargs = append(iargs, s)
}
fmt.Println(iargs...)
+ case "echoenv":
+ for _, s := range args {
+ fmt.Println(os.Getenv(s))
+ }
+ os.Exit(0)
case "cat":
if len(args) == 0 {
io.Copy(os.Stdout, os.Stdin)
@@ -740,6 +777,9 @@ func TestHelperProcess(*testing.T) {
// Now verify that there are no other open fds.
var files []*os.File
for wantfd := basefds() + 1; wantfd <= 100; wantfd++ {
+ if wantfd == poll.PollDescriptor() {
+ continue
+ }
f, err := os.Open(os.Args[0])
if err != nil {
fmt.Printf("error opening file with expected fd %d: %v", wantfd, err)
@@ -832,31 +872,50 @@ func TestHelperProcess(*testing.T) {
case "stderrfail":
fmt.Fprintf(os.Stderr, "some stderr text\n")
os.Exit(1)
+ case "sleep":
+ time.Sleep(3 * time.Second)
+ os.Exit(0)
default:
fmt.Fprintf(os.Stderr, "Unknown command %q\n", cmd)
os.Exit(2)
}
}
+type delayedInfiniteReader struct{}
+
+func (delayedInfiniteReader) Read(b []byte) (int, error) {
+ time.Sleep(100 * time.Millisecond)
+ for i := range b {
+ b[i] = 'x'
+ }
+ return len(b), nil
+}
+
// Issue 9173: ignore stdin pipe writes if the program completes successfully.
func TestIgnorePipeErrorOnSuccess(t *testing.T) {
testenv.MustHaveExec(t)
- // We really only care about testing this on Unixy things.
- if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ // We really only care about testing this on Unixy and Windowsy things.
+ if runtime.GOOS == "plan9" {
t.Skipf("skipping test on %q", runtime.GOOS)
}
- cmd := helperCommand(t, "echo", "foo")
- var out bytes.Buffer
- cmd.Stdin = strings.NewReader(strings.Repeat("x", 10<<20))
- cmd.Stdout = &out
- if err := cmd.Run(); err != nil {
- t.Fatal(err)
- }
- if got, want := out.String(), "foo\n"; got != want {
- t.Errorf("output = %q; want %q", got, want)
+ testWith := func(r io.Reader) func(*testing.T) {
+ return func(t *testing.T) {
+ cmd := helperCommand(t, "echo", "foo")
+ var out bytes.Buffer
+ cmd.Stdin = r
+ cmd.Stdout = &out
+ if err := cmd.Run(); err != nil {
+ t.Fatal(err)
+ }
+ if got, want := out.String(), "foo\n"; got != want {
+ t.Errorf("output = %q; want %q", got, want)
+ }
+ }
}
+ t.Run("10MB", testWith(strings.NewReader(strings.Repeat("x", 10<<20))))
+ t.Run("Infinite", testWith(delayedInfiniteReader{}))
}
type badWriter struct{}
@@ -1012,3 +1071,18 @@ func TestContextCancel(t *testing.T) {
t.Logf("exit status: %v", err)
}
}
+
+// test that environment variables are de-duped.
+func TestDedupEnvEcho(t *testing.T) {
+ testenv.MustHaveExec(t)
+
+ cmd := helperCommand(t, "echoenv", "FOO")
+ cmd.Env = append(cmd.Env, "FOO=bad", "FOO=good")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got, want := strings.TrimSpace(string(out)), "good"; got != want {
+ t.Errorf("output = %q; want %q", got, want)
+ }
+}
diff --git a/libgo/go/os/exec/exec_unix.go b/libgo/go/os/exec/exec_unix.go
new file mode 100644
index 00000000000..9c3e17d23ab
--- /dev/null
+++ b/libgo/go/os/exec/exec_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package exec
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ skipStdinCopyError = func(err error) bool {
+ // Ignore EPIPE errors copying to stdin if the program
+ // completed successfully otherwise.
+ // See Issue 9173.
+ pe, ok := err.(*os.PathError)
+ return ok &&
+ pe.Op == "write" && pe.Path == "|1" &&
+ pe.Err == syscall.EPIPE
+ }
+}
diff --git a/libgo/go/os/exec/exec_windows.go b/libgo/go/os/exec/exec_windows.go
new file mode 100644
index 00000000000..af8cd97218e
--- /dev/null
+++ b/libgo/go/os/exec/exec_windows.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package exec
+
+import (
+ "os"
+ "syscall"
+)
+
+func init() {
+ skipStdinCopyError = func(err error) bool {
+ // Ignore ERROR_BROKEN_PIPE and ERROR_NO_DATA errors copying
+ // to stdin if the program completed successfully otherwise.
+ // See Issue 20445.
+ const _ERROR_NO_DATA = syscall.Errno(0xe8)
+ pe, ok := err.(*os.PathError)
+ return ok &&
+ pe.Op == "write" && pe.Path == "|1" &&
+ (pe.Err == syscall.ERROR_BROKEN_PIPE || pe.Err == _ERROR_NO_DATA)
+ }
+}
diff --git a/libgo/go/os/exec_windows.go b/libgo/go/os/exec_windows.go
index d89db2022cc..d5d553a2f6b 100644
--- a/libgo/go/os/exec_windows.go
+++ b/libgo/go/os/exec_windows.go
@@ -97,17 +97,79 @@ func findProcess(pid int) (p *Process, err error) {
}
func init() {
- var argc int32
- cmd := syscall.GetCommandLine()
- argv, e := syscall.CommandLineToArgv(cmd, &argc)
- if e != nil {
- return
+ p := syscall.GetCommandLine()
+ cmd := syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(p))[:])
+ if len(cmd) == 0 {
+ arg0, _ := Executable()
+ Args = []string{arg0}
+ } else {
+ Args = commandLineToArgv(cmd)
}
- defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
- Args = make([]string, argc)
- for i, v := range (*argv)[:argc] {
- Args[i] = syscall.UTF16ToString((*v)[:])
+}
+
+// appendBSBytes appends n '\\' bytes to b and returns the resulting slice.
+func appendBSBytes(b []byte, n int) []byte {
+ for ; n > 0; n-- {
+ b = append(b, '\\')
+ }
+ return b
+}
+
+// readNextArg splits command line string cmd into next
+// argument and command line remainder.
+func readNextArg(cmd string) (arg []byte, rest string) {
+ var b []byte
+ var inquote bool
+ var nslash int
+ for ; len(cmd) > 0; cmd = cmd[1:] {
+ c := cmd[0]
+ switch c {
+ case ' ', '\t':
+ if !inquote {
+ return appendBSBytes(b, nslash), cmd[1:]
+ }
+ case '"':
+ b = appendBSBytes(b, nslash/2)
+ if nslash%2 == 0 {
+ // use "Prior to 2008" rule from
+ // http://daviddeley.com/autohotkey/parameters/parameters.htm
+ // section 5.2 to deal with double double quotes
+ if inquote && len(cmd) > 1 && cmd[1] == '"' {
+ b = append(b, c)
+ cmd = cmd[1:]
+ }
+ inquote = !inquote
+ } else {
+ b = append(b, c)
+ }
+ nslash = 0
+ continue
+ case '\\':
+ nslash++
+ continue
+ }
+ b = appendBSBytes(b, nslash)
+ nslash = 0
+ b = append(b, c)
+ }
+ return appendBSBytes(b, nslash), ""
+}
+
+// commandLineToArgv splits a command line into individual argument
+// strings, following the Windows conventions documented
+// at http://daviddeley.com/autohotkey/parameters/parameters.htm#WINARGV
+func commandLineToArgv(cmd string) []string {
+ var args []string
+ for len(cmd) > 0 {
+ if cmd[0] == ' ' || cmd[0] == '\t' {
+ cmd = cmd[1:]
+ continue
+ }
+ var arg []byte
+ arg, cmd = readNextArg(cmd)
+ args = append(args, string(arg))
}
+ return args
}
func ftToDuration(ft *syscall.Filetime) time.Duration {
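
Note: readNextArg follows the pre-2008 CommandLineToArgvW rules:
backslashes are literal except before a quote, where 2n backslashes
plus a quote yield n backslashes and toggle quoting, and a doubled
quote inside a quoted region emits a literal quote. A few hand-checked
cases as a hypothetical in-package test (imports reflect and testing;
the patch exports the function for testing as CommandLineToArgv):

	func TestCommandLineToArgvSketch(t *testing.T) {
		for _, tt := range []struct {
			cmd  string
			want []string
		}{
			{`prog a b`, []string{"prog", "a", "b"}},
			{`prog "a b"`, []string{"prog", "a b"}},
			{`prog \"a`, []string{"prog", `"a`}},
			{`prog \\"a b"`, []string{"prog", `\a b`}},
			{`prog "a ""b"`, []string{"prog", `a "b`}},
		} {
			if got := commandLineToArgv(tt.cmd); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("commandLineToArgv(%q) = %q; want %q", tt.cmd, got, tt.want)
			}
		}
	}
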
diff --git a/libgo/go/os/executable.go b/libgo/go/os/executable.go
index 8c21246f5a8..17eed10bc9f 100644
--- a/libgo/go/os/executable.go
+++ b/libgo/go/os/executable.go
@@ -16,8 +16,7 @@ package os
// The main use case is finding resources located relative to an
// executable.
//
-// Executable is not supported on nacl or OpenBSD (unless procfs is
-// mounted.)
+// Executable is not supported on nacl.
func Executable() (string, error) {
return executable()
}
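
Note: the doc's main use case, resources located relative to the
binary, sketched (the config.json name is hypothetical):

	import (
		"os"
		"path/filepath"
	)

	func configPath() (string, error) {
		exe, err := os.Executable()
		if err != nil {
			return "", err
		}
		// Executable may return a path reached via symlinks; apply
		// filepath.EvalSymlinks first if a canonical directory matters.
		return filepath.Join(filepath.Dir(exe), "config.json"), nil
	}
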
diff --git a/libgo/go/os/executable_path.go b/libgo/go/os/executable_path.go
index 117320d7949..7b8b83652c8 100644
--- a/libgo/go/os/executable_path.go
+++ b/libgo/go/os/executable_path.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix
+// +build aix openbsd
package os
@@ -12,18 +12,19 @@ package os
var initWd, errWd = Getwd()
func executable() (string, error) {
- var err error
var exePath string
if len(Args) == 0 || Args[0] == "" {
return "", ErrNotExist
}
- // Args[0] is an absolute path : this is the executable
if IsPathSeparator(Args[0][0]) {
+ // Args[0] is an absolute path, so it is the executable.
+ // Note that we only need to worry about Unix paths here.
exePath = Args[0]
} else {
for i := 1; i < len(Args[0]); i++ {
- // Args[0] is a relative path : append current directory
if IsPathSeparator(Args[0][i]) {
+ // Args[0] is a relative path: prepend the
+ // initial working directory.
if errWd != nil {
return "", errWd
}
@@ -33,18 +34,15 @@ func executable() (string, error) {
}
}
if exePath != "" {
- err = isExecutable(exePath)
- if err == nil {
- return exePath, nil
+ if err := isExecutable(exePath); err != nil {
+ return "", err
}
- // File does not exist or is not executable,
- // this is an unexpected situation !
- return "", err
+ return exePath, nil
}
- // Search for executable in $PATH
+ // Search for executable in $PATH.
for _, dir := range splitPathList(Getenv("PATH")) {
if len(dir) == 0 {
- continue
+ dir = "."
}
if !IsPathSeparator(dir[0]) {
if errWd != nil {
@@ -53,12 +51,11 @@ func executable() (string, error) {
dir = initWd + string(PathSeparator) + dir
}
exePath = dir + string(PathSeparator) + Args[0]
- err = isExecutable(exePath)
- if err == nil {
+ switch isExecutable(exePath) {
+ case nil:
return exePath, nil
- }
- if err == ErrPermission {
- return "", err
+ case ErrPermission:
+ return "", ErrPermission
}
}
return "", ErrNotExist
@@ -74,15 +71,18 @@ func isExecutable(path string) error {
if !mode.IsRegular() {
return ErrPermission
}
- if (mode & 0111) != 0 {
- return nil
+ if (mode & 0111) == 0 {
+ return ErrPermission
}
- return ErrPermission
+ return nil
}
// splitPathList splits a path list.
// This is based on genSplit from strings/strings.go
func splitPathList(pathList string) []string {
+ if pathList == "" {
+ return nil
+ }
n := 1
for i := 0; i < len(pathList); i++ {
if pathList[i] == PathListSeparator {
diff --git a/libgo/go/os/executable_procfs.go b/libgo/go/os/executable_procfs.go
index 69a70e18df0..b5fae590468 100644
--- a/libgo/go/os/executable_procfs.go
+++ b/libgo/go/os/executable_procfs.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build linux netbsd openbsd dragonfly nacl
+// +build linux netbsd dragonfly nacl
package os
@@ -23,8 +23,6 @@ var executablePath, executablePathErr = func() (string, error) {
procfn = "/proc/self/exe"
case "netbsd":
procfn = "/proc/curproc/exe"
- case "openbsd":
- procfn = "/proc/curproc/file"
case "dragonfly":
procfn = "/proc/curproc/file"
}
diff --git a/libgo/go/os/executable_test.go b/libgo/go/os/executable_test.go
index a4d89092ac6..7800844e420 100644
--- a/libgo/go/os/executable_test.go
+++ b/libgo/go/os/executable_test.go
@@ -20,10 +20,6 @@ func TestExecutable(t *testing.T) {
testenv.MustHaveExec(t) // will also exclude nacl, which doesn't support Executable anyway
ep, err := os.Executable()
if err != nil {
- switch goos := runtime.GOOS; goos {
- case "openbsd": // procfs is not mounted by default
- t.Skipf("Executable failed on %s: %v, expected", goos, err)
- }
t.Fatalf("Executable failed: %v", err)
}
// we want fn to be of the form "dir/prog"
@@ -32,6 +28,7 @@ func TestExecutable(t *testing.T) {
if err != nil {
t.Fatalf("filepath.Rel: %v", err)
}
+
cmd := &osexec.Cmd{}
// make child start with a relative program path
cmd.Dir = dir
@@ -39,6 +36,10 @@ func TestExecutable(t *testing.T) {
// forge argv[0] for child, so that we can verify we could correctly
// get real path of the executable without influenced by argv[0].
cmd.Args = []string{"-", "-test.run=XXXX"}
+ if runtime.GOOS == "openbsd" {
+ // OpenBSD relies on argv[0]
+ cmd.Args[0] = fn
+ }
cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", executable_EnvVar))
out, err := cmd.CombinedOutput()
if err != nil {
diff --git a/libgo/go/os/export_windows_test.go b/libgo/go/os/export_windows_test.go
index 3bb2d2015f7..f36fadb58bc 100644
--- a/libgo/go/os/export_windows_test.go
+++ b/libgo/go/os/export_windows_test.go
@@ -7,7 +7,7 @@ package os
// Export for testing.
var (
- FixLongPath = fixLongPath
- NewConsoleFile = newConsoleFile
- ReadConsoleFunc = &readConsole
+ FixLongPath = fixLongPath
+ NewConsoleFile = newConsoleFile
+ CommandLineToArgv = commandLineToArgv
)
diff --git a/libgo/go/os/file.go b/libgo/go/os/file.go
index d45a00b1239..4b4d8fb0367 100644
--- a/libgo/go/os/file.go
+++ b/libgo/go/os/file.go
@@ -37,6 +37,8 @@
package os
import (
+ "errors"
+ "internal/poll"
"io"
"syscall"
)
@@ -99,13 +101,7 @@ func (f *File) Read(b []byte) (n int, err error) {
return 0, err
}
n, e := f.read(b)
- if n == 0 && len(b) > 0 && e == nil {
- return 0, io.EOF
- }
- if e != nil {
- err = &PathError{"read", f.name, e}
- }
- return n, err
+ return n, f.wrapErr("read", e)
}
// ReadAt reads len(b) bytes from the File starting at byte offset off.
@@ -116,13 +112,15 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
if err := f.checkValid("read"); err != nil {
return 0, err
}
+
+ if off < 0 {
+ return 0, &PathError{"readat", f.name, errors.New("negative offset")}
+ }
+
for len(b) > 0 {
m, e := f.pread(b, off)
- if m == 0 && e == nil {
- return n, io.EOF
- }
if e != nil {
- err = &PathError{"read", f.name, e}
+ err = f.wrapErr("read", e)
break
}
n += m
@@ -150,8 +148,9 @@ func (f *File) Write(b []byte) (n int, err error) {
epipecheck(f, e)
if e != nil {
- err = &PathError{"write", f.name, e}
+ err = f.wrapErr("write", e)
}
+
return n, err
}
@@ -162,10 +161,15 @@ func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
if err := f.checkValid("write"); err != nil {
return 0, err
}
+
+ if off < 0 {
+ return 0, &PathError{"writeat", f.name, errors.New("negative offset")}
+ }
+
for len(b) > 0 {
m, e := f.pwrite(b, off)
if e != nil {
- err = &PathError{"write", f.name, e}
+ err = f.wrapErr("write", e)
break
}
n += m
@@ -189,7 +193,7 @@ func (f *File) Seek(offset int64, whence int) (ret int64, err error) {
e = syscall.EISDIR
}
if e != nil {
- return 0, &PathError{"seek", f.name, e}
+ return 0, f.wrapErr("seek", e)
}
return r, nil
}
@@ -226,19 +230,6 @@ func Chdir(dir string) error {
return nil
}
-// Chdir changes the current working directory to the file,
-// which must be a directory.
-// If there is an error, it will be of type *PathError.
-func (f *File) Chdir() error {
- if err := f.checkValid("chdir"); err != nil {
- return err
- }
- if e := syscall.Fchdir(f.fd); e != nil {
- return &PathError{"chdir", f.name, e}
- }
- return nil
-}
-
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
@@ -276,14 +267,52 @@ func fixCount(n int, err error) (int, error) {
return n, err
}
-// checkValid checks whether f is valid for use.
-// If not, it returns an appropriate error, perhaps incorporating the operation name op.
-func (f *File) checkValid(op string) error {
- if f == nil {
- return ErrInvalid
+// wrapErr wraps an error that occurred during an operation on an open file.
+// It passes io.EOF through unchanged, otherwise converts
+// poll.ErrFileClosing to ErrClosed and wraps the error in a PathError.
+func (f *File) wrapErr(op string, err error) error {
+ if err == nil || err == io.EOF {
+ return err
}
- if f.fd == badFd {
- return &PathError{op, f.name, ErrClosed}
+ if err == poll.ErrFileClosing {
+ err = ErrClosed
}
- return nil
+ return &PathError{op, f.name, err}
+}
+
+// TempDir returns the default directory to use for temporary files.
+//
+// On Unix systems, it returns $TMPDIR if non-empty, else /tmp.
+// On Windows, it uses GetTempPath, returning the first non-empty
+// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
+// On Plan 9, it returns /tmp.
+//
+// The directory is neither guaranteed to exist nor have accessible
+// permissions.
+func TempDir() string {
+ return tempDir()
}
+
+// Chmod changes the mode of the named file to mode.
+// If the file is a symbolic link, it changes the mode of the link's target.
+// If there is an error, it will be of type *PathError.
+//
+// A different subset of the mode bits is used, depending on the
+// operating system.
+//
+// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
+// ModeSticky are used.
+//
+// On Windows, the mode must be non-zero but otherwise only the 0200
+// bit (owner writable) of mode is used; it controls whether the
+// file's read-only attribute is set or cleared. The other
+// bits are currently unused. Use mode 0400 for a read-only file and
+// 0600 for a readable+writable file.
+//
+// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
+// and ModeTemporary are used.
+func Chmod(name string, mode FileMode) error { return chmod(name, mode) }
+
+// Chmod changes the mode of the file to mode.
+// If there is an error, it will be of type *PathError.
+func (f *File) Chmod(mode FileMode) error { return f.chmod(mode) }
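
Note: per the consolidated Chmod doc only a platform-dependent subset
of mode bits is honored; on Windows just the 0200 bit matters. A short
sketch (the path is hypothetical):

	import (
		"log"
		"os"
	)

	func toggleReadOnly(path string) {
		// 0400: read-only everywhere; on Windows this clears the
		// 0200 bit and so sets the read-only file attribute.
		if err := os.Chmod(path, 0400); err != nil {
			log.Fatal(err)
		}
		// 0600 restores owner write (clears read-only on Windows).
		if err := os.Chmod(path, 0600); err != nil {
			log.Fatal(err)
		}
	}
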
diff --git a/libgo/go/os/file_plan9.go b/libgo/go/os/file_plan9.go
index 5276a7ec541..0f4a736c269 100644
--- a/libgo/go/os/file_plan9.go
+++ b/libgo/go/os/file_plan9.go
@@ -35,7 +35,9 @@ func (f *File) Fd() uintptr {
return uintptr(f.fd)
}
-// NewFile returns a new File with the given file descriptor and name.
+// NewFile returns a new File with the given file descriptor and
+// name. The returned value will be nil if fd is not a valid file
+// descriptor.
func NewFile(fd uintptr, name string) *File {
fdi := int(fd)
if fdi < 0 {
@@ -194,9 +196,7 @@ func (f *File) Truncate(size int64) error {
const chmodMask = uint32(syscall.DMAPPEND | syscall.DMEXCL | syscall.DMTMP | ModePerm)
-// Chmod changes the mode of the file to mode.
-// If there is an error, it will be of type *PathError.
-func (f *File) Chmod(mode FileMode) error {
+func (f *File) chmod(mode FileMode) error {
if f == nil {
return ErrInvalid
}
@@ -244,14 +244,22 @@ func (f *File) Sync() error {
// read reads up to len(b) bytes from the File.
// It returns the number of bytes read and an error, if any.
func (f *File) read(b []byte) (n int, err error) {
- return fixCount(syscall.Read(f.fd, b))
+ n, e := fixCount(syscall.Read(f.fd, b))
+ if n == 0 && len(b) > 0 && e == nil {
+ return 0, io.EOF
+ }
+ return n, e
}
// pread reads len(b) bytes from the File starting at byte offset off.
// It returns the number of bytes read and the error, if any.
// EOF is signaled by a zero count with err set to nil.
func (f *File) pread(b []byte, off int64) (n int, err error) {
- return fixCount(syscall.Pread(f.fd, b, off))
+ n, e := fixCount(syscall.Pread(f.fd, b, off))
+ if n == 0 && len(b) > 0 && e == nil {
+ return 0, io.EOF
+ }
+ return n, e
}
// write writes len(b) bytes to the File.
@@ -365,10 +373,8 @@ func rename(oldname, newname string) error {
return nil
}
-// Chmod changes the mode of the named file to mode.
-// If the file is a symbolic link, it changes the mode of the link's target.
-// If there is an error, it will be of type *PathError.
-func Chmod(name string, mode FileMode) error {
+// See docs in file.go:Chmod.
+func chmod(name string, mode FileMode) error {
var d syscall.Dir
odir, e := dirstat(name)
@@ -468,7 +474,31 @@ func (f *File) Chown(uid, gid int) error {
return &PathError{"chown", f.name, syscall.EPLAN9}
}
-// TempDir returns the default directory to use for temporary files.
-func TempDir() string {
+func tempDir() string {
return "/tmp"
}
+
+// Chdir changes the current working directory to the file,
+// which must be a directory.
+// If there is an error, it will be of type *PathError.
+func (f *File) Chdir() error {
+ if err := f.checkValid("chdir"); err != nil {
+ return err
+ }
+ if e := syscall.Fchdir(f.fd); e != nil {
+ return &PathError{"chdir", f.name, e}
+ }
+ return nil
+}
+
+// checkValid checks whether f is valid for use.
+// If not, it returns an appropriate error, perhaps incorporating the operation name op.
+func (f *File) checkValid(op string) error {
+ if f == nil {
+ return ErrInvalid
+ }
+ if f.fd == badFd {
+ return &PathError{op, f.name, ErrClosed}
+ }
+ return nil
+}
diff --git a/libgo/go/os/file_posix.go b/libgo/go/os/file_posix.go
index 6634112efce..51cae9de6de 100644
--- a/libgo/go/os/file_posix.go
+++ b/libgo/go/os/file_posix.go
@@ -48,24 +48,21 @@ func syscallMode(i FileMode) (o uint32) {
return
}
-// Chmod changes the mode of the named file to mode.
-// If the file is a symbolic link, it changes the mode of the link's target.
-// If there is an error, it will be of type *PathError.
-func Chmod(name string, mode FileMode) error {
- if e := syscall.Chmod(name, syscallMode(mode)); e != nil {
+// See docs in file.go:Chmod.
+func chmod(name string, mode FileMode) error {
+ if e := syscall.Chmod(fixLongPath(name), syscallMode(mode)); e != nil {
return &PathError{"chmod", name, e}
}
return nil
}
-// Chmod changes the mode of the file to mode.
-// If there is an error, it will be of type *PathError.
-func (f *File) Chmod(mode FileMode) error {
+// See docs in file.go:(*File).Chmod.
+func (f *File) chmod(mode FileMode) error {
if err := f.checkValid("chmod"); err != nil {
return err
}
- if e := syscall.Fchmod(f.fd, syscallMode(mode)); e != nil {
- return &PathError{"chmod", f.name, e}
+ if e := f.pfd.Fchmod(syscallMode(mode)); e != nil {
+ return f.wrapErr("chmod", e)
}
return nil
}
@@ -73,6 +70,9 @@ func (f *File) Chmod(mode FileMode) error {
// Chown changes the numeric uid and gid of the named file.
// If the file is a symbolic link, it changes the uid and gid of the link's target.
// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
func Chown(name string, uid, gid int) error {
if e := syscall.Chown(name, uid, gid); e != nil {
return &PathError{"chown", name, e}
@@ -83,6 +83,9 @@ func Chown(name string, uid, gid int) error {
// Lchown changes the numeric uid and gid of the named file.
// If the file is a symbolic link, it changes the uid and gid of the link itself.
// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
func Lchown(name string, uid, gid int) error {
if e := syscall.Lchown(name, uid, gid); e != nil {
return &PathError{"lchown", name, e}
@@ -92,12 +95,15 @@ func Lchown(name string, uid, gid int) error {
// Chown changes the numeric uid and gid of the named file.
// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
func (f *File) Chown(uid, gid int) error {
if err := f.checkValid("chown"); err != nil {
return err
}
- if e := syscall.Fchown(f.fd, uid, gid); e != nil {
- return &PathError{"chown", f.name, e}
+ if e := f.pfd.Fchown(uid, gid); e != nil {
+ return f.wrapErr("chown", e)
}
return nil
}
@@ -109,8 +115,8 @@ func (f *File) Truncate(size int64) error {
if err := f.checkValid("truncate"); err != nil {
return err
}
- if e := syscall.Ftruncate(f.fd, size); e != nil {
- return &PathError{"truncate", f.name, e}
+ if e := f.pfd.Ftruncate(size); e != nil {
+ return f.wrapErr("truncate", e)
}
return nil
}
@@ -122,8 +128,8 @@ func (f *File) Sync() error {
if err := f.checkValid("sync"); err != nil {
return err
}
- if e := syscall.Fsync(f.fd); e != nil {
- return &PathError{"sync", f.name, e}
+ if e := f.pfd.Fsync(); e != nil {
+ return f.wrapErr("sync", e)
}
return nil
}
@@ -143,3 +149,25 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error {
}
return nil
}
+
+// Chdir changes the current working directory to the file,
+// which must be a directory.
+// If there is an error, it will be of type *PathError.
+func (f *File) Chdir() error {
+ if err := f.checkValid("chdir"); err != nil {
+ return err
+ }
+ if e := f.pfd.Fchdir(); e != nil {
+ return f.wrapErr("chdir", e)
+ }
+ return nil
+}
+
+// checkValid checks whether f is valid for use.
+// If not, it returns an appropriate error, perhaps incorporating the operation name op.
+func (f *File) checkValid(op string) error {
+ if f == nil {
+ return ErrInvalid
+ }
+ return nil
+}
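
The new (*File).Chdir mirrors the package-level Chdir but operates on an already-open directory handle. A minimal usage sketch of the exported API (the directory path is illustrative):

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        dir, err := os.Open("/tmp") // any existing directory
        if err != nil {
            log.Fatal(err)
        }
        defer dir.Close()

        // Change the process working directory using the open handle.
        if err := dir.Chdir(); err != nil {
            log.Fatal(err)
        }

        wd, err := os.Getwd()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("now in:", wd)
    }
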
diff --git a/libgo/go/os/file_unix.go b/libgo/go/os/file_unix.go
index 1bba4ed9d63..819999409a9 100644
--- a/libgo/go/os/file_unix.go
+++ b/libgo/go/os/file_unix.go
@@ -7,6 +7,7 @@
package os
import (
+ "internal/poll"
"runtime"
"syscall"
)
@@ -19,11 +20,22 @@ func fixLongPath(path string) string {
func rename(oldname, newname string) error {
fi, err := Lstat(newname)
if err == nil && fi.IsDir() {
+ // There are two independent errors this function can return:
+ // one for a bad oldname, and one for a bad newname.
+ // At this point we've determined the newname is bad.
+ // But just in case oldname is also bad, prioritize returning
+ // the oldname error because that's what we did historically.
+ if _, err := Lstat(oldname); err != nil {
+ if pe, ok := err.(*PathError); ok {
+ err = pe.Err
+ }
+ return &LinkError{"rename", oldname, newname, err}
+ }
return &LinkError{"rename", oldname, newname, syscall.EEXIST}
}
- e := syscall.Rename(oldname, newname)
- if e != nil {
- return &LinkError{"rename", oldname, newname, e}
+ err = syscall.Rename(oldname, newname)
+ if err != nil {
+ return &LinkError{"rename", oldname, newname, err}
}
return nil
}
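
The caller-visible effect of the rename change: when oldname is missing, the returned *LinkError wraps the Lstat error even if newname is an existing directory, so os.IsNotExist reports true. A minimal sketch of that behavior (file names are illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        os.Mkdir("dest", 0777) // newname is an existing directory
        defer os.Remove("dest")

        err := os.Rename("doesnt-exist", "dest")
        // The oldname error wins: IsNotExist sees through the *LinkError.
        fmt.Println(os.IsNotExist(err)) // true
    }
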
@@ -33,9 +45,10 @@ func rename(oldname, newname string) error {
// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
- fd int
- name string
- dirinfo *dirInfo // nil unless directory being read
+ pfd poll.FD
+ name string
+ dirinfo *dirInfo // nil unless directory being read
+ nonblock bool // whether we set nonblocking mode
}
// Fd returns the integer Unix file descriptor referencing the open file.
@@ -44,16 +57,64 @@ func (f *File) Fd() uintptr {
if f == nil {
return ^(uintptr(0))
}
- return uintptr(f.fd)
+
+ // If we put the file descriptor into nonblocking mode,
+ // then set it to blocking mode before we return it,
+ // because historically we have always returned a descriptor
+ // opened in blocking mode. The File will continue to work,
+ // but any blocking operation will tie up a thread.
+ if f.nonblock {
+ syscall.SetNonblock(f.pfd.Sysfd, false)
+ }
+
+ return uintptr(f.pfd.Sysfd)
}
-// NewFile returns a new File with the given file descriptor and name.
+// NewFile returns a new File with the given file descriptor and
+// name. The returned value will be nil if fd is not a valid file
+// descriptor.
func NewFile(fd uintptr, name string) *File {
+ return newFile(fd, name, false)
+}
+
+// newFile is like NewFile, but if pollable is true it tries to add the
+// file to the runtime poller.
+func newFile(fd uintptr, name string, pollable bool) *File {
fdi := int(fd)
if fdi < 0 {
return nil
}
- f := &File{&file{fd: fdi, name: name}}
+ f := &File{&file{
+ pfd: poll.FD{
+ Sysfd: fdi,
+ IsStream: true,
+ ZeroReadIsEOF: true,
+ },
+ name: name,
+ }}
+
+ // Don't try to use kqueue with regular files on FreeBSD.
+ // It crashes the system unpredictably while running all.bash.
+ // Issue 19093.
+ if runtime.GOOS == "freebsd" {
+ pollable = false
+ }
+
+ if err := f.pfd.Init("file", pollable); err != nil {
+ // An error here indicates a failure to register
+ // with the netpoll system. That can happen for
+ // a file descriptor that is not supported by
+ // epoll/kqueue; for example, disk files on
+ // GNU/Linux systems. We assume that any real error
+ // will show up in later I/O.
+ } else if pollable {
+ // We successfully registered with netpoll, so put
+ // the file into nonblocking mode.
+ if err := syscall.SetNonblock(fdi, true); err == nil {
+ f.nonblock = true
+ }
+ }
+
runtime.SetFinalizer(f.file, (*file).close)
return f
}
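
internal/poll is not importable outside the standard library, but the nonblocking step newFile performs after a successful poller registration can be sketched with the public syscall API. A rough analogue, assuming a pipe (whose descriptors are pollable on most platforms):

    package main

    import (
        "log"
        "os"
        "syscall"
    )

    func main() {
        r, w, err := os.Pipe()
        if err != nil {
            log.Fatal(err)
        }
        defer r.Close()
        defer w.Close()

        // Mirror the step newFile takes after registering with the
        // poller: flip the descriptor into nonblocking mode.
        fd := int(r.Fd()) // note: Fd() itself switches back to blocking
        if err := syscall.SetNonblock(fd, true); err != nil {
            log.Fatal(err)
        }

        var buf [1]byte
        // With no data available, a raw nonblocking read fails with
        // EAGAIN instead of parking an OS thread.
        if _, err := syscall.Read(fd, buf[:]); err == syscall.EAGAIN {
            log.Println("read would block, as expected")
        }
    }
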
@@ -68,7 +129,7 @@ type dirInfo struct {
// output or standard error. See the SIGPIPE docs in os/signal, and
// issue 11845.
func epipecheck(file *File, e error) {
- if e == syscall.EPIPE && (file.fd == 1 || file.fd == 2) {
+ if e == syscall.EPIPE && (file.pfd.Sysfd == 1 || file.pfd.Sysfd == 2) {
sigpipe()
}
}
@@ -119,7 +180,7 @@ func OpenFile(name string, flag int, perm FileMode) (*File, error) {
syscall.CloseOnExec(r)
}
- return NewFile(uintptr(r), name), nil
+ return newFile(uintptr(r), name, true), nil
}
// Close closes the File, rendering it unusable for I/O.
@@ -132,11 +193,14 @@ func (f *File) Close() error {
}
func (file *file) close() error {
- if file == nil || file.fd == badFd {
+ if file == nil {
return syscall.EINVAL
}
var err error
- if e := syscall.Close(file.fd); e != nil {
+ if e := file.pfd.Close(); e != nil {
+ if e == poll.ErrFileClosing {
+ e = ErrClosed
+ }
err = &PathError{"close", file.name, e}
}
@@ -151,76 +215,42 @@ func (file *file) close() error {
}
}
- file.fd = -1 // so it can't be closed again
-
// no need for a finalizer anymore
runtime.SetFinalizer(file, nil)
return err
}
-// Darwin and FreeBSD can't read or write 2GB+ at a time,
-// even on 64-bit systems. See golang.org/issue/7812.
-// Use 1GB instead of, say, 2GB-1, to keep subsequent
-// reads aligned.
-const (
- needsMaxRW = runtime.GOOS == "darwin" || runtime.GOOS == "freebsd"
- maxRW = 1 << 30
-)
-
// read reads up to len(b) bytes from the File.
// It returns the number of bytes read and an error, if any.
func (f *File) read(b []byte) (n int, err error) {
- if needsMaxRW && len(b) > maxRW {
- b = b[:maxRW]
- }
- return fixCount(syscall.Read(f.fd, b))
+ n, err = f.pfd.Read(b)
+ runtime.KeepAlive(f)
+ return n, err
}
// pread reads len(b) bytes from the File starting at byte offset off.
// It returns the number of bytes read and the error, if any.
// EOF is signaled by a zero count with err set to nil.
func (f *File) pread(b []byte, off int64) (n int, err error) {
- if needsMaxRW && len(b) > maxRW {
- b = b[:maxRW]
- }
- return fixCount(syscall.Pread(f.fd, b, off))
+ n, err = f.pfd.Pread(b, off)
+ runtime.KeepAlive(f)
+ return n, err
}
// write writes len(b) bytes to the File.
// It returns the number of bytes written and an error, if any.
func (f *File) write(b []byte) (n int, err error) {
- for {
- bcap := b
- if needsMaxRW && len(bcap) > maxRW {
- bcap = bcap[:maxRW]
- }
- m, err := fixCount(syscall.Write(f.fd, bcap))
- n += m
-
- // If the syscall wrote some data but not all (short write)
- // or it returned EINTR, then assume it stopped early for
- // reasons that are uninteresting to the caller, and try again.
- if 0 < m && m < len(bcap) || err == syscall.EINTR {
- b = b[m:]
- continue
- }
-
- if needsMaxRW && len(bcap) != len(b) && err == nil {
- b = b[m:]
- continue
- }
-
- return n, err
- }
+ n, err = f.pfd.Write(b)
+ runtime.KeepAlive(f)
+ return n, err
}
// pwrite writes len(b) bytes to the File starting at byte offset off.
// It returns the number of bytes written and an error, if any.
func (f *File) pwrite(b []byte, off int64) (n int, err error) {
- if needsMaxRW && len(b) > maxRW {
- b = b[:maxRW]
- }
- return fixCount(syscall.Pwrite(f.fd, b, off))
+ n, err = f.pfd.Pwrite(b, off)
+ runtime.KeepAlive(f)
+ return n, err
}
// seek sets the offset for the next Read or Write on file to offset, interpreted
@@ -228,7 +258,9 @@ func (f *File) pwrite(b []byte, off int64) (n int, err error) {
// relative to the current offset, and 2 means relative to the end.
// It returns the new offset and an error, if any.
func (f *File) seek(offset int64, whence int) (ret int64, err error) {
- return syscall.Seek(f.fd, offset, whence)
+ ret, err = f.pfd.Seek(offset, whence)
+ runtime.KeepAlive(f)
+ return ret, err
}
// Truncate changes the size of the named file.
@@ -272,8 +304,7 @@ func Remove(name string) error {
return &PathError{"remove", name, e}
}
-// TempDir returns the default directory to use for temporary files.
-func TempDir() string {
+func tempDir() string {
dir := Getenv("TMPDIR")
if dir == "" {
if runtime.GOOS == "android" {
diff --git a/libgo/go/os/os_test.go b/libgo/go/os/os_test.go
index dcc8d762bf8..0f1617ad5d0 100644
--- a/libgo/go/os/os_test.go
+++ b/libgo/go/os/os_test.go
@@ -17,6 +17,7 @@ import (
"path/filepath"
"reflect"
"runtime"
+ "runtime/debug"
"sort"
"strings"
"sync"
@@ -52,15 +53,12 @@ var sysdir = func() *sysDir {
case "darwin":
switch runtime.GOARCH {
case "arm", "arm64":
- /// At this point the test harness has not had a chance
- // to move us into the ./src/os directory, so the
- // current working directory is the root of the app.
wd, err := syscall.Getwd()
if err != nil {
wd = err.Error()
}
return &sysDir{
- wd,
+ filepath.Join(wd, "..", ".."),
[]string{
"ResourceRules.plist",
"Info.plist",
@@ -110,7 +108,7 @@ func size(name string, t *testing.T) int64 {
break
}
if e != nil {
- t.Fatal("read failed:", err)
+ t.Fatal("read failed:", e)
}
}
return int64(len)
@@ -174,6 +172,45 @@ func TestStat(t *testing.T) {
}
}
+func TestStatError(t *testing.T) {
+ defer chtmpdir(t)()
+
+ path := "no-such-file"
+ Remove(path) // Just in case
+
+ fi, err := Stat(path)
+ if err == nil {
+ t.Fatal("got nil, want error")
+ }
+ if fi != nil {
+ t.Errorf("got %v, want nil", fi)
+ }
+ if perr, ok := err.(*PathError); !ok {
+ t.Errorf("got %T, want %T", err, perr)
+ }
+
+ testenv.MustHaveSymlink(t)
+
+ link := "symlink"
+ Remove(link) // Just in case
+ err = Symlink(path, link)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer Remove(link)
+
+ fi, err = Stat(link)
+ if err == nil {
+ t.Fatal("got nil, want error")
+ }
+ if fi != nil {
+ t.Errorf("got %v, want nil", fi)
+ }
+ if perr, ok := err.(*PathError); !ok {
+ t.Errorf("got %T, want %T", err, perr)
+ }
+}
+
func TestFstat(t *testing.T) {
path := sfdir + "/" + sfname
file, err1 := Open(path)
@@ -359,6 +396,50 @@ func BenchmarkReaddir(b *testing.B) {
benchmarkReaddir(".", b)
}
+func benchmarkStat(b *testing.B, path string) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := Stat(path)
+ if err != nil {
+ b.Fatalf("Stat(%q) failed: %v", path, err)
+ }
+ }
+}
+
+func benchmarkLstat(b *testing.B, path string) {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := Lstat(path)
+ if err != nil {
+ b.Fatalf("Lstat(%q) failed: %v", path, err)
+ }
+ }
+}
+
+func BenchmarkStatDot(b *testing.B) {
+ benchmarkStat(b, ".")
+}
+
+func BenchmarkStatFile(b *testing.B) {
+ benchmarkStat(b, filepath.Join(runtime.GOROOT(), "src/os/os_test.go"))
+}
+
+func BenchmarkStatDir(b *testing.B) {
+ benchmarkStat(b, filepath.Join(runtime.GOROOT(), "src/os"))
+}
+
+func BenchmarkLstatDot(b *testing.B) {
+ benchmarkLstat(b, ".")
+}
+
+func BenchmarkLstatFile(b *testing.B) {
+ benchmarkLstat(b, filepath.Join(runtime.GOROOT(), "src/os/os_test.go"))
+}
+
+func BenchmarkLstatDir(b *testing.B) {
+ benchmarkLstat(b, filepath.Join(runtime.GOROOT(), "src/os"))
+}
+
// Read the directory one entry at a time.
func smallReaddirnames(file *File, length int, t *testing.T) []string {
names := make([]string, length)
@@ -673,55 +754,58 @@ func TestSymlink(t *testing.T) {
Remove(from) // Just in case.
file, err := Create(to)
if err != nil {
- t.Fatalf("open %q failed: %v", to, err)
+ t.Fatalf("Create(%q) failed: %v", to, err)
}
defer Remove(to)
if err = file.Close(); err != nil {
- t.Errorf("close %q failed: %v", to, err)
+ t.Errorf("Close(%q) failed: %v", to, err)
}
err = Symlink(to, from)
if err != nil {
- t.Fatalf("symlink %q, %q failed: %v", to, from, err)
+ t.Fatalf("Symlink(%q, %q) failed: %v", to, from, err)
}
defer Remove(from)
tostat, err := Lstat(to)
if err != nil {
- t.Fatalf("stat %q failed: %v", to, err)
+ t.Fatalf("Lstat(%q) failed: %v", to, err)
}
if tostat.Mode()&ModeSymlink != 0 {
- t.Fatalf("stat %q claims to have found a symlink", to)
+ t.Fatalf("Lstat(%q).Mode()&ModeSymlink = %v, want 0", to, tostat.Mode()&ModeSymlink)
}
fromstat, err := Stat(from)
if err != nil {
- t.Fatalf("stat %q failed: %v", from, err)
+ t.Fatalf("Stat(%q) failed: %v", from, err)
}
if !SameFile(tostat, fromstat) {
- t.Errorf("symlink %q, %q did not create symlink", to, from)
+ t.Errorf("Symlink(%q, %q) did not create symlink", to, from)
}
fromstat, err = Lstat(from)
if err != nil {
- t.Fatalf("lstat %q failed: %v", from, err)
+ t.Fatalf("Lstat(%q) failed: %v", from, err)
}
if fromstat.Mode()&ModeSymlink == 0 {
- t.Fatalf("symlink %q, %q did not create symlink", to, from)
+ t.Fatalf("Lstat(%q).Mode()&ModeSymlink = 0, want %v", from, ModeSymlink)
}
fromstat, err = Stat(from)
if err != nil {
- t.Fatalf("stat %q failed: %v", from, err)
+ t.Fatalf("Stat(%q) failed: %v", from, err)
+ }
+ if fromstat.Name() != from {
+ t.Errorf("Stat(%q).Name() = %q, want %q", from, fromstat.Name(), from)
}
if fromstat.Mode()&ModeSymlink != 0 {
- t.Fatalf("stat %q did not follow symlink", from)
+ t.Fatalf("Stat(%q).Mode()&ModeSymlink = %v, want 0", from, fromstat.Mode()&ModeSymlink)
}
s, err := Readlink(from)
if err != nil {
- t.Fatalf("readlink %q failed: %v", from, err)
+ t.Fatalf("Readlink(%q) failed: %v", from, err)
}
if s != to {
- t.Fatalf("after symlink %q != %q", s, to)
+ t.Fatalf("Readlink(%q) = %q, want %q", from, s, to)
}
file, err = Open(from)
if err != nil {
- t.Fatalf("open %q failed: %v", from, err)
+ t.Fatalf("Open(%q) failed: %v", from, err)
}
file.Close()
}
@@ -844,6 +928,18 @@ func TestRenameFailed(t *testing.T) {
}
}
+func TestRenameNotExisting(t *testing.T) {
+ defer chtmpdir(t)()
+ from, to := "doesnt-exist", "dest"
+
+ Mkdir(to, 0777)
+ defer Remove(to)
+
+ if err := Rename(from, to); !IsNotExist(err) {
+ t.Errorf("Rename(%q, %q) = %v; want an IsNotExist error", from, to, err)
+ }
+}
+
func TestRenameToDirFailed(t *testing.T) {
defer chtmpdir(t)()
from, to := "renamefrom", "renameto"
@@ -1054,14 +1150,22 @@ func testChtimes(t *testing.T, name string) {
}
postStat := st
- /* Plan 9, NaCl:
- Mtime is the time of the last change of content. Similarly, atime is set whenever the
- contents are accessed; also, it is set whenever mtime is set.
- */
pat := Atime(postStat)
pmt := postStat.ModTime()
- if !pat.Before(at) && runtime.GOOS != "plan9" && runtime.GOOS != "nacl" {
- t.Errorf("AccessTime didn't go backwards; was=%d, after=%d", at, pat)
+ if !pat.Before(at) {
+ switch runtime.GOOS {
+ case "plan9", "nacl":
+ // Ignore.
+ // Plan 9, NaCl:
+ // Mtime is the time of the last change of
+ // content. Similarly, atime is set whenever
+ // the contents are accessed; also, it is set
+ // whenever mtime is set.
+ case "netbsd":
+ t.Logf("AccessTime didn't go backwards; was=%d, after=%d (Ignoring. See NetBSD issue golang.org/issue/19293)", at, pat)
+ default:
+ t.Errorf("AccessTime didn't go backwards; was=%d, after=%d", at, pat)
+ }
}
if !pmt.Before(mt) {
@@ -1239,6 +1343,32 @@ func TestSeek(t *testing.T) {
}
}
+func TestSeekError(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "nacl":
+ t.Skipf("skipping test on %v", runtime.GOOS)
+ }
+
+ r, w, err := Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = r.Seek(0, 0)
+ if err == nil {
+ t.Fatal("Seek on pipe should fail")
+ }
+ if perr, ok := err.(*PathError); !ok || perr.Err != syscall.ESPIPE {
+ t.Errorf("Seek returned error %v, want &PathError{Err: syscall.ESPIPE}", err)
+ }
+ _, err = w.Seek(0, 0)
+ if err == nil {
+ t.Fatal("Seek on pipe should fail")
+ }
+ if perr, ok := err.(*PathError); !ok || perr.Err != syscall.ESPIPE {
+ t.Errorf("Seek returned error %v, want &PathError{Err: syscall.ESPIPE}", err)
+ }
+}
+
type openErrorTest struct {
path string
mode int
@@ -1443,6 +1573,26 @@ func TestReadAtOffset(t *testing.T) {
}
}
+// Verify that ReadAt doesn't allow negative offset.
+func TestReadAtNegativeOffset(t *testing.T) {
+ f := newFile("TestReadAtNegativeOffset", t)
+ defer Remove(f.Name())
+ defer f.Close()
+
+ const data = "hello, world\n"
+ io.WriteString(f, data)
+
+ f.Seek(0, 0)
+ b := make([]byte, 5)
+
+ n, err := f.ReadAt(b, -10)
+
+ const wantsub = "negative offset"
+ if !strings.Contains(fmt.Sprint(err), wantsub) || n != 0 {
+ t.Errorf("ReadAt(-10) = %v, %v; want 0, ...%q...", n, err, wantsub)
+ }
+}
+
func TestWriteAt(t *testing.T) {
f := newFile("TestWriteAt", t)
defer Remove(f.Name())
@@ -1465,6 +1615,20 @@ func TestWriteAt(t *testing.T) {
}
}
+// Verify that WriteAt doesn't allow negative offset.
+func TestWriteAtNegativeOffset(t *testing.T) {
+ f := newFile("TestWriteAtNegativeOffset", t)
+ defer Remove(f.Name())
+ defer f.Close()
+
+ n, err := f.WriteAt([]byte("WORLD"), -10)
+
+ const wantsub = "negative offset"
+ if !strings.Contains(fmt.Sprint(err), wantsub) || n != 0 {
+ t.Errorf("WriteAt(-10) = %v, %v; want 0, ...%q...", n, err, wantsub)
+ }
+}
+
func writeFile(t *testing.T, fname string, flag int, text string) string {
f, err := OpenFile(fname, flag, 0666)
if err != nil {
@@ -1667,6 +1831,17 @@ func TestStatStdin(t *testing.T) {
Exit(0)
}
+ fi, err := Stdin.Stat()
+ if err != nil {
+ t.Fatal(err)
+ }
+ switch mode := fi.Mode(); {
+ case mode&ModeCharDevice != 0:
+ case mode&ModeNamedPipe != 0:
+ default:
+ t.Fatalf("unexpected Stdin mode (%v), want ModeCharDevice or ModeNamedPipe", mode)
+ }
+
var cmd *osexec.Cmd
if runtime.GOOS == "windows" {
cmd = osexec.Command("cmd", "/c", "echo output | "+Args[0]+" -test.run=TestStatStdin")
@@ -1686,6 +1861,60 @@ func TestStatStdin(t *testing.T) {
}
}
+func TestStatRelativeSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+
+ tmpdir, err := ioutil.TempDir("", "TestStatRelativeSymlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer RemoveAll(tmpdir)
+
+ target := filepath.Join(tmpdir, "target")
+ f, err := Create(target)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ st, err := f.Stat()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ link := filepath.Join(tmpdir, "link")
+ err = Symlink(filepath.Base(target), link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st1, err := Stat(link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !SameFile(st, st1) {
+ t.Error("Stat doesn't follow relative symlink")
+ }
+
+ if runtime.GOOS == "windows" {
+ Remove(link)
+ err = Symlink(target[len(filepath.VolumeName(target)):], link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ st1, err := Stat(link)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !SameFile(st, st1) {
+ t.Error("Stat doesn't follow relative symlink")
+ }
+ }
+}
+
func TestReadAtEOF(t *testing.T) {
f := newFile("TestReadAtEOF", t)
defer Remove(f.Name())
@@ -1759,6 +1988,10 @@ func TestLongPath(t *testing.T) {
if dir.Size() != filesize || filesize != wantSize {
t.Errorf("Size(%q) is %d, len(ReadFile()) is %d, want %d", path, dir.Size(), filesize, wantSize)
}
+ err = Chmod(path, dir.Mode())
+ if err != nil {
+ t.Fatalf("Chmod(%q) failed: %v", path, err)
+ }
}
if err := Truncate(sizedTempDir+"/bar.txt", 0); err != nil {
t.Fatalf("Truncate failed: %v", err)
@@ -1927,3 +2160,99 @@ func TestRemoveAllRace(t *testing.T) {
close(hold) // let workers race to remove root
wg.Wait()
}
+
+// Test that reading from a pipe doesn't use up a thread.
+func TestPipeThreads(t *testing.T) {
+ switch runtime.GOOS {
+ case "freebsd":
+ t.Skip("skipping on FreeBSD; issue 19093")
+ case "solaris":
+ t.Skip("skipping on Solaris; issue 19111")
+ case "windows":
+ t.Skip("skipping on Windows; issue 19098")
+ case "plan9":
+ t.Skip("skipping on Plan 9; does not support runtime poller")
+ }
+
+ threads := 100
+
+ // OpenBSD has a low default for max number of files.
+ if runtime.GOOS == "openbsd" {
+ threads = 50
+ }
+
+ r := make([]*File, threads)
+ w := make([]*File, threads)
+ for i := 0; i < threads; i++ {
+ rp, wp, err := Pipe()
+ if err != nil {
+ for j := 0; j < i; j++ {
+ r[j].Close()
+ w[j].Close()
+ }
+ t.Fatal(err)
+ }
+ r[i] = rp
+ w[i] = wp
+ }
+
+ defer debug.SetMaxThreads(debug.SetMaxThreads(threads / 2))
+
+ var wg sync.WaitGroup
+ wg.Add(threads)
+ c := make(chan bool, threads)
+ for i := 0; i < threads; i++ {
+ go func(i int) {
+ defer wg.Done()
+ var b [1]byte
+ c <- true
+ if _, err := r[i].Read(b[:]); err != nil {
+ t.Error(err)
+ }
+ }(i)
+ }
+
+ for i := 0; i < threads; i++ {
+ <-c
+ }
+
+ // If we are still alive, it means that the 100 goroutines did
+ // not require 100 threads.
+
+ for i := 0; i < threads; i++ {
+ if _, err := w[i].Write([]byte{0}); err != nil {
+ t.Error(err)
+ }
+ if err := w[i].Close(); err != nil {
+ t.Error(err)
+ }
+ }
+
+ wg.Wait()
+
+ for i := 0; i < threads; i++ {
+ if err := r[i].Close(); err != nil {
+ t.Error(err)
+ }
+ }
+}
+
+func TestDoubleCloseError(t *testing.T) {
+ path := sfdir + "/" + sfname
+ file, err := Open(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := file.Close(); err != nil {
+ t.Fatalf("unexpected error from Close: %v", err)
+ }
+ if err := file.Close(); err == nil {
+ t.Error("second Close did not fail")
+ } else if pe, ok := err.(*PathError); !ok {
+ t.Errorf("second Close returned unexpected error type %T; expected os.PathError", pe)
+ } else if pe.Err != ErrClosed {
+ t.Errorf("second Close returned %q, wanted %q", err, ErrClosed)
+ } else {
+ t.Logf("second close returned expected error %q", err)
+ }
+}
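
The contract pinned down by TestDoubleCloseError is usable by callers: a second Close returns a *PathError whose Err is os.ErrClosed. A minimal sketch (the file name is illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        f, err := os.Open("os_test.go") // any readable file
        if err != nil {
            fmt.Println(err)
            return
        }
        f.Close()

        if err := f.Close(); err != nil {
            if pe, ok := err.(*os.PathError); ok && pe.Err == os.ErrClosed {
                fmt.Println("double close detected:", pe)
            }
        }
    }
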
diff --git a/libgo/go/os/pipe_bsd.go b/libgo/go/os/pipe_bsd.go
index ebe198bb6a8..ae153fa00d6 100644
--- a/libgo/go/os/pipe_bsd.go
+++ b/libgo/go/os/pipe_bsd.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix darwin dragonfly freebsd nacl netbsd openbsd solaris
+// +build aix darwin dragonfly nacl netbsd openbsd solaris
package os
@@ -24,5 +24,5 @@ func Pipe() (r *File, w *File, err error) {
syscall.CloseOnExec(p[1])
syscall.ForkLock.RUnlock()
- return NewFile(uintptr(p[0]), "|0"), NewFile(uintptr(p[1]), "|1"), nil
+ return newFile(uintptr(p[0]), "|0", true), newFile(uintptr(p[1]), "|1", true), nil
}
diff --git a/libgo/go/os/pipe_freebsd.go b/libgo/go/os/pipe_freebsd.go
new file mode 100644
index 00000000000..ea6622cd260
--- /dev/null
+++ b/libgo/go/os/pipe_freebsd.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import "syscall"
+
+// Pipe returns a connected pair of Files; reads from r return bytes written to w.
+// It returns the files and an error, if any.
+func Pipe() (r *File, w *File, err error) {
+ var p [2]int
+
+ e := syscall.Pipe2(p[0:], syscall.O_CLOEXEC)
+ if e != nil {
+ // Fallback support for FreeBSD 9, which lacks Pipe2.
+ //
+ // TODO: remove this for Go 1.10 when FreeBSD 9
+ // is removed (Issue 19072).
+
+ // See ../syscall/exec.go for description of lock.
+ syscall.ForkLock.RLock()
+ e := syscall.Pipe(p[0:])
+ if e != nil {
+ syscall.ForkLock.RUnlock()
+ return nil, nil, NewSyscallError("pipe", e)
+ }
+ syscall.CloseOnExec(p[0])
+ syscall.CloseOnExec(p[1])
+ syscall.ForkLock.RUnlock()
+ }
+
+ return newFile(uintptr(p[0]), "|0", true), newFile(uintptr(p[1]), "|1", true), nil
+}
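
Whichever branch creates the descriptors (Pipe2 or the fork-locked fallback), the returned pair behaves the same. A short usage sketch of the portable API:

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        r, w, err := os.Pipe()
        if err != nil {
            log.Fatal(err)
        }
        defer r.Close()

        go func() {
            defer w.Close()
            w.Write([]byte("hello"))
        }()

        buf := make([]byte, 5)
        n, err := r.Read(buf)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %q\n", buf[:n]) // read "hello"
    }
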
diff --git a/libgo/go/os/pipe_linux.go b/libgo/go/os/pipe_linux.go
index 9bafad84f9f..96f2ce900cb 100644
--- a/libgo/go/os/pipe_linux.go
+++ b/libgo/go/os/pipe_linux.go
@@ -29,5 +29,5 @@ func Pipe() (r *File, w *File, err error) {
return nil, nil, NewSyscallError("pipe2", e)
}
- return NewFile(uintptr(p[0]), "|0"), NewFile(uintptr(p[1]), "|1"), nil
+ return newFile(uintptr(p[0]), "|0", true), newFile(uintptr(p[1]), "|1", true), nil
}
diff --git a/libgo/go/os/pipe_test.go b/libgo/go/os/pipe_test.go
index 74cce80ee4b..9d79d84575d 100644
--- a/libgo/go/os/pipe_test.go
+++ b/libgo/go/os/pipe_test.go
@@ -10,11 +10,16 @@ package os_test
import (
"fmt"
"internal/testenv"
+ "io/ioutil"
"os"
osexec "os/exec"
"os/signal"
+ "runtime"
+ "strconv"
+ "strings"
"syscall"
"testing"
+ "time"
)
func TestEPIPE(t *testing.T) {
@@ -82,7 +87,7 @@ func TestStdPipe(t *testing.T) {
t.Errorf("unexpected SIGPIPE signal for descriptor %d sig %t", dest, sig)
}
} else {
- t.Errorf("unexpected exit status %v for descriptor %ds sig %t", err, dest, sig)
+ t.Errorf("unexpected exit status %v for descriptor %d sig %t", err, dest, sig)
}
}
}
@@ -111,3 +116,107 @@ func TestStdPipeHelper(t *testing.T) {
// For descriptor 3, a normal exit is expected.
os.Exit(0)
}
+
+func testClosedPipeRace(t *testing.T, read bool) {
+ switch runtime.GOOS {
+ case "freebsd":
+ t.Skip("FreeBSD does not use the poller; issue 19093")
+ }
+
+ limit := 1
+ if !read {
+ // Get the amount we have to write to overload a pipe
+ // with no reader.
+ limit = 65537
+ if b, err := ioutil.ReadFile("/proc/sys/fs/pipe-max-size"); err == nil {
+ if i, err := strconv.Atoi(strings.TrimSpace(string(b))); err == nil {
+ limit = i + 1
+ }
+ }
+ t.Logf("using pipe write limit of %d", limit)
+ }
+
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ defer w.Close()
+
+ // Close the read end of the pipe in a goroutine while we are
+ // writing to the write end, or vice-versa.
+ go func() {
+ // Give the main goroutine a chance to enter the Read or
+ // Write call. This is sloppy but the test will pass even
+ // if we close before the read/write.
+ time.Sleep(20 * time.Millisecond)
+
+ var err error
+ if read {
+ err = r.Close()
+ } else {
+ err = w.Close()
+ }
+ if err != nil {
+ t.Error(err)
+ }
+ }()
+
+ b := make([]byte, limit)
+ if read {
+ _, err = r.Read(b[:])
+ } else {
+ _, err = w.Write(b[:])
+ }
+ if err == nil {
+ t.Error("I/O on closed pipe unexpectedly succeeded")
+ } else if pe, ok := err.(*os.PathError); !ok {
+ t.Errorf("I/O on closed pipe returned unexpected error type %T; expected os.PathError", pe)
+ } else if pe.Err != os.ErrClosed {
+ t.Errorf("got error %q but expected %q", pe.Err, os.ErrClosed)
+ } else {
+ t.Logf("I/O returned expected error %q", err)
+ }
+}
+
+func TestClosedPipeRaceRead(t *testing.T) {
+ testClosedPipeRace(t, true)
+}
+
+func TestClosedPipeRaceWrite(t *testing.T) {
+ testClosedPipeRace(t, false)
+}
+
+// Issue 20915: Reading on nonblocking fd should not return "waiting
+// for unsupported file type." Currently it returns EAGAIN; it is
+// possible that in the future it will simply wait for data.
+func TestReadNonblockingFd(t *testing.T) {
+ if os.Getenv("GO_WANT_READ_NONBLOCKING_FD") == "1" {
+ fd := int(os.Stdin.Fd())
+ syscall.SetNonblock(fd, true)
+ defer syscall.SetNonblock(fd, false)
+ _, err := os.Stdin.Read(make([]byte, 1))
+ if err != nil {
+ if perr, ok := err.(*os.PathError); !ok || perr.Err != syscall.EAGAIN {
+ t.Fatalf("read on nonblocking stdin got %q, should have gotten EAGAIN", err)
+ }
+ }
+ os.Exit(0)
+ }
+
+ testenv.MustHaveExec(t)
+ r, w, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+ defer w.Close()
+ cmd := osexec.Command(os.Args[0], "-test.run="+t.Name())
+ cmd.Env = append(os.Environ(), "GO_WANT_READ_NONBLOCKING_FD=1")
+ cmd.Stdin = r
+ output, err := cmd.CombinedOutput()
+ t.Logf("%s", output)
+ if err != nil {
+ t.Errorf("child process failed: %v", err)
+ }
+}
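
The child half of TestReadNonblockingFd boils down to the sketch below. Run with stdin connected to a pipe that has no pending data, the Read surfaces EAGAIN inside a *PathError rather than a "waiting for unsupported file type" error (the behavior is environment-dependent; with a regular file or /dev/null on stdin the read simply succeeds or returns EOF):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        fd := int(os.Stdin.Fd())
        syscall.SetNonblock(fd, true)
        defer syscall.SetNonblock(fd, false)

        _, err := os.Stdin.Read(make([]byte, 1))
        if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.EAGAIN {
            fmt.Println("nonblocking stdin: EAGAIN, not an unsupported-type error")
        }
    }
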
diff --git a/libgo/go/os/proc.go b/libgo/go/os/proc.go
index 33a8b26f78d..804128a1da4 100644
--- a/libgo/go/os/proc.go
+++ b/libgo/go/os/proc.go
@@ -25,18 +25,29 @@ func init() {
func runtime_args() []string // in package runtime
// Getuid returns the numeric user id of the caller.
+//
+// On Windows, it returns -1.
func Getuid() int { return syscall.Getuid() }
// Geteuid returns the numeric effective user id of the caller.
+//
+// On Windows, it returns -1.
func Geteuid() int { return syscall.Geteuid() }
// Getgid returns the numeric group id of the caller.
+//
+// On Windows, it returns -1.
func Getgid() int { return syscall.Getgid() }
// Getegid returns the numeric effective group id of the caller.
+//
+// On Windows, it returns -1.
func Getegid() int { return syscall.Getegid() }
// Getgroups returns a list of the numeric ids of groups that the caller belongs to.
+//
+// On Windows, it returns syscall.EWINDOWS. See the os/user package
+// for a possible alternative.
func Getgroups() ([]int, error) {
gids, e := syscall.Getgroups()
return gids, NewSyscallError("getgroups", e)
diff --git a/libgo/go/os/signal/doc.go b/libgo/go/os/signal/doc.go
index 73b01a2764d..16f49c7ab8b 100644
--- a/libgo/go/os/signal/doc.go
+++ b/libgo/go/os/signal/doc.go
@@ -181,10 +181,11 @@ If the Go runtime sees an existing signal handler for the SIGCANCEL or
SIGSETXID signals (which are used only on GNU/Linux), it will turn on
the SA_ONSTACK flag and otherwise keep the signal handler.
-For the synchronous signals, the Go runtime will install a signal
-handler. It will save any existing signal handler. If a synchronous
-signal arrives while executing non-Go code, the Go runtime will invoke
-the existing signal handler instead of the Go signal handler.
+For the synchronous signals and SIGPIPE, the Go runtime will install a
+signal handler. It will save any existing signal handler. If a
+synchronous signal arrives while executing non-Go code, the Go runtime
+will invoke the existing signal handler instead of the Go signal
+handler.
Go code built with -buildmode=c-archive or -buildmode=c-shared will
not install any other signal handlers by default. If there is an
diff --git a/libgo/go/os/signal/signal.go b/libgo/go/os/signal/signal.go
index c1376daaea6..e5a21e85327 100644
--- a/libgo/go/os/signal/signal.go
+++ b/libgo/go/os/signal/signal.go
@@ -11,8 +11,21 @@ import (
var handlers struct {
sync.Mutex
- m map[chan<- os.Signal]*handler
+ // Map a channel to the signals that should be sent to it.
+ m map[chan<- os.Signal]*handler
+ // Map a signal to the number of channels receiving it.
ref [numSig]int64
+ // Map channels to signals while the channel is being stopped.
+ // Not a map because entries live here only very briefly.
+ // We need a separate container because we need m to correspond to ref
+ // at all times, and we also need to keep track of the *handler
+ // value for a channel being stopped. See the Stop function.
+ stopping []stopping
+}
+
+type stopping struct {
+ c chan<- os.Signal
+ h *handler
}
type handler struct {
@@ -142,10 +155,10 @@ func Reset(sig ...os.Signal) {
// When Stop returns, it is guaranteed that c will receive no more signals.
func Stop(c chan<- os.Signal) {
handlers.Lock()
- defer handlers.Unlock()
h := handlers.m[c]
if h == nil {
+ handlers.Unlock()
return
}
delete(handlers.m, c)
@@ -158,8 +171,40 @@ func Stop(c chan<- os.Signal) {
}
}
}
+
+ // Signals will no longer be delivered to the channel.
+ // We want to avoid a race for a signal such as SIGINT:
+ // it should be either delivered to the channel,
+ // or the program should take the default action (that is, exit).
+ // To avoid the possibility that the signal is delivered,
+ // and the signal handler invoked, and then Stop deregisters
+ // the channel before the process function below has a chance
+ // to send it on the channel, put the channel on a list of
+ // channels being stopped and wait for signal delivery to
+ // quiesce before fully removing it.
+
+ handlers.stopping = append(handlers.stopping, stopping{c, h})
+
+ handlers.Unlock()
+
+ signalWaitUntilIdle()
+
+ handlers.Lock()
+
+ for i, s := range handlers.stopping {
+ if s.c == c {
+ handlers.stopping = append(handlers.stopping[:i], handlers.stopping[i+1:]...)
+ break
+ }
+ }
+
+ handlers.Unlock()
}
+// Wait until there are no more signals waiting to be delivered.
+// Defined by the runtime package.
+func signalWaitUntilIdle()
+
func process(sig os.Signal) {
n := signum(sig)
if n < 0 {
@@ -178,4 +223,14 @@ func process(sig os.Signal) {
}
}
}
+
+ // Avoid the race mentioned in Stop.
+ for _, d := range handlers.stopping {
+ if d.h.want(n) {
+ select {
+ case d.c <- sig:
+ default:
+ }
+ }
+ }
}
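
The quiescing dance above exists so the user-facing guarantee holds: after Stop returns, the channel receives no more signals, and a concurrently delivered signal is either already buffered in the channel or takes the default action, never silently dropped. A minimal sketch of the pattern the fix protects (issue 14571):

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, syscall.SIGINT)

        // ... work during which SIGINT is captured on c ...

        signal.Stop(c)
        // After Stop returns, nothing more arrives on c; at most one
        // already-delivered signal may be buffered, which we drain here.
        select {
        case sig := <-c:
            fmt.Println("caught", sig)
        default:
            fmt.Println("no signal pending")
        }
    }
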
diff --git a/libgo/go/os/signal/signal_test.go b/libgo/go/os/signal/signal_test.go
index c8409e73697..10a4146f5e9 100644
--- a/libgo/go/os/signal/signal_test.go
+++ b/libgo/go/os/signal/signal_test.go
@@ -7,12 +7,16 @@
package signal
import (
+ "bytes"
"flag"
+ "fmt"
+ "internal/testenv"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strconv"
+ "sync"
"syscall"
"testing"
"time"
@@ -301,3 +305,90 @@ func TestSIGCONT(t *testing.T) {
syscall.Kill(syscall.Getpid(), syscall.SIGCONT)
waitSig(t, c, syscall.SIGCONT)
}
+
+// Test race between stopping and receiving a signal (issue 14571).
+func TestAtomicStop(t *testing.T) {
+ if os.Getenv("GO_TEST_ATOMIC_STOP") != "" {
+ atomicStopTestProgram()
+ t.Fatal("atomicStopTestProgram returned")
+ }
+
+ testenv.MustHaveExec(t)
+
+ const execs = 10
+ for i := 0; i < execs; i++ {
+ cmd := exec.Command(os.Args[0], "-test.run=TestAtomicStop")
+ cmd.Env = append(os.Environ(), "GO_TEST_ATOMIC_STOP=1")
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ t.Logf("iteration %d: output %s", i, out)
+ } else {
+ t.Logf("iteration %d: exit status %q: output: %s", i, err, out)
+ }
+
+ lost := bytes.Contains(out, []byte("lost signal"))
+ if lost {
+ t.Errorf("iteration %d: lost signal", i)
+ }
+
+ // The program should either die due to SIGINT,
+ // or exit with success without printing "lost signal".
+ if err == nil {
+ if len(out) > 0 && !lost {
+ t.Errorf("iteration %d: unexpected output", i)
+ }
+ } else {
+ if ee, ok := err.(*exec.ExitError); !ok {
+ t.Errorf("iteration %d: error (%v) has type %T; expected exec.ExitError", i, err, err)
+ } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
+ t.Errorf("iteration %d: error.Sys (%v) has type %T; expected syscall.WaitStatus", i, ee.Sys(), ee.Sys())
+ } else if !ws.Signaled() || ws.Signal() != syscall.SIGINT {
+ t.Errorf("iteration %d: got exit status %v; expected SIGINT", i, ee)
+ }
+ }
+ }
+}
+
+// atomicStopTestProgram is run in a subprocess by TestAtomicStop.
+// It tries to trigger a signal delivery race. This function should
+// either catch a signal or die from it.
+func atomicStopTestProgram() {
+ const tries = 10
+ pid := syscall.Getpid()
+ printed := false
+ for i := 0; i < tries; i++ {
+ cs := make(chan os.Signal, 1)
+ Notify(cs, syscall.SIGINT)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ Stop(cs)
+ }()
+
+ syscall.Kill(pid, syscall.SIGINT)
+
+ // At this point we should either die from SIGINT or
+ // get a notification on cs. If neither happens, we
+ // dropped the signal. Give it a second to deliver,
+ // which is far far longer than it should require.
+
+ select {
+ case <-cs:
+ case <-time.After(1 * time.Second):
+ if !printed {
+ fmt.Print("lost signal on iterations:")
+ printed = true
+ }
+ fmt.Printf(" %d", i)
+ }
+
+ wg.Wait()
+ }
+ if printed {
+ fmt.Print("\n")
+ }
+
+ os.Exit(0)
+}
diff --git a/libgo/go/os/stat_unix.go b/libgo/go/os/stat_unix.go
index 043aefe8fad..7855fbab135 100644
--- a/libgo/go/os/stat_unix.go
+++ b/libgo/go/os/stat_unix.go
@@ -17,7 +17,7 @@ func (f *File) Stat() (FileInfo, error) {
return nil, ErrInvalid
}
var fs fileStat
- err := syscall.Fstat(f.fd, &fs.sys)
+ err := f.pfd.Fstat(&fs.sys)
if err != nil {
return nil, &PathError{"stat", f.name, err}
}
diff --git a/libgo/go/os/sys_darwin.go b/libgo/go/os/sys_darwin.go
index 7a8330abb55..11d678ef18d 100644
--- a/libgo/go/os/sys_darwin.go
+++ b/libgo/go/os/sys_darwin.go
@@ -4,28 +4,8 @@
package os
-import "syscall"
-
// supportsCloseOnExec reports whether the platform supports the
// O_CLOEXEC flag.
-var supportsCloseOnExec bool
-
-func init() {
- // Seems like kern.osreldate is veiled on latest OS X. We use
- // kern.osrelease instead.
- osver, err := syscall.Sysctl("kern.osrelease")
- if err != nil {
- return
- }
- var i int
- for i = range osver {
- if osver[i] != '.' {
- continue
- }
- }
- // The O_CLOEXEC flag was introduced in OS X 10.7 (Darwin
- // 11.0.0). See http://support.apple.com/kb/HT1633.
- if i > 2 || i == 2 && osver[0] >= '1' && osver[1] >= '1' {
- supportsCloseOnExec = true
- }
-}
+// The O_CLOEXEC flag was introduced in OS X 10.7 (Darwin 11.0.0).
+// See http://support.apple.com/kb/HT1633.
+const supportsCloseOnExec = true
diff --git a/libgo/go/os/types.go b/libgo/go/os/types.go
index c56548353f1..db7848759cb 100644
--- a/libgo/go/os/types.go
+++ b/libgo/go/os/types.go
@@ -45,7 +45,7 @@ const (
ModeDir FileMode = 1 << (32 - 1 - iota) // d: is a directory
ModeAppend // a: append-only
ModeExclusive // l: exclusive use
- ModeTemporary // T: temporary file (not backed up)
+ ModeTemporary // T: temporary file; Plan 9 only
ModeSymlink // L: symbolic link
ModeDevice // D: device file
ModeNamedPipe // p: named pipe (FIFO)
diff --git a/libgo/go/os/types_unix.go b/libgo/go/os/types_unix.go
index 1f614812fdd..c0259ae0e84 100644
--- a/libgo/go/os/types_unix.go
+++ b/libgo/go/os/types_unix.go
@@ -29,5 +29,3 @@ func (fs *fileStat) Sys() interface{} { return &fs.sys }
func sameFile(fs1, fs2 *fileStat) bool {
return fs1.sys.Dev == fs2.sys.Dev && fs1.sys.Ino == fs2.sys.Ino
}
-
-const badFd = -1
diff --git a/libgo/go/os/types_windows.go b/libgo/go/os/types_windows.go
index ad4e863fcbb..01d6b62a16e 100644
--- a/libgo/go/os/types_windows.go
+++ b/libgo/go/os/types_windows.go
@@ -12,16 +12,17 @@ import (
// A fileStat is the implementation of FileInfo returned by Stat and Lstat.
type fileStat struct {
- name string
- sys syscall.Win32FileAttributeData
- pipe bool
+ name string
+ sys syscall.Win32FileAttributeData
+ filetype uint32 // what syscall.GetFileType returns
// used to implement SameFile
sync.Mutex
- path string
- vol uint32
- idxhi uint32
- idxlo uint32
+ path string
+ vol uint32
+ idxhi uint32
+ idxlo uint32
+ appendNameToPath bool
}
func (fs *fileStat) Size() int64 {
@@ -32,19 +33,22 @@ func (fs *fileStat) Mode() (m FileMode) {
if fs == &devNullStat {
return ModeDevice | ModeCharDevice | 0666
}
- if fs.sys.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
- m |= ModeDir | 0111
- }
if fs.sys.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 {
m |= 0444
} else {
m |= 0666
}
if fs.sys.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 {
- m |= ModeSymlink
+ return m | ModeSymlink
+ }
+ if fs.sys.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ m |= ModeDir | 0111
}
- if fs.pipe {
+ switch fs.filetype {
+ case syscall.FILE_TYPE_PIPE:
m |= ModeNamedPipe
+ case syscall.FILE_TYPE_CHAR:
+ m |= ModeCharDevice
}
return m
}
@@ -63,7 +67,13 @@ func (fs *fileStat) loadFileId() error {
// already done
return nil
}
- pathp, err := syscall.UTF16PtrFromString(fs.path)
+ var path string
+ if fs.appendNameToPath {
+ path = fs.path + `\` + fs.name
+ } else {
+ path = fs.path
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
if err != nil {
return err
}
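
The Mode reordering is visible to portable code: a directory reparse point on Windows now reports ModeSymlink without ModeDir, so symlink checks must come before IsDir. A small sketch of the recommended order (the path is illustrative):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        fi, err := os.Lstat("some-link") // hypothetical path
        if err != nil {
            fmt.Println(err)
            return
        }
        switch {
        case fi.Mode()&os.ModeSymlink != 0:
            // Test for symlinks first; after this change a directory
            // reparse point no longer also reports ModeDir.
            fmt.Println("symlink")
        case fi.Mode().IsDir():
            fmt.Println("directory")
        default:
            fmt.Println("regular file or other")
        }
    }
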
diff --git a/libgo/go/os/user/cgo_lookup_unix.go b/libgo/go/os/user/cgo_lookup_unix.go
new file mode 100644
index 00000000000..8881366cf9f
--- /dev/null
+++ b/libgo/go/os/user/cgo_lookup_unix.go
@@ -0,0 +1,266 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
+// +build cgo
+
+package user
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+// bytePtrToString takes a NUL-terminated array of bytes and converts
+// it to a Go string.
+func bytePtrToString(p *byte) string {
+ a := (*[10000]byte)(unsafe.Pointer(p))
+ i := 0
+ for a[i] != 0 {
+ i++
+ }
+ return string(a[:i])
+}
+
+func current() (*User, error) {
+ return lookupUnixUid(syscall.Getuid())
+}
+
+func lookupUser(username string) (*User, error) {
+ var pwd syscall.Passwd
+ var result *syscall.Passwd
+ p := syscall.StringBytePtr(username)
+
+ buf := alloc(userBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ syscall.Entersyscall()
+ rv := libc_getpwnam_r(p,
+ &pwd,
+ buf.ptr,
+ buf.size,
+ &result)
+ syscall.Exitsyscall()
+ if rv != 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup username %s: %v", username, err)
+ }
+ if result == nil {
+ return nil, UnknownUserError(username)
+ }
+ return buildUser(&pwd), err
+}
+
+func lookupUserId(uid string) (*User, error) {
+ i, e := strconv.Atoi(uid)
+ if e != nil {
+ return nil, e
+ }
+ return lookupUnixUid(i)
+}
+
+func lookupUnixUid(uid int) (*User, error) {
+ var pwd syscall.Passwd
+ var result *syscall.Passwd
+
+ buf := alloc(userBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ syscall.Entersyscall()
+ rv := libc_getpwuid_r(syscall.Uid_t(uid),
+ &pwd,
+ buf.ptr,
+ buf.size,
+ &result)
+ syscall.Exitsyscall()
+ if rv != 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup userid %d: %v", uid, err)
+ }
+ if result == nil {
+ return nil, UnknownUserIdError(uid)
+ }
+ return buildUser(&pwd), nil
+}
+
+func buildUser(pwd *syscall.Passwd) *User {
+ u := &User{
+ Uid: strconv.Itoa(int(pwd.Pw_uid)),
+ Gid: strconv.Itoa(int(pwd.Pw_gid)),
+ Username: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_name))),
+ Name: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_gecos))),
+ HomeDir: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_dir))),
+ }
+ // The pw_gecos field isn't quite standardized. Some docs
+ // say: "It is expected to be a comma separated list of
+ // personal data where the first item is the full name of the
+ // user."
+ if i := strings.Index(u.Name, ","); i >= 0 {
+ u.Name = u.Name[:i]
+ }
+ return u
+}
+
+func currentGroup() (*Group, error) {
+ return lookupUnixGid(syscall.Getgid())
+}
+
+func lookupGroup(groupname string) (*Group, error) {
+ var grp syscall.Group
+ var result *syscall.Group
+
+ buf := alloc(groupBuffer)
+ defer buf.free()
+ p := syscall.StringBytePtr(groupname)
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ syscall.Entersyscall()
+ rv := libc_getgrnam_r(p,
+ &grp,
+ buf.ptr,
+ buf.size,
+ &result)
+ syscall.Exitsyscall()
+ if rv != 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup groupname %s: %v", groupname, err)
+ }
+ if result == nil {
+ return nil, UnknownGroupError(groupname)
+ }
+ return buildGroup(&grp), nil
+}
+
+func lookupGroupId(gid string) (*Group, error) {
+ i, e := strconv.Atoi(gid)
+ if e != nil {
+ return nil, e
+ }
+ return lookupUnixGid(i)
+}
+
+func lookupUnixGid(gid int) (*Group, error) {
+ var grp syscall.Group
+ var result *syscall.Group
+
+ buf := alloc(groupBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ syscall.Entersyscall()
+ rv := libc_getgrgid_r(syscall.Gid_t(gid),
+ &grp,
+ buf.ptr,
+ buf.size,
+ &result)
+ syscall.Exitsyscall()
+ if rv != 0 {
+ return syscall.GetErrno()
+ }
+ return 0
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup groupid %d: %v", gid, err)
+ }
+ if result == nil {
+ return nil, UnknownGroupIdError(strconv.Itoa(gid))
+ }
+ return buildGroup(&grp), nil
+}
+
+func buildGroup(grp *syscall.Group) *Group {
+ g := &Group{
+ Gid: strconv.Itoa(int(grp.Gr_gid)),
+ Name: bytePtrToString((*byte)(unsafe.Pointer(grp.Gr_name))),
+ }
+ return g
+}
+
+type bufferKind int
+
+const (
+ userBuffer = bufferKind(syscall.SC_GETPW_R_SIZE_MAX)
+ groupBuffer = bufferKind(syscall.SC_GETGR_R_SIZE_MAX)
+)
+
+func (k bufferKind) initialSize() syscall.Size_t {
+ sz, _ := syscall.Sysconf(int(k))
+ if sz == -1 {
+ // DragonFly and FreeBSD do not have _SC_GETPW_R_SIZE_MAX.
+ // Additionally, not all Linux systems have it, either. For
+ // example, the musl libc returns -1.
+ return 1024
+ }
+ if !isSizeReasonable(int64(sz)) {
+ // Truncate. If this truly isn't enough, retryWithBuffer will error on the first run.
+ return maxBufferSize
+ }
+ return syscall.Size_t(sz)
+}
+
+type memBuffer struct {
+ ptr *byte
+ size syscall.Size_t
+}
+
+func alloc(kind bufferKind) *memBuffer {
+ sz := kind.initialSize()
+ b := make([]byte, sz)
+ return &memBuffer{
+ ptr: &b[0],
+ size: sz,
+ }
+}
+
+func (mb *memBuffer) resize(newSize syscall.Size_t) {
+ b := make([]byte, newSize)
+ mb.ptr = &b[0]
+ mb.size = newSize
+}
+
+func (mb *memBuffer) free() {
+ mb.ptr = nil
+}
+
+// retryWithBuffer repeatedly calls f(), increasing the size of the
+// buffer each time, until f succeeds, fails with a non-ERANGE error,
+// or the buffer exceeds a reasonable limit.
+func retryWithBuffer(buf *memBuffer, f func() syscall.Errno) error {
+ for {
+ errno := f()
+ if errno == 0 {
+ return nil
+ } else if errno != syscall.ERANGE {
+ return errno
+ }
+ newSize := buf.size * 2
+ if !isSizeReasonable(int64(newSize)) {
+ return fmt.Errorf("internal buffer exceeds %d bytes", maxBufferSize)
+ }
+ buf.resize(newSize)
+ }
+}
+
+const maxBufferSize = 1 << 20
+
+func isSizeReasonable(sz int64) bool {
+ return sz > 0 && sz <= maxBufferSize
+}
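
The grow-and-retry loop in retryWithBuffer is the standard idiom for getpwnam_r-style C APIs that report ERANGE when the caller's buffer is too small. A standalone sketch of the same loop, with a fake function standing in for the libc call (fakeLookup is hypothetical, for illustration only; the real code calls libc_getpwnam_r and friends):

    package main

    import (
        "errors"
        "fmt"
        "syscall"
    )

    const maxBuf = 1 << 20

    // fakeLookup stands in for a getpwnam_r-style call: it fails with
    // ERANGE until the buffer is big enough.
    func fakeLookup(buf []byte) syscall.Errno {
        if len(buf) < 4096 {
            return syscall.ERANGE
        }
        return 0
    }

    func retry(size int, f func([]byte) syscall.Errno) error {
        for {
            buf := make([]byte, size)
            switch errno := f(buf); {
            case errno == 0:
                return nil
            case errno != syscall.ERANGE:
                return errno
            }
            size *= 2 // buffer too small: double and retry
            if size > maxBuf {
                return errors.New("buffer exceeds reasonable limit")
            }
        }
    }

    func main() {
        fmt.Println(retry(1024, fakeLookup)) // <nil>, after two retries
    }
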
diff --git a/libgo/go/os/user/lookup.go b/libgo/go/os/user/lookup.go
index 3b4421badd5..2243a25788a 100644
--- a/libgo/go/os/user/lookup.go
+++ b/libgo/go/os/user/lookup.go
@@ -4,20 +4,40 @@
package user
+import "sync"
+
// Current returns the current user.
func Current() (*User, error) {
- return current()
+ cache.Do(func() { cache.u, cache.err = current() })
+ if cache.err != nil {
+ return nil, cache.err
+ }
+ u := *cache.u // copy
+ return &u, nil
+}
+
+// cache of the current user
+var cache struct {
+ sync.Once
+ u *User
+ err error
}
// Lookup looks up a user by username. If the user cannot be found, the
// returned error is of type UnknownUserError.
func Lookup(username string) (*User, error) {
+ if u, err := Current(); err == nil && u.Username == username {
+ return u, err
+ }
return lookupUser(username)
}
// LookupId looks up a user by userid. If the user cannot be found, the
// returned error is of type UnknownUserIdError.
func LookupId(uid string) (*User, error) {
+ if u, err := Current(); err == nil && u.Uid == uid {
+ return u, err
+ }
return lookupUserId(uid)
}
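
Caching Current behind sync.Once also lets Lookup and LookupId short-circuit for the current user. The copy on return matters: callers may mutate the returned struct without poisoning the cache. A minimal sketch of the same pattern in isolation (record and expensiveLoad are illustrative stand-ins):

    package main

    import (
        "fmt"
        "sync"
    )

    type record struct{ Name string }

    var cache struct {
        sync.Once
        r   *record
        err error
    }

    func expensiveLoad() (*record, error) { return &record{Name: "gopher"}, nil }

    func current() (*record, error) {
        cache.Do(func() { cache.r, cache.err = expensiveLoad() })
        if cache.err != nil {
            return nil, cache.err
        }
        r := *cache.r // copy so callers can't mutate the cached value
        return &r, nil
    }

    func main() {
        a, _ := current()
        a.Name = "mutated"
        b, _ := current()
        fmt.Println(b.Name) // still "gopher": the cache is isolated
    }
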
diff --git a/libgo/go/os/user/lookup_android.go b/libgo/go/os/user/lookup_android.go
index b1be3dc1931..8ca30b8c274 100644
--- a/libgo/go/os/user/lookup_android.go
+++ b/libgo/go/os/user/lookup_android.go
@@ -8,15 +8,6 @@ package user
import "errors"
-func init() {
- userImplemented = false
- groupImplemented = false
-}
-
-func current() (*User, error) {
- return nil, errors.New("user: Current not implemented on android")
-}
-
func lookupUser(string) (*User, error) {
return nil, errors.New("user: Lookup not implemented on android")
}
@@ -32,7 +23,3 @@ func lookupGroup(string) (*Group, error) {
func lookupGroupId(string) (*Group, error) {
return nil, errors.New("user: LookupGroupId not implemented on android")
}
-
-func listGroups(*User) ([]string, error) {
- return nil, errors.New("user: GroupIds not implemented on android")
-}
diff --git a/libgo/go/os/user/lookup_stubs.go b/libgo/go/os/user/lookup_stubs.go
index ebf24f79dee..d23870fda88 100644
--- a/libgo/go/os/user/lookup_stubs.go
+++ b/libgo/go/os/user/lookup_stubs.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !cgo,!windows,!plan9,!android
+// +build !cgo,!windows,!plan9 android
package user
@@ -15,7 +15,6 @@ import (
)
func init() {
- userImplemented = false
groupImplemented = false
}
@@ -27,7 +26,9 @@ func current() (*User, error) {
Name: "", // ignored
HomeDir: os.Getenv("HOME"),
}
- if runtime.GOOS == "nacl" {
+	// On NaCl and Android, return a dummy user instead of failing.
+ switch runtime.GOOS {
+ case "nacl":
if u.Uid == "" {
u.Uid = "1"
}
@@ -35,7 +36,17 @@ func current() (*User, error) {
u.Username = "nacl"
}
if u.HomeDir == "" {
- u.HomeDir = "/home/nacl"
+ u.HomeDir = "/"
+ }
+ case "android":
+ if u.Uid == "" {
+ u.Uid = "1"
+ }
+ if u.Username == "" {
+ u.Username = "android"
+ }
+ if u.HomeDir == "" {
+ u.HomeDir = "/sdcard"
}
}
// cgo isn't available, but if we found the minimum information
@@ -46,23 +57,10 @@ func current() (*User, error) {
return u, fmt.Errorf("user: Current not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
-func lookupUser(username string) (*User, error) {
- return nil, errors.New("user: Lookup requires cgo")
-}
-
-func lookupUserId(uid string) (*User, error) {
- return nil, errors.New("user: LookupId requires cgo")
-}
-
-func lookupGroup(groupname string) (*Group, error) {
- return nil, errors.New("user: LookupGroup requires cgo")
-}
-
-func lookupGroupId(string) (*Group, error) {
- return nil, errors.New("user: LookupGroupId requires cgo")
-}
-
func listGroups(*User) ([]string, error) {
+ if runtime.GOOS == "android" {
+ return nil, errors.New("user: GroupIds not implemented on Android")
+ }
return nil, errors.New("user: GroupIds requires cgo")
}
diff --git a/libgo/go/os/user/lookup_unix.go b/libgo/go/os/user/lookup_unix.go
index 9670ada4942..5f34ba8611c 100644
--- a/libgo/go/os/user/lookup_unix.go
+++ b/libgo/go/os/user/lookup_unix.go
@@ -1,266 +1,197 @@
-// Copyright 2011 The Go Authors. All rights reserved.
+// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris
-// +build cgo
+// +build darwin dragonfly freebsd !android,linux nacl netbsd openbsd solaris
+// +build !cgo
package user
import (
- "fmt"
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
"strconv"
"strings"
- "syscall"
- "unsafe"
)
-// bytePtrToString takes a NUL-terminated array of bytes and convert
-// it to a Go string.
-func bytePtrToString(p *byte) string {
- a := (*[10000]byte)(unsafe.Pointer(p))
- i := 0
- for a[i] != 0 {
- i++
- }
- return string(a[:i])
-}
+const groupFile = "/etc/group"
+const userFile = "/etc/passwd"
-func current() (*User, error) {
- return lookupUnixUid(syscall.Getuid())
-}
+var colon = []byte{':'}
-func lookupUser(username string) (*User, error) {
- var pwd syscall.Passwd
- var result *syscall.Passwd
- p := syscall.StringBytePtr(username)
+func init() {
+ groupImplemented = false
+}
- buf := alloc(userBuffer)
- defer buf.free()
+// lineFunc returns a value, an error, or (nil, nil) to skip the row.
+type lineFunc func(line []byte) (v interface{}, err error)
- err := retryWithBuffer(buf, func() syscall.Errno {
- syscall.Entersyscall()
- rv := libc_getpwnam_r(p,
- &pwd,
- buf.ptr,
- buf.size,
- &result)
- syscall.Exitsyscall()
- if rv != 0 {
- return syscall.GetErrno()
+// readColonFile parses r as an /etc/group or /etc/passwd style file, running
+// fn for each row. readColonFile returns a value, an error, or (nil, nil) if
+// the end of the file is reached without a match.
+func readColonFile(r io.Reader, fn lineFunc) (v interface{}, err error) {
+ bs := bufio.NewScanner(r)
+ for bs.Scan() {
+ line := bs.Bytes()
+ // There's no spec for /etc/passwd or /etc/group, but we try to follow
+ // the same rules as the glibc parser, which allows comments and blank
+ // space at the beginning of a line.
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 || line[0] == '#' {
+ continue
+ }
+ v, err = fn(line)
+ if v != nil || err != nil {
+ return
}
- return 0
- })
- if err != nil {
- return nil, fmt.Errorf("user: lookup username %s: %v", username, err)
- }
- if result == nil {
- return nil, UnknownUserError(username)
}
- return buildUser(&pwd), err
+ return nil, bs.Err()
}
-func lookupUserId(uid string) (*User, error) {
- i, e := strconv.Atoi(uid)
- if e != nil {
- return nil, e
+func matchGroupIndexValue(value string, idx int) lineFunc {
+ var leadColon string
+ if idx > 0 {
+ leadColon = ":"
}
- return lookupUnixUid(i)
-}
-
-func lookupUnixUid(uid int) (*User, error) {
- var pwd syscall.Passwd
- var result *syscall.Passwd
-
- buf := alloc(userBuffer)
- defer buf.free()
-
- err := retryWithBuffer(buf, func() syscall.Errno {
- syscall.Entersyscall()
- rv := libc_getpwuid_r(syscall.Uid_t(uid),
- &pwd,
- buf.ptr,
- buf.size,
- &result)
- syscall.Exitsyscall()
- if rv != 0 {
- return syscall.GetErrno()
+ substr := []byte(leadColon + value + ":")
+ return func(line []byte) (v interface{}, err error) {
+ if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 3 {
+ return
}
- return 0
- })
- if err != nil {
- return nil, fmt.Errorf("user: lookup userid %d: %v", uid, err)
- }
- if result == nil {
- return nil, UnknownUserIdError(uid)
+ // wheel:*:0:root
+ parts := strings.SplitN(string(line), ":", 4)
+ if len(parts) < 4 || parts[0] == "" || parts[idx] != value ||
+ // If the file contains +foo and you search for "foo", glibc
+ // returns an "invalid argument" error. Similarly, if you search
+ // for a gid for a row where the group name starts with "+" or "-",
+ // glibc fails to find the record.
+ parts[0][0] == '+' || parts[0][0] == '-' {
+ return
+ }
+ if _, err := strconv.Atoi(parts[2]); err != nil {
+ return nil, nil
+ }
+ return &Group{Name: parts[0], Gid: parts[2]}, nil
}
- return buildUser(&pwd), nil
}
-func buildUser(pwd *syscall.Passwd) *User {
- u := &User{
- Uid: strconv.Itoa(int(pwd.Pw_uid)),
- Gid: strconv.Itoa(int(pwd.Pw_gid)),
- Username: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_name))),
- Name: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_gecos))),
- HomeDir: bytePtrToString((*byte)(unsafe.Pointer(pwd.Pw_dir))),
- }
- // The pw_gecos field isn't quite standardized. Some docs
- // say: "It is expected to be a comma separated list of
- // personal data where the first item is the full name of the
- // user."
- if i := strings.Index(u.Name, ","); i >= 0 {
- u.Name = u.Name[:i]
+func findGroupId(id string, r io.Reader) (*Group, error) {
+ if v, err := readColonFile(r, matchGroupIndexValue(id, 2)); err != nil {
+ return nil, err
+ } else if v != nil {
+ return v.(*Group), nil
}
- return u
+ return nil, UnknownGroupIdError(id)
}
-func currentGroup() (*Group, error) {
- return lookupUnixGid(syscall.Getgid())
+func findGroupName(name string, r io.Reader) (*Group, error) {
+ if v, err := readColonFile(r, matchGroupIndexValue(name, 0)); err != nil {
+ return nil, err
+ } else if v != nil {
+ return v.(*Group), nil
+ }
+ return nil, UnknownGroupError(name)
}
-func lookupGroup(groupname string) (*Group, error) {
- var grp syscall.Group
- var result *syscall.Group
-
- buf := alloc(groupBuffer)
- defer buf.free()
- p := syscall.StringBytePtr(groupname)
-
- err := retryWithBuffer(buf, func() syscall.Errno {
- syscall.Entersyscall()
- rv := libc_getgrnam_r(p,
- &grp,
- buf.ptr,
- buf.size,
- &result)
- syscall.Exitsyscall()
- if rv != 0 {
- return syscall.GetErrno()
- }
- return 0
- })
- if err != nil {
- return nil, fmt.Errorf("user: lookup groupname %s: %v", groupname, err)
+// returns a *User for a row if that row has the given value at the
+// given index.
+func matchUserIndexValue(value string, idx int) lineFunc {
+ var leadColon string
+ if idx > 0 {
+ leadColon = ":"
}
- if result == nil {
- return nil, UnknownGroupError(groupname)
+ substr := []byte(leadColon + value + ":")
+ return func(line []byte) (v interface{}, err error) {
+ if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 6 {
+ return
+ }
+ // kevin:x:1005:1006::/home/kevin:/usr/bin/zsh
+ parts := strings.SplitN(string(line), ":", 7)
+ if len(parts) < 6 || parts[idx] != value || parts[0] == "" ||
+ parts[0][0] == '+' || parts[0][0] == '-' {
+ return
+ }
+ if _, err := strconv.Atoi(parts[2]); err != nil {
+ return nil, nil
+ }
+ if _, err := strconv.Atoi(parts[3]); err != nil {
+ return nil, nil
+ }
+ u := &User{
+ Username: parts[0],
+ Uid: parts[2],
+ Gid: parts[3],
+ Name: parts[4],
+ HomeDir: parts[5],
+ }
+ // The pw_gecos field isn't quite standardized. Some docs
+ // say: "It is expected to be a comma separated list of
+ // personal data where the first item is the full name of the
+ // user."
+ if i := strings.Index(u.Name, ","); i >= 0 {
+ u.Name = u.Name[:i]
+ }
+ return u, nil
}
- return buildGroup(&grp), nil
}
-func lookupGroupId(gid string) (*Group, error) {
- i, e := strconv.Atoi(gid)
+func findUserId(uid string, r io.Reader) (*User, error) {
+ i, e := strconv.Atoi(uid)
if e != nil {
- return nil, e
+ return nil, errors.New("user: invalid userid " + uid)
}
- return lookupUnixGid(i)
-}
-
-func lookupUnixGid(gid int) (*Group, error) {
- var grp syscall.Group
- var result *syscall.Group
-
- buf := alloc(groupBuffer)
- defer buf.free()
-
- err := retryWithBuffer(buf, func() syscall.Errno {
- syscall.Entersyscall()
- rv := libc_getgrgid_r(syscall.Gid_t(gid),
- &grp,
- buf.ptr,
- buf.size,
- &result)
- syscall.Exitsyscall()
- if rv != 0 {
- return syscall.GetErrno()
- }
- return 0
- })
- if err != nil {
- return nil, fmt.Errorf("user: lookup groupid %d: %v", gid, err)
+ if v, err := readColonFile(r, matchUserIndexValue(uid, 2)); err != nil {
+ return nil, err
+ } else if v != nil {
+ return v.(*User), nil
}
- if result == nil {
- return nil, UnknownGroupIdError(strconv.Itoa(gid))
- }
- return buildGroup(&grp), nil
+ return nil, UnknownUserIdError(i)
}
-func buildGroup(grp *syscall.Group) *Group {
- g := &Group{
- Gid: strconv.Itoa(int(grp.Gr_gid)),
- Name: bytePtrToString((*byte)(unsafe.Pointer(grp.Gr_name))),
+func findUsername(name string, r io.Reader) (*User, error) {
+ if v, err := readColonFile(r, matchUserIndexValue(name, 0)); err != nil {
+ return nil, err
+ } else if v != nil {
+ return v.(*User), nil
}
- return g
+ return nil, UnknownUserError(name)
}
-type bufferKind int
-
-const (
- userBuffer = bufferKind(syscall.SC_GETPW_R_SIZE_MAX)
- groupBuffer = bufferKind(syscall.SC_GETGR_R_SIZE_MAX)
-)
-
-func (k bufferKind) initialSize() syscall.Size_t {
- sz, _ := syscall.Sysconf(int(k))
- if sz == -1 {
- // DragonFly and FreeBSD do not have _SC_GETPW_R_SIZE_MAX.
- // Additionally, not all Linux systems have it, either. For
- // example, the musl libc returns -1.
- return 1024
- }
- if !isSizeReasonable(int64(sz)) {
- // Truncate. If this truly isn't enough, retryWithBuffer will error on the first run.
- return maxBufferSize
+func lookupGroup(groupname string) (*Group, error) {
+ f, err := os.Open(groupFile)
+ if err != nil {
+ return nil, err
}
- return syscall.Size_t(sz)
+ defer f.Close()
+ return findGroupName(groupname, f)
}
-type memBuffer struct {
- ptr *byte
- size syscall.Size_t
-}
-
-func alloc(kind bufferKind) *memBuffer {
- sz := kind.initialSize()
- b := make([]byte, sz)
- return &memBuffer{
- ptr: &b[0],
- size: sz,
+func lookupGroupId(id string) (*Group, error) {
+ f, err := os.Open(groupFile)
+ if err != nil {
+ return nil, err
}
+ defer f.Close()
+ return findGroupId(id, f)
}
-func (mb *memBuffer) resize(newSize syscall.Size_t) {
- b := make([]byte, newSize)
- mb.ptr = &b[0]
- mb.size = newSize
-}
-
-func (mb *memBuffer) free() {
- mb.ptr = nil
-}
-
-// retryWithBuffer repeatedly calls f(), increasing the size of the
-// buffer each time, until f succeeds, fails with a non-ERANGE error,
-// or the buffer exceeds a reasonable limit.
-func retryWithBuffer(buf *memBuffer, f func() syscall.Errno) error {
- for {
- errno := f()
- if errno == 0 {
- return nil
- } else if errno != syscall.ERANGE {
- return errno
- }
- newSize := buf.size * 2
- if !isSizeReasonable(int64(newSize)) {
- return fmt.Errorf("internal buffer exceeds %d bytes", maxBufferSize)
- }
- buf.resize(newSize)
+func lookupUser(username string) (*User, error) {
+ f, err := os.Open(userFile)
+ if err != nil {
+ return nil, err
}
+ defer f.Close()
+ return findUsername(username, f)
}
-const maxBufferSize = 1 << 20
-
-func isSizeReasonable(sz int64) bool {
- return sz > 0 && sz <= maxBufferSize
+func lookupUserId(uid string) (*User, error) {
+ f, err := os.Open(userFile)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return findUserId(uid, f)
}
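
The file-based lookups above consume /etc/passwd-style colon-separated rows, splitting on colons and trimming the pw_gecos field. A minimal, self-contained sketch of that row handling (illustrative only; the line and field positions mirror the comment in matchUserIndexValue):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        line := "kevin:x:1005:1006:Kevin Smith,Room 101:/home/kevin:/usr/bin/zsh"
        // parts: [username, password, uid, gid, gecos, home, shell]
        parts := strings.SplitN(line, ":", 7)
        name := parts[4]
        if i := strings.Index(name, ","); i >= 0 {
            name = name[:i] // pw_gecos: keep only the full name before the first comma
        }
        fmt.Printf("user=%s uid=%s gid=%s name=%q home=%s\n",
            parts[0], parts[2], parts[3], name, parts[5])
        // user=kevin uid=1005 gid=1006 name="Kevin Smith" home=/home/kevin
    }
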
diff --git a/libgo/go/os/user/lookup_unix_test.go b/libgo/go/os/user/lookup_unix_test.go
new file mode 100644
index 00000000000..02c88ab8757
--- /dev/null
+++ b/libgo/go/os/user/lookup_unix_test.go
@@ -0,0 +1,276 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd !android,linux nacl netbsd openbsd solaris
+// +build !cgo
+
+package user
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+)
+
+const testGroupFile = `# See the opendirectoryd(8) man page for additional
+# information about Open Directory.
+##
+nobody:*:-2:
+nogroup:*:-1:
+wheel:*:0:root
+emptyid:*::root
+invalidgid:*:notanumber:root
++plussign:*:20:root
+-minussign:*:21:root
+
+daemon:*:1:root
+ indented:*:7:
+# comment:*:4:found
+ # comment:*:4:found
+kmem:*:2:root
+`
+
+var groupTests = []struct {
+ in string
+ name string
+ gid string
+}{
+ {testGroupFile, "nobody", "-2"},
+ {testGroupFile, "kmem", "2"},
+ {testGroupFile, "notinthefile", ""},
+ {testGroupFile, "comment", ""},
+ {testGroupFile, "plussign", ""},
+ {testGroupFile, "+plussign", ""},
+ {testGroupFile, "-minussign", ""},
+ {testGroupFile, "minussign", ""},
+ {testGroupFile, "emptyid", ""},
+ {testGroupFile, "invalidgid", ""},
+ {testGroupFile, "indented", "7"},
+ {testGroupFile, "# comment", ""},
+ {"", "emptyfile", ""},
+}
+
+func TestFindGroupName(t *testing.T) {
+ for _, tt := range groupTests {
+ got, err := findGroupName(tt.name, strings.NewReader(tt.in))
+ if tt.gid == "" {
+ if err == nil {
+ t.Errorf("findGroupName(%s): got nil error, expected err", tt.name)
+ continue
+ }
+ switch terr := err.(type) {
+ case UnknownGroupError:
+ if terr.Error() != "group: unknown group "+tt.name {
+ t.Errorf("findGroupName(%s): got %v, want %v", tt.name, terr, tt.name)
+ }
+ default:
+ t.Errorf("findGroupName(%s): got unexpected error %v", tt.name, terr)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("findGroupName(%s): got unexpected error %v", tt.name, err)
+ }
+ if got.Gid != tt.gid {
+ t.Errorf("findGroupName(%s): got gid %v, want %s", tt.name, got.Gid, tt.gid)
+ }
+ if got.Name != tt.name {
+ t.Errorf("findGroupName(%s): got name %s, want %s", tt.name, got.Name, tt.name)
+ }
+ }
+ }
+}
+
+var groupIdTests = []struct {
+ in string
+ gid string
+ name string
+}{
+ {testGroupFile, "-2", "nobody"},
+ {testGroupFile, "2", "kmem"},
+ {testGroupFile, "notinthefile", ""},
+ {testGroupFile, "comment", ""},
+ {testGroupFile, "7", "indented"},
+ {testGroupFile, "4", ""},
+ {testGroupFile, "20", ""}, // row starts with a plus
+ {testGroupFile, "21", ""}, // row starts with a minus
+ {"", "emptyfile", ""},
+}
+
+func TestFindGroupId(t *testing.T) {
+ for _, tt := range groupIdTests {
+ got, err := findGroupId(tt.gid, strings.NewReader(tt.in))
+ if tt.name == "" {
+ if err == nil {
+ t.Errorf("findGroupId(%s): got nil error, expected err", tt.gid)
+ continue
+ }
+ switch terr := err.(type) {
+ case UnknownGroupIdError:
+ if want := "group: unknown groupid " + tt.gid; terr.Error() != want {
+ t.Errorf("findGroupId(%s): got %v, want %v", tt.gid, terr, want)
+ }
+ default:
+ t.Errorf("findGroupId(%s): got unexpected error %v", tt.gid, terr)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("findGroupId(%s): got unexpected error %v", tt.name, err)
+ }
+ if got.Gid != tt.gid {
+ t.Errorf("findGroupId(%s): got gid %v, want %s", tt.name, got.Gid, tt.gid)
+ }
+ if got.Name != tt.name {
+ t.Errorf("findGroupId(%s): got name %s, want %s", tt.name, got.Name, tt.name)
+ }
+ }
+ }
+}
+
+const testUserFile = ` # Example user file
+root:x:0:0:root:/root:/bin/bash
+daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
+bin:x:2:3:bin:/bin:/usr/sbin/nologin
+ indented:x:3:3:indented:/dev:/usr/sbin/nologin
+sync:x:4:65534:sync:/bin:/bin/sync
+negative:x:-5:60:games:/usr/games:/usr/sbin/nologin
+man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
+allfields:x:6:12:mansplit,man2,man3,man4:/home/allfields:/usr/sbin/nologin
++plussign:x:8:10:man:/var/cache/man:/usr/sbin/nologin
+-minussign:x:9:10:man:/var/cache/man:/usr/sbin/nologin
+
+malformed:x:27:12 # more:colons:after:comment
+
+struid:x:notanumber:12 # more:colons:after:comment
+
+# commented:x:28:12:commented:/var/cache/man:/usr/sbin/nologin
+ # commentindented:x:29:12:commentindented:/var/cache/man:/usr/sbin/nologin
+
+struid2:x:30:badgid:struid2name:/home/struid:/usr/sbin/nologin
+`
+
+var userIdTests = []struct {
+ in string
+ uid string
+ name string
+}{
+ {testUserFile, "-5", "negative"},
+ {testUserFile, "2", "bin"},
+ {testUserFile, "100", ""}, // not in the file
+ {testUserFile, "8", ""}, // plus sign, glibc doesn't find it
+ {testUserFile, "9", ""}, // minus sign, glibc doesn't find it
+ {testUserFile, "27", ""}, // malformed
+ {testUserFile, "28", ""}, // commented out
+ {testUserFile, "29", ""}, // commented out, indented
+ {testUserFile, "3", "indented"},
+ {testUserFile, "30", ""}, // the Gid is not valid, shouldn't match
+ {"", "1", ""},
+}
+
+func TestInvalidUserId(t *testing.T) {
+ _, err := findUserId("notanumber", strings.NewReader(""))
+ if err == nil {
+ t.Fatalf("findUserId('notanumber'): got nil error")
+ }
+ if want := "user: invalid userid notanumber"; err.Error() != want {
+ t.Errorf("findUserId('notanumber'): got %v, want %s", err, want)
+ }
+}
+
+func TestLookupUserId(t *testing.T) {
+ for _, tt := range userIdTests {
+ got, err := findUserId(tt.uid, strings.NewReader(tt.in))
+ if tt.name == "" {
+ if err == nil {
+ t.Errorf("findUserId(%s): got nil error, expected err", tt.uid)
+ continue
+ }
+ switch terr := err.(type) {
+ case UnknownUserIdError:
+ if want := "user: unknown userid " + tt.uid; terr.Error() != want {
+ t.Errorf("findUserId(%s): got %v, want %v", tt.name, terr, want)
+ }
+ default:
+ t.Errorf("findUserId(%s): got unexpected error %v", tt.name, terr)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("findUserId(%s): got unexpected error %v", tt.name, err)
+ }
+ if got.Uid != tt.uid {
+ t.Errorf("findUserId(%s): got uid %v, want %s", tt.name, got.Uid, tt.uid)
+ }
+ if got.Username != tt.name {
+ t.Errorf("findUserId(%s): got name %s, want %s", tt.name, got.Username, tt.name)
+ }
+ }
+ }
+}
+
+func TestLookupUserPopulatesAllFields(t *testing.T) {
+ u, err := findUsername("allfields", strings.NewReader(testUserFile))
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := &User{
+ Username: "allfields",
+ Uid: "6",
+ Gid: "12",
+ Name: "mansplit",
+ HomeDir: "/home/allfields",
+ }
+ if !reflect.DeepEqual(u, want) {
+ t.Errorf("findUsername: got %#v, want %#v", u, want)
+ }
+}
+
+var userTests = []struct {
+ in string
+ name string
+ uid string
+}{
+ {testUserFile, "negative", "-5"},
+ {testUserFile, "bin", "2"},
+ {testUserFile, "notinthefile", ""},
+ {testUserFile, "indented", "3"},
+ {testUserFile, "plussign", ""},
+ {testUserFile, "+plussign", ""},
+ {testUserFile, "minussign", ""},
+ {testUserFile, "-minussign", ""},
+ {testUserFile, " indented", ""},
+ {testUserFile, "commented", ""},
+ {testUserFile, "commentindented", ""},
+ {testUserFile, "malformed", ""},
+ {testUserFile, "# commented", ""},
+ {"", "emptyfile", ""},
+}
+
+func TestLookupUser(t *testing.T) {
+ for _, tt := range userTests {
+ got, err := findUsername(tt.name, strings.NewReader(tt.in))
+ if tt.uid == "" {
+ if err == nil {
+ t.Errorf("lookupUser(%s): got nil error, expected err", tt.uid)
+ continue
+ }
+ switch terr := err.(type) {
+ case UnknownUserError:
+ if want := "user: unknown user " + tt.name; terr.Error() != want {
+ t.Errorf("lookupUser(%s): got %v, want %v", tt.name, terr, want)
+ }
+ default:
+ t.Errorf("lookupUser(%s): got unexpected error %v", tt.name, terr)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("lookupUser(%s): got unexpected error %v", tt.name, err)
+ }
+ if got.Uid != tt.uid {
+ t.Errorf("lookupUser(%s): got uid %v, want %s", tt.name, got.Uid, tt.uid)
+ }
+ if got.Username != tt.name {
+ t.Errorf("lookupUser(%s): got name %s, want %s", tt.name, got.Username, tt.name)
+ }
+ }
+ }
+}
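
For callers of the package, the typed errors exercised above can be distinguished from genuine I/O failures. A small usage sketch (assuming "nosuchuser" does not exist on the host):

    package main

    import (
        "fmt"
        "os/user"
    )

    func main() {
        _, err := user.Lookup("nosuchuser")
        switch err.(type) {
        case nil:
            fmt.Println("user exists")
        case user.UnknownUserError:
            fmt.Println("user does not exist") // typed error, as tested above
        default:
            fmt.Println("lookup failed:", err) // e.g. unreadable passwd file
        }
    }
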
diff --git a/libgo/go/os/user/user_test.go b/libgo/go/os/user/user_test.go
index 9d8d94d8dae..b3aeed883cd 100644
--- a/libgo/go/os/user/user_test.go
+++ b/libgo/go/os/user/user_test.go
@@ -16,9 +16,6 @@ func checkUser(t *testing.T) {
}
func TestCurrent(t *testing.T) {
- if runtime.GOOS == "android" {
- t.Skipf("skipping on %s", runtime.GOOS)
- }
u, err := Current()
if err != nil {
t.Fatalf("Current: %v (got %#v)", err, u)
@@ -31,6 +28,12 @@ func TestCurrent(t *testing.T) {
}
}
+func BenchmarkCurrent(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Current()
+ }
+}
+
func compare(t *testing.T, want, got *User) {
if want.Uid != got.Uid {
t.Errorf("got Uid=%q; want %q", got.Uid, want.Uid)
@@ -64,6 +67,9 @@ func TestLookup(t *testing.T) {
if err != nil {
t.Fatalf("Current: %v", err)
}
+ // TODO: Lookup() has a fast path that calls Current() and returns if the
+ // usernames match, so this test does not exercise very much. It would be
+ // good to test looking up a user other than the current one.
got, err := Lookup(want.Username)
if err != nil {
t.Fatalf("Lookup: %v", err)
diff --git a/libgo/go/os/wait_unimp.go b/libgo/go/os/wait_unimp.go
index 0378b830b76..98243b567f4 100644
--- a/libgo/go/os/wait_unimp.go
+++ b/libgo/go/os/wait_unimp.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build aix dragonfly nacl netbsd openbsd solaris
+// +build aix darwin dragonfly nacl netbsd openbsd solaris
package os
diff --git a/libgo/go/os/wait_waitid.go b/libgo/go/os/wait_waitid.go
index 3337395510e..5a62b27f191 100644
--- a/libgo/go/os/wait_waitid.go
+++ b/libgo/go/os/wait_waitid.go
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin linux
+// We used to use this code for Darwin, but according to issue #19314
+// waitid returns if the process is stopped, even when using WEXITED.
+
+// +build linux
package os
diff --git a/libgo/go/path/example_test.go b/libgo/go/path/example_test.go
index 88b76557f2b..21ed1fb2fcd 100644
--- a/libgo/go/path/example_test.go
+++ b/libgo/go/path/example_test.go
@@ -13,7 +13,12 @@ import (
func ExampleBase() {
fmt.Println(path.Base("/a/b"))
- // Output: b
+ fmt.Println(path.Base("/"))
+ fmt.Println(path.Base(""))
+ // Output:
+ // b
+ // /
+ // .
}
func ExampleClean() {
@@ -24,6 +29,7 @@ func ExampleClean() {
"a/c/b/..",
"/../a/c",
"/../a/b/../././/c",
+ "",
}
for _, p := range paths {
@@ -37,16 +43,29 @@ func ExampleClean() {
// Clean("a/c/b/..") = "a/c"
// Clean("/../a/c") = "/a/c"
// Clean("/../a/b/../././/c") = "/a/c"
+ // Clean("") = "."
}
func ExampleDir() {
fmt.Println(path.Dir("/a/b/c"))
- // Output: /a/b
+ fmt.Println(path.Dir("a/b/c"))
+ fmt.Println(path.Dir("/"))
+ fmt.Println(path.Dir(""))
+ // Output:
+ // /a/b
+ // a/b
+ // /
+ // .
}
func ExampleExt() {
fmt.Println(path.Ext("/a/b/c/bar.css"))
- // Output: .css
+ fmt.Println(path.Ext("/"))
+ fmt.Println(path.Ext(""))
+ // Output:
+ // .css
+ //
+ //
}
func ExampleIsAbs() {
@@ -58,17 +77,26 @@ func ExampleJoin() {
fmt.Println(path.Join("a", "b", "c"))
fmt.Println(path.Join("a", "b/c"))
fmt.Println(path.Join("a/b", "c"))
- fmt.Println(path.Join("a/b", "/c"))
+ fmt.Println(path.Join("", ""))
+ fmt.Println(path.Join("a", ""))
+ fmt.Println(path.Join("", "a"))
// Output:
// a/b/c
// a/b/c
// a/b/c
- // a/b/c
+ //
+ // a
+ // a
}
func ExampleSplit() {
fmt.Println(path.Split("static/myfile.css"))
- // Output: static/ myfile.css
+ fmt.Println(path.Split("myfile.css"))
+ fmt.Println(path.Split(""))
+ // Output:
+ // static/ myfile.css
+ // myfile.css
+ //
}
*/
diff --git a/libgo/go/path/filepath/match_test.go b/libgo/go/path/filepath/match_test.go
index ae7ca1c228f..12d922f83bc 100644
--- a/libgo/go/path/filepath/match_test.go
+++ b/libgo/go/path/filepath/match_test.go
@@ -155,8 +155,8 @@ func TestGlob(t *testing.T) {
}
func TestGlobError(t *testing.T) {
- _, err := Glob("[7]")
- if err != nil {
+ _, err := Glob("[]")
+ if err == nil {
t.Error("expected error for bad pattern; got none")
}
}
diff --git a/libgo/go/path/filepath/path.go b/libgo/go/path/filepath/path.go
index 1d8e35c969e..c242143c7a3 100644
--- a/libgo/go/path/filepath/path.go
+++ b/libgo/go/path/filepath/path.go
@@ -4,6 +4,11 @@
// Package filepath implements utility routines for manipulating filename paths
// in a way compatible with the target operating system-defined file paths.
+//
+// The filepath package uses either forward slashes or backslashes,
+// depending on the operating system. To process paths such as URLs
+// that always use forward slashes regardless of the operating
+// system, see the path package.
package filepath
import (
@@ -461,6 +466,10 @@ func Dir(path string) string {
i--
}
dir := Clean(path[len(vol) : i+1])
+ if dir == "." && len(vol) > 2 {
+ // must be UNC
+ return vol
+ }
return vol + dir
}
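
On Windows, the guard above keeps a bare UNC volume intact instead of collapsing it to ".". Expected behavior after this change (mirroring the windirtests rows added in the path_test.go hunk below):

    filepath.Dir(`\\host\share`)   // `\\host\share`  (bare UNC volume, returned as-is)
    filepath.Dir(`\\host\share\a`) // `\\host\share\`
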
diff --git a/libgo/go/path/filepath/path_test.go b/libgo/go/path/filepath/path_test.go
index 7389ea29a48..f2e92528a96 100644
--- a/libgo/go/path/filepath/path_test.go
+++ b/libgo/go/path/filepath/path_test.go
@@ -6,12 +6,14 @@ package filepath_test
import (
"errors"
+ "fmt"
"internal/testenv"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"runtime"
+ "sort"
"strings"
"testing"
)
@@ -389,7 +391,7 @@ func checkMarks(t *testing.T, report bool) {
// Assumes that each node name is unique. Good enough for a test.
// If clear is true, any incoming error is cleared before return. The errors
// are always accumulated, though.
-func mark(path string, info os.FileInfo, err error, errors *[]error, clear bool) error {
+func mark(info os.FileInfo, err error, errors *[]error, clear bool) error {
if err != nil {
*errors = append(*errors, err)
if clear {
@@ -438,7 +440,7 @@ func TestWalk(t *testing.T) {
errors := make([]error, 0, 10)
clear := true
markFn := func(path string, info os.FileInfo, err error) error {
- return mark(path, info, err, &errors, clear)
+ return mark(info, err, &errors, clear)
}
// Expect no errors.
err := filepath.Walk(tree.name, markFn)
@@ -668,6 +670,7 @@ var windirtests = []PathTest{
{`c:\a\b`, `c:\a`},
{`c:a\b`, `c:a`},
{`c:a\b\c`, `c:a\b`},
+ {`\\host\share`, `\\host\share`},
{`\\host\share\`, `\\host\share\`},
{`\\host\share\a`, `\\host\share\`},
{`\\host\share\a\b`, `\\host\share\a`},
@@ -1330,3 +1333,53 @@ func TestBug3486(t *testing.T) { // https://golang.org/issue/3486
t.Fatalf("%q not seen", ken)
}
}
+
+func testWalkSymlink(t *testing.T, mklink func(target, link string) error) {
+ tmpdir, err := ioutil.TempDir("", "testWalkSymlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Chdir(wd)
+
+ err = os.Chdir(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = mklink(tmpdir, "link")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var visited []string
+ err = filepath.Walk(tmpdir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Fatal(err)
+ }
+ rel, err := filepath.Rel(tmpdir, path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ visited = append(visited, rel)
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(visited)
+ want := []string{".", "link"}
+ if fmt.Sprintf("%q", visited) != fmt.Sprintf("%q", want) {
+ t.Errorf("unexpected paths visited %q, want %q", visited, want)
+ }
+}
+
+func TestWalkSymlink(t *testing.T) {
+ testenv.MustHaveSymlink(t)
+ testWalkSymlink(t, os.Symlink)
+}
diff --git a/libgo/go/path/path.go b/libgo/go/path/path.go
index 76c7814c59d..5c905110a1b 100644
--- a/libgo/go/path/path.go
+++ b/libgo/go/path/path.go
@@ -5,7 +5,10 @@
// Package path implements utility routines for manipulating slash-separated
// paths.
//
-// To manipulate operating system paths, use the path/filepath package.
+// The path package should only be used for paths separated by forward
+// slashes, such as the paths in URLs. This package does not deal with
+// Windows paths with drive letters or backslashes; to manipulate
+// operating system paths, use the path/filepath package.
package path
import (
diff --git a/libgo/go/plugin/plugin.go b/libgo/go/plugin/plugin.go
index b86099a4f6f..c7744658122 100644
--- a/libgo/go/plugin/plugin.go
+++ b/libgo/go/plugin/plugin.go
@@ -4,8 +4,6 @@
// Package plugin implements loading and symbol resolution of Go plugins.
//
-// Currently plugins only work on Linux.
-//
// A plugin is a Go main package with exported functions and variables that
// has been built with:
//
@@ -14,6 +12,9 @@
// When a plugin is first opened, the init functions of all packages not
// already part of the program are called. The main function is not run.
// A plugin is only initialized once, and cannot be closed.
+//
+// The plugin support is currently incomplete, only supports Linux,
+// and has known bugs. Please report any issues.
package plugin
// Plugin is a loaded Go plugin.
@@ -44,9 +45,6 @@ func (p *Plugin) Lookup(symName string) (Symbol, error) {
//
// package main
//
-// // // No C code needed.
-// import "C"
-//
// import "fmt"
//
// var V int
diff --git a/libgo/go/plugin/plugin_dlopen.go b/libgo/go/plugin/plugin_dlopen.go
index c5b0a4721c5..3237598f06b 100644
--- a/libgo/go/plugin/plugin_dlopen.go
+++ b/libgo/go/plugin/plugin_dlopen.go
@@ -39,6 +39,47 @@ import (
"unsafe"
)
+// avoid a dependency on strings
+func lastIndexByte(s string, c byte) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == c {
+ return i
+ }
+ }
+ return -1
+}
+
+// pathToPrefix converts a raw string to the prefix that will be used in the symbol
+// table. If modifying, modify the version in internal/obj/sym.go as well.
+func pathToPrefix(s string) string {
+ slash := lastIndexByte(s, '/')
+ // check for chars that need escaping
+ n := 0
+ for r := 0; r < len(s); r++ {
+ if c := s[r]; c <= ' ' || (c == '.' && r > slash) || c == '%' || c == '"' || c >= 0x7F {
+ n++
+ }
+ }
+
+ // quick exit
+ if n == 0 {
+ return s
+ }
+
+ // escape
+ const hex = "0123456789abcdef"
+ p := make([]byte, 0, len(s)+2*n)
+ for r := 0; r < len(s); r++ {
+ if c := s[r]; c <= ' ' || (c == '.' && r > slash) || c == '%' || c == '"' || c >= 0x7F {
+ p = append(p, '%', hex[c>>4], hex[c&0xF])
+ } else {
+ p = append(p, c)
+ }
+ }
+
+ return string(p)
+}
+
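
A worked example of the escaping above (illustrative calls only; the function is unexported): dots are escaped only after the last slash, so the host part of an import path is untouched, and replacements use lowercase hex.

    pathToPrefix("example.com/my.plugin") // "example.com/my%2eplugin"
    pathToPrefix("plain/path")            // "plain/path" (quick exit: nothing to escape)
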
func open(name string) (*Plugin, error) {
cPath := (*C.char)(C.malloc(C.PATH_MAX + 1))
defer C.free(unsafe.Pointer(cPath))
@@ -82,7 +123,6 @@ func open(name string) (*Plugin, error) {
p := &Plugin{
pluginpath: pluginpath,
loaded: make(chan struct{}),
- syms: syms,
}
plugins[filepath] = p
pluginsMu.Unlock()
@@ -97,14 +137,14 @@ func open(name string) (*Plugin, error) {
}
// Fill out the value of each plugin symbol.
+ updatedSyms := map[string]interface{}{}
for symName, sym := range syms {
isFunc := symName[0] == '.'
if isFunc {
delete(syms, symName)
symName = symName[1:]
}
-
- cname := C.CString(pluginpath + "." + symName)
+ cname := C.CString(pathToPrefix(pluginpath) + "." + symName)
p := C.pluginLookup(h, cname, &cErr)
C.free(unsafe.Pointer(cname))
if p == nil {
@@ -116,8 +156,12 @@ func open(name string) (*Plugin, error) {
} else {
(*valp)[1] = p
}
- syms[symName] = sym
+ // We can't add to syms during iteration, as we would end up processing
+ // some symbols twice and be unable to tell whether a symbol is a function.
+ updatedSyms[symName] = sym
}
+ p.syms = updatedSyms
+
close(p.loaded)
return p, nil
}
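
The updatedSyms detour exists because Go leaves it unspecified whether a key inserted during a map range shows up in the same iteration, so writing stripped names back into syms could process a symbol twice. A minimal sketch of the safe pattern (illustrative types, not the plugin package's internals):

    package main

    import "fmt"

    func main() {
        syms := map[string]int{".Read": 1, "Write": 2}
        updated := make(map[string]int, len(syms))
        for name, v := range syms {
            if name[0] == '.' {
                name = name[1:] // strip the leading function marker
            }
            updated[name] = v // write to a fresh map, never back into syms
        }
        fmt.Println(updated) // map[Read:1 Write:2]
    }
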
diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go
index 6ac33526774..736467319ee 100644
--- a/libgo/go/reflect/all_test.go
+++ b/libgo/go/reflect/all_test.go
@@ -1576,9 +1576,11 @@ func BenchmarkCallArgCopy(b *testing.B) {
args := []Value{size.arg}
b.SetBytes(int64(size.arg.Len()))
b.ResetTimer()
- for i := 0; i < b.N; i++ {
- size.fv.Call(args)
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ size.fv.Call(args)
+ }
+ })
}
name := fmt.Sprintf("size=%v", size.arg.Len())
b.Run(name, bench)
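
Several reflect benchmarks in this file switch from a serial b.N loop to b.RunParallel, which shards the iteration count across GOMAXPROCS goroutines. A sketch of the shape of the conversion:

    package bench_test

    import "testing"

    func BenchmarkShape(b *testing.B) {
        b.RunParallel(func(pb *testing.PB) {
            for pb.Next() { // pb.Next hands out shares of b.N per goroutine
                _ = make([]byte, 0, 16) // stand-in for the measured call
            }
        })
    }
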
@@ -2559,6 +2561,28 @@ func TestPtrToGC(t *testing.T) {
}
}
+func BenchmarkPtrTo(b *testing.B) {
+ // Construct a type with a zero ptrToThis.
+ type T struct{ int }
+ t := SliceOf(TypeOf(T{}))
+ ptrToThis := ValueOf(t).Elem().FieldByName("ptrToThis")
+ if !ptrToThis.IsValid() {
+ b.Fatalf("%v has no ptrToThis field; was it removed from rtype?", t)
+ }
+ if ptrToThis.Int() != 0 {
+ b.Fatalf("%v.ptrToThis unexpectedly nonzero", t)
+ }
+ b.ResetTimer()
+
+ // Now benchmark calling PtrTo on it: we'll have to hit the ptrMap cache on
+ // every call.
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ PtrTo(t)
+ }
+ })
+}
+
func TestAddr(t *testing.T) {
var p struct {
X, Y int
@@ -3738,7 +3762,7 @@ func checkSameType(t *testing.T, x, y interface{}) {
func TestArrayOf(t *testing.T) {
// check construction and use of type not in binary
- for _, table := range []struct {
+ tests := []struct {
n int
value func(i int) interface{}
comparable bool
@@ -3816,7 +3840,9 @@ func TestArrayOf(t *testing.T) {
comparable: true,
want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]",
},
- } {
+ }
+
+ for _, table := range tests {
at := ArrayOf(table.n, TypeOf(table.value(0)))
v := New(at).Elem()
vok := New(at).Elem()
@@ -4045,6 +4071,54 @@ func TestSliceOfGC(t *testing.T) {
}
}
+func TestStructOfFieldName(t *testing.T) {
+ // invalid field name "1nvalid"
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "valid", Type: TypeOf("")},
+ StructField{Name: "1nvalid", Type: TypeOf("")},
+ })
+ })
+
+ // invalid field name "+"
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "val1d", Type: TypeOf("")},
+ StructField{Name: "+", Type: TypeOf("")},
+ })
+ })
+
+ // no field name
+ shouldPanic(func() {
+ StructOf([]StructField{
+ StructField{Name: "", Type: TypeOf("")},
+ })
+ })
+
+ // verify creation of a struct with valid struct fields
+ validFields := []StructField{
+ StructField{
+ Name: "Ï",
+ Type: TypeOf(""),
+ },
+ StructField{
+ Name: "ValidName",
+ Type: TypeOf(""),
+ },
+ StructField{
+ Name: "Val1dNam5",
+ Type: TypeOf(""),
+ },
+ }
+
+ validStruct := StructOf(validFields)
+
+ const structStr = `struct { φ string; ValidName string; Val1dNam5 string }`
+ if got, want := validStruct.String(), structStr; got != want {
+ t.Errorf("StructOf(validFields).String()=%q, want %q", got, want)
+ }
+}
+
func TestStructOf(t *testing.T) {
// check construction and use of type not in binary
fields := []StructField{
@@ -4392,7 +4466,7 @@ func TestStructOfGenericAlg(t *testing.T) {
{Name: "S1", Type: st1},
})
- for _, table := range []struct {
+ tests := []struct {
rt Type
idx []int
}{
@@ -4473,7 +4547,9 @@ func TestStructOfGenericAlg(t *testing.T) {
),
idx: []int{2},
},
- } {
+ }
+
+ for _, table := range tests {
v1 := New(table.rt).Elem()
v2 := New(table.rt).Elem()
@@ -4582,18 +4658,21 @@ func TestStructOfWithInterface(t *testing.T) {
type Iface interface {
Get() int
}
- for i, table := range []struct {
+ tests := []struct {
+ name string
typ Type
val Value
impl bool
}{
{
+ name: "StructI",
typ: TypeOf(StructI(want)),
val: ValueOf(StructI(want)),
impl: true,
},
{
- typ: PtrTo(TypeOf(StructI(want))),
+ name: "StructI",
+ typ: PtrTo(TypeOf(StructI(want))),
val: ValueOf(func() interface{} {
v := StructI(want)
return &v
@@ -4601,7 +4680,8 @@ func TestStructOfWithInterface(t *testing.T) {
impl: true,
},
{
- typ: PtrTo(TypeOf(StructIPtr(want))),
+ name: "StructIPtr",
+ typ: PtrTo(TypeOf(StructIPtr(want))),
val: ValueOf(func() interface{} {
v := StructIPtr(want)
return &v
@@ -4609,6 +4689,7 @@ func TestStructOfWithInterface(t *testing.T) {
impl: true,
},
{
+ name: "StructIPtr",
typ: TypeOf(StructIPtr(want)),
val: ValueOf(StructIPtr(want)),
impl: false,
@@ -4618,41 +4699,70 @@ func TestStructOfWithInterface(t *testing.T) {
// val: ValueOf(StructI(want)),
// impl: true,
// },
- } {
- rt := StructOf(
- []StructField{
- {
- Name: "",
+ }
+
+ for i, table := range tests {
+ for j := 0; j < 2; j++ {
+ var fields []StructField
+ if j == 1 {
+ fields = append(fields, StructField{
+ Name: "Dummy",
PkgPath: "",
- Type: table.typ,
- },
- },
- )
- rv := New(rt).Elem()
- rv.Field(0).Set(table.val)
+ Type: TypeOf(int(0)),
+ })
+ }
+ fields = append(fields, StructField{
+ Name: table.name,
+ Anonymous: true,
+ PkgPath: "",
+ Type: table.typ,
+ })
- if _, ok := rv.Interface().(Iface); ok != table.impl {
- if table.impl {
- t.Errorf("test-%d: type=%v fails to implement Iface.\n", i, table.typ)
- } else {
- t.Errorf("test-%d: type=%v should NOT implement Iface\n", i, table.typ)
+ // We currently do not correctly implement methods
+ // for anonymous fields other than the first.
+ // Therefore, for now, we expect those methods
+ // to not exist. See issues 15924 and 20824.
+ // When those issues are fixed, this check for the panic
+ // should be removed.
+ if j == 1 && table.impl {
+ func() {
+ defer func() {
+ if err := recover(); err == nil {
+ t.Errorf("test-%d-%d did not panic", i, j)
+ }
+ }()
+ _ = StructOf(fields)
+ }()
+ continue
}
- continue
- }
- if !table.impl {
- continue
- }
+ rt := StructOf(fields)
+ rv := New(rt).Elem()
+ rv.Field(j).Set(table.val)
- v := rv.Interface().(Iface).Get()
- if v != want {
- t.Errorf("test-%d: x.Get()=%v. want=%v\n", i, v, want)
- }
+ if _, ok := rv.Interface().(Iface); ok != table.impl {
+ if table.impl {
+ t.Errorf("test-%d-%d: type=%v fails to implement Iface.\n", i, j, table.typ)
+ } else {
+ t.Errorf("test-%d-%d: type=%v should NOT implement Iface\n", i, j, table.typ)
+ }
+ continue
+ }
- fct := rv.MethodByName("Get")
- out := fct.Call(nil)
- if !DeepEqual(out[0].Interface(), want) {
- t.Errorf("test-%d: x.Get()=%v. want=%v\n", i, out[0].Interface(), want)
+ if !table.impl {
+ continue
+ }
+
+ v := rv.Interface().(Iface).Get()
+ if v != want {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, v, want)
+ }
+
+ fct := rv.MethodByName("Get")
+ out := fct.Call(nil)
+ if !DeepEqual(out[0].Interface(), want) {
+ t.Errorf("test-%d-%d: x.Get()=%v. want=%v\n", i, j, out[0].Interface(), want)
+ }
}
}
}
@@ -4914,16 +5024,20 @@ type B1 struct {
func BenchmarkFieldByName1(b *testing.B) {
t := TypeOf(B1{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("Z")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("Z")
+ }
+ })
}
func BenchmarkFieldByName2(b *testing.B) {
t := TypeOf(S3{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("B")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("B")
+ }
+ })
}
type R0 struct {
@@ -5006,9 +5120,11 @@ func TestEmbed(t *testing.T) {
func BenchmarkFieldByName3(b *testing.B) {
t := TypeOf(R0{})
- for i := 0; i < b.N; i++ {
- t.FieldByName("X")
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ t.FieldByName("X")
+ }
+ })
}
type S struct {
@@ -5018,9 +5134,11 @@ type S struct {
func BenchmarkInterfaceBig(b *testing.B) {
v := ValueOf(S{})
- for i := 0; i < b.N; i++ {
- v.Interface()
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
b.StopTimer()
}
@@ -5036,9 +5154,11 @@ func TestAllocsInterfaceBig(t *testing.T) {
func BenchmarkInterfaceSmall(b *testing.B) {
v := ValueOf(int64(0))
- for i := 0; i < b.N; i++ {
- v.Interface()
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v.Interface()
+ }
+ })
}
func TestAllocsInterfaceSmall(t *testing.T) {
@@ -5835,7 +5955,7 @@ func TestTypeOfTypeOf(t *testing.T) {
check("SliceOf", SliceOf(TypeOf(T{})))
}
-type XM struct{}
+type XM struct{ _ bool }
func (*XM) String() string { return "" }
@@ -5861,6 +5981,24 @@ func TestMapAlloc(t *testing.T) {
if allocs > 0.5 {
t.Errorf("allocs per map assignment: want 0 got %f", allocs)
}
+
+ const size = 1000
+ tmp := 0
+ val := ValueOf(&tmp).Elem()
+ allocs = testing.AllocsPerRun(100, func() {
+ mv := MakeMapWithSize(TypeOf(map[int]int{}), size)
+ // Only add half of the capacity, to avoid triggering re-allocations due to too many overloaded buckets.
+ for i := 0; i < size/2; i++ {
+ val.SetInt(int64(i))
+ mv.SetMapIndex(val, val)
+ }
+ })
+ if allocs > 10 {
+ t.Errorf("allocs per map assignment: want at most 10 got %f", allocs)
+ }
+ // Empirical testing shows that with a capacity hint a single run triggers
+ // 3 allocations, and without one, 91. The threshold is set to 10 so the test
+ // is not overly brittle if the initial allocation of the map changes, but
+ // still catches a regression where the hashmap keeps re-allocating as new
+ // entries are added.
}
func TestChanAlloc(t *testing.T) {
@@ -5984,6 +6122,8 @@ func TestTypeStrings(t *testing.T) {
{TypeOf(new(XM)).Method(0).Type, "func(*reflect_test.XM) string"},
{ChanOf(3, TypeOf(XM{})), "chan reflect_test.XM"},
{MapOf(TypeOf(int(0)), TypeOf(XM{})), "map[int]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(XM{})), "[3]reflect_test.XM"},
+ {ArrayOf(3, TypeOf(struct{}{})), "[3]struct {}"},
}
for i, test := range stringTests {
@@ -6014,9 +6154,11 @@ func TestOffsetLock(t *testing.T) {
func BenchmarkNew(b *testing.B) {
v := TypeOf(XM{})
- for i := 0; i < b.N; i++ {
- New(v)
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ New(v)
+ }
+ })
}
func TestSwapper(t *testing.T) {
@@ -6091,6 +6233,7 @@ func TestSwapper(t *testing.T) {
want: []pairPtr{{5, 6, &c}, {3, 4, &b}, {1, 2, &a}},
},
}
+
for i, tt := range tests {
inStr := fmt.Sprint(tt.in)
Swapper(tt.in)(tt.i, tt.j)
@@ -6116,3 +6259,36 @@ func TestUnaddressableField(t *testing.T) {
lv.Set(rv)
})
}
+
+type Tint int
+
+type Tint2 = Tint
+
+type Talias1 struct {
+ byte
+ uint8
+ int
+ int32
+ rune
+}
+
+type Talias2 struct {
+ Tint
+ Tint2
+}
+
+func TestAliasNames(t *testing.T) {
+ t1 := Talias1{byte: 1, uint8: 2, int: 3, int32: 4, rune: 5}
+ out := fmt.Sprintf("%#v", t1)
+ want := "reflect_test.Talias1{byte:0x1, uint8:0x2, int:3, int32:4, rune:5}"
+ if out != want {
+ t.Errorf("Talias1 print:\nhave: %s\nwant: %s", out, want)
+ }
+
+ t2 := Talias2{Tint: 1, Tint2: 2}
+ out = fmt.Sprintf("%#v", t2)
+ want = "reflect_test.Talias2{Tint:1, Tint2:2}"
+ if out != want {
+ t.Errorf("Talias2 print:\nhave: %s\nwant: %s", out, want)
+ }
+}
diff --git a/libgo/go/reflect/deepequal.go b/libgo/go/reflect/deepequal.go
index f3fd7043e5a..2fdd6a3d82b 100644
--- a/libgo/go/reflect/deepequal.go
+++ b/libgo/go/reflect/deepequal.go
@@ -178,6 +178,12 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool, depth int) bool {
// DeepEqual has been defined so that the same short-cut applies
// to slices and maps: if x and y are the same slice or the same map,
// they are deeply equal regardless of content.
+//
+// As DeepEqual traverses the data values it may find a cycle. The
+// second and subsequent times that DeepEqual compares two pointer
+// values that have been compared before, it treats the values as
+// equal rather than examining the values to which they point.
+// This ensures that DeepEqual terminates.
func DeepEqual(x, y interface{}) bool {
if x == nil || y == nil {
return x == y
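
A small demonstration of the documented cycle shortcut: two self-referential values compare equal, and the comparison terminates.

    package main

    import (
        "fmt"
        "reflect"
    )

    type node struct {
        v    int
        next *node
    }

    func main() {
        a := &node{v: 1}
        a.next = a // cycle
        b := &node{v: 1}
        b.next = b // cycle
        // The pointer pair (a.next, b.next) repeats, so it is treated as
        // equal instead of being traversed again; the call terminates.
        fmt.Println(reflect.DeepEqual(a, b)) // true
    }
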
diff --git a/libgo/go/reflect/set_test.go b/libgo/go/reflect/set_test.go
index bc35c78e1bb..7c39623a9db 100644
--- a/libgo/go/reflect/set_test.go
+++ b/libgo/go/reflect/set_test.go
@@ -7,6 +7,7 @@ package reflect_test
import (
"bytes"
"go/ast"
+ "go/token"
"io"
. "reflect"
"testing"
@@ -172,6 +173,23 @@ var implementsTests = []struct {
{new(bytes.Buffer), new(io.Reader), false},
{new(*bytes.Buffer), new(io.ReaderAt), false},
{new(*ast.Ident), new(ast.Expr), true},
+ {new(*notAnExpr), new(ast.Expr), false},
+ {new(*ast.Ident), new(notASTExpr), false},
+ {new(notASTExpr), new(ast.Expr), false},
+ {new(ast.Expr), new(notASTExpr), false},
+ {new(*notAnExpr), new(notASTExpr), true},
+}
+
+type notAnExpr struct{}
+
+func (notAnExpr) Pos() token.Pos { return token.NoPos }
+func (notAnExpr) End() token.Pos { return token.NoPos }
+func (notAnExpr) exprNode() {}
+
+type notASTExpr interface {
+ Pos() token.Pos
+ End() token.Pos
+ exprNode()
}
func TestImplements(t *testing.T) {
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 97b986a7bba..664d9717a06 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -18,6 +18,8 @@ package reflect
import (
"strconv"
"sync"
+ "unicode"
+ "unicode/utf8"
"unsafe"
)
@@ -258,6 +260,8 @@ const (
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
@@ -516,79 +520,52 @@ func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
func (t *rtype) common() *rtype { return t }
-func (t *uncommonType) Method(i int) (m Method) {
- if t == nil || i < 0 || i >= len(t.methods) {
- panic("reflect: Method index out of range")
- }
- found := false
- for mi := range t.methods {
- if t.methods[mi].pkgPath == nil {
- if i == 0 {
- i = mi
- found = true
- break
- }
- i--
- }
- }
- if !found {
- panic("reflect: Method index out of range")
- }
+var methodCache sync.Map // map[*rtype][]method
- p := &t.methods[i]
- if p.name != nil {
- m.Name = *p.name
- }
- fl := flag(Func)
- if p.pkgPath != nil {
- m.PkgPath = *p.pkgPath
- fl |= flagStickyRO
+func (t *rtype) exportedMethods() []method {
+ methodsi, found := methodCache.Load(t)
+ if found {
+ return methodsi.([]method)
}
- mt := p.typ
- m.Type = toType(mt)
- x := new(unsafe.Pointer)
- *x = unsafe.Pointer(&p.tfn)
- m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
- m.Index = i
- return
-}
-func (t *uncommonType) NumMethod() int {
- if t == nil {
- return 0
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
}
- c := 0
- for i := range t.methods {
- if t.methods[i].pkgPath == nil {
- c++
+ allm := ut.methods
+ allExported := true
+ for _, m := range allm {
+ if m.pkgPath != nil {
+ allExported = false
+ break
}
}
- return c
-}
-
-func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
- if t == nil {
- return
- }
- var p *method
- for i := range t.methods {
- p = &t.methods[i]
- if p.pkgPath == nil && p.name != nil && *p.name == name {
- return t.Method(i), true
+ var methods []method
+ if allExported {
+ methods = allm
+ } else {
+ methods = make([]method, 0, len(allm))
+ for _, m := range allm {
+ if m.pkgPath == nil {
+ methods = append(methods, m)
+ }
}
+ methods = methods[:len(methods):len(methods)]
}
- return
+
+ methodsi, _ = methodCache.LoadOrStore(t, methods)
+ return methodsi.([]method)
}
-// TODO(rsc): gc supplies these, but they are not
-// as efficient as they could be: they have commonType
-// as the receiver instead of *rtype.
func (t *rtype) NumMethod() int {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
}
- return t.uncommonType.NumMethod()
+ if t.uncommonType == nil {
+ return 0 // avoid methodCache synchronization
+ }
+ return len(t.exportedMethods())
}
func (t *rtype) Method(i int) (m Method) {
@@ -596,7 +573,22 @@ func (t *rtype) Method(i int) (m Method) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
}
- return t.uncommonType.Method(i)
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ if p.name != nil {
+ m.Name = *p.name
+ }
+ fl := flag(Func)
+ mt := p.typ
+ m.Type = toType(mt)
+ x := new(unsafe.Pointer)
+ *x = unsafe.Pointer(&p.tfn)
+ m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
+ m.Index = i
+ return m
}
func (t *rtype) MethodByName(name string) (m Method, ok bool) {
@@ -604,7 +596,17 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
}
- return t.uncommonType.MethodByName(name)
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+ for i := range ut.methods {
+ p := &ut.methods[i]
+ if p.pkgPath == nil && p.name != nil && *p.name == name {
+ return t.Method(i), true
+ }
+ }
+ return Method{}, false
}
func (t *rtype) PkgPath() string {
@@ -983,12 +985,11 @@ func (t *structType) FieldByNameFunc(match func(string) bool) (result StructFiel
visited[t] = true
for i := range t.fields {
f := &t.fields[i]
- // Find name and type for field f.
+ // Find name and (for anonymous field) type for field f.
fname := *f.name
var ntyp *rtype
if f.anon() {
// Anonymous field of type T or *T.
- // Name taken from type.
ntyp = f.typ
if ntyp.Kind() == Ptr {
ntyp = ntyp.Elem().common()
@@ -1072,10 +1073,7 @@ func TypeOf(i interface{}) Type {
}
// ptrMap is the cache for PtrTo.
-var ptrMap struct {
- sync.RWMutex
- m map[*rtype]*ptrType
-}
+var ptrMap sync.Map // map[*rtype]*ptrType
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
@@ -1089,24 +1087,8 @@ func (t *rtype) ptrTo() *rtype {
}
// Check the cache.
- ptrMap.RLock()
- if m := ptrMap.m; m != nil {
- if p := m[t]; p != nil {
- ptrMap.RUnlock()
- return &p.rtype
- }
- }
- ptrMap.RUnlock()
-
- ptrMap.Lock()
- if ptrMap.m == nil {
- ptrMap.m = make(map[*rtype]*ptrType)
- }
- p := ptrMap.m[t]
- if p != nil {
- // some other goroutine won the race and created it
- ptrMap.Unlock()
- return &p.rtype
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
}
s := "*" + *t.string
@@ -1115,9 +1097,9 @@ func (t *rtype) ptrTo() *rtype {
r, ok := canonicalType[s]
canonicalTypeLock.RUnlock()
if ok {
- ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
- ptrMap.Unlock()
- return r.(*rtype)
+ p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// Create a new ptrType starting with the description
@@ -1143,11 +1125,10 @@ func (t *rtype) ptrTo() *rtype {
pp.elem = t
q := canonicalize(&pp.rtype)
- p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
+ p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
- ptrMap.m[t] = p
- ptrMap.Unlock()
- return &p.rtype
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
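
The RWMutex-plus-map dance removed above collapses into sync.Map's LoadOrStore; losing the publication race is harmless because both racers store equivalent values. A generic sketch of the pattern (illustrative key and value types):

    package main

    import (
        "fmt"
        "sync"
    )

    var cache sync.Map // map[string]*ptrInfo, illustrative types

    type ptrInfo struct{ name string }

    func ptrInfoFor(key string) *ptrInfo {
        if pi, ok := cache.Load(key); ok {
            return pi.(*ptrInfo)
        }
        // Two goroutines may both build; LoadOrStore keeps exactly one.
        pi, _ := cache.LoadOrStore(key, &ptrInfo{name: "*" + key})
        return pi.(*ptrInfo)
    }

    func main() {
        p := ptrInfoFor("T")
        fmt.Println(p.name, p == ptrInfoFor("T")) // *T true
    }
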
@@ -1396,11 +1377,8 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
return false
}
-// The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
-var lookupCache struct {
- sync.RWMutex
- m map[cacheKey]*rtype
-}
+// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
@@ -1412,48 +1390,15 @@ type cacheKey struct {
extra uintptr
}
-// cacheGet looks for a type under the key k in the lookupCache.
-// If it finds one, it returns that type.
-// If not, it returns nil with the cache locked.
-// The caller is expected to use cachePut to unlock the cache.
-func cacheGet(k cacheKey) Type {
- lookupCache.RLock()
- t := lookupCache.m[k]
- lookupCache.RUnlock()
- if t != nil {
- return t
- }
-
- lookupCache.Lock()
- t = lookupCache.m[k]
- if t != nil {
- lookupCache.Unlock()
- return t
- }
-
- if lookupCache.m == nil {
- lookupCache.m = make(map[cacheKey]*rtype)
- }
-
- return nil
-}
-
-// cachePut stores the given type in the cache, unlocks the cache,
-// and returns the type. It is expected that the cache is locked
-// because cacheGet returned nil.
-func cachePut(k cacheKey, t *rtype) Type {
- t = toType(t).common()
- lookupCache.m[k] = t
- lookupCache.Unlock()
- return t
-}
-
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
- sync.RWMutex
- m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
@@ -1466,13 +1411,12 @@ func ChanOf(dir ChanDir, t Type) Type {
// Look in cache.
ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
- if ch := cacheGet(ckey); ch != nil {
- return ch
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
}
// This restriction is imposed by the gc compiler and the runtime.
if typ.size >= 1<<16 {
- lookupCache.Unlock()
panic("reflect.ChanOf: element size too large")
}
@@ -1481,7 +1425,6 @@ func ChanOf(dir ChanDir, t Type) Type {
var s string
switch dir {
default:
- lookupCache.Unlock()
panic("reflect.ChanOf: invalid dir")
case SendDir:
s = "chan<- " + *typ.string
@@ -1515,7 +1458,8 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
- return cachePut(ckey, &ch.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
@@ -1536,8 +1480,8 @@ func MapOf(key, elem Type) Type {
// Look in cache.
ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt := cacheGet(ckey); mt != nil {
- return mt
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
}
// Look in known types.
@@ -1576,7 +1520,8 @@ func MapOf(key, elem Type) Type {
mt.reflexivekey = isReflexive(ktyp)
mt.needkeyupdate = needKeyUpdate(ktyp)
- return cachePut(ckey, &mt.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
@@ -1625,25 +1570,32 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.dotdotdot = variadic
// Look in cache.
- funcLookupCache.RLock()
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- funcLookupCache.RUnlock()
- return t
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
}
}
- funcLookupCache.RUnlock()
// Not in cache, lock and retry.
funcLookupCache.Lock()
defer funcLookupCache.Unlock()
- if funcLookupCache.m == nil {
- funcLookupCache.m = make(map[uint32][]*rtype)
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
}
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
}
str := funcStr(ft)
@@ -1652,10 +1604,7 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
-
- funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
-
- return toType(&ft.rtype)
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
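
funcLookupCache (and structLookupCache below) use a subtler discipline than plain LoadOrStore, because the cached value is an append-only slice per hash bucket: loads are lock-free, while stores take the mutex and re-check. A self-contained sketch of that discipline (entry is a stand-in type):

    package cache

    import "sync"

    type entry struct{ key string }

    var funcCache = struct {
        sync.Mutex          // guards stores (but not loads) on m
        m          sync.Map // map[uint32][]*entry; slices are append-only
    }{}

    func lookupOrAdd(hash uint32, want *entry) *entry {
        if es, ok := funcCache.m.Load(hash); ok { // lock-free fast path
            for _, e := range es.([]*entry) {
                if e.key == want.key {
                    return e
                }
            }
        }
        funcCache.Lock()
        defer funcCache.Unlock()
        var es []*entry
        if ei, ok := funcCache.m.Load(hash); ok {
            es = ei.([]*entry)
            for _, e := range es { // re-check: a racer may have stored
                if e.key == want.key {
                    return e
                }
            }
        }
        funcCache.m.Store(hash, append(es, want))
        return want
    }
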
@@ -1771,9 +1720,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
// Prepare GC data if any.
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
- // Normally the enforced limit on pointer maps is 16 bytes,
- // but larger ones are acceptable, 33 bytes isn't too too big,
- // and it's easier to generate a pointer bitmap than a GC program.
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
var gcdata *byte
@@ -1812,7 +1758,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
- for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
if (kmask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*ktyp.size/ptrSize + i
@@ -1830,7 +1776,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
- for i := uintptr(0); i < etyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
if (emask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*etyp.size/ptrSize + i
@@ -1871,8 +1817,8 @@ func SliceOf(t Type) Type {
// Look in cache.
ckey := cacheKey{Slice, typ, nil, 0}
- if slice := cacheGet(ckey); slice != nil {
- return slice
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
}
// Look in known types.
@@ -1892,17 +1838,44 @@ func SliceOf(t Type) Type {
slice.uncommonType = nil
slice.ptrToThis = nil
- return cachePut(ckey, &slice.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
- sync.RWMutex
- m map[uint32][]interface {
- common() *rtype
- } // keyed by hash calculated in StructOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// isLetter returns true if a given 'rune' is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName reports whether a string is a valid struct field name.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
}
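
A runnable mirror of the identifier rule above, for illustration only (this reimplementation folds the ASCII fast path of isLetter into unicode.IsLetter):

    package main

    import (
        "fmt"
        "unicode"
    )

    func valid(name string) bool {
        for i, c := range name {
            letter := c == '_' || unicode.IsLetter(c)
            if i == 0 && !letter {
                return false // identifiers must start with a letter or '_'
            }
            if !letter && !unicode.IsDigit(c) {
                return false
            }
        }
        return len(name) > 0
    }

    func main() {
        fmt.Println(valid("φ"), valid("Val1dNam5"), valid("1nvalid"), valid(""))
        // true true false false
    }
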
// StructOf returns the struct type containing fields.
@@ -1930,6 +1903,12 @@ func StructOf(fields []StructField) Type {
lastzero := uintptr(0)
repr = append(repr, "struct {"...)
for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
if field.Type == nil {
panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
}
@@ -1960,30 +1939,29 @@ func StructOf(fields []StructField) Type {
} else {
name = ft.String()
}
- // TODO(sbinet) check for syntactically impossible type names?
switch f.typ.Kind() {
case Interface:
ift := (*interfaceType)(unsafe.Pointer(ft))
if len(ift.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
case Ptr:
ptr := (*ptrType)(unsafe.Pointer(ft))
if unt := ptr.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
if unt := ptr.elem.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
default:
if unt := ft.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
}
@@ -2044,30 +2022,35 @@ func StructOf(fields []StructField) Type {
*typ = *prototype
typ.fields = fs
- // Look in cache
- structLookupCache.RLock()
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- structLookupCache.RUnlock()
- return t
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
}
}
- structLookupCache.RUnlock()
- // not in cache, lock and retry
+ // Not in cache, lock and retry.
structLookupCache.Lock()
defer structLookupCache.Unlock()
- if structLookupCache.m == nil {
- structLookupCache.m = make(map[uint32][]interface {
- common() *rtype
- })
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
}
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
}
typ.string = &str
@@ -2172,24 +2155,19 @@ func StructOf(fields []StructField) Type {
typ.uncommonType = nil
typ.ptrToThis = nil
- structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
- return &typ.rtype
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
- var name *string
- if field.Name == "" {
- t := field.Type.(*rtype)
- if t.Kind() == Ptr {
- t = t.Elem().(*rtype)
- }
- } else if field.PkgPath == "" {
- s := field.Name
- name = &s
- b0 := s[0]
- if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
- panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
- }
+ if field.PkgPath != "" {
+ panic("reflect.StructOf: StructOf does not allow unexported fields")
+ }
+
+ // Best-effort check for misuse.
+ // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
}
offsetAnon := uintptr(0)
@@ -2197,24 +2175,18 @@ func runtimeStructField(field StructField) structField {
offsetAnon |= 1
}
- var pkgPath *string
- if field.PkgPath != "" {
- s := field.PkgPath
- pkgPath = &s
- // This could work with gccgo but we panic to be
- // compatible with gc.
- panic("reflect: creating a name with a package path is not supported")
- }
+ s := field.Name
+ name := &s
var tag *string
if field.Tag != "" {
- s := string(field.Tag)
- tag = &s
+ st := string(field.Tag)
+ tag = &st
}
return structField{
name: name,
- pkgPath: pkgPath,
+ pkgPath: nil,
typ: field.Type.common(),
tag: tag,
offsetAnon: offsetAnon,
@@ -2257,15 +2229,11 @@ const maxPtrmaskBytes = 2048
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
- // call SliceOf here as it calls cacheGet/cachePut.
- // ArrayOf also calls cacheGet/cachePut and thus may modify the state of
- // the lookupCache mutex.
- slice := SliceOf(elem)
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(count)}
- if array := cacheGet(ckey); array != nil {
- return array
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
}
// Look in known types.
@@ -2287,9 +2255,11 @@ func ArrayOf(count int, elem Type) Type {
array.elem = typ
array.ptrToThis = nil
- max := ^uintptr(0) / typ.size
- if uintptr(count) > max {
- panic("reflect.ArrayOf: array size would exceed virtual address space")
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
}
array.size = typ.size * uintptr(count)
if count > 0 && typ.ptrdata != 0 {
@@ -2299,7 +2269,7 @@ func ArrayOf(count int, elem Type) Type {
array.fieldAlign = typ.fieldAlign
array.uncommonType = nil
array.len = uintptr(count)
- array.slice = slice.(*rtype)
+ array.slice = SliceOf(elem).(*rtype)
array.kind &^= kindNoPointers
switch {
@@ -2413,7 +2383,8 @@ func ArrayOf(count int, elem Type) Type {
}
}
- return cachePut(ckey, &array.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
@@ -2466,7 +2437,7 @@ func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
}
-// Layout matches runtime.BitVector (well enough).
+// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
n uint32 // number of bits
data []byte
diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go
index 8f6a93b3848..792699a6f65 100644
--- a/libgo/go/reflect/value.go
+++ b/libgo/go/reflect/value.go
@@ -30,9 +30,9 @@ const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ide
// the underlying Go value can be used concurrently for the equivalent
// direct operations.
//
-// Using == on two Values does not compare the underlying values
-// they represent, but rather the contents of the Value structs.
// To compare two Values, compare the results of the Interface method.
+// Using == on two Values does not compare the underlying values
+// they represent.
type Value struct {
// typ holds the type of the value represented by a Value.
typ *rtype
@@ -1000,7 +1000,7 @@ func (v Value) Method(i int) Value {
return Value{v.typ, v.ptr, fl}
}
-// NumMethod returns the number of methods in the value's method set.
+// NumMethod returns the number of exported methods in the value's method set.
func (v Value) NumMethod() int {
if v.typ == nil {
panic(&ValueError{"reflect.Value.NumMethod", Invalid})
@@ -1933,12 +1933,18 @@ func MakeChan(typ Type, buffer int) Value {
return Value{typ.common(), unsafe.Pointer(&ch), flag(Chan) | flagIndir}
}
-// MakeMap creates a new map of the specified type.
+// MakeMap creates a new map with the specified type.
func MakeMap(typ Type) Value {
+ return MakeMapWithSize(typ, 0)
+}
+
+// MakeMapWithSize creates a new map with the specified type
+// and initial space for approximately n elements.
+func MakeMapWithSize(typ Type, n int) Value {
if typ.Kind() != Map {
- panic("reflect.MakeMap of non-map type")
+ panic("reflect.MakeMapWithSize of non-map type")
}
- m := makemap(typ.(*rtype))
+ m := makemap(typ.(*rtype), n)
return Value{typ.common(), unsafe.Pointer(&m), flag(Map) | flagIndir}
}
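
The new API in use, as the reflect analogue of make(map[K]V, n):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        mt := reflect.TypeOf(map[string]int{})
        mv := reflect.MakeMapWithSize(mt, 100) // space for ~100 entries up front
        mv.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
        m := mv.Interface().(map[string]int)
        fmt.Println(m["a"], len(m)) // 1 1
    }
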
@@ -2015,7 +2021,6 @@ func (v Value) assignTo(context string, dst *rtype, target unsafe.Pointer) Value
case directlyAssignable(dst, v.typ):
// Overwrite type so that they match.
// Same memory layout, so no harm done.
- v.typ = dst
fl := v.flag & (flagRO | flagAddr | flagIndir)
fl |= flag(dst.Kind())
return Value{dst, v.ptr, fl}
@@ -2333,7 +2338,7 @@ func chanrecv(ch unsafe.Pointer, nb bool, val unsafe.Pointer) (selected, receive
func chansend(ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool
func makechan(typ *rtype, size uint64) (ch unsafe.Pointer)
-func makemap(t *rtype) (m unsafe.Pointer)
+func makemap(t *rtype, cap int) (m unsafe.Pointer)
//go:noescape
func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
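
For reference, a short usage sketch of the MakeMapWithSize API introduced above; it is the reflect-level analogue of make(map[K]V, n), passing the size hint straight through to makemap:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	typ := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	// Pre-size for ~100 elements, like make(map[string]int, 100).
	m := reflect.MakeMapWithSize(typ, 100)
	m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
	fmt.Println(m.Interface()) // map[answer:42]
}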
diff --git a/libgo/go/regexp/all_test.go b/libgo/go/regexp/all_test.go
index beb46e70995..28fe20c15d8 100644
--- a/libgo/go/regexp/all_test.go
+++ b/libgo/go/regexp/all_test.go
@@ -9,6 +9,7 @@ import (
"regexp/syntax"
"strings"
"testing"
+ "unicode/utf8"
)
var goodRe = []string{
@@ -354,6 +355,7 @@ type MetaTest struct {
var metaTests = []MetaTest{
{``, ``, ``, true},
{`foo`, `foo`, `foo`, true},
+ {`日本語+`, `日本語\+`, `日本語`, false},
{`foo\.\$`, `foo\\\.\\\$`, `foo.$`, true}, // has meta but no operator
{`foo.\$`, `foo\.\\\$`, `foo`, false}, // has escaped operators and real operators
{`!@#$%^&*()_+-=[{]}\|,<.>/?~`, `!@#\$%\^&\*\(\)_\+-=\[\{\]\}\\\|,<\.>/\?~`, `!@#`, false},
@@ -822,7 +824,13 @@ func BenchmarkMatchParallelCopied(b *testing.B) {
var sink string
func BenchmarkQuoteMetaAll(b *testing.B) {
- s := string(specialBytes)
+ specials := make([]byte, 0)
+ for i := byte(0); i < utf8.RuneSelf; i++ {
+ if special(i) {
+ specials = append(specials, i)
+ }
+ }
+ s := string(specials)
b.SetBytes(int64(len(s)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
diff --git a/libgo/go/regexp/exec.go b/libgo/go/regexp/exec.go
index 977619cb28a..f8fe7b5deff 100644
--- a/libgo/go/regexp/exec.go
+++ b/libgo/go/regexp/exec.go
@@ -309,12 +309,14 @@ func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond syntax.Empty
// onepass runs the machine over the input starting at pos.
// It reports whether a match was found.
// If so, m.matchcap holds the submatch information.
-func (m *machine) onepass(i input, pos int) bool {
+// ncap is the number of captures.
+func (m *machine) onepass(i input, pos, ncap int) bool {
startCond := m.re.cond
if startCond == ^syntax.EmptyOp(0) { // impossible
return false
}
m.matched = false
+ m.matchcap = m.matchcap[:ncap]
for i := range m.matchcap {
m.matchcap[i] = -1
}
@@ -428,7 +430,7 @@ func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap i
size = len(s)
}
if m.op != notOnePass {
- if !m.onepass(i, pos) {
+ if !m.onepass(i, pos, ncap) {
re.put(m)
return nil
}
diff --git a/libgo/go/regexp/exec_test.go b/libgo/go/regexp/exec_test.go
index 766394de6ee..5f8e747b17b 100644
--- a/libgo/go/regexp/exec_test.go
+++ b/libgo/go/regexp/exec_test.go
@@ -681,6 +681,35 @@ func BenchmarkMatch(b *testing.B) {
}
}
+func BenchmarkMatch_onepass_regex(b *testing.B) {
+ isRaceBuilder := strings.HasSuffix(testenv.Builder(), "-race")
+ r := MustCompile(`(?s)\A.*\z`)
+ if r.get().op == notOnePass {
+ b.Fatalf("want onepass regex, but %q is not onepass", r)
+ }
+ for _, size := range benchSizes {
+ if isRaceBuilder && size.n > 1<<10 {
+ continue
+ }
+ t := makeText(size.n)
+ bs := make([][]byte, len(t))
+ for i, s := range t {
+ bs[i] = []byte{s}
+ }
+ b.Run(size.name, func(b *testing.B) {
+ b.SetBytes(int64(size.n))
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ for _, byts := range bs {
+ if !r.Match(byts) {
+ b.Fatal("not match!")
+ }
+ }
+ }
+ })
+ }
+}
+
var benchData = []struct{ name, re string }{
{"Easy0", "ABCDEFGHIJKLMNOPQRSTUVWXYZ$"},
{"Easy0i", "(?i)ABCDEFGHIJklmnopqrstuvwxyz$"},
diff --git a/libgo/go/regexp/onepass.go b/libgo/go/regexp/onepass.go
index 1b0564c3fd0..3ceb4619058 100644
--- a/libgo/go/regexp/onepass.go
+++ b/libgo/go/regexp/onepass.go
@@ -222,9 +222,10 @@ func onePassCopy(prog *syntax.Prog) *onePassProg {
p := &onePassProg{
Start: prog.Start,
NumCap: prog.NumCap,
+ Inst: make([]onePassInst, len(prog.Inst)),
}
- for _, inst := range prog.Inst {
- p.Inst = append(p.Inst, onePassInst{Inst: inst})
+ for i, inst := range prog.Inst {
+ p.Inst[i] = onePassInst{Inst: inst}
}
// rewrites one or more common Prog constructs that enable some otherwise
@@ -304,13 +305,13 @@ func makeOnePass(p *onePassProg) *onePassProg {
var (
instQueue = newQueue(len(p.Inst))
visitQueue = newQueue(len(p.Inst))
- check func(uint32, map[uint32]bool) bool
+ check func(uint32, []bool) bool
onePassRunes = make([][]rune, len(p.Inst))
)
// check that paths from Alt instructions are unambiguous, and rebuild the new
// program as a onepass program
- check = func(pc uint32, m map[uint32]bool) (ok bool) {
+ check = func(pc uint32, m []bool) (ok bool) {
ok = true
inst := &p.Inst[pc]
if visitQueue.contains(pc) {
@@ -349,21 +350,20 @@ func makeOnePass(p *onePassProg) *onePassProg {
m[pc] = m[inst.Out]
// pass matching runes back through these no-ops.
onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
- inst.Next = []uint32{}
- for i := len(onePassRunes[pc]) / 2; i >= 0; i-- {
- inst.Next = append(inst.Next, inst.Out)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+ for i := range inst.Next {
+ inst.Next[i] = inst.Out
}
case syntax.InstEmptyWidth:
ok = check(inst.Out, m)
m[pc] = m[inst.Out]
onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
- inst.Next = []uint32{}
- for i := len(onePassRunes[pc]) / 2; i >= 0; i-- {
- inst.Next = append(inst.Next, inst.Out)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+ for i := range inst.Next {
+ inst.Next[i] = inst.Out
}
case syntax.InstMatch, syntax.InstFail:
m[pc] = inst.Op == syntax.InstMatch
- break
case syntax.InstRune:
m[pc] = false
if len(inst.Next) > 0 {
@@ -387,9 +387,9 @@ func makeOnePass(p *onePassProg) *onePassProg {
runes = append(runes, inst.Rune...)
}
onePassRunes[pc] = runes
- inst.Next = []uint32{}
- for i := len(onePassRunes[pc]) / 2; i >= 0; i-- {
- inst.Next = append(inst.Next, inst.Out)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+ for i := range inst.Next {
+ inst.Next[i] = inst.Out
}
inst.Op = syntax.InstRune
case syntax.InstRune1:
@@ -411,9 +411,9 @@ func makeOnePass(p *onePassProg) *onePassProg {
runes = append(runes, inst.Rune[0], inst.Rune[0])
}
onePassRunes[pc] = runes
- inst.Next = []uint32{}
- for i := len(onePassRunes[pc]) / 2; i >= 0; i-- {
- inst.Next = append(inst.Next, inst.Out)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+ for i := range inst.Next {
+ inst.Next[i] = inst.Out
}
inst.Op = syntax.InstRune
case syntax.InstRuneAny:
@@ -431,9 +431,9 @@ func makeOnePass(p *onePassProg) *onePassProg {
}
instQueue.insert(inst.Out)
onePassRunes[pc] = append([]rune{}, anyRuneNotNL...)
- inst.Next = []uint32{}
- for i := len(onePassRunes[pc]) / 2; i >= 0; i-- {
- inst.Next = append(inst.Next, inst.Out)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1)
+ for i := range inst.Next {
+ inst.Next[i] = inst.Out
}
}
return
@@ -441,7 +441,7 @@ func makeOnePass(p *onePassProg) *onePassProg {
instQueue.clear()
instQueue.insert(uint32(p.Start))
- m := make(map[uint32]bool, len(p.Inst))
+ m := make([]bool, len(p.Inst))
for !instQueue.empty() {
visitQueue.clear()
pc := instQueue.next()
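
Two patterns recur in this onepass rewrite: pre-sizing inst.Next with make and filling it by index instead of growing an empty slice with repeated appends, and replacing map[uint32]bool with a dense []bool indexed by pc. A minimal sketch of both, using hypothetical values:

package main

import "fmt"

func main() {
	// Hypothetical values: one rune pair and a fan-out target pc.
	out := uint32(7)
	runes := []rune{'a', 'z'}

	// make + index-fill: one allocation of the final size,
	// no repeated slice growth.
	next := make([]uint32, len(runes)/2+1)
	for i := range next {
		next[i] = out
	}
	fmt.Println(next) // [7 7]

	// Dense []bool keyed by pc replaces map[uint32]bool: the key
	// space is small, contiguous, and known up front.
	const numInst = 16
	visited := make([]bool, numInst)
	visited[out] = true
	fmt.Println(visited[out]) // true
}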
diff --git a/libgo/go/regexp/onepass_test.go b/libgo/go/regexp/onepass_test.go
index f4e336c43ba..b1caa445150 100644
--- a/libgo/go/regexp/onepass_test.go
+++ b/libgo/go/regexp/onepass_test.go
@@ -7,6 +7,7 @@ package regexp
import (
"reflect"
"regexp/syntax"
+ "strings"
"testing"
)
@@ -173,6 +174,7 @@ var onePassTests = []struct {
{`^.bc(d|e)*$`, onePass},
{`^(?:(?:aa)|.)$`, notOnePass},
{`^(?:(?:a{1,2}){1,2})$`, notOnePass},
+ {`^l` + strings.Repeat("o", 2<<8) + `ng$`, onePass},
}
func TestCompileOnePass(t *testing.T) {
@@ -223,3 +225,23 @@ func TestRunOnePass(t *testing.T) {
}
}
}
+
+func BenchmarkCompileOnepass(b *testing.B) {
+ for _, test := range onePassTests {
+ if test.onePass == notOnePass {
+ continue
+ }
+ name := test.re
+ if len(name) > 20 {
+ name = name[:20] + "..."
+ }
+ b.Run(name, func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ if _, err := Compile(test.re); err != nil {
+ b.Fatal(err)
+ }
+ }
+ })
+ }
+}
diff --git a/libgo/go/regexp/regexp.go b/libgo/go/regexp/regexp.go
index 01093d4bd0d..b1af23e8504 100644
--- a/libgo/go/regexp/regexp.go
+++ b/libgo/go/regexp/regexp.go
@@ -76,7 +76,8 @@ import (
)
// Regexp is the representation of a compiled regular expression.
-// A Regexp is safe for concurrent use by multiple goroutines.
+// A Regexp is safe for concurrent use by multiple goroutines,
+// except for configuration methods, such as Longest.
type Regexp struct {
// read-only after Compile
regexpRO
@@ -159,6 +160,8 @@ func CompilePOSIX(expr string) (*Regexp, error) {
// That is, when matching against text, the regexp returns a match that
// begins as early as possible in the input (leftmost), and among those
// it chooses a match that is as long as possible.
+// This method modifies the Regexp and may not be called concurrently
+// with any other methods.
func (re *Regexp) Longest() {
re.longest = true
}
@@ -313,11 +316,19 @@ func (i *inputString) index(re *Regexp, pos int) int {
func (i *inputString) context(pos int) syntax.EmptyOp {
r1, r2 := endOfText, endOfText
- if pos > 0 && pos <= len(i.str) {
- r1, _ = utf8.DecodeLastRuneInString(i.str[:pos])
+ // 0 < pos && pos <= len(i.str)
+ if uint(pos-1) < uint(len(i.str)) {
+ r1 = rune(i.str[pos-1])
+ if r1 >= utf8.RuneSelf {
+ r1, _ = utf8.DecodeLastRuneInString(i.str[:pos])
+ }
}
- if pos < len(i.str) {
- r2, _ = utf8.DecodeRuneInString(i.str[pos:])
+ // 0 <= pos && pos < len(i.str)
+ if uint(pos) < uint(len(i.str)) {
+ r2 = rune(i.str[pos])
+ if r2 >= utf8.RuneSelf {
+ r2, _ = utf8.DecodeRuneInString(i.str[pos:])
+ }
}
return syntax.EmptyOpContext(r1, r2)
}
@@ -352,11 +363,19 @@ func (i *inputBytes) index(re *Regexp, pos int) int {
func (i *inputBytes) context(pos int) syntax.EmptyOp {
r1, r2 := endOfText, endOfText
- if pos > 0 && pos <= len(i.str) {
- r1, _ = utf8.DecodeLastRune(i.str[:pos])
+ // 0 < pos && pos <= len(i.str)
+ if uint(pos-1) < uint(len(i.str)) {
+ r1 = rune(i.str[pos-1])
+ if r1 >= utf8.RuneSelf {
+ r1, _ = utf8.DecodeLastRune(i.str[:pos])
+ }
}
- if pos < len(i.str) {
- r2, _ = utf8.DecodeRune(i.str[pos:])
+ // 0 <= pos && pos < len(i.str)
+ if uint(pos) < uint(len(i.str)) {
+ r2 = rune(i.str[pos])
+ if r2 >= utf8.RuneSelf {
+ r2, _ = utf8.DecodeRune(i.str[pos:])
+ }
}
return syntax.EmptyOpContext(r1, r2)
}
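
The comments added above spell out the trick: for pos in [1, len(s)], the single unsigned comparison uint(pos-1) < uint(len(s)) covers both bounds, because pos == 0 wraps around to the maximum uint and fails the test. A small sketch with a hypothetical lastRune helper, including the ASCII fast path that skips the UTF-8 decoder:

package main

import (
	"fmt"
	"unicode/utf8"
)

// lastRune is a hypothetical helper showing the patch's idiom.
func lastRune(s string, pos int) rune {
	// 0 < pos && pos <= len(s), checked in one unsigned comparison.
	if uint(pos-1) < uint(len(s)) {
		r := rune(s[pos-1])
		if r < utf8.RuneSelf {
			return r // ASCII fast path: no decoder call
		}
		r, _ = utf8.DecodeLastRuneInString(s[:pos])
		return r
	}
	return -1 // stands in for the endOfText sentinel
}

func main() {
	fmt.Println(lastRune("héllo", 3)) // 233, the rune 'é'
}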
@@ -590,10 +609,18 @@ func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {
})
}
-var specialBytes = []byte(`\.+*?()|[]{}^$`)
+// Bitmap used by func special to check whether a character needs to be escaped.
+var specialBytes [16]byte
+// special reports whether byte b needs to be escaped by QuoteMeta.
func special(b byte) bool {
- return bytes.IndexByte(specialBytes, b) >= 0
+ return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0
+}
+
+func init() {
+ for _, b := range []byte(`\.+*?()|[]{}^$`) {
+ specialBytes[b%16] |= 1 << (b / 16)
+ }
}
// QuoteMeta returns a string that quotes all regular expression metacharacters
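
The new special implementation packs the 128 ASCII codes into a 16-byte bitmap: byte b maps to bit b/16 of specialBytes[b%16], so membership is one index plus one mask instead of a bytes.IndexByte scan. A self-contained sketch mirroring the patch:

package main

import "fmt"

var specialBytes [16]byte

func init() {
	for _, b := range []byte(`\.+*?()|[]{}^$`) {
		specialBytes[b%16] |= 1 << (b / 16)
	}
}

// special reports whether b is a regexp metacharacter:
// one index, one mask, no linear scan.
func special(b byte) bool {
	return b < 0x80 && specialBytes[b%16]&(1<<(b/16)) != 0
}

func main() {
	fmt.Println(special('+'), special('a')) // true false
}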
diff --git a/libgo/go/regexp/syntax/parse.go b/libgo/go/regexp/syntax/parse.go
index 7b8be55ddb1..8c6d43a7063 100644
--- a/libgo/go/regexp/syntax/parse.go
+++ b/libgo/go/regexp/syntax/parse.go
@@ -381,7 +381,7 @@ func (p *parser) collapse(subs []*Regexp, op Op) *Regexp {
}
}
if op == OpAlternate {
- re.Sub = p.factor(re.Sub, re.Flags)
+ re.Sub = p.factor(re.Sub)
if len(re.Sub) == 1 {
old := re
re = re.Sub[0]
@@ -402,7 +402,7 @@ func (p *parser) collapse(subs []*Regexp, op Op) *Regexp {
// which simplifies by character class introduction to
// A(B[CD]|EF)|BC[XY]
//
-func (p *parser) factor(sub []*Regexp, flags Flags) []*Regexp {
+func (p *parser) factor(sub []*Regexp) []*Regexp {
if len(sub) < 2 {
return sub
}
diff --git a/libgo/go/runtime/cgo_gccgo.go b/libgo/go/runtime/cgo_gccgo.go
index 8236eeabf46..c3bf9552ea8 100644
--- a/libgo/go/runtime/cgo_gccgo.go
+++ b/libgo/go/runtime/cgo_gccgo.go
@@ -6,7 +6,7 @@ package runtime
import (
"runtime/internal/atomic"
- _ "unsafe"
+ "unsafe"
)
// For historical reasons these functions are called as though they
@@ -41,6 +41,7 @@ func Cgocall() {
mp := getg().m
mp.ncgocall++
mp.ncgo++
+ mp.incgo = true
entersyscall(0)
}
@@ -50,6 +51,7 @@ func CgocallDone() {
if gp == nil {
throw("no g in CgocallDone")
}
+ gp.m.incgo = false
gp.m.ncgo--
// If we are invoked because the C function called _cgo_panic,
@@ -68,15 +70,18 @@ func CgocallDone() {
// gofunction()
//go:nosplit
func CgocallBack() {
- if getg() == nil || getg().m == nil {
+ gp := getg()
+ if gp == nil || gp.m == nil {
needm(0)
- mp := getg().m
+ gp = getg()
+ mp := gp.m
mp.dropextram = true
}
exitsyscall(0)
+ gp.m.incgo = false
- if getg().m.ncgo == 0 {
+ if gp.m.ncgo == 0 {
// The C call to Go came from a thread created by C.
// The C call to Go came from a thread not currently running
// any Go. In the case of -buildmode=c-archive or c-shared,
@@ -85,7 +90,7 @@ func CgocallBack() {
<-main_init_done
}
- mp := getg().m
+ mp := gp.m
if mp.needextram || atomic.Load(&extraMWaiters) > 0 {
mp.needextram = false
newextram()
@@ -120,6 +125,7 @@ func CgocallBackDone() {
drop = true
}
+ gp.m.incgo = true
entersyscall(0)
if drop {
@@ -133,3 +139,8 @@ func _cgo_panic(p *byte) {
exitsyscall(0)
panic(gostringnocopy(p))
}
+
+// cgo_yield exists in the gc toolchain to let TSAN deliver a signal.
+// gccgo does not need this.
+var cgo_yield = &_cgo_yield
+var _cgo_yield unsafe.Pointer
diff --git a/libgo/go/runtime/cgocheck.go b/libgo/go/runtime/cgocheck.go
index 09d444dbd1a..30f054b3633 100644
--- a/libgo/go/runtime/cgocheck.go
+++ b/libgo/go/runtime/cgocheck.go
@@ -125,7 +125,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
aoff := uintptr(src) - mheap_.arena_start
idx := aoff >> _PageShift
s := mheap_.spans[idx]
- if s.state == _MSpanStack {
+ if s.state == _MSpanManual {
// There are no heap bits for value stored on the stack.
// For a channel receive src might be on the stack of some
// other goroutine, so we can't unwind the stack even if
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
index d2470bd0442..7bb919c41db 100644
--- a/libgo/go/runtime/chan.go
+++ b/libgo/go/runtime/chan.go
@@ -185,7 +185,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
if sg := c.recvq.dequeue(); sg != nil {
// Found a waiting receiver. We pass the value we want to send
// directly to the receiver, bypassing the channel buffer (if any).
- send(c, sg, ep, func() { unlock(&c.lock) })
+ send(c, sg, ep, func() { unlock(&c.lock) }, 3)
return true
}
@@ -256,7 +256,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
-func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
+func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if raceenabled {
if c.dataqsiz == 0 {
racesync(c, sg)
@@ -286,7 +286,7 @@ func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
- goready(gp, 4)
+ goready(gp, skip+1)
}
// Sends and receives on unbuffered or empty-buffered channels are the
@@ -466,7 +466,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
// directly from sender. Otherwise, receive from head of queue
// and add sender's value to the tail of the queue (both map to
// the same buffer slot because the queue is full).
- recv(c, sg, ep, func() { unlock(&c.lock) })
+ recv(c, sg, ep, func() { unlock(&c.lock) }, 3)
return true, true
}
@@ -542,7 +542,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
-func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
+func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
if c.dataqsiz == 0 {
if raceenabled {
racesync(c, sg)
@@ -582,7 +582,7 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
- goready(gp, 4)
+ goready(gp, skip+1)
}
// compiler implements
diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go
index e1206f99f10..b031b1a5e75 100644
--- a/libgo/go/runtime/cpuprof.go
+++ b/libgo/go/runtime/cpuprof.go
@@ -3,118 +3,45 @@
// license that can be found in the LICENSE file.
// CPU profiling.
-// Based on algorithms and data structures used in
-// https://github.com/google/pprof.
-//
-// The main difference between this code and the google-perftools
-// code is that this code is written to allow copying the profile data
-// to an arbitrary io.Writer, while the google-perftools code always
-// writes to an operating system file.
//
// The signal handler for the profiling clock tick adds a new stack trace
-// to a hash table tracking counts for recent traces. Most clock ticks
-// hit in the cache. In the event of a cache miss, an entry must be
-// evicted from the hash table, copied to a log that will eventually be
-// written as profile data. The google-perftools code flushed the
-// log itself during the signal handler. This code cannot do that, because
-// the io.Writer might block or need system calls or locks that are not
-// safe to use from within the signal handler. Instead, we split the log
-// into two halves and let the signal handler fill one half while a goroutine
-// is writing out the other half. When the signal handler fills its half, it
-// offers to swap with the goroutine. If the writer is not done with its half,
-// we lose the stack trace for this clock tick (and record that loss).
-// The goroutine interacts with the signal handler by calling getprofile() to
-// get the next log piece to write, implicitly handing back the last log
-// piece it obtained.
-//
-// The state of this dance between the signal handler and the goroutine
-// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
-// is not using either log half and is waiting (or will soon be waiting) for
-// a new piece by calling notesleep(&p.wait). If the signal handler
-// changes handoff from 0 to non-zero, it must call notewakeup(&p.wait)
-// to wake the goroutine. The value indicates the number of entries in the
-// log half being handed off. The goroutine leaves the non-zero value in
-// place until it has finished processing the log half and then flips the number
-// back to zero. Setting the high bit in handoff means that the profiling is over,
-// and the goroutine is now in charge of flushing the data left in the hash table
-// to the log and returning that data.
-//
-// The handoff field is manipulated using atomic operations.
-// For the most part, the manipulation of handoff is orderly: if handoff == 0
-// then the signal handler owns it and can change it to non-zero.
-// If handoff != 0 then the goroutine owns it and can change it to zero.
-// If that were the end of the story then we would not need to manipulate
-// handoff using atomic operations. The operations are needed, however,
-// in order to let the log closer set the high bit to indicate "EOF" safely
-// in the situation when normally the goroutine "owns" handoff.
+// to a log of recent traces. The log is read by a user goroutine that
+// turns it into formatted profile data. If the reader does not keep up
+// with the log, those writes will be recorded as a count of lost records.
+// The actual profile buffer is in profbuf.go.
package runtime
import (
"runtime/internal/atomic"
+ "runtime/internal/sys"
"unsafe"
)
-const (
- numBuckets = 1 << 10
- logSize = 1 << 17
- assoc = 4
- maxCPUProfStack = 64
-)
+const maxCPUProfStack = 64
-type cpuprofEntry struct {
- count uintptr
- depth int
- stack [maxCPUProfStack]uintptr
-}
-
-//go:notinheap
type cpuProfile struct {
- on bool // profiling is on
- wait note // goroutine waits here
- count uintptr // tick count
- evicts uintptr // eviction count
- lost uintptr // lost ticks that need to be logged
-
- // Active recent stack traces.
- hash [numBuckets]struct {
- entry [assoc]cpuprofEntry
- }
-
- // Log of traces evicted from hash.
- // Signal handler has filled log[toggle][:nlog].
- // Goroutine is writing log[1-toggle][:handoff].
- log [2][logSize / 2]uintptr
- nlog int
- toggle int32
- handoff uint32
-
- // Writer state.
- // Writer maintains its own toggle to avoid races
- // looking at signal handler's toggle.
- wtoggle uint32
- wholding bool // holding & need to release a log half
- flushing bool // flushing hash table - profile is over
- eodSent bool // special end-of-data record sent; => flushing
+ lock mutex
+ on bool // profiling is on
+ log *profBuf // profile events written here
+
+ // extra holds extra stacks accumulated in addNonGo
+ // corresponding to profiling signals arriving on
+ // non-Go-created threads. Those stacks are written
+ // to log the next time a normal Go thread gets the
+ // signal handler.
+ // Assuming the stacks are 2 words each (we don't get
+ // a full traceback from those threads), plus one word
+ // size for framing, 100 Hz profiling would generate
+ // 300 words per second.
+ // Hopefully a normal Go thread will get the profiling
+ // signal at least once every few seconds.
+ extra [1000]uintptr
+ numExtra int
+ lostExtra uint64 // count of frames lost because extra is full
}
-var (
- cpuprofLock mutex
- cpuprof *cpuProfile
-
- eod = [3]uintptr{0, 1, 0}
-)
-
-func setcpuprofilerate(hz int32) {
- systemstack(func() {
- setcpuprofilerate_m(hz)
- })
-}
-
-// lostProfileData is a no-op function used in profiles
-// to mark the number of profiling stack traces that were
-// discarded due to slow data writers.
-func lostProfileData() {}
+var cpuprof cpuProfile
// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
@@ -132,323 +59,153 @@ func SetCPUProfileRate(hz int) {
hz = 1000000
}
- lock(&cpuprofLock)
+ lock(&cpuprof.lock)
if hz > 0 {
- if cpuprof == nil {
- cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys))
- if cpuprof == nil {
- print("runtime: cpu profiling cannot allocate memory\n")
- unlock(&cpuprofLock)
- return
- }
- }
- if cpuprof.on || cpuprof.handoff != 0 {
+ if cpuprof.on || cpuprof.log != nil {
print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
- unlock(&cpuprofLock)
+ unlock(&cpuprof.lock)
return
}
cpuprof.on = true
- // pprof binary header format.
- // https://github.com/gperftools/gperftools/blob/master/src/profiledata.cc#L119
- p := &cpuprof.log[0]
- p[0] = 0 // count for header
- p[1] = 3 // depth for header
- p[2] = 0 // version number
- p[3] = uintptr(1e6 / hz) // period (microseconds)
- p[4] = 0
- cpuprof.nlog = 5
- cpuprof.toggle = 0
- cpuprof.wholding = false
- cpuprof.wtoggle = 0
- cpuprof.flushing = false
- cpuprof.eodSent = false
- noteclear(&cpuprof.wait)
-
+ cpuprof.log = newProfBuf(1, 1<<17, 1<<14)
+ hdr := [1]uint64{uint64(hz)}
+ cpuprof.log.write(nil, nanotime(), hdr[:], nil)
setcpuprofilerate(int32(hz))
- } else if cpuprof != nil && cpuprof.on {
+ } else if cpuprof.on {
setcpuprofilerate(0)
cpuprof.on = false
-
- // Now add is not running anymore, and getprofile owns the entire log.
- // Set the high bit in cpuprof.handoff to tell getprofile.
- for {
- n := cpuprof.handoff
- if n&0x80000000 != 0 {
- print("runtime: setcpuprofile(off) twice\n")
- }
- if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
- if n == 0 {
- // we did the transition from 0 -> nonzero so we wake getprofile
- notewakeup(&cpuprof.wait)
- }
- break
- }
- }
+ cpuprof.addExtra()
+ cpuprof.log.close()
}
- unlock(&cpuprofLock)
+ unlock(&cpuprof.lock)
}
// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
-// of stack. It is allowed to call evict.
+// of stack.
//go:nowritebarrierrec
-func (p *cpuProfile) add(pc []uintptr) {
- p.addWithFlushlog(pc, p.flushlog)
-}
-
-// addWithFlushlog implements add and addNonGo.
-// It is called from signal handlers and other limited environments
-// and cannot allocate memory or acquire locks that might be
-// held at the time of the signal, nor can it use substantial amounts
-// of stack. It may be called by a signal handler with no g or m.
-// It is allowed to call evict, passing the flushlog parameter.
-//go:nosplit
-//go:nowritebarrierrec
-func (p *cpuProfile) addWithFlushlog(pc []uintptr, flushlog func() bool) {
- if len(pc) > maxCPUProfStack {
- pc = pc[:maxCPUProfStack]
- }
-
- // Compute hash.
- h := uintptr(0)
- for _, x := range pc {
- h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
- h += x * 41
+func (p *cpuProfile) add(gp *g, stk []uintptr) {
+ // Simple cas-lock to coordinate with setcpuprofilerate.
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
+ osyield()
}
- p.count++
- // Add to entry count if already present in table.
- b := &p.hash[h%numBuckets]
-Assoc:
- for i := range b.entry {
- e := &b.entry[i]
- if e.depth != len(pc) {
- continue
- }
- for j := range pc {
- if e.stack[j] != pc[j] {
- continue Assoc
- }
+ if prof.hz != 0 { // implies cpuprof.log != nil
+ if p.numExtra > 0 || p.lostExtra > 0 {
+ p.addExtra()
}
- e.count++
- return
+ hdr := [1]uint64{1}
+ // Note: write "knows" that the argument is &gp.labels,
+ // because otherwise its write barrier behavior may not
+ // be correct. See the long comment there before
+ // changing the argument here.
+ cpuprof.log.write(&gp.labels, nanotime(), hdr[:], stk)
}
- // Evict entry with smallest count.
- var e *cpuprofEntry
- for i := range b.entry {
- if e == nil || b.entry[i].count < e.count {
- e = &b.entry[i]
- }
- }
- if e.count > 0 {
- if !p.evict(e, flushlog) {
- // Could not evict entry. Record lost stack.
- p.lost++
- return
- }
- p.evicts++
- }
-
- // Reuse the newly evicted entry.
- e.depth = len(pc)
- e.count = 1
- copy(e.stack[:], pc)
+ atomic.Store(&prof.signalLock, 0)
}
-// evict copies the given entry's data into the log, so that
-// the entry can be reused. evict is called from add, which
-// is called from the profiling signal handler, so it must not
-// allocate memory or block, and it may be called with no g or m.
-// It is safe to call flushlog. evict returns true if the entry was
-// copied to the log, false if there was no room available.
+// addNonGo adds the non-Go stack trace to the profile.
+// It is called from a non-Go thread, so we cannot use much stack at all,
+// nor do anything that needs a g or an m.
+// In particular, we can't call cpuprof.log.write.
+// Instead, we copy the stack into cpuprof.extra,
+// which will be drained the next time a Go thread
+// gets the signal handling event.
//go:nosplit
//go:nowritebarrierrec
-func (p *cpuProfile) evict(e *cpuprofEntry, flushlog func() bool) bool {
- d := e.depth
- nslot := d + 2
- log := &p.log[p.toggle]
- if p.nlog+nslot > len(log) {
- if !flushlog() {
- return false
- }
- log = &p.log[p.toggle]
- }
-
- q := p.nlog
- log[q] = e.count
- q++
- log[q] = uintptr(d)
- q++
- copy(log[q:], e.stack[:d])
- q += d
- p.nlog = q
- e.count = 0
- return true
-}
-
-// flushlog tries to flush the current log and switch to the other one.
-// flushlog is called from evict, called from add, called from the signal handler,
-// so it cannot allocate memory or block. It can try to swap logs with
-// the writing goroutine, as explained in the comment at the top of this file.
-//go:nowritebarrierrec
-func (p *cpuProfile) flushlog() bool {
- if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
- return false
+func (p *cpuProfile) addNonGo(stk []uintptr) {
+ // Simple cas-lock to coordinate with SetCPUProfileRate.
+ // (Other calls to add or addNonGo should be blocked out
+ // by the fact that only one SIGPROF can be handled by the
+ // process at a time. If not, this lock will serialize those too.)
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
+ osyield()
}
- notewakeup(&p.wait)
- p.toggle = 1 - p.toggle
- log := &p.log[p.toggle]
- q := 0
- if p.lost > 0 {
- lostPC := funcPC(lostProfileData)
- log[0] = p.lost
- log[1] = 1
- log[2] = lostPC
- q = 3
- p.lost = 0
+ if cpuprof.numExtra+1+len(stk) < len(cpuprof.extra) {
+ i := cpuprof.numExtra
+ cpuprof.extra[i] = uintptr(1 + len(stk))
+ copy(cpuprof.extra[i+1:], stk)
+ cpuprof.numExtra += 1 + len(stk)
+ } else {
+ cpuprof.lostExtra++
}
- p.nlog = q
- return true
-}
-// addNonGo is like add, but runs on a non-Go thread.
-// It can't do anything that might need a g or an m.
-// With this entry point, we don't try to flush the log when evicting an
-// old entry. Instead, we just drop the stack trace if we're out of space.
-//go:nosplit
-//go:nowritebarrierrec
-func (p *cpuProfile) addNonGo(pc []uintptr) {
- p.addWithFlushlog(pc, func() bool { return false })
+ atomic.Store(&prof.signalLock, 0)
}
-// getprofile blocks until the next block of profiling data is available
-// and returns it as a []byte. It is called from the writing goroutine.
-func (p *cpuProfile) getprofile() []byte {
- if p == nil {
- return nil
- }
-
- if p.wholding {
- // Release previous log to signal handling side.
- // Loop because we are racing against SetCPUProfileRate(0).
- for {
- n := p.handoff
- if n == 0 {
- print("runtime: phase error during cpu profile handoff\n")
- return nil
- }
- if n&0x80000000 != 0 {
- p.wtoggle = 1 - p.wtoggle
- p.wholding = false
- p.flushing = true
- goto Flush
- }
- if atomic.Cas(&p.handoff, n, 0) {
- break
- }
- }
- p.wtoggle = 1 - p.wtoggle
- p.wholding = false
- }
-
- if p.flushing {
- goto Flush
- }
-
- if !p.on && p.handoff == 0 {
- return nil
- }
-
- // Wait for new log.
- notetsleepg(&p.wait, -1)
- noteclear(&p.wait)
-
- switch n := p.handoff; {
- case n == 0:
- print("runtime: phase error during cpu profile wait\n")
- return nil
- case n == 0x80000000:
- p.flushing = true
- goto Flush
- default:
- n &^= 0x80000000
-
- // Return new log to caller.
- p.wholding = true
-
- return uintptrBytes(p.log[p.wtoggle][:n])
- }
-
- // In flush mode.
- // Add is no longer being called. We own the log.
- // Also, p.handoff is non-zero, so flushlog will return false.
- // Evict the hash table into the log and return it.
-Flush:
- for i := range p.hash {
- b := &p.hash[i]
- for j := range b.entry {
- e := &b.entry[j]
- if e.count > 0 && !p.evict(e, p.flushlog) {
- // Filled the log. Stop the loop and return what we've got.
- break Flush
- }
+// addExtra adds the "extra" profiling events,
+// queued by addNonGo, to the profile log.
+// addExtra is called either from a signal handler on a Go thread
+// or from an ordinary goroutine; either way it can use stack
+// and has a g. The world may be stopped, though.
+func (p *cpuProfile) addExtra() {
+ // Copy accumulated non-Go profile events.
+ hdr := [1]uint64{1}
+ for i := 0; i < p.numExtra; {
+ p.log.write(nil, 0, hdr[:], p.extra[i+1:i+int(p.extra[i])])
+ i += int(p.extra[i])
+ }
+ p.numExtra = 0
+
+ // Report any lost events.
+ if p.lostExtra > 0 {
+ hdr := [1]uint64{p.lostExtra}
+ lostStk := [2]uintptr{
+ funcPC(_LostExternalCode) + sys.PCQuantum,
+ funcPC(_ExternalCode) + sys.PCQuantum,
}
+ cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
}
-
- // Return pending log data.
- if p.nlog > 0 {
- // Note that we're using toggle now, not wtoggle,
- // because we're working on the log directly.
- n := p.nlog
- p.nlog = 0
- return uintptrBytes(p.log[p.toggle][:n])
- }
-
- // Made it through the table without finding anything to log.
- if !p.eodSent {
- // We may not have space to append this to the partial log buf,
- // so we always return a new slice for the end-of-data marker.
- p.eodSent = true
- return uintptrBytes(eod[:])
- }
-
- // Finally done. Clean up and return nil.
- p.flushing = false
- if !atomic.Cas(&p.handoff, p.handoff, 0) {
- print("runtime: profile flush racing with something\n")
- }
- return nil
}
-func uintptrBytes(p []uintptr) (ret []byte) {
- pp := (*slice)(unsafe.Pointer(&p))
- rp := (*slice)(unsafe.Pointer(&ret))
-
- rp.array = pp.array
- rp.len = pp.len * int(unsafe.Sizeof(p[0]))
- rp.cap = rp.len
-
- return
+func (p *cpuProfile) addLostAtomic64(count uint64) {
+ hdr := [1]uint64{count}
+ lostStk := [2]uintptr{
+ funcPC(_LostSIGPROFDuringAtomic64) + sys.PCQuantum,
+ funcPC(_System) + sys.PCQuantum,
+ }
+ cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
}
-// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
-// blocking until data is available. If profiling is turned off and all the profile
-// data accumulated while it was on has been returned, CPUProfile returns nil.
-// The caller must save the returned data before calling CPUProfile again.
+// CPUProfile panics.
+// It formerly provided raw access to chunks of
+// a pprof-format profile generated by the runtime.
+// The details of generating that format have changed,
+// so this functionality has been removed.
//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.cpuprofile flag instead of calling
-// CPUProfile directly.
+// Deprecated: use the runtime/pprof package,
+// or the handlers in the net/http/pprof package,
+// or the testing package's -test.cpuprofile flag instead.
func CPUProfile() []byte {
- return cpuprof.getprofile()
+ panic("CPUProfile no longer available")
}
//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime_pprof.runtime_cyclesPerSecond
func runtime_pprof_runtime_cyclesPerSecond() int64 {
return tickspersecond()
}
+
+// readProfile, provided to runtime/pprof, returns the next chunk of
+// binary CPU profiling stack trace data, blocking until data is available.
+// If profiling is turned off and all the profile data accumulated while it was
+// on has been returned, readProfile returns eof=true.
+// The caller must save the returned data and tags before calling readProfile again.
+//
+//go:linkname runtime_pprof_readProfile runtime_pprof.readProfile
+func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
+ lock(&cpuprof.lock)
+ log := cpuprof.log
+ unlock(&cpuprof.lock)
+ data, tags, eof := log.read(profBufBlocking)
+ if len(data) == 0 && eof {
+ lock(&cpuprof.lock)
+ cpuprof.log = nil
+ unlock(&cpuprof.lock)
+ }
+ return data, tags, eof
+}
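
The new add and addNonGo paths coordinate through prof.signalLock with a simple cas-lock: spin on a compare-and-swap, yield between attempts, and release with a plain store. A minimal user-space sketch, substituting sync/atomic and runtime.Gosched for the runtime-internal atomic.Cas and osyield:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

var signalLock uint32

// withSignalLock is a hypothetical wrapper around the patch's pattern.
func withSignalLock(f func()) {
	// Spin until we flip the lock word from 0 to 1.
	for !atomic.CompareAndSwapUint32(&signalLock, 0, 1) {
		runtime.Gosched() // stands in for the runtime's osyield()
	}
	f()
	// Release with a plain atomic store.
	atomic.StoreUint32(&signalLock, 0)
}

func main() {
	withSignalLock(func() { fmt.Println("profiling event recorded") })
}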
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index b338df985d3..b79873185cc 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -24,7 +24,10 @@ func TestCgoCrashHandler(t *testing.T) {
}
func TestCgoSignalDeadlock(t *testing.T) {
- t.Parallel()
+ // Don't call t.Parallel, since too much work going on at the
+ // same time can cause the testprogcgo code to overrun its
+ // timeouts (issue #18598).
+
if testing.Short() && runtime.GOOS == "windows" {
t.Skip("Skipping in short mode") // takes up to 64 seconds
}
@@ -291,33 +294,43 @@ func testCgoPprof(t *testing.T, buildArg, runArg string) {
got, err := testEnv(exec.Command(exe, runArg)).CombinedOutput()
if err != nil {
+ if testenv.Builder() == "linux-amd64-alpine" {
+ // See Issue 18243 and Issue 19938.
+ t.Skipf("Skipping failing test on Alpine (golang.org/issue/18243). Ignoring error: %v", err)
+ }
t.Fatal(err)
}
fn := strings.TrimSpace(string(got))
defer os.Remove(fn)
- cmd := testEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1", exe, fn))
-
- found := false
- for i, e := range cmd.Env {
- if strings.HasPrefix(e, "PPROF_TMPDIR=") {
- cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
- found = true
- break
+ for try := 0; try < 2; try++ {
+ cmd := testEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-top", "-nodecount=1"))
+ // Check that pprof works both with and without explicit executable on command line.
+ if try == 0 {
+ cmd.Args = append(cmd.Args, exe, fn)
+ } else {
+ cmd.Args = append(cmd.Args, fn)
}
- }
- if !found {
- cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
- }
- top, err := cmd.CombinedOutput()
- t.Logf("%s", top)
- if err != nil {
- t.Fatal(err)
- }
+ found := false
+ for i, e := range cmd.Env {
+ if strings.HasPrefix(e, "PPROF_TMPDIR=") {
+ cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
+ found = true
+ break
+ }
+ }
+ if !found {
+ cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
+ }
- if !bytes.Contains(top, []byte("cpuHog")) {
- t.Error("missing cpuHog in pprof output")
+ top, err := cmd.CombinedOutput()
+ t.Logf("%s:\n%s", cmd.Args, top)
+ if err != nil {
+ t.Error(err)
+ } else if !bytes.Contains(top, []byte("cpuHog")) {
+ t.Error("missing cpuHog in pprof output")
+ }
}
}
@@ -397,3 +410,16 @@ func TestRaceSignal(t *testing.T) {
t.Errorf("expected %q got %s", want, got)
}
}
+
+func TestCgoNumGoroutine(t *testing.T) {
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ t.Skipf("skipping numgoroutine test on %s", runtime.GOOS)
+ }
+ t.Parallel()
+ got := runTestProg(t, "testprogcgo", "NumGoroutine")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q got %v", want, got)
+ }
+}
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index 4ba9d444b9c..1cde6bf7997 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -164,6 +164,12 @@ func checkStaleRuntime(t *testing.T) {
return
}
if string(out) != "false\n" {
+ t.Logf("go list -f {{.Stale}} runtime:\n%s", out)
+ out, err := testEnv(exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.StaleReason}}", "runtime")).CombinedOutput()
+ if err != nil {
+ t.Logf("go list -f {{.StaleReason}} failed: %v", err)
+ }
+ t.Logf("go list -f {{.StaleReason}} runtime:\n%s", out)
staleRuntimeErr = fmt.Errorf("Stale runtime.a. Run 'go install runtime'.")
}
})
@@ -305,6 +311,9 @@ func TestNoHelperGoroutines(t *testing.T) {
func TestBreakpoint(t *testing.T) {
output := runTestProg(t, "testprog", "Breakpoint")
+ // If runtime.Breakpoint() is inlined, then the stack trace prints
+ // "runtime.Breakpoint(...)" instead of "runtime.Breakpoint()".
+ // For gccgo, no parens.
want := "runtime.Breakpoint"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
@@ -481,28 +490,33 @@ func TestMemPprof(t *testing.T) {
fn := strings.TrimSpace(string(got))
defer os.Remove(fn)
- cmd := testEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top", exe, fn))
-
- found := false
- for i, e := range cmd.Env {
- if strings.HasPrefix(e, "PPROF_TMPDIR=") {
- cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
- found = true
- break
+ for try := 0; try < 2; try++ {
+ cmd := testEnv(exec.Command(testenv.GoToolPath(t), "tool", "pprof", "-alloc_space", "-top"))
+ // Check that pprof works both with and without explicit executable on command line.
+ if try == 0 {
+ cmd.Args = append(cmd.Args, exe, fn)
+ } else {
+ cmd.Args = append(cmd.Args, fn)
+ }
+ found := false
+ for i, e := range cmd.Env {
+ if strings.HasPrefix(e, "PPROF_TMPDIR=") {
+ cmd.Env[i] = "PPROF_TMPDIR=" + os.TempDir()
+ found = true
+ break
+ }
+ }
+ if !found {
+ cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
}
- }
- if !found {
- cmd.Env = append(cmd.Env, "PPROF_TMPDIR="+os.TempDir())
- }
-
- top, err := cmd.CombinedOutput()
- t.Logf("%s", top)
- if err != nil {
- t.Fatal(err)
- }
- if !bytes.Contains(top, []byte("MemProf")) {
- t.Error("missing MemProf in pprof output")
+ top, err := cmd.CombinedOutput()
+ t.Logf("%s:\n%s", cmd.Args, top)
+ if err != nil {
+ t.Error(err)
+ } else if !bytes.Contains(top, []byte("MemProf")) {
+ t.Error("missing MemProf in pprof output")
+ }
}
}
@@ -541,3 +555,87 @@ func TestConcurrentMapIterateWrite(t *testing.T) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
+
+type point struct {
+ x, y *int
+}
+
+func (p *point) negate() {
+ *p.x = *p.x * -1
+ *p.y = *p.y * -1
+}
+
+// Test for issue #10152.
+func TestPanicInlined(t *testing.T) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ t.Fatalf("recover failed")
+ }
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ buf = buf[:n]
+ want := []byte("(*point).negate(")
+ if runtime.Compiler == "gccgo" {
+ want = []byte("negate.pN18_runtime_test.point")
+ }
+ if !bytes.Contains(buf, want) {
+ t.Logf("%s", buf)
+ t.Fatalf("expecting stack trace to contain call to %s", want)
+ }
+ }()
+
+ pt := new(point)
+ pt.negate()
+}
+
+// Test for issues #3934 and #20018.
+// We want to delay exiting until a panic print is complete.
+func TestPanicRace(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+
+ exe, err := buildTestProg(t, "testprog")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // The test is intentionally racy, and in my testing does not
+ // produce the expected output about 0.05% of the time.
+ // So run the program in a loop and only fail the test if we
+ // get the wrong output ten times in a row.
+ const tries = 10
+retry:
+ for i := 0; i < tries; i++ {
+ got, err := testEnv(exec.Command(exe, "PanicRace")).CombinedOutput()
+ if err == nil {
+ t.Logf("try %d: program exited successfully, should have failed", i+1)
+ continue
+ }
+
+ if i > 0 {
+ t.Logf("try %d:\n", i+1)
+ }
+ t.Logf("%s\n", got)
+
+ wants := []string{
+ "panic: crash",
+ "PanicRace",
+ "created by ",
+ }
+ if runtime.Compiler == "gccgo" {
+ // gccgo will dump a function name like main.$nested30.
+ // Match on the file name instead.
+ wants[1] = "panicrace"
+ }
+ for _, want := range wants {
+ if !bytes.Contains(got, []byte(want)) {
+ t.Logf("did not find expected string %q", want)
+ continue retry
+ }
+ }
+
+ // Test generated expected output.
+ return
+ }
+ t.Errorf("test ran %d times without producing expected output", tries)
+}
diff --git a/libgo/go/runtime/crash_unix_test.go b/libgo/go/runtime/crash_unix_test.go
index 7a29c1eba79..09c25471d10 100644
--- a/libgo/go/runtime/crash_unix_test.go
+++ b/libgo/go/runtime/crash_unix_test.go
@@ -24,6 +24,15 @@ import (
// Send SIGQUIT to get a stack trace.
var sigquit = syscall.SIGQUIT
+func init() {
+ if runtime.Sigisblocked(int(syscall.SIGQUIT)) {
+ // We can't use SIGQUIT to kill subprocesses because
+ // it's blocked. Use SIGKILL instead. See issue
+ // #19196 for an example of when this happens.
+ sigquit = syscall.SIGKILL
+ }
+}
+
func TestCrashDumpsAllThreads(t *testing.T) {
switch runtime.GOOS {
case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
@@ -31,6 +40,10 @@ func TestCrashDumpsAllThreads(t *testing.T) {
t.Skipf("skipping; not supported on %v", runtime.GOOS)
}
+ if runtime.Sigisblocked(int(syscall.SIGQUIT)) {
+ t.Skip("skipping; SIGQUIT is blocked, see golang.org/issue/19196")
+ }
+
// We don't use executeTest because we need to kill the
// program while it is running.
@@ -165,6 +178,10 @@ func TestPanicSystemstack(t *testing.T) {
t.Skip("Skipping in short mode (GOTRACEBACK=crash is slow)")
}
+ if runtime.Sigisblocked(int(syscall.SIGQUIT)) {
+ t.Skip("skipping; SIGQUIT is blocked, see golang.org/issue/19196")
+ }
+
t.Parallel()
cmd := exec.Command(os.Args[0], "testPanicSystemstackInternal")
cmd = testEnv(cmd)
@@ -251,3 +268,16 @@ func TestSignalIgnoreSIGTRAP(t *testing.T) {
t.Fatalf("want %s, got %s\n", want, output)
}
}
+
+func TestSignalDuringExec(t *testing.T) {
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd":
+ default:
+ t.Skipf("skipping test on %s", runtime.GOOS)
+ }
+ output := runTestProg(t, "testprognet", "SignalDuringExec")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index c82c024235b..785e9d4598e 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -89,9 +89,7 @@ func ReadGCStats(stats *GCStats) {
// at startup, or 100 if the variable is not set.
// A negative percentage disables garbage collection.
func SetGCPercent(percent int) int {
- old := setGCPercent(int32(percent))
- runtime.GC()
- return int(old)
+ return int(setGCPercent(int32(percent)))
}
// FreeOSMemory forces a garbage collection followed by an
diff --git a/libgo/go/runtime/debug/garbage_test.go b/libgo/go/runtime/debug/garbage_test.go
index 04e954b1b1e..62eeb2c8078 100644
--- a/libgo/go/runtime/debug/garbage_test.go
+++ b/libgo/go/runtime/debug/garbage_test.go
@@ -5,6 +5,7 @@
package debug_test
import (
+ "internal/testenv"
"runtime"
. "runtime/debug"
"testing"
@@ -104,15 +105,78 @@ func TestFreeOSMemory(t *testing.T) {
}
}
+var (
+ setGCPercentBallast interface{}
+ setGCPercentSink interface{}
+)
+
func TestSetGCPercent(t *testing.T) {
+ testenv.SkipFlaky(t, 20076)
+
// Test that the variable is being set and returned correctly.
- // Assume the percentage itself is implemented fine during GC,
- // which is harder to test.
old := SetGCPercent(123)
new := SetGCPercent(old)
if new != 123 {
t.Errorf("SetGCPercent(123); SetGCPercent(x) = %d, want 123", new)
}
+
+ // Test that the percentage is implemented correctly.
+ defer func() {
+ SetGCPercent(old)
+ setGCPercentBallast, setGCPercentSink = nil, nil
+ }()
+ SetGCPercent(100)
+ runtime.GC()
+ // Create 100 MB of live heap as a baseline.
+ const baseline = 100 << 20
+ var ms runtime.MemStats
+ runtime.ReadMemStats(&ms)
+ setGCPercentBallast = make([]byte, baseline-ms.Alloc)
+ runtime.GC()
+ runtime.ReadMemStats(&ms)
+ if abs64(baseline-int64(ms.Alloc)) > 10<<20 {
+ t.Fatalf("failed to set up baseline live heap; got %d MB, want %d MB", ms.Alloc>>20, baseline>>20)
+ }
+ // NextGC should be ~200 MB.
+ const thresh = 20 << 20 // TODO: Figure out why this is so noisy on some builders
+ if want := int64(2 * baseline); abs64(want-int64(ms.NextGC)) > thresh {
+ t.Errorf("NextGC = %d MB, want %d±%d MB", ms.NextGC>>20, want>>20, thresh>>20)
+ }
+ // Create some garbage, but not enough to trigger another GC.
+ for i := 0; float64(i) < 1.2*baseline; i += 1 << 10 {
+ setGCPercentSink = make([]byte, 1<<10)
+ }
+ setGCPercentSink = nil
+ // Adjust GOGC to 50. NextGC should be ~150 MB.
+ SetGCPercent(50)
+ runtime.ReadMemStats(&ms)
+ if want := int64(1.5 * baseline); abs64(want-int64(ms.NextGC)) > thresh {
+ t.Errorf("NextGC = %d MB, want %d±%d MB", ms.NextGC>>20, want>>20, thresh>>20)
+ }
+
+ // Trigger a GC and get back to 100 MB live with GOGC=100.
+ SetGCPercent(100)
+ runtime.GC()
+ // Raise live to 120 MB.
+ setGCPercentSink = make([]byte, int(0.2*baseline))
+ // Lower GOGC to 10. This must force a GC.
+ runtime.ReadMemStats(&ms)
+ ngc1 := ms.NumGC
+ SetGCPercent(10)
+ // It may require an allocation to actually force the GC.
+ setGCPercentSink = make([]byte, 1<<20)
+ runtime.ReadMemStats(&ms)
+ ngc2 := ms.NumGC
+ if ngc1 == ngc2 {
+ t.Errorf("expected GC to run but it did not")
+ }
+}
+
+func abs64(a int64) int64 {
+ if a < 0 {
+ return -a
+ }
+ return a
}
func TestSetMaxThreadsOvf(t *testing.T) {
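
The NextGC expectations in the test above follow from the GOGC rule: the next collection is targeted at roughly live_heap * (1 + GOGC/100). A tiny arithmetic sketch with a hypothetical nextGCTarget helper:

package main

import "fmt"

// nextGCTarget applies the GOGC rule: target ≈ live * (1 + GOGC/100).
func nextGCTarget(liveBytes uint64, gogc int) uint64 {
	return liveBytes + liveBytes*uint64(gogc)/100
}

func main() {
	live := uint64(100 << 20) // 100 MB live heap, as in the test
	fmt.Println(nextGCTarget(live, 100)>>20, "MB") // 200 MB
	fmt.Println(nextGCTarget(live, 50)>>20, "MB")  // 150 MB
}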
diff --git a/libgo/go/runtime/env_posix.go b/libgo/go/runtime/env_posix.go
index 9bf7ddcc535..ddf3c02c025 100644
--- a/libgo/go/runtime/env_posix.go
+++ b/libgo/go/runtime/env_posix.go
@@ -11,7 +11,7 @@ func gogetenv(key string) string {
if env == nil {
throw("getenv before env init")
}
- for _, s := range environ() {
+ for _, s := range env {
if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
return s[len(key)+1:]
}
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index 9cf2230ab3f..44e63d8744b 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -140,8 +140,6 @@ func typestring(x interface{}) string {
// For calling from C.
// Prints an argument passed to panic.
-// There's room for arbitrary complexity here, but we keep it
-// simple and handle just a few important cases: int, string, and Stringer.
func printany(i interface{}) {
switch v := i.(type) {
case nil:
@@ -150,16 +148,41 @@ func printany(i interface{}) {
print(v.String())
case error:
print(v.Error())
+ case bool:
+ print(v)
case int:
print(v)
+ case int8:
+ print(v)
+ case int16:
+ print(v)
+ case int32:
+ print(v)
+ case int64:
+ print(v)
+ case uint:
+ print(v)
+ case uint8:
+ print(v)
+ case uint16:
+ print(v)
+ case uint32:
+ print(v)
+ case uint64:
+ print(v)
+ case uintptr:
+ print(v)
+ case float32:
+ print(v)
+ case float64:
+ print(v)
+ case complex64:
+ print(v)
+ case complex128:
+ print(v)
case string:
print(v)
default:
print("(", typestring(i), ") ", i)
}
}
-
-// called from generated code
-func panicwrap(pkg, typ, meth string) {
- panic(plainError("value method " + pkg + "." + typ + "." + meth + " called using nil *" + typ + " pointer"))
-}
diff --git a/libgo/go/runtime/example_test.go b/libgo/go/runtime/example_test.go
new file mode 100644
index 00000000000..e4912a51588
--- /dev/null
+++ b/libgo/go/runtime/example_test.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+func ExampleFrames() {
+ c := func() {
+ // Ask runtime.Callers for up to 10 pcs, including runtime.Callers itself.
+ pc := make([]uintptr, 10)
+ n := runtime.Callers(0, pc)
+ if n == 0 {
+ // No pcs available. Stop now.
+ // This can happen if the first argument to runtime.Callers is large.
+ return
+ }
+
+ pc = pc[:n] // pass only valid pcs to runtime.CallersFrames
+ frames := runtime.CallersFrames(pc)
+
+ // Loop to get frames.
+ // A fixed number of pcs can expand to an indefinite number of Frames.
+ for {
+ frame, more := frames.Next()
+ // To keep this example's output stable
+ // even if there are changes in the testing package,
+ // stop unwinding when we leave package runtime.
+ if !strings.Contains(frame.File, "runtime/") {
+ break
+ }
+ fmt.Printf("- more:%v | %s\n", more, frame.Function)
+ if !more {
+ break
+ }
+ }
+ }
+
+ b := func() { c() }
+ a := func() { b() }
+
+ a()
+ // Output:
+ // - more:true | runtime.Callers
+ // - more:true | runtime_test.ExampleFrames.func1
+ // - more:true | runtime_test.ExampleFrames.func2
+ // - more:true | runtime_test.ExampleFrames.func3
+ // - more:true | runtime_test.ExampleFrames
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index bf435f447e0..6325dcb3948 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -41,11 +41,11 @@ type LFNode struct {
}
func LFStackPush(head *uint64, node *LFNode) {
- lfstackpush(head, (*lfnode)(unsafe.Pointer(node)))
+ (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
}
func LFStackPop(head *uint64) *LFNode {
- return (*LFNode)(unsafe.Pointer(lfstackpop(head)))
+ return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop()))
}
func GCMask(x interface{}) (ret []byte) {
@@ -241,6 +241,97 @@ func CountPagesInUse() (pagesInUse, counted uintptr) {
return
}
+func Fastrand() uint32 { return fastrand() }
+func Fastrandn(n uint32) uint32 { return fastrandn(n) }
+
+type ProfBuf profBuf
+
+func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
+ return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
+}
+
+func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
+ (*profBuf)(p).write(tag, now, hdr, stk)
+}
+
+const (
+ ProfBufBlocking = profBufBlocking
+ ProfBufNonBlocking = profBufNonBlocking
+)
+
+func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
+ return (*profBuf)(p).read(profBufReadMode(mode))
+}
+
+func (p *ProfBuf) Close() {
+ (*profBuf)(p).close()
+}
+
+// ReadMemStatsSlow returns both the runtime-computed MemStats and
+// MemStats accumulated by scanning the heap.
+func ReadMemStatsSlow() (base, slow MemStats) {
+ stopTheWorld("ReadMemStatsSlow")
+
+ // Run on the system stack to avoid stack growth allocation.
+ systemstack(func() {
+ // Make sure stats don't change.
+ getg().m.mallocing++
+
+ readmemstats_m(&base)
+
+ // Initialize slow from base and zero the fields we're
+ // recomputing.
+ slow = base
+ slow.Alloc = 0
+ slow.TotalAlloc = 0
+ slow.Mallocs = 0
+ slow.Frees = 0
+ var bySize [_NumSizeClasses]struct {
+ Mallocs, Frees uint64
+ }
+
+ // Add up current allocations in spans.
+ for _, s := range mheap_.allspans {
+ if s.state != mSpanInUse {
+ continue
+ }
+ if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
+ slow.Mallocs++
+ slow.Alloc += uint64(s.elemsize)
+ } else {
+ slow.Mallocs += uint64(s.allocCount)
+ slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
+ bySize[sizeclass].Mallocs += uint64(s.allocCount)
+ }
+ }
+
+ // Add in frees. readmemstats_m flushed the cached stats, so
+ // these are up-to-date.
+ var smallFree uint64
+ slow.Frees = mheap_.nlargefree
+ for i := range mheap_.nsmallfree {
+ slow.Frees += mheap_.nsmallfree[i]
+ bySize[i].Frees = mheap_.nsmallfree[i]
+ bySize[i].Mallocs += mheap_.nsmallfree[i]
+ smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
+ }
+ slow.Frees += memstats.tinyallocs
+ slow.Mallocs += slow.Frees
+
+ slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree
+
+ for i := range slow.BySize {
+ slow.BySize[i].Mallocs = bySize[i].Mallocs
+ slow.BySize[i].Frees = bySize[i].Frees
+ }
+
+ getg().m.mallocing--
+ })
+
+ startTheWorld()
+ return
+}
+
// BlockOnSystemStack switches to the system stack, prints "x\n" to
// stderr, and blocks in a stack containing
// "runtime.blockOnSystemStackInternal".
@@ -253,3 +344,23 @@ func blockOnSystemStackInternal() {
lock(&deadlock)
lock(&deadlock)
}
+
+type RWMutex struct {
+ rw rwmutex
+}
+
+func (rw *RWMutex) RLock() {
+ rw.rw.rlock()
+}
+
+func (rw *RWMutex) RUnlock() {
+ rw.rw.runlock()
+}
+
+func (rw *RWMutex) Lock() {
+ rw.rw.lock()
+}
+
+func (rw *RWMutex) Unlock() {
+ rw.rw.unlock()
+}
diff --git a/libgo/go/runtime/export_unix_test.go b/libgo/go/runtime/export_unix_test.go
new file mode 100644
index 00000000000..54d577072ec
--- /dev/null
+++ b/libgo/go/runtime/export_unix_test.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package runtime
+
+func sigismember(mask *sigset, i int) bool {
+ clear := *mask
+ sigdelset(&clear, i)
+ return clear != *mask
+}
+
+func Sigisblocked(i int) bool {
+ var sigmask sigset
+ sigprocmask(_SIG_SETMASK, nil, &sigmask)
+ return sigismember(&sigmask, i)
+}
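
sigismember above tests membership without an "is set" primitive: clear the bit in a copy and compare against the original. The same delete-and-compare idiom, sketched with a plain uint64 bitset in place of the platform sigset:

package main

import "fmt"

type sigset uint64 // stands in for the platform signal mask

func sigdelset(m *sigset, i int) { *m &^= 1 << uint(i) }

func sigismember(mask *sigset, i int) bool {
	clear := *mask
	sigdelset(&clear, i)
	return clear != *mask // changed copy means the bit was set
}

func main() {
	var m sigset = 1 << 3 // only signal 3 is blocked
	fmt.Println(sigismember(&m, 3), sigismember(&m, 4)) // true false
}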
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index 5c50760b8b8..6ca978980f2 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -50,13 +50,6 @@ It is a comma-separated list of name=val pairs setting these named variables:
gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
onto smaller stacks. In this mode, a goroutine's stack can only grow.
- gcstackbarrieroff: setting gcstackbarrieroff=1 disables the use of stack barriers
- that allow the garbage collector to avoid repeating a stack scan during the
- mark termination phase.
-
- gcstackbarrierall: setting gcstackbarrierall=1 installs stack barriers
- in every stack frame, rather than in exponentially-spaced frames.
-
gcrescanstacks: setting gcrescanstacks=1 enables stack
re-scanning during the STW mark termination phase. This is
helpful for debugging if objects are being prematurely
@@ -85,7 +78,7 @@ It is a comma-separated list of name=val pairs setting these named variables:
for mark/scan are broken down in to assist time (GC performed in
line with allocation), background GC time, and idle GC time.
If the line ends with "(forced)", this GC was forced by a
- runtime.GC() call and all phases are STW.
+ runtime.GC() call.
Setting gctrace to any value > 0 also causes the garbage collector
to emit a summary when memory is released back to the system.
@@ -173,7 +166,7 @@ func Gosched()
// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
// meaning of skip differs between Caller and Callers.) The return values report the
// program counter, file name, and line number within the file of the corresponding
-// call. The boolean ok is false if it was not possible to recover the information.
+// call. The boolean ok is false if it was not possible to recover the information.
func Caller(skip int) (pc uintptr, file string, line int, ok bool)
// Callers fills the slice pc with the return program counters of function invocations
@@ -181,6 +174,14 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool)
// to skip before recording in pc, with 0 identifying the frame for Callers itself and
// 1 identifying the caller of Callers.
// It returns the number of entries written to pc.
+//
+// To translate these PCs into symbolic information such as function
+// names and line numbers, use CallersFrames. CallersFrames accounts
+// for inlined functions and adjusts the return program counters into
+// call program counters. Iterating over the returned slice of PCs
+// directly is discouraged, as is using FuncForPC on any of the
+// returned PCs, since these cannot account for inlining or return
+// program counter adjustment.
func Callers(skip int, pc []uintptr) int
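
The new paragraph steers callers toward CallersFrames. A typical use of the pair, in ordinary user code (runtime.CallersFrames has been available since Go 1.7):

	package main

	import (
		"fmt"
		"runtime"
	)

	func printStack() {
		pc := make([]uintptr, 16)
		n := runtime.Callers(2, pc) // skip runtime.Callers and printStack itself
		frames := runtime.CallersFrames(pc[:n])
		for {
			frame, more := frames.Next()
			fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
			if !more {
				break
			}
		}
	}

	func main() { printStack() }
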
// GOROOT returns the root of the Go tree.
@@ -206,7 +207,7 @@ func Version() string {
const GOOS string = sys.GOOS
// GOARCH is the running program's architecture target:
-// 386, amd64, arm, or s390x.
+// one of 386, amd64, arm, s390x, and so on.
const GOARCH string = sys.GOARCH
// GCCGOTOOLDIR is the Tool Dir for the gccgo build
diff --git a/libgo/go/runtime/fastlog2.go b/libgo/go/runtime/fastlog2.go
index 5f3fb534232..1f251bfaab2 100644
--- a/libgo/go/runtime/fastlog2.go
+++ b/libgo/go/runtime/fastlog2.go
@@ -4,8 +4,6 @@
package runtime
-import "unsafe"
-
// fastlog2 implements a fast approximation to the base 2 log of a
// float64. This is used to compute a geometric distribution for heap
// sampling, without introducing dependencies into package math. This
@@ -27,7 +25,3 @@ func fastlog2(x float64) float64 {
low, high := fastlog2Table[xManIndex], fastlog2Table[xManIndex+1]
return float64(xExp) + low + (high-low)*float64(xManScale)*fastlogScaleRatio
}
-
-// float64bits returns the IEEE 754 binary representation of f.
-// Taken from math.Float64bits to avoid dependencies into package math.
-func float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
diff --git a/libgo/go/runtime/float.go b/libgo/go/runtime/float.go
new file mode 100644
index 00000000000..459e58dd7ef
--- /dev/null
+++ b/libgo/go/runtime/float.go
@@ -0,0 +1,53 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var inf = float64frombits(0x7FF0000000000000)
+
+// isNaN reports whether f is an IEEE 754 ``not-a-number'' value.
+func isNaN(f float64) (is bool) {
+ // IEEE 754 says that only NaNs satisfy f != f.
+ return f != f
+}
+
+// isFinite reports whether f is neither NaN nor an infinity.
+func isFinite(f float64) bool {
+ return !isNaN(f - f)
+}
+
+// isInf reports whether f is an infinity.
+func isInf(f float64) bool {
+ return !isNaN(f) && !isFinite(f)
+}
+
+// abs returns the absolute value of x.
+//
+// Special cases are:
+// abs(±Inf) = +Inf
+// abs(NaN) = NaN
+func abs(x float64) float64 {
+ const sign = 1 << 63
+ return float64frombits(float64bits(x) &^ sign)
+}
+
+// copysign returns a value with the magnitude
+// of x and the sign of y.
+func copysign(x, y float64) float64 {
+ const sign = 1 << 63
+ return float64frombits(float64bits(x)&^sign | float64bits(y)&sign)
+}
+
+// float64bits returns the IEEE 754 binary representation of f.
+func float64bits(f float64) uint64 {
+ return *(*uint64)(unsafe.Pointer(&f))
+}
+
+// float64frombits returns the floating point number corresponding
+// to the IEEE 754 binary representation b.
+func float64frombits(b uint64) float64 {
+ return *(*float64)(unsafe.Pointer(&b))
+}
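
Outside the runtime, the same identities are reachable through math.Float64bits and math.Float64frombits; a quick standalone check of the tricks this file relies on:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		const sign = 1 << 63

		// abs: clear the sign bit.
		fmt.Println(math.Float64frombits(math.Float64bits(-3.5) &^ sign)) // 3.5

		// copysign: magnitude of x, sign of y.
		cs := math.Float64frombits(math.Float64bits(3.5)&^sign | math.Float64bits(-1.0)&sign)
		fmt.Println(cs) // -3.5

		// Only NaN satisfies f != f, so isFinite(f) is !isNaN(f - f).
		inf := math.Inf(1)
		fmt.Println(inf-inf != inf-inf) // true: Inf minus Inf is NaN, so Inf is not finite
	}
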
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index ec043ed45be..f14e0d5050e 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "fmt"
"os"
"reflect"
"runtime"
@@ -450,3 +451,53 @@ func TestPageAccounting(t *testing.T) {
t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
}
}
+
+func TestReadMemStats(t *testing.T) {
+ base, slow := runtime.ReadMemStatsSlow()
+ if base != slow {
+ logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
+ t.Fatal("memstats mismatch")
+ }
+}
+
+func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
+ typ := got.Type()
+ switch typ.Kind() {
+ case reflect.Array, reflect.Slice:
+ if got.Len() != want.Len() {
+ t.Logf("len(%s): got %v, want %v", prefix, got, want)
+ return
+ }
+ for i := 0; i < got.Len(); i++ {
+ logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
+ }
+ case reflect.Struct:
+ for i := 0; i < typ.NumField(); i++ {
+ gf, wf := got.Field(i), want.Field(i)
+ logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
+ }
+ case reflect.Map:
+ t.Fatal("not implemented: logDiff for map")
+ default:
+ if got.Interface() != want.Interface() {
+ t.Logf("%s: got %v, want %v", prefix, got, want)
+ }
+ }
+}
+
+func BenchmarkReadMemStats(b *testing.B) {
+ var ms runtime.MemStats
+ const heapSize = 100 << 20
+ x := make([]*[1024]byte, heapSize/1024)
+ for i := range x {
+ x[i] = new([1024]byte)
+ }
+ hugeSink = x
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ runtime.ReadMemStats(&ms)
+ }
+
+ hugeSink = nil
+}
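
For reference, the API being exercised: ReadMemStats briefly stops the world and fills in a consistent snapshot, so it is cheap enough to benchmark but not free. Minimal usage:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var ms runtime.MemStats
		runtime.ReadMemStats(&ms) // consistent, stop-the-world snapshot
		fmt.Printf("HeapAlloc=%d NumGC=%d\n", ms.HeapAlloc, ms.NumGC)
	}
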
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
index 5b191d45752..a3e50cd9221 100644
--- a/libgo/go/runtime/hashmap.go
+++ b/libgo/go/runtime/hashmap.go
@@ -129,6 +129,11 @@ type hmap struct {
oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
+ extra *mapextra // optional fields
+}
+
+// mapextra holds fields that are not present on all maps.
+type mapextra struct {
// If both key and value do not contain pointers and are inline, then we mark bucket
// type as containing no pointers. This avoids scanning such maps.
// However, bmap.overflow is a pointer. In order to keep overflow buckets
@@ -136,9 +141,11 @@ type hmap struct {
// Overflow is used only if key and value do not contain pointers.
// overflow[0] contains overflow buckets for hmap.buckets.
// overflow[1] contains overflow buckets for hmap.oldbuckets.
- // The first indirection allows us to reduce static size of hmap.
- // The second indirection allows to store a pointer to the slice in hiter.
- overflow *[2]*[]*bmap
+ // The indirection allows storing a pointer to the slice in hiter.
+ overflow [2]*[]*bmap
+
+ // nextOverflow holds a pointer to a free overflow bucket.
+ nextOverflow *bmap
}
// A bucket for a Go map.
@@ -183,6 +190,10 @@ func (b *bmap) overflow(t *maptype) *bmap {
return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}
+func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
+ *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+}
+
// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
@@ -209,21 +220,40 @@ func (h *hmap) incrnoverflow() {
}
}
-func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
+func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
+ var ovf *bmap
+ if h.extra != nil && h.extra.nextOverflow != nil {
+ // We have preallocated overflow buckets available.
+ // See makeBucketArray for more details.
+ ovf = h.extra.nextOverflow
+ if ovf.overflow(t) == nil {
+ // We're not at the end of the preallocated overflow buckets. Bump the pointer.
+ h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
+ } else {
+ // This is the last preallocated overflow bucket.
+ // Reset the overflow pointer on this bucket,
+ // which was set to a non-nil sentinel value.
+ ovf.setoverflow(t, nil)
+ h.extra.nextOverflow = nil
+ }
+ } else {
+ ovf = (*bmap)(newobject(t.bucket))
+ }
h.incrnoverflow()
if t.bucket.kind&kindNoPointers != 0 {
h.createOverflow()
- *h.overflow[0] = append(*h.overflow[0], ovf)
+ *h.extra.overflow[0] = append(*h.extra.overflow[0], ovf)
}
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+ b.setoverflow(t, ovf)
+ return ovf
}
func (h *hmap) createOverflow() {
- if h.overflow == nil {
- h.overflow = new([2]*[]*bmap)
+ if h.extra == nil {
+ h.extra = new(mapextra)
}
- if h.overflow[0] == nil {
- h.overflow[0] = new([]*bmap)
+ if h.extra.overflow[0] == nil {
+ h.extra.overflow[0] = new([]*bmap)
}
}
@@ -238,9 +268,8 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
throw("bad hmap size")
}
- if hint < 0 || int64(int32(hint)) != hint {
- panic(plainError("makemap: size out of range"))
- // TODO: make hint an int, then none of this nonsense
+ if hint < 0 || hint > int64(maxSliceCap(t.bucket.size)) {
+ hint = 0
}
if !ismapkey(t.key) {
@@ -290,8 +319,14 @@ func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
// if B == 0, the buckets field is allocated lazily later (in mapassign)
// If hint is large zeroing this memory could take a while.
buckets := bucket
+ var extra *mapextra
if B != 0 {
- buckets = newarray(t.bucket, 1<<B)
+ var nextOverflow *bmap
+ buckets, nextOverflow = makeBucketArray(t, B)
+ if nextOverflow != nil {
+ extra = new(mapextra)
+ extra.nextOverflow = nextOverflow
+ }
 }
@@ ... @@
+func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
+ base := uintptr(1 << b)
+ nbuckets := base
+ // For small b, overflow buckets are unlikely.
+ // Avoid the overhead of the calculation.
+ if b >= 4 {
+ // Add on the estimated number of overflow buckets
+ // required to insert the median number of elements
+ // used with this value of b.
+ nbuckets += 1 << (b - 4)
+ sz := t.bucket.size * nbuckets
+ up := roundupsize(sz)
+ if up != sz {
+ nbuckets = up / t.bucket.size
+ }
+ }
+ buckets = newarray(t.bucket, int(nbuckets))
+ if base != nbuckets {
+ // We preallocated some overflow buckets.
+ // To keep the overhead of tracking these overflow buckets to a minimum,
+ // we use the convention that if a preallocated overflow bucket's overflow
+ // pointer is nil, then there are more available by bumping the pointer.
+ // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
+ nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
+ last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
+ last.setoverflow(t, (*bmap)(buckets))
+ }
+ return buckets, nextOverflow
+}
+
func hashGrow(t *maptype, h *hmap) {
// If we've hit the load factor, get bigger.
// Otherwise, there are too many overflow buckets,
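
The preallocation estimate in makeBucketArray is easy to check by hand: from b >= 4 onward, one overflow bucket is budgeted per 16 base buckets, before the total allocation is rounded up to a size class. A standalone rendering of that arithmetic (roundupsize is runtime-internal and omitted here):

	package main

	import "fmt"

	func main() {
		for _, b := range []uint8{3, 4, 5, 8} {
			base := uintptr(1) << b
			nbuckets := base
			if b >= 4 {
				nbuckets += 1 << (b - 4) // median-case overflow estimate
			}
			fmt.Printf("b=%d: %d base buckets, %d preallocated overflow\n", b, base, nbuckets-base)
		}
	}
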
@@ -896,7 +967,8 @@ func hashGrow(t *maptype, h *hmap) {
h.flags |= sameSizeGrow
}
oldbuckets := h.buckets
- newbuckets := newarray(t.bucket, 1<<(h.B+bigger))
+ newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
+
flags := h.flags &^ (iterator | oldIterator)
if h.flags&iterator != 0 {
flags |= oldIterator
@@ -909,13 +981,19 @@ func hashGrow(t *maptype, h *hmap) {
h.nevacuate = 0
h.noverflow = 0
- if h.overflow != nil {
+ if h.extra != nil && h.extra.overflow[0] != nil {
// Promote current overflow buckets to the old generation.
- if h.overflow[1] != nil {
+ if h.extra.overflow[1] != nil {
throw("overflow is not nil")
}
- h.overflow[1] = h.overflow[0]
- h.overflow[0] = nil
+ h.extra.overflow[1] = h.extra.overflow[0]
+ h.extra.overflow[0] = nil
+ }
+ if nextOverflow != nil {
+ if h.extra == nil {
+ h.extra = new(mapextra)
+ }
+ h.extra.nextOverflow = nextOverflow
}
// the actual copying of the hash table data is done incrementally
@@ -925,7 +1003,7 @@ func hashGrow(t *maptype, h *hmap) {
// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int64, B uint8) bool {
- return count >= bucketCnt && float32(count) >= loadFactor*float32((uintptr(1)<<B))
+ return count >= bucketCnt && float32(count) >= loadFactor*float32((uint64(1)<<B))
}
@@ ... @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
+ // Experiments suggest that 1024 is overkill by at least an order of magnitude.
+ // Put it in there as a safeguard anyway, to ensure O(1) behavior.
+ stop := h.nevacuate + 1024
+ if stop > newbit {
+ stop = newbit
+ }
+ for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
+ h.nevacuate++
+ }
+ if h.nevacuate == newbit { // newbit == # of oldbuckets
// Growing is all done. Free old main bucket array.
h.oldbuckets = nil
// Can discard old overflow buckets as well.
// If they are still referenced by an iterator,
// then the iterator holds a pointers to the slice.
- if h.overflow != nil {
- h.overflow[1] = nil
+ if h.extra != nil {
+ h.extra.overflow[1] = nil
}
h.flags &^= sameSizeGrow
}
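
With loadFactor at 6.5 and bucketCnt at 8, the comparison in overLoadFactor implies a concrete growth threshold per value of B; a standalone sketch that prints a few of them:

	package main

	import "fmt"

	func main() {
		const loadFactor = 6.5
		const bucketCnt = 8
		for _, B := range []uint8{0, 2, 4, 6} {
			threshold := loadFactor * float32(uint64(1)<<B)
			fmt.Printf("B=%d: grow once count >= %d and count >= %.1f\n", B, bucketCnt, threshold)
		}
	}
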
@@ -1139,8 +1229,8 @@ func ismapkey(t *_type) bool {
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype) *hmap {
- return makemap(t, 0, nil, nil)
+func reflect_makemap(t *maptype, cap int) *hmap {
+ return makemap(t, int64(cap), nil, nil)
}
//go:linkname reflect_mapaccess reflect.mapaccess
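
Together with the hint clamping in makemap above, reflect_makemap taking a cap means an out-of-range capacity is treated as zero rather than panicking. That is observable from plain user code (this mirrors the TestIgnoreBogusMapHint test added to map_test.go further down):

	package main

	import "fmt"

	func main() {
		for _, hint := range []int64{-1, 1 << 62} {
			m := make(map[int]int, hint) // bogus hints are clamped, not fatal
			m[1] = 1
			fmt.Println(len(m)) // 1
		}
	}
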
diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go
index 853da70e966..bec8fdac14e 100644
--- a/libgo/go/runtime/hashmap_fast.go
+++ b/libgo/go/runtime/hashmap_fast.go
@@ -45,7 +45,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if k != key {
continue
}
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -94,7 +94,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if k != key {
continue
}
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -143,7 +143,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if k != key {
continue
}
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -192,7 +192,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if k != key {
continue
}
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -223,7 +223,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -240,7 +240,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -252,8 +252,6 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
// check first 4 bytes
- // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
- // four 1-byte comparisons.
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
continue
}
@@ -295,7 +293,7 @@ dohash:
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x != top {
continue
}
@@ -332,7 +330,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -349,7 +347,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
@@ -402,7 +400,7 @@ dohash:
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+ x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x != top {
continue
}
@@ -420,3 +418,441 @@ dohash:
}
}
}
+
+func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapassign.
+ h.flags |= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+
+ var inserti *uint8
+ var insertk unsafe.Pointer
+ var val unsafe.Pointer
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == empty && inserti == nil {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*4)
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+ }
+ continue
+ }
+ k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+ if k != key {
+ continue
+ }
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if inserti == nil {
+ // all current buckets are full, allocate a new one.
+ newb := h.newoverflow(t, b)
+ inserti = &newb.tophash[0]
+ insertk = add(unsafe.Pointer(newb), dataOffset)
+ val = add(insertk, bucketCnt*4)
+ }
+
+ // store new key/value at insert position
+ typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+ *inserti = top
+ h.count++
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
+
+func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapassign.
+ h.flags |= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+
+ var inserti *uint8
+ var insertk unsafe.Pointer
+ var val unsafe.Pointer
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == empty && inserti == nil {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*8)
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+ }
+ continue
+ }
+ k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+ if k != key {
+ continue
+ }
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if inserti == nil {
+ // all current buckets are full, allocate a new one.
+ newb := h.newoverflow(t, b)
+ inserti = &newb.tophash[0]
+ insertk = add(unsafe.Pointer(newb), dataOffset)
+ val = add(insertk, bucketCnt*8)
+ }
+
+ // store new key/value at insert position
+ typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+ *inserti = top
+ h.count++
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
+
+func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+ if h == nil {
+ panic(plainError("assignment to entry in nil map"))
+ }
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+ key := stringStructOf(&ky)
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapassign.
+ h.flags |= hashWriting
+
+ if h.buckets == nil {
+ h.buckets = newarray(t.bucket, 1)
+ }
+
+again:
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+
+ var inserti *uint8
+ var insertk unsafe.Pointer
+ var val unsafe.Pointer
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ if b.tophash[i] == empty && inserti == nil {
+ inserti = &b.tophash[i]
+ insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+ }
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+ continue
+ }
+ // already have a mapping for key. Update it.
+ val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+ goto done
+ }
+ ovf := b.overflow(t)
+ if ovf == nil {
+ break
+ }
+ b = ovf
+ }
+
+ // Did not find mapping for key. Allocate new cell & add entry.
+
+ // If we hit the max load factor or we have too many overflow buckets,
+ // and we're not already in the middle of growing, start growing.
+ if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+ hashGrow(t, h)
+ goto again // Growing the table invalidates everything, so try again
+ }
+
+ if inserti == nil {
+ // all current buckets are full, allocate a new one.
+ newb := h.newoverflow(t, b)
+ inserti = &newb.tophash[0]
+ insertk = add(unsafe.Pointer(newb), dataOffset)
+ val = add(insertk, bucketCnt*2*sys.PtrSize)
+ }
+
+ // store new key/value at insert position
+ *((*stringStruct)(insertk)) = *key
+ *inserti = top
+ h.count++
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+ return val
+}
+
+func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapdelete
+ h.flags |= hashWriting
+
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
+ if key != *k {
+ continue
+ }
+ typedmemclr(t.key, unsafe.Pointer(k))
+ v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
+ typedmemclr(t.elem, v)
+ b.tophash[i] = empty
+ h.count--
+ goto done
+ }
+ b = b.overflow(t)
+ if b == nil {
+ goto done
+ }
+ }
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapdelete
+ h.flags |= hashWriting
+
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
+ if key != *k {
+ continue
+ }
+ typedmemclr(t.key, unsafe.Pointer(k))
+ v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
+ typedmemclr(t.elem, v)
+ b.tophash[i] = empty
+ h.count--
+ goto done
+ }
+ b = b.overflow(t)
+ if b == nil {
+ goto done
+ }
+ }
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
+
+func mapdelete_faststr(t *maptype, h *hmap, ky string) {
+ if raceenabled && h != nil {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
+ }
+ if h == nil || h.count == 0 {
+ return
+ }
+ if h.flags&hashWriting != 0 {
+ throw("concurrent map writes")
+ }
+
+ key := stringStructOf(&ky)
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+
+ // Set hashWriting after calling alg.hash for consistency with mapdelete
+ h.flags |= hashWriting
+
+ bucket := hash & (uintptr(1)<<h.B - 1)
+ if h.growing() {
+ growWork(t, h, bucket)
+ }
+ b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ top := uint8(hash >> (sys.PtrSize*8 - 8))
+ if top < minTopHash {
+ top += minTopHash
+ }
+ for {
+ for i := uintptr(0); i < bucketCnt; i++ {
+ if b.tophash[i] != top {
+ continue
+ }
+ k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+ if k.len != key.len {
+ continue
+ }
+ if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
+ continue
+ }
+ typedmemclr(t.key, unsafe.Pointer(k))
+ v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
+ typedmemclr(t.elem, v)
+ b.tophash[i] = empty
+ h.count--
+ goto done
+ }
+ b = b.overflow(t)
+ if b == nil {
+ goto done
+ }
+ }
+
+done:
+ if h.flags&hashWriting == 0 {
+ throw("concurrent map writes")
+ }
+ h.flags &^= hashWriting
+}
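
None of these routines is called directly; the compiler picks them based on the map's key type, so ordinary map operations get the fast paths automatically. The comments in this sketch note the routing this patch sets up (assuming the frontend lowers to these helpers, as gc and gccgo do):

	package main

	import "fmt"

	func main() {
		m := make(map[int64]string) // 8-byte keys: the _fast64 variants
		m[42] = "answer"            // lowered to mapassign_fast64
		delete(m, 42)               // lowered to mapdelete_fast64

		s := map[string]int{} // string keys: the _faststr variants
		s["k"] = 1            // lowered to mapassign_faststr
		fmt.Println(len(m), len(s)) // 0 1
	}
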
diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go
index 0db53f544a5..166199b5ca3 100644
--- a/libgo/go/runtime/heapdump.go
+++ b/libgo/go/runtime/heapdump.go
@@ -413,7 +413,7 @@ func dumpmemstats() {
dumpint(memstats.gc_sys)
dumpint(memstats.other_sys)
dumpint(memstats.next_gc)
- dumpint(memstats.last_gc)
+ dumpint(memstats.last_gc_unix)
dumpint(memstats.pause_total_ns)
for i := 0; i < 256; i++ {
dumpint(memstats.pause_ns[i])
@@ -515,7 +515,7 @@ func writeheapdump_m(fd uintptr) {
// Update stats so we can dump them.
// As a side effect, flushes all the MCaches so the MSpan.freelist
// lists contain all the free objects.
- updatememstats(nil)
+ updatememstats()
// Set dump file.
dumpfd = fd
diff --git a/libgo/go/runtime/iface_test.go b/libgo/go/runtime/iface_test.go
index 3744a4f9700..d63ea796138 100644
--- a/libgo/go/runtime/iface_test.go
+++ b/libgo/go/runtime/iface_test.go
@@ -29,6 +29,20 @@ func (TM) Method2() {}
func (TL) Method1() {}
func (TL) Method2() {}
+type T8 uint8
+type T16 uint16
+type T32 uint32
+type T64 uint64
+type Tstr string
+type Tslice []byte
+
+func (T8) Method1() {}
+func (T16) Method1() {}
+func (T32) Method1() {}
+func (T64) Method1() {}
+func (Tstr) Method1() {}
+func (Tslice) Method1() {}
+
var (
e interface{}
e_ interface{}
@@ -269,3 +283,133 @@ func TestNonEscapingConvT2I(t *testing.T) {
t.Fatalf("want 0 allocs, got %v", n)
}
}
+
+func TestZeroConvT2x(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("does not work on gccgo without better escape analysis")
+ }
+
+ tests := []struct {
+ name string
+ fn func()
+ }{
+ {name: "E8", fn: func() { e = eight8 }}, // any byte-sized value does not allocate
+ {name: "E16", fn: func() { e = zero16 }}, // zero values do not allocate
+ {name: "E32", fn: func() { e = zero32 }},
+ {name: "E64", fn: func() { e = zero64 }},
+ {name: "Estr", fn: func() { e = zerostr }},
+ {name: "Eslice", fn: func() { e = zeroslice }},
+ {name: "Econstflt", fn: func() { e = 99.0 }}, // constants do not allocate
+ {name: "Econststr", fn: func() { e = "change" }},
+ {name: "I8", fn: func() { i1 = eight8I }},
+ {name: "I16", fn: func() { i1 = zero16I }},
+ {name: "I32", fn: func() { i1 = zero32I }},
+ {name: "I64", fn: func() { i1 = zero64I }},
+ {name: "Istr", fn: func() { i1 = zerostrI }},
+ {name: "Islice", fn: func() { i1 = zerosliceI }},
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ n := testing.AllocsPerRun(1000, test.fn)
+ if n != 0 {
+ t.Errorf("want zero allocs, got %v", n)
+ }
+ })
+ }
+}
+
+var (
+ eight8 uint8 = 8
+ eight8I T8 = 8
+
+ zero16 uint16 = 0
+ zero16I T16 = 0
+ one16 uint16 = 1
+
+ zero32 uint32 = 0
+ zero32I T32 = 0
+ one32 uint32 = 1
+
+ zero64 uint64 = 0
+ zero64I T64 = 0
+ one64 uint64 = 1
+
+ zerostr string = ""
+ zerostrI Tstr = ""
+ nzstr string = "abc"
+
+ zeroslice []byte = nil
+ zerosliceI Tslice = nil
+ nzslice []byte = []byte("abc")
+
+ zerobig [512]byte
+ nzbig [512]byte = [512]byte{511: 1}
+)
+
+func BenchmarkConvT2Ezero(b *testing.B) {
+ b.Run("zero", func(b *testing.B) {
+ b.Run("16", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zero16
+ }
+ })
+ b.Run("32", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zero32
+ }
+ })
+ b.Run("64", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zero64
+ }
+ })
+ b.Run("str", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zerostr
+ }
+ })
+ b.Run("slice", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zeroslice
+ }
+ })
+ b.Run("big", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = zerobig
+ }
+ })
+ })
+ b.Run("nonzero", func(b *testing.B) {
+ b.Run("16", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = one16
+ }
+ })
+ b.Run("32", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = one32
+ }
+ })
+ b.Run("64", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = one64
+ }
+ })
+ b.Run("str", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = nzstr
+ }
+ })
+ b.Run("slice", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = nzslice
+ }
+ })
+ b.Run("big", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = nzbig
+ }
+ })
+ })
+}
diff --git a/libgo/go/runtime/internal/sys/intrinsics.go b/libgo/go/runtime/internal/sys/intrinsics.go
index 43acf34b801..29282800fdc 100644
--- a/libgo/go/runtime/internal/sys/intrinsics.go
+++ b/libgo/go/runtime/internal/sys/intrinsics.go
@@ -14,22 +14,22 @@ func builtinCtz64(uint64) int32
// Ctz64 counts trailing (low-order) zeroes,
// and if all are zero, then 64.
-func Ctz64(x uint64) uint64 {
+func Ctz64(x uint64) int {
if x == 0 {
return 64
}
- return uint64(builtinCtz64(x))
+ return int(builtinCtz64(x))
}
//go:nosplit
// Ctz32 counts trailing (low-order) zeroes,
// and if all are zero, then 32.
-func Ctz32(x uint32) uint32 {
+func Ctz32(x uint32) int {
if x == 0 {
return 32
}
- return uint32(builtinCtz32(x))
+ return int(builtinCtz32(x))
}
//extern __builtin_bswap64
diff --git a/libgo/go/runtime/internal/sys/intrinsics_test.go b/libgo/go/runtime/internal/sys/intrinsics_test.go
index 1f2c8daa962..0444183e9dc 100644
--- a/libgo/go/runtime/internal/sys/intrinsics_test.go
+++ b/libgo/go/runtime/internal/sys/intrinsics_test.go
@@ -6,17 +6,17 @@ import (
)
func TestCtz64(t *testing.T) {
- for i := uint(0); i <= 64; i++ {
- x := uint64(5) << i
- if got := sys.Ctz64(x); got != uint64(i) {
+ for i := 0; i <= 64; i++ {
+ x := uint64(5) << uint(i)
+ if got := sys.Ctz64(x); got != i {
t.Errorf("Ctz64(%d)=%d, want %d", x, got, i)
}
}
}
func TestCtz32(t *testing.T) {
- for i := uint(0); i <= 32; i++ {
- x := uint32(5) << i
- if got := sys.Ctz32(x); got != uint32(i) {
+ for i := 0; i <= 32; i++ {
+ x := uint32(5) << uint(i)
+ if got := sys.Ctz32(x); got != i {
t.Errorf("Ctz32(%d)=%d, want %d", x, got, i)
}
}
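
The switch to int return values matches math/bits, new in the same release, which follows the identical all-zeros convention; for comparison, in user code:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		fmt.Println(bits.TrailingZeros64(5 << 7)) // 7, as sys.Ctz64(5<<7) now reports
		fmt.Println(bits.TrailingZeros64(0))      // 64: all-zero input yields the width
		fmt.Println(bits.TrailingZeros32(0))      // 32
	}
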
diff --git a/libgo/go/runtime/lfstack.go b/libgo/go/runtime/lfstack.go
index 2f2958c8869..4787c5be3fa 100644
--- a/libgo/go/runtime/lfstack.go
+++ b/libgo/go/runtime/lfstack.go
@@ -3,10 +3,6 @@
// license that can be found in the LICENSE file.
// Lock-free stack.
-// Initialize head to 0, compare with 0 to test for emptiness.
-// The stack does not keep pointers to nodes,
-// so they can be garbage collected if there are no other pointers to nodes.
-// The following code runs only in non-preemptible contexts.
package runtime
@@ -15,36 +11,47 @@ import (
"unsafe"
)
-// Temporary for C code to call:
-//go:linkname lfstackpush runtime.lfstackpush
-//go:linkname lfstackpop runtime.lfstackpop
+// lfstack is the head of a lock-free stack.
+//
+// The zero value of lfstack is an empty list.
+//
+// This stack is intrusive. Nodes must embed lfnode as the first field.
+//
+// The stack does not keep GC-visible pointers to nodes, so the caller
+// is responsible for ensuring the nodes are not garbage collected
+// (typically by allocating them from manually-managed memory).
+type lfstack uint64
-func lfstackpush(head *uint64, node *lfnode) {
+func (head *lfstack) push(node *lfnode) {
node.pushcnt++
new := lfstackPack(node, node.pushcnt)
if node1 := lfstackUnpack(new); node1 != node {
- print("runtime: lfstackpush invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n")
- throw("lfstackpush")
+ print("runtime: lfstack.push invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n")
+ throw("lfstack.push")
}
for {
- old := atomic.Load64(head)
+ old := atomic.Load64((*uint64)(head))
node.next = old
- if atomic.Cas64(head, old, new) {
+ if atomic.Cas64((*uint64)(head), old, new) {
break
}
}
}
-func lfstackpop(head *uint64) unsafe.Pointer {
+func (head *lfstack) pop() unsafe.Pointer {
for {
- old := atomic.Load64(head)
+ old := atomic.Load64((*uint64)(head))
if old == 0 {
return nil
}
node := lfstackUnpack(old)
next := atomic.Load64(&node.next)
- if atomic.Cas64(head, old, next) {
+ if atomic.Cas64((*uint64)(head), old, next) {
return unsafe.Pointer(node)
}
}
}
+
+func (head *lfstack) empty() bool {
+ return atomic.Load64((*uint64)(head)) == 0
+}
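
lfstack packs a node pointer and an ABA push counter into a single uint64 and loops on Cas64. A self-contained toy with the same shape, using arena indices instead of raw pointers so it stays safe in ordinary Go (all names here are illustrative):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// A toy lock-free stack over a fixed arena of nodes. The head word packs
	// an arena index (offset by 1 so that 0 means "empty") with a per-node
	// push counter to defeat ABA, echoing lfstackPack/lfstackUnpack.
	type node struct {
		next    uint64 // packed value of the node below us
		pushcnt uint32
		val     int
	}

	type toyStack struct {
		head  uint64
		arena []node
	}

	func (s *toyStack) pack(idx uint32) uint64 {
		return uint64(s.arena[idx].pushcnt)<<32 | uint64(idx+1)
	}

	func (s *toyStack) push(idx uint32) {
		s.arena[idx].pushcnt++
		new := s.pack(idx)
		for {
			old := atomic.LoadUint64(&s.head)
			s.arena[idx].next = old
			if atomic.CompareAndSwapUint64(&s.head, old, new) {
				return
			}
		}
	}

	func (s *toyStack) pop() (idx uint32, ok bool) {
		for {
			old := atomic.LoadUint64(&s.head)
			if old == 0 {
				return 0, false
			}
			idx = uint32(old) - 1
			next := atomic.LoadUint64(&s.arena[idx].next)
			if atomic.CompareAndSwapUint64(&s.head, old, next) {
				return idx, true
			}
		}
	}

	func main() {
		s := &toyStack{arena: make([]node, 4)}
		s.arena[0].val, s.arena[1].val = 10, 20
		s.push(0)
		s.push(1)
		if i, ok := s.pop(); ok {
			fmt.Println(s.arena[i].val) // 20: LIFO order
		}
	}
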
diff --git a/libgo/go/runtime/lock_futex.go b/libgo/go/runtime/lock_futex.go
index 9877bc35606..7ddd3786624 100644
--- a/libgo/go/runtime/lock_futex.go
+++ b/libgo/go/runtime/lock_futex.go
@@ -50,6 +50,7 @@ const (
// affect mutex's state.
// We use the uintptr mutex.key and note.key as a uint32.
+//go:nosplit
func key32(p *uintptr) *uint32 {
return (*uint32)(unsafe.Pointer(p))
}
@@ -152,9 +153,17 @@ func notesleep(n *note) {
if gp != gp.m.g0 {
throw("notesleep not on g0")
}
+ ns := int64(-1)
+ if *cgo_yield != nil {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ ns = 10e6
+ }
for atomic.Load(key32(&n.key)) == 0 {
gp.m.blocked = true
- futexsleep(key32(&n.key), 0, -1)
+ futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
gp.m.blocked = false
}
}
@@ -168,9 +177,16 @@ func notetsleep_internal(n *note, ns int64) bool {
gp := getg()
if ns < 0 {
+ if *cgo_yield != nil {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ ns = 10e6
+ }
for atomic.Load(key32(&n.key)) == 0 {
gp.m.blocked = true
- futexsleep(key32(&n.key), 0, -1)
+ futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
gp.m.blocked = false
}
return true
@@ -182,8 +198,14 @@ func notetsleep_internal(n *note, ns int64) bool {
deadline := nanotime() + ns
for {
+ if *cgo_yield != nil && ns > 10e6 {
+ ns = 10e6
+ }
gp.m.blocked = true
futexsleep(key32(&n.key), 0, ns)
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
gp.m.blocked = false
if atomic.Load(key32(&n.key)) != 0 {
break
diff --git a/libgo/go/runtime/lock_sema.go b/libgo/go/runtime/lock_sema.go
index 57fee1985e3..52a2376dc5e 100644
--- a/libgo/go/runtime/lock_sema.go
+++ b/libgo/go/runtime/lock_sema.go
@@ -175,7 +175,16 @@ func notesleep(n *note) {
}
// Queued. Sleep.
gp.m.blocked = true
- semasleep(-1)
+ if *cgo_yield == nil {
+ semasleep(-1)
+ } else {
+ // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
+ const ns = 10e6
+ for atomic.Loaduintptr(&n.key) == 0 {
+ semasleep(ns)
+ asmcgocall(*cgo_yield, nil)
+ }
+ }
gp.m.blocked = false
}
@@ -198,7 +207,15 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
if ns < 0 {
// Queued. Sleep.
gp.m.blocked = true
- semasleep(-1)
+ if *cgo_yield == nil {
+ semasleep(-1)
+ } else {
+ // Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
+ const ns = 10e6
+ for semasleep(ns) < 0 {
+ asmcgocall(*cgo_yield, nil)
+ }
+ }
gp.m.blocked = false
return true
}
@@ -207,12 +224,18 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
for {
// Registered. Sleep.
gp.m.blocked = true
+ if *cgo_yield != nil && ns > 10e6 {
+ ns = 10e6
+ }
if semasleep(ns) >= 0 {
gp.m.blocked = false
// Acquired semaphore, semawakeup unregistered us.
// Done.
return true
}
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
gp.m.blocked = false
// Interrupted or timed out. Still registered. Semaphore not acquired.
ns = deadline - nanotime()
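
Both lock implementations now follow one pattern: instead of a single unbounded sleep, sleep in roughly 10ms slices and call the yield hook between slices so that libc interceptors (for example under TSAN) get a chance to run. A sketch of that loop in ordinary Go; waitWithPolling, done, and poll are hypothetical stand-ins for the runtime primitives:

	package main

	import (
		"fmt"
		"sync/atomic"
		"time"
	)

	// waitWithPolling blocks until *done becomes nonzero, waking every
	// interval to run poll, mirroring the cgo_yield loops above.
	func waitWithPolling(done *uint32, poll func(), interval time.Duration) {
		for atomic.LoadUint32(done) == 0 {
			time.Sleep(interval) // stands in for futexsleep/semasleep with a timeout
			if poll != nil {
				poll() // stands in for asmcgocall(*cgo_yield, nil)
			}
		}
	}

	func main() {
		var done uint32
		go func() {
			time.Sleep(30 * time.Millisecond)
			atomic.StoreUint32(&done, 1)
		}()
		waitWithPolling(&done, func() { fmt.Println("poll") }, 10*time.Millisecond)
		fmt.Println("awake")
	}
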
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 3912fc2da58..796cd8a7c64 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -122,7 +122,7 @@ const (
// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
_TinySize = 16
- _TinySizeClass = 2
+ _TinySizeClass = int8(2)
_FixAllocChunk = 16 << 10 // Chunk size for FixAlloc
_MaxMHeapList = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
@@ -159,7 +159,11 @@ const (
_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
_MHeapMap_Bits = _MHeapMap_TotalBits - _PageShift
- _MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
+ // _MaxMem is the maximum heap arena size minus 1.
+ //
+ // On 32-bit, this is also the maximum heap pointer value,
+ // since the arena starts at address 0.
+ _MaxMem = 1<<_MHeapMap_TotalBits - 1
// Max number of threads to run garbage collection.
// 2, 3, and 4 are all plausible maximums depending
@@ -167,8 +171,6 @@ const (
// collector scales well to 32 cpus.
_MaxGcproc = 32
- _MaxArena32 = 1<<32 - 1
-
// minLegalPointer is the smallest possible legal pointer.
// This is the smallest possible architectural page size,
// since we assume that the first page is never mapped.
@@ -250,18 +252,21 @@ func mallocinit() {
throw("bad system page size")
}
- var p, bitmapSize, spansSize, pSize, limit uintptr
+ // The auxiliary regions start at p and are laid out in the
+ // following order: spans, bitmap, arena.
+ var p, pSize uintptr
var reserved bool
- // limit = runtime.memlimit();
- // See https://golang.org/issue/5049
- // TODO(rsc): Fix after 1.1.
- limit = 0
+ // The spans array holds one *mspan per _PageSize of arena.
+ var spansSize uintptr = (_MaxMem + 1) / _PageSize * sys.PtrSize
+ spansSize = round(spansSize, _PageSize)
+ // The bitmap holds 2 bits per word of arena.
+ var bitmapSize uintptr = (_MaxMem + 1) / (sys.PtrSize * 8 / 2)
+ bitmapSize = round(bitmapSize, _PageSize)
// Set up the allocation arena, a contiguous area of memory where
- // allocated data will be found. The arena begins with a bitmap large
- // enough to hold 2 bits per allocated word.
- if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
+ // allocated data will be found.
+ if sys.PtrSize == 8 {
// On a 64-bit machine, allocate from a single contiguous reservation.
// 512 GB (MaxMem) should be big enough for now.
//
@@ -294,9 +299,7 @@ func mallocinit() {
// On AIX, mmap adresses range start at 0x07000000_00000000 for 64 bits
// processes.
arenaSize := round(_MaxMem, _PageSize)
- bitmapSize = arenaSize / (sys.PtrSize * 8 / 2)
- spansSize = arenaSize / _PageSize * sys.PtrSize
- spansSize = round(spansSize, _PageSize)
+ pSize = bitmapSize + spansSize + arenaSize + _PageSize
for i := 0; i <= 0x7f; i++ {
switch {
case GOARCH == "arm64" && GOOS == "darwin":
@@ -309,7 +312,6 @@ func mallocinit() {
default:
p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
}
- pSize = bitmapSize + spansSize + arenaSize + _PageSize
p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
if p != 0 || GOOS == "aix" { // Useless to loop on AIX, as i is forced to 1
break
@@ -327,6 +329,15 @@ func mallocinit() {
// When that gets used up, we'll start asking the kernel
// for any memory anywhere.
+ // We want to start the arena low, but if we're linked
+ // against C code, it's possible global constructors
+ // have called malloc and adjusted the process' brk.
+ // Query the brk so we can avoid trying to map the
+ // arena over it (which will cause the kernel to put
+ // the arena somewhere else, likely at a high
+ // address).
+ procBrk := sbrk0()
+
// If we fail to allocate, try again with a smaller arena.
// This is necessary on Android L where we share a process
// with ART, which reserves virtual memory aggressively.
@@ -340,15 +351,6 @@ func mallocinit() {
}
for _, arenaSize := range &arenaSizes {
- bitmapSize = (_MaxArena32 + 1) / (sys.PtrSize * 8 / 2)
- spansSize = (_MaxArena32 + 1) / _PageSize * sys.PtrSize
- if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
- bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
- arenaSize = bitmapSize * 8
- spansSize = arenaSize / _PageSize * sys.PtrSize
- }
- spansSize = round(spansSize, _PageSize)
-
// SysReserve treats the address we ask for, end, as a hint,
// not as an absolute requirement. If we ask for the end
// of the data segment but the operating system requires
@@ -360,6 +362,12 @@ func mallocinit() {
// to a MB boundary.
p = round(getEnd()+(1<<18), 1<<20)
pSize = bitmapSize + spansSize + arenaSize + _PageSize
+ if p <= procBrk && procBrk < p+pSize {
+ // Move the start above the brk,
+ // leaving some room for future brk
+ // expansion.
+ p = round(procBrk+(1<<20), 1<<20)
+ }
p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
if p != 0 {
break
@@ -374,18 +382,22 @@ func mallocinit() {
// so SysReserve can give us a PageSize-unaligned pointer.
// To overcome this we ask for PageSize more and round up the pointer.
p1 := round(p, _PageSize)
+ pSize -= p1 - p
spansStart := p1
- mheap_.bitmap = p1 + spansSize + bitmapSize
+ p1 += spansSize
+ mheap_.bitmap = p1 + bitmapSize
+ p1 += bitmapSize
if sys.PtrSize == 4 {
// Set arena_start such that we can accept memory
// reservations located anywhere in the 4GB virtual space.
mheap_.arena_start = 0
} else {
- mheap_.arena_start = p1 + (spansSize + bitmapSize)
+ mheap_.arena_start = p1
}
mheap_.arena_end = p + pSize
- mheap_.arena_used = p1 + (spansSize + bitmapSize)
+ mheap_.arena_used = p1
+ mheap_.arena_alloc = p1
mheap_.arena_reserved = reserved
if mheap_.arena_start&(_PageSize-1) != 0 {
@@ -404,62 +416,78 @@ func mallocinit() {
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
- if n > h.arena_end-h.arena_used {
- // We are in 32-bit mode, maybe we didn't use all possible address space yet.
- // Reserve some more space.
+ // strandLimit is the maximum number of bytes to strand from
+ // the current arena block. If we would need to strand more
+ // than this, we fall back to sysAlloc'ing just enough for
+ // this allocation.
+ const strandLimit = 16 << 20
+
+ if n > h.arena_end-h.arena_alloc {
+ // If we haven't grown the arena to _MaxMem yet, try
+ // to reserve some more address space.
p_size := round(n+_PageSize, 256<<20)
new_end := h.arena_end + p_size // Careful: can overflow
- if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxArena32 {
+ if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxMem {
// TODO: It would be bad if part of the arena
// is reserved and part is not.
var reserved bool
p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
if p == 0 {
- return nil
+ // TODO: Try smaller reservation
+ // growths in case we're in a crowded
+ // 32-bit address space.
+ goto reservationFailed
}
// p can be just about anywhere in the address
// space, including before arena_end.
if p == h.arena_end {
+ // The new block is contiguous with
+ // the current block. Extend the
+ // current arena block.
h.arena_end = new_end
h.arena_reserved = reserved
- } else if h.arena_end < p && p+p_size-h.arena_start-1 <= _MaxArena32 {
+ } else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem && h.arena_end-h.arena_alloc < strandLimit {
+ // We were able to reserve more memory
+ // within the arena space, but it's
+ // not contiguous with our previous
+ // reservation. It could be before or
+ // after our current arena_used.
+ //
// Keep everything page-aligned.
// Our pages are bigger than hardware pages.
h.arena_end = p + p_size
- used := p + (-p & (_PageSize - 1))
- h.mapBits(used)
- h.mapSpans(used)
- h.arena_used = used
+ p = round(p, _PageSize)
+ h.arena_alloc = p
h.arena_reserved = reserved
} else {
- // We got a mapping, but it's not
- // linear with our current arena, so
- // we can't use it.
+ // We got a mapping, but either
+ //
+ // 1) It's not in the arena, so we
+ // can't use it. (This should never
+ // happen on 32-bit.)
+ //
+ // 2) We would need to discard too
+ // much of our current arena block to
+ // use it.
//
- // TODO: Make it possible to allocate
- // from this. We can't decrease
- // arena_used, but we could introduce
- // a new variable for the current
- // allocation position.
-
// We haven't added this allocation to
// the stats, so subtract it from a
// fake stat (but avoid underflow).
+ //
+ // We'll fall back to a small sysAlloc.
stat := uint64(p_size)
sysFree(unsafe.Pointer(p), p_size, &stat)
}
}
}
- if n <= h.arena_end-h.arena_used {
+ if n <= h.arena_end-h.arena_alloc {
// Keep taking from our reservation.
- p := h.arena_used
+ p := h.arena_alloc
sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
- h.mapBits(p + n)
- h.mapSpans(p + n)
- h.arena_used = p + n
- if raceenabled {
- racemapshadow(unsafe.Pointer(p), n)
+ h.arena_alloc += n
+ if h.arena_alloc > h.arena_used {
+ h.setArenaUsed(h.arena_alloc, true)
}
if p&(_PageSize-1) != 0 {
@@ -468,8 +496,9 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
return unsafe.Pointer(p)
}
+reservationFailed:
// If using 64-bit, our reservation is all we have.
- if h.arena_end-h.arena_start > _MaxArena32 {
+ if sys.PtrSize != 4 {
return nil
}
@@ -481,28 +510,18 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
return nil
}
- if p < h.arena_start || p+p_size-h.arena_start > _MaxArena32 {
- top := ^uintptr(0)
- if top-h.arena_start-1 > _MaxArena32 {
- top = h.arena_start + _MaxArena32 + 1
- }
+ if p < h.arena_start || p+p_size-h.arena_start > _MaxMem {
+ // This shouldn't be possible because _MaxMem is the
+ // whole address space on 32-bit.
+ top := uint64(h.arena_start) + _MaxMem
print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
return nil
}
- p_end := p + p_size
p += -p & (_PageSize - 1)
if p+n > h.arena_used {
- h.mapBits(p + n)
- h.mapSpans(p + n)
- h.arena_used = p + n
- if p_end > h.arena_end {
- h.arena_end = p_end
- }
- if raceenabled {
- racemapshadow(unsafe.Pointer(p), n)
- }
+ h.setArenaUsed(p+n, true)
}
if p&(_PageSize-1) != 0 {
@@ -525,7 +544,7 @@ func nextFreeFast(s *mspan) gclinkptr {
if freeidx%64 == 0 && freeidx != s.nelems {
return 0
}
- s.allocCache >>= (theBit + 1)
+ s.allocCache >>= uint(theBit + 1)
s.freeindex = freeidx
v := gclinkptr(result*s.elemsize + s.base())
s.allocCount++
@@ -541,8 +560,8 @@ func nextFreeFast(s *mspan) gclinkptr {
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
-func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
- s = c.alloc[sizeclass]
+func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, shouldhelpgc bool) {
+ s = c.alloc[spc]
shouldhelpgc = false
freeIndex := s.nextFreeIndex()
if freeIndex == s.nelems {
@@ -552,10 +571,10 @@ func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc
throw("s.allocCount != s.nelems && freeIndex == s.nelems")
}
systemstack(func() {
- c.refill(int32(sizeclass))
+ c.refill(spc)
})
shouldhelpgc = true
- s = c.alloc[sizeclass]
+ s = c.alloc[spc]
freeIndex = s.nextFreeIndex()
}
@@ -693,10 +712,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
return x
}
// Allocate a new maxTinySize block.
- span := c.alloc[tinySizeClass]
+ span := c.alloc[tinySpanClass]
v := nextFreeFast(span)
if v == 0 {
- v, _, shouldhelpgc = c.nextFree(tinySizeClass)
+ v, _, shouldhelpgc = c.nextFree(tinySpanClass)
}
x = unsafe.Pointer(v)
(*[2]uint64)(x)[0] = 0
@@ -716,10 +735,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
}
size = uintptr(class_to_size[sizeclass])
- span := c.alloc[sizeclass]
+ spc := makeSpanClass(sizeclass, noscan)
+ span := c.alloc[spc]
v := nextFreeFast(span)
if v == 0 {
- v, span, shouldhelpgc = c.nextFree(sizeclass)
+ v, span, shouldhelpgc = c.nextFree(spc)
}
x = unsafe.Pointer(v)
if needzero && span.needzero != 0 {
@@ -730,7 +750,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var s *mspan
shouldhelpgc = true
systemstack(func() {
- s = largeAlloc(size, needzero)
+ s = largeAlloc(size, needzero, noscan)
})
s.freeindex = 1
s.allocCount = 1
@@ -739,9 +759,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
var scanSize uintptr
- if noscan {
- heapBitsSetTypeNoScan(uintptr(x))
- } else {
+ if !noscan {
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
@@ -803,8 +821,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
assistG.gcAssistBytes -= int64(size - dataSize)
}
- if shouldhelpgc && gcShouldStart(false) {
- gcStart(gcBackgroundMode, false)
+ if shouldhelpgc {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(gcBackgroundMode, t)
+ }
}
if getg().preempt {
@@ -818,7 +838,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
return x
}
-func largeAlloc(size uintptr, needzero bool) *mspan {
+func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
// print("largeAlloc size=", size, "\n")
if size+_PageSize < size {
@@ -834,7 +854,7 @@ func largeAlloc(size uintptr, needzero bool) *mspan {
// pays the debt down to npage pages.
deductSweepCredit(npages*_PageSize, npages)
- s := mheap_.alloc(npages, 0, true, needzero)
+ s := mheap_.alloc(npages, makeSpanClass(0, noscan), true, needzero)
if s == nil {
throw("out of memory")
}
@@ -924,7 +944,7 @@ func nextSampleNoFP() int32 {
rate = 0x3fffffff
}
if rate != 0 {
- return int32(int(fastrand()) % (2 * rate))
+ return int32(fastrand() % uint32(2*rate))
}
return 0
}
diff --git a/libgo/go/runtime/malloc_test.go b/libgo/go/runtime/malloc_test.go
index bc5530c2f23..0d43cf65976 100644
--- a/libgo/go/runtime/malloc_test.go
+++ b/libgo/go/runtime/malloc_test.go
@@ -6,6 +6,8 @@ package runtime_test
import (
"flag"
+ "fmt"
+ "reflect"
. "runtime"
"testing"
"time"
@@ -22,24 +24,62 @@ func TestMemStats(t *testing.T) {
st := new(MemStats)
ReadMemStats(st)
- // Everything except HeapReleased, HeapIdle, and NumGC,
- // because they indeed can be 0.
- if st.Alloc == 0 || st.TotalAlloc == 0 || st.Sys == 0 || st.Lookups == 0 ||
- st.Mallocs == 0 || st.Frees == 0 || st.HeapAlloc == 0 || st.HeapSys == 0 ||
- st.HeapInuse == 0 || st.HeapObjects == 0 || st.StackInuse == 0 ||
- st.StackSys == 0 || st.MSpanInuse == 0 || st.MSpanSys == 0 || st.MCacheInuse == 0 ||
- st.MCacheSys == 0 || st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 ||
- st.NextGC == 0 || st.NumForcedGC == 0 {
- t.Fatalf("Zero value: %+v", *st)
- }
-
- if st.Alloc > 1e10 || st.TotalAlloc > 1e11 || st.Sys > 1e10 || st.Lookups > 1e10 ||
- st.Mallocs > 1e10 || st.Frees > 1e10 || st.HeapAlloc > 1e10 || st.HeapSys > 1e10 ||
- st.HeapIdle > 1e10 || st.HeapInuse > 1e10 || st.HeapObjects > 1e10 || st.StackInuse > 1e10 ||
- st.StackSys > 1e10 || st.MSpanInuse > 1e10 || st.MSpanSys > 1e10 || st.MCacheInuse > 1e10 ||
- st.MCacheSys > 1e10 || st.BuckHashSys > 1e10 || st.GCSys > 1e10 || st.OtherSys > 1e10 ||
- st.NextGC > 1e10 || st.NumGC > 1e9 || st.NumForcedGC > 1e9 || st.PauseTotalNs > 1e11 {
- t.Fatalf("Insanely high value (overflow?): %+v", *st)
+ nz := func(x interface{}) error {
+ if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
+ return nil
+ }
+ return fmt.Errorf("zero value")
+ }
+ le := func(thresh float64) func(interface{}) error {
+ return func(x interface{}) error {
+ if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
+ return nil
+ }
+ return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
+ }
+ }
+ eq := func(x interface{}) func(interface{}) error {
+ return func(y interface{}) error {
+ if x == y {
+ return nil
+ }
+ return fmt.Errorf("want %v", x)
+ }
+ }
+ // Of the uint fields, HeapReleased, HeapIdle can be 0.
+ // PauseTotalNs can be 0 if timer resolution is poor.
+ //
+ // TODO: Test that GCCPUFraction is <= 0.99. This currently
+ // fails on windows/386. (Issue #19319)
+ fields := map[string][]func(interface{}) error{
+ "Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
+ "Lookups": {nz, le(1e10)}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
+ "HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
+ "HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
+ "StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
+ "MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
+ "MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
+ "BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
+ "NextGC": {nz, le(1e10)}, "LastGC": {nz},
+ "PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
+ "NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
+ "GCCPUFraction": nil, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
+ "BySize": nil,
+ }
+
+ rst := reflect.ValueOf(st).Elem()
+ for i := 0; i < rst.Type().NumField(); i++ {
+ name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
+ checks, ok := fields[name]
+ if !ok {
+ t.Errorf("unknown MemStats field %s", name)
+ continue
+ }
+ for _, check := range checks {
+ if err := check(val); err != nil {
+ t.Errorf("%s = %v: %s", name, val, err)
+ }
+ }
}
if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
st.BuckHashSys+st.GCSys+st.OtherSys {
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index 9b5b051250e..37c959f8327 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -10,6 +10,7 @@ import (
"reflect"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"testing"
@@ -594,6 +595,14 @@ func TestMapLargeValNoPointer(t *testing.T) {
}
}
+// Test that making a map with a large or invalid hint
+// doesn't panic. (Issue 19926).
+func TestIgnoreBogusMapHint(t *testing.T) {
+ for _, hint := range []int64{-1, 1 << 62} {
+ _ = make(map[int]int, hint)
+ }
+}
+
func benchmarkMapPop(b *testing.B, n int) {
m := map[int]int{}
for i := 0; i < b.N; i++ {
@@ -625,3 +634,86 @@ func TestNonEscapingMap(t *testing.T) {
t.Fatalf("want 0 allocs, got %v", n)
}
}
+
+func benchmarkMapAssignInt32(b *testing.B, n int) {
+ a := make(map[int32]int)
+ for i := 0; i < b.N; i++ {
+ a[int32(i&(n-1))] = i
+ }
+}
+
+func benchmarkMapDeleteInt32(b *testing.B, n int) {
+ a := make(map[int32]int)
+ for i := 0; i < n*b.N; i++ {
+ a[int32(i)] = i
+ }
+ b.ResetTimer()
+ for i := 0; i < n*b.N; i = i + n {
+ delete(a, int32(i))
+ }
+}
+
+func benchmarkMapAssignInt64(b *testing.B, n int) {
+ a := make(map[int64]int)
+ for i := 0; i < b.N; i++ {
+ a[int64(i&(n-1))] = i
+ }
+}
+
+func benchmarkMapDeleteInt64(b *testing.B, n int) {
+ a := make(map[int64]int)
+ for i := 0; i < n*b.N; i++ {
+ a[int64(i)] = i
+ }
+ b.ResetTimer()
+ for i := 0; i < n*b.N; i = i + n {
+ delete(a, int64(i))
+ }
+}
+
+func benchmarkMapAssignStr(b *testing.B, n int) {
+ k := make([]string, n)
+ for i := 0; i < len(k); i++ {
+ k[i] = strconv.Itoa(i)
+ }
+ b.ResetTimer()
+ a := make(map[string]int)
+ for i := 0; i < b.N; i++ {
+ a[k[i&(n-1)]] = i
+ }
+}
+
+func benchmarkMapDeleteStr(b *testing.B, n int) {
+ k := make([]string, n*b.N)
+ for i := 0; i < n*b.N; i++ {
+ k[i] = strconv.Itoa(i)
+ }
+ a := make(map[string]int)
+ for i := 0; i < n*b.N; i++ {
+ a[k[i]] = i
+ }
+ b.ResetTimer()
+ for i := 0; i < n*b.N; i = i + n {
+ delete(a, k[i])
+ }
+}
+
+func runWith(f func(*testing.B, int), v ...int) func(*testing.B) {
+ return func(b *testing.B) {
+ for _, n := range v {
+ b.Run(strconv.Itoa(n), func(b *testing.B) { f(b, n) })
+ }
+ }
+}
+
+func BenchmarkMapAssign(b *testing.B) {
+ b.Run("Int32", runWith(benchmarkMapAssignInt32, 1<<8, 1<<16))
+ b.Run("Int64", runWith(benchmarkMapAssignInt64, 1<<8, 1<<16))
+ b.Run("Str", runWith(benchmarkMapAssignStr, 1<<8, 1<<16))
+}
+
+func BenchmarkMapDelete(b *testing.B) {
+ b.Run("Int32", runWith(benchmarkMapDeleteInt32, 1, 2, 4))
+ b.Run("Int64", runWith(benchmarkMapDeleteInt64, 1, 2, 4))
+ b.Run("Str", runWith(benchmarkMapDeleteStr, 1, 2, 4))
+}
diff --git a/libgo/go/runtime/mapspeed_test.go b/libgo/go/runtime/mapspeed_test.go
index ac93119d77d..aec0c51f3fd 100644
--- a/libgo/go/runtime/mapspeed_test.go
+++ b/libgo/go/runtime/mapspeed_test.go
@@ -5,6 +5,7 @@ package runtime_test
import (
"fmt"
+ "strconv"
"strings"
"testing"
)
@@ -308,6 +309,20 @@ func BenchmarkSmallKeyMap(b *testing.B) {
}
}
+func BenchmarkMapPopulate(b *testing.B) {
+ for size := 1; size < 1000000; size *= 10 {
+ b.Run(strconv.Itoa(size), func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ m := make(map[int]bool)
+ for j := 0; j < size; j++ {
+ m[j] = true
+ }
+ }
+ })
+ }
+}
+
type ComplexAlgKey struct {
a, b, c int64
_ int
diff --git a/libgo/go/runtime/mbarrier.go b/libgo/go/runtime/mbarrier.go
index 3a463c8b1ba..d54016f0ba9 100644
--- a/libgo/go/runtime/mbarrier.go
+++ b/libgo/go/runtime/mbarrier.go
@@ -156,6 +156,11 @@ func gcmarkwb_m(slot *uintptr, ptr uintptr) {
// combine the read and the write. Checking inheap is
// insufficient since we need to track changes to
// roots outside the heap.
+ //
+ // Note: profbuf.go omits a barrier during signal handler
+ // profile logging; that's safe only because this deletion barrier exists.
+ // If we remove the deletion barrier, we'll have to work out
+ // a new way to handle the profile logging.
if slot1 := uintptr(unsafe.Pointer(slot)); slot1 >= minPhysPageSize {
if optr := *slot; optr != 0 {
shade(optr)
@@ -238,6 +243,7 @@ func writebarrierptr_prewrite(dst *uintptr, src uintptr) {
}
// typedmemmove copies a value of type t to dst from src.
+// Must be nosplit, see #16026.
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if typ.kind&kindNoPointers == 0 {
diff --git a/libgo/go/runtime/mbitmap.go b/libgo/go/runtime/mbitmap.go
index a7ccc650ada..d1a58202352 100644
--- a/libgo/go/runtime/mbitmap.go
+++ b/libgo/go/runtime/mbitmap.go
@@ -45,6 +45,11 @@
// not checkmarked, and is the dead encoding.
// These properties must be preserved when modifying the encoding.
//
+// The bitmap for noscan spans is not maintained. Code must ensure
+// that an object is scannable before consulting its bitmap by
+// checking either the noscan bit in the span or by consulting its
+// type's information.
+//
// Checkmarks
//
// In a concurrent garbage collector, one worries about failing to mark
@@ -134,13 +139,9 @@ func subtract1(p *byte) *byte {
return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}
-// mHeap_MapBits is called each time arena_used is extended.
-// It maps any additional bitmap memory needed for the new arena memory.
-// It must be called with the expected new value of arena_used,
-// *before* h.arena_used has been updated.
-// Waiting to update arena_used until after the memory has been mapped
-// avoids faults when other threads try access the bitmap immediately
-// after observing the change to arena_used.
+// mapBits maps any additional bitmap memory needed for the new arena memory.
+//
+// Don't call this directly. Call mheap.setArenaUsed.
//
//go:nowritebarrier
func (h *mheap) mapBits(arena_used uintptr) {
@@ -186,10 +187,8 @@ type markBits struct {
//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
- whichByte := allocBitIndex / 8
- whichBit := allocBitIndex % 8
- bytePtr := addb(s.allocBits, whichByte)
- return markBits{bytePtr, uint8(1 << whichBit), allocBitIndex}
+ bytep, mask := s.allocBits.bitp(allocBitIndex)
+ return markBits{bytep, mask, allocBitIndex}
}
// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
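
The gcBits.bitp helper this patch switches to centralizes the byte/mask split the old code spelled out inline. A self-contained toy of the same arithmetic (bitp here is a local stand-in, not the runtime's method):

```go
package main

import "fmt"

// bitp models the index -> (byte, mask) split used by the allocation
// bitmap: index/8 picks the byte, 1<<(index%8) picks the bit within it.
func bitp(bits []byte, index uint) (bytep *byte, mask byte) {
	return &bits[index/8], 1 << (index % 8)
}

func main() {
	bits := make([]byte, 4)
	bytep, mask := bitp(bits, 11) // object 11 lives in byte 1, bit 3
	*bytep |= mask                // mark it allocated
	fmt.Printf("%08b\n", bits[1]) // 00001000
}
```
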
@@ -197,7 +196,7 @@ func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
- bytes := (*[8]uint8)(unsafe.Pointer(addb(s.allocBits, whichByte)))
+ bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
aCache := uint64(0)
aCache |= uint64(bytes[0])
aCache |= uint64(bytes[1]) << (1 * 8)
@@ -248,7 +247,7 @@ func (s *mspan) nextFreeIndex() uintptr {
return snelems
}
- s.allocCache >>= (bitIndex + 1)
+ s.allocCache >>= uint(bitIndex + 1)
sfreeindex = result + 1
if sfreeindex%64 == 0 && sfreeindex != snelems {
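<br/>
allocCache holds the inverted allocation bits for the next 64 objects, so locating a free slot is a count-trailing-zeros, and the shift by uint(bitIndex+1) above discards everything up to and including the slot just found. A toy model of the trick:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var allocBits uint64 = 0b10111 // objects 0,1,2,4 allocated; 3 and 5+ free
	cache := ^allocBits            // invert: free slots become 1 bits
	free := bits.TrailingZeros64(cache)
	fmt.Println("next free index:", free) // 3
	cache >>= uint(free + 1) // consume through that slot, as nextFreeIndex does
	fmt.Println("next free after that:", bits.TrailingZeros64(cache)+free+1) // 5
}
```
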
@@ -269,10 +268,8 @@ func (s *mspan) isFree(index uintptr) bool {
if index < s.freeindex {
return false
}
- whichByte := index / 8
- whichBit := index % 8
- byteVal := *addb(s.allocBits, whichByte)
-	return byteVal&uint8(1<<whichBit) == 0
+	bytep, mask := s.allocBits.bitp(index)
+	return *bytep&mask == 0
 }
@@ -404,7 +394,7 @@ func heapBitsForObject(p, refBase, refOff uintptr, forStack bool) (base uintptr,
// Consult the span table to find the block beginning.
s = mheap_.spans[idx]
if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
- if s == nil || s.state == _MSpanStack || forStack {
+ if s == nil || s.state == _MSpanManual || forStack {
// If s is nil, the virtual address has never been part of the heap.
// This pointer may be to some mmap'd region, so we allow it.
// Pointers into stacks are also ok, the runtime manages these explicitly.
@@ -434,6 +424,7 @@ func heapBitsForObject(p, refBase, refOff uintptr, forStack bool) (base uintptr,
print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
gcDumpObject("object", refBase, refOff)
}
+ getg().m.traceback = 2
throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}
return
@@ -524,16 +515,6 @@ func (h heapBits) isPointer() bool {
return h.bits()&bitPointer != 0
}
-// hasPointers reports whether the given object has any pointers.
-// It must be told how large the object at h is for efficiency.
-// h must describe the initial word of the object.
-func (h heapBits) hasPointers(size uintptr) bool {
- if size == sys.PtrSize { // 1-word objects are always pointers
- return true
- }
- return (*h.bitp>>h.shift)&bitScan != 0
-}
-
// isCheckmarked reports whether the heap bits have the checkmarked bit set.
// It must be told how large the object at h is, because the encoding of the
// checkmark bit varies by size.
@@ -839,23 +820,23 @@ var oneBitCount = [256]uint8{
4, 5, 5, 6, 5, 6, 6, 7,
5, 6, 6, 7, 6, 7, 7, 8}
-// countFree runs through the mark bits in a span and counts the number of free objects
-// in the span.
+// countAlloc returns the number of objects allocated in span s by
+// scanning the allocation bitmap.
// TODO:(rlh) Use popcount intrinsic.
-func (s *mspan) countFree() int {
+func (s *mspan) countAlloc() int {
count := 0
maxIndex := s.nelems / 8
for i := uintptr(0); i < maxIndex; i++ {
- mrkBits := *addb(s.gcmarkBits, i)
+ mrkBits := *s.gcmarkBits.bytep(i)
count += int(oneBitCount[mrkBits])
}
if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
- mrkBits := *addb(s.gcmarkBits, maxIndex)
+ mrkBits := *s.gcmarkBits.bytep(maxIndex)
mask := uint8((1 << bitsInLastByte) - 1)
bits := mrkBits & mask
count += int(oneBitCount[bits])
}
- return int(s.nelems) - count
+ return count
}
// heapBitsSetType records that the new allocation [x, x+size)
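
countAlloc is a plain popcount over the mark bitmap; the 256-entry oneBitCount table is one way to spell it, and math/bits.OnesCount8 (or the popcount intrinsic the TODO asks for) computes the same thing. A self-contained equivalent, masking the partial final byte the same way:

```go
package main

import (
	"fmt"
	"math/bits"
)

// countAlloc counts the marked objects among nelems mark bits.
func countAlloc(markBits []byte, nelems int) int {
	count := 0
	for i := 0; i < nelems/8; i++ {
		count += bits.OnesCount8(markBits[i])
	}
	if rem := nelems % 8; rem != 0 {
		mask := byte(1<<uint(rem) - 1) // keep only the valid low bits
		count += bits.OnesCount8(markBits[nelems/8] & mask)
	}
	return count
}

func main() {
	fmt.Println(countAlloc([]byte{0xFF, 0x0F}, 12)) // 12: all objects marked
}
```
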
@@ -1076,7 +1057,9 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
endnb += endnb
}
// Truncate to a multiple of original ptrmask.
- endnb = maxBits / nb * nb
+ // Because nb+nb <= maxBits, nb fits in a byte.
+ // Byte division is cheaper than uintptr division.
+ endnb = uintptr(maxBits/byte(nb)) * nb
+	pbits &= 1<<endnb - 1
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ ... @@ func (c *mcentral) cacheSpan() *mspan {
 	usedBytes := uintptr(s.allocCount) * s.elemsize
-	if usedBytes > 0 {
- reimburseSweepCredit(usedBytes)
- }
atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
if trace.enabled {
// heap_live changed.
@@ -150,6 +166,10 @@ func (c *mcentral) uncacheSpan(s *mspan) {
// mCentral_CacheSpan conservatively counted
// unallocated slots in heap_live. Undo this.
atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
+ // cacheSpan updated alloc assuming all objects on s
+ // were going to be allocated. Adjust for any that
+ // weren't.
+ atomic.Xadd64(&c.nmalloc, -int64(n))
}
unlock(&c.lock)
}
@@ -205,11 +225,11 @@ func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
- npages := uintptr(class_to_allocnpages[c.sizeclass])
- size := uintptr(class_to_size[c.sizeclass])
+ npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
+ size := uintptr(class_to_size[c.spanclass.sizeclass()])
n := (npages << _PageShift) / size
- s := mheap_.alloc(npages, c.sizeclass, false, true)
+ s := mheap_.alloc(npages, c.spanclass, false, true)
if s == nil {
return nil
}
diff --git a/libgo/go/runtime/mfinal.go b/libgo/go/runtime/mfinal.go
index 229ccb55387..4353ee57569 100644
--- a/libgo/go/runtime/mfinal.go
+++ b/libgo/go/runtime/mfinal.go
@@ -12,8 +12,12 @@ import (
"unsafe"
)
+// finblock is an array of finalizers to be executed. finblocks are
+// arranged in a linked list for the finalizer queue.
+//
// finblock is allocated from non-GC'd memory, so any heap pointers
-// must be specially handled.
+// must be specially handled. GC currently assumes that the finalizer
+// queue does not grow during marking (but it can shrink).
//
//go:notinheap
type finblock struct {
@@ -42,6 +46,16 @@ type finalizer struct {
}
func queuefinalizer(p unsafe.Pointer, fn *funcval, ft *functype, ot *ptrtype) {
+ if gcphase != _GCoff {
+ // Currently we assume that the finalizer queue won't
+ // grow during marking so we don't have to rescan it
+ // during mark termination. If we ever need to lift
+ // this assumption, we can do it by adding the
+ // necessary barriers to queuefinalizer (which it may
+ // have automatically).
+ throw("queuefinalizer during GC")
+ }
+
lock(&finlock)
if finq == nil || finq.cnt == uint32(len(finq.fin)) {
if finc == nil {
@@ -399,7 +413,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
}
n = s.elemsize
- if s.sizeclass != 0 {
+ if s.spanclass.sizeclass() != 0 {
x = add(x, (uintptr(v)-uintptr(x))/n*n)
}
return
diff --git a/libgo/go/runtime/mfixalloc.go b/libgo/go/runtime/mfixalloc.go
index fe4b0fcf2a9..7496671fbec 100644
--- a/libgo/go/runtime/mfixalloc.go
+++ b/libgo/go/runtime/mfixalloc.go
@@ -29,7 +29,7 @@ type fixalloc struct {
first func(arg, p unsafe.Pointer) // called first time p is returned
arg unsafe.Pointer
list *mlink
- chunk unsafe.Pointer
+ chunk uintptr // use uintptr instead of unsafe.Pointer to avoid write barriers
nchunk uint32
inuse uintptr // in-use bytes now
stat *uint64
@@ -54,7 +54,7 @@ func (f *fixalloc) init(size uintptr, first func(arg, p unsafe.Pointer), arg uns
f.first = first
f.arg = arg
f.list = nil
- f.chunk = nil
+ f.chunk = 0
f.nchunk = 0
f.inuse = 0
f.stat = stat
@@ -77,15 +77,15 @@ func (f *fixalloc) alloc() unsafe.Pointer {
return v
}
if uintptr(f.nchunk) < f.size {
- f.chunk = persistentalloc(_FixAllocChunk, 0, f.stat)
+ f.chunk = uintptr(persistentalloc(_FixAllocChunk, 0, f.stat))
f.nchunk = _FixAllocChunk
}
- v := f.chunk
+ v := unsafe.Pointer(f.chunk)
if f.first != nil {
f.first(f.arg, v)
}
- f.chunk = add(f.chunk, f.size)
+ f.chunk = f.chunk + f.size
f.nchunk -= uint32(f.size)
f.inuse += f.size
return v
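
Keeping the chunk cursor as a uintptr hides the pointer from the write barrier, which is safe only because fixalloc's chunks come from persistentalloc and are never freed or moved. A toy bump allocator illustrating the pattern (the arenas slice is a stand-in that keeps the memory reachable; ordinary Go code should not round-trip heap pointers through uintptr like this):

```go
package main

import (
	"fmt"
	"unsafe"
)

type bump struct {
	arenas [][]byte // pins chunks; the runtime uses persistentalloc instead
	chunk  uintptr  // cursor as uintptr: advancing it needs no write barrier
	nbytes uintptr
}

func (b *bump) alloc(size uintptr) unsafe.Pointer {
	if b.nbytes < size {
		arena := make([]byte, 1<<16)
		b.arenas = append(b.arenas, arena) // keep the backing array alive
		b.chunk = uintptr(unsafe.Pointer(&arena[0]))
		b.nbytes = uintptr(len(arena))
	}
	v := unsafe.Pointer(b.chunk)
	b.chunk += size
	b.nbytes -= size
	return v
}

func main() {
	var b bump
	p := (*int64)(b.alloc(8))
	*p = 42
	fmt.Println(*p)
}
```
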
diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go
index a4fc2be803c..31c4be86fe4 100644
--- a/libgo/go/runtime/mgc.go
+++ b/libgo/go/runtime/mgc.go
@@ -178,17 +178,21 @@ func gcinit() {
throw("size of Workbuf is suboptimal")
}
+ // No sweep on the first cycle.
+ mheap_.sweepdone = 1
+
+ // Set a reasonable initial GC trigger.
+ memstats.triggerRatio = 7 / 8.0
+
+ // Fake a heap_marked value so it looks like a trigger at
+ // heapminimum is the appropriate growth from heap_marked.
+ // This will go into computing the initial GC goal.
+ memstats.heap_marked = uint64(float64(heapminimum) / (1 + memstats.triggerRatio))
+
+ // Set gcpercent from the environment. This will also compute
+ // and set the GC trigger and goal.
_ = setGCPercent(readgogc())
- memstats.gc_trigger = heapminimum
- // Compute the goal heap size based on the trigger:
- // trigger = marked * (1 + triggerRatio)
- // marked = trigger / (1 + triggerRatio)
- // goal = marked * (1 + GOGC/100)
- // = trigger / (1 + triggerRatio) * (1 + GOGC/100)
- memstats.next_gc = uint64(float64(memstats.gc_trigger) / (1 + gcController.triggerRatio) * (1 + float64(gcpercent)/100))
- if gcpercent < 0 {
- memstats.next_gc = ^uint64(0)
- }
+
work.startSema = 1
work.markDoneSema = 1
}
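
The faked heap_marked is chosen so that the trigger later derived from it, trigger = heap_marked * (1 + triggerRatio), lands exactly on heapminimum. A worked sketch with the defaults (4 MB heapminimum at GOGC=100 is an assumption taken from the runtime's defaultHeapMinimum):

```go
package main

import "fmt"

func main() {
	const heapminimum = 4 << 20 // default heap minimum at GOGC=100
	triggerRatio := 7 / 8.0

	// gcinit's faked mark: inverting the trigger formula.
	heapMarked := uint64(float64(heapminimum) / (1 + triggerRatio))
	trigger := uint64(float64(heapMarked) * (1 + triggerRatio))

	fmt.Println(heapMarked, trigger) // 2236962 4194303: back at heapminimum, modulo rounding
}
```
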
@@ -224,12 +228,8 @@ func setGCPercent(in int32) (out int32) {
}
gcpercent = in
heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
- if gcController.triggerRatio > float64(gcpercent)/100 {
- gcController.triggerRatio = float64(gcpercent) / 100
- }
- // This is either in gcinit or followed by a STW GC, both of
- // which will reset other stats like memstats.gc_trigger and
- // memstats.next_gc to appropriate values.
+ // Update pacing in response to gcpercent change.
+ gcSetTriggerRatio(memstats.triggerRatio)
unlock(&mheap_.lock)
return out
}
@@ -239,7 +239,9 @@ func setGCPercent(in int32) (out int32) {
var gcphase uint32
// The compiler knows about this variable.
-// If you change it, you must change the compiler too.
+// If you change it, you must change builtin/runtime.go, too.
+// If you change the first four bytes, you must also change the write
+// barrier insertion code.
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
pad [3]byte // compiler uses 32-bit load for "enabled" field
@@ -329,10 +331,10 @@ var gcMarkWorkerModeStrings = [...]string{
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
-var gcController = gcControllerState{
- // Initial trigger ratio guess.
- triggerRatio: 7 / 8.0,
-}
+//
+// All fields of gcController are used only during a single mark
+// cycle.
+var gcController gcControllerState
type gcControllerState struct {
// scanWork is the total scan work performed this cycle. This
@@ -403,14 +405,6 @@ type gcControllerState struct {
// beginning of each cycle.
fractionalUtilizationGoal float64
- // triggerRatio is the heap growth ratio at which the garbage
- // collection cycle should start. E.g., if this is 0.6, then
- // GC should start when the live heap has reached 1.6 times
- // the heap size marked by the previous cycle. This should be
-	// ≤ GOGC/100 so the trigger heap size is less than the goal
-	// heap size. This is updated at the end of each cycle.
- triggerRatio float64
-
_ [sys.CacheLineSize]byte
// fractionalMarkWorkersNeeded is the number of fractional
@@ -439,7 +433,7 @@ func (c *gcControllerState) startCycle() {
// first cycle) or may be much smaller (resulting in a large
// error response).
if memstats.gc_trigger <= heapminimum {
- memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + c.triggerRatio))
+ memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + memstats.triggerRatio))
}
// Re-compute the heap goal for this cycle in case something
@@ -495,17 +489,12 @@ func (c *gcControllerState) startCycle() {
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called either under STW or
-// whenever memstats.heap_scan or memstats.heap_live is updated (with
-// mheap_.lock held).
+// whenever memstats.heap_scan, memstats.heap_live, or
+// memstats.next_gc is updated (with mheap_.lock held).
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
-//
-// TODO: Consider removing the periodic controller update altogether.
-// Since we switched to allocating black, in theory we shouldn't have
-// to change the assist ratio. However, this is still a useful hook
-// that we've found many uses for when experimenting.
func (c *gcControllerState) revise() {
// Compute the expected scan work remaining.
//
@@ -536,7 +525,7 @@ func (c *gcControllerState) revise() {
}
// Compute the heap distance remaining.
- heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
+ heapDistance := int64(memstats.next_gc) - int64(atomic.Load64(&memstats.heap_live))
if heapDistance <= 0 {
// This shouldn't happen, but if it does, avoid
// dividing by zero or setting the assist negative.
@@ -550,10 +539,15 @@ func (c *gcControllerState) revise() {
c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
}
-// endCycle updates the GC controller state at the end of the
-// concurrent part of the GC cycle.
-func (c *gcControllerState) endCycle() {
- h_t := c.triggerRatio // For debugging
+// endCycle computes the trigger ratio for the next cycle.
+func (c *gcControllerState) endCycle() float64 {
+ if work.userForced {
+ // Forced GC means this cycle didn't start at the
+ // trigger, so where it finished isn't good
+ // information about how to adjust the trigger.
+ // Just leave it where it is.
+ return memstats.triggerRatio
+ }
// Proportional response gain for the trigger controller. Must
// be in [0, 1]. Lower values smooth out transient effects but
@@ -582,25 +576,17 @@ func (c *gcControllerState) endCycle() {
utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
}
- triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)
+ triggerError := goalGrowthRatio - memstats.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-memstats.triggerRatio)
// Finally, we adjust the trigger for next time by this error,
// damped by the proportional gain.
- c.triggerRatio += triggerGain * triggerError
- if c.triggerRatio < 0 {
- // This can happen if the mutator is allocating very
- // quickly or the GC is scanning very slowly.
- c.triggerRatio = 0
- } else if c.triggerRatio > goalGrowthRatio*0.95 {
- // Ensure there's always a little margin so that the
- // mutator assist ratio isn't infinity.
- c.triggerRatio = goalGrowthRatio * 0.95
- }
+ triggerRatio := memstats.triggerRatio + triggerGain*triggerError
if debug.gcpacertrace > 0 {
// Print controller state in terms of the design
// document.
H_m_prev := memstats.heap_marked
+ h_t := memstats.triggerRatio
H_T := memstats.gc_trigger
h_a := actualGrowthRatio
H_a := memstats.heap_live
@@ -620,6 +606,8 @@ func (c *gcControllerState) endCycle() {
" u_a/u_g=", u_a/u_g,
"\n")
}
+
+ return triggerRatio
}
// enlistWorker encourages another dedicated mark worker to start on
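
endCycle is a proportional controller: the error term compares the goal growth ratio against what actually happened, weighted by how much CPU marking consumed relative to its goal. One update step, assuming the runtime's gain of 0.5 (triggerGain):

```go
package main

import "fmt"

// nextTriggerRatio performs one controller step as in endCycle.
func nextTriggerRatio(trigger, goalGrowth, actualGrowth, utilActual, utilGoal float64) float64 {
	const triggerGain = 0.5
	err := goalGrowth - trigger - utilActual/utilGoal*(actualGrowth-trigger)
	return trigger + triggerGain*err
}

func main() {
	// GOGC=100 (goal growth ratio 1.0), actual growth ratio 1.05, and
	// marking used 40% CPU against the 25% goal: the trigger moves
	// earlier (0.875 -> 0.7975) so the next cycle starts sooner.
	fmt.Println(nextTriggerRatio(0.875, 1.0, 1.05, 0.40, 0.25))
}
```
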
@@ -651,7 +639,7 @@ func (c *gcControllerState) enlistWorker() {
}
myID := gp.m.p.ptr().id
for tries := 0; tries < 5; tries++ {
- id := int32(fastrand() % uint32(gomaxprocs-1))
+ id := int32(fastrandn(uint32(gomaxprocs - 1)))
if id >= myID {
id++
}
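
fastrandn replaces the fastrand() % n pattern with a multiply-shift reduction (Lemire's method), trading a hardware divide for one multiply. Its core is small enough to show standalone:

```go
package main

import (
	"fmt"
	"math/rand"
)

// randn maps a uniform 32-bit value into [0, n) with one multiply and
// one shift, the same reduction fastrandn uses. It is slightly biased
// for n that don't divide 2^32, which is fine for scheduling decisions.
func randn(r, n uint32) uint32 {
	return uint32(uint64(r) * uint64(n) >> 32)
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(randn(rand.Uint32(), 7)) // values in [0, 7)
	}
}
```
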
@@ -701,9 +689,6 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
// This P is now dedicated to marking until the end of
// the concurrent mark phase.
_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
- // TODO(austin): This P isn't going to run anything
- // else for a while, so kick everything out of its run
- // queue.
} else {
if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
// No more workers are needed right now.
@@ -761,6 +746,120 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
return gp
}
+// gcSetTriggerRatio sets the trigger ratio and updates everything
+// derived from it: the absolute trigger, the heap goal, mark pacing,
+// and sweep pacing.
+//
+// This can be called any time. If GC is in the middle of a
+// concurrent phase, it will adjust the pacing of that phase.
+//
+// This depends on gcpercent, memstats.heap_marked, and
+// memstats.heap_live. These must be up to date.
+//
+// mheap_.lock must be held or the world must be stopped.
+func gcSetTriggerRatio(triggerRatio float64) {
+ // Set the trigger ratio, capped to reasonable bounds.
+ if triggerRatio < 0 {
+ // This can happen if the mutator is allocating very
+ // quickly or the GC is scanning very slowly.
+ triggerRatio = 0
+ } else if gcpercent >= 0 {
+ // Ensure there's always a little margin so that the
+ // mutator assist ratio isn't infinity.
+ maxTriggerRatio := 0.95 * float64(gcpercent) / 100
+ if triggerRatio > maxTriggerRatio {
+ triggerRatio = maxTriggerRatio
+ }
+ }
+ memstats.triggerRatio = triggerRatio
+
+ // Compute the absolute GC trigger from the trigger ratio.
+ //
+ // We trigger the next GC cycle when the allocated heap has
+ // grown by the trigger ratio over the marked heap size.
+ trigger := ^uint64(0)
+ if gcpercent >= 0 {
+ trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio))
+ // Don't trigger below the minimum heap size.
+ minTrigger := heapminimum
+ if !gosweepdone() {
+ // Concurrent sweep happens in the heap growth
+ // from heap_live to gc_trigger, so ensure
+ // that concurrent sweep has some heap growth
+ // in which to perform sweeping before we
+ // start the next GC cycle.
+ sweepMin := atomic.Load64(&memstats.heap_live) + sweepMinHeapDistance*uint64(gcpercent)/100
+ if sweepMin > minTrigger {
+ minTrigger = sweepMin
+ }
+ }
+ if trigger < minTrigger {
+ trigger = minTrigger
+ }
+ if int64(trigger) < 0 {
+ print("runtime: next_gc=", memstats.next_gc, " heap_marked=", memstats.heap_marked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
+ throw("gc_trigger underflow")
+ }
+ }
+ memstats.gc_trigger = trigger
+
+ // Compute the next GC goal, which is when the allocated heap
+ // has grown by GOGC/100 over the heap marked by the last
+ // cycle.
+ goal := ^uint64(0)
+ if gcpercent >= 0 {
+ goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
+ if goal < trigger {
+ // The trigger ratio is always less than GOGC/100, but
+ // other bounds on the trigger may have raised it.
+ // Push up the goal, too.
+ goal = trigger
+ }
+ }
+ memstats.next_gc = goal
+ if trace.enabled {
+ traceNextGC()
+ }
+
+ // Update mark pacing.
+ if gcphase != _GCoff {
+ gcController.revise()
+ }
+
+ // Update sweep pacing.
+ if gosweepdone() {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ // Concurrent sweep needs to sweep all of the in-use
+ // pages by the time the allocated heap reaches the GC
+ // trigger. Compute the ratio of in-use pages to sweep
+ // per byte allocated, accounting for the fact that
+ // some might already be swept.
+ heapLiveBasis := atomic.Load64(&memstats.heap_live)
+ heapDistance := int64(trigger) - int64(heapLiveBasis)
+ // Add a little margin so rounding errors and
+ // concurrent sweep are less likely to leave pages
+ // unswept when GC starts.
+ heapDistance -= 1024 * 1024
+ if heapDistance < _PageSize {
+ // Avoid setting the sweep ratio extremely high
+ heapDistance = _PageSize
+ }
+ pagesSwept := atomic.Load64(&mheap_.pagesSwept)
+ sweepDistancePages := int64(mheap_.pagesInUse) - int64(pagesSwept)
+ if sweepDistancePages <= 0 {
+ mheap_.sweepPagesPerByte = 0
+ } else {
+ mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
+ mheap_.sweepHeapLiveBasis = heapLiveBasis
+ // Write pagesSweptBasis last, since this
+ // signals concurrent sweeps to recompute
+ // their debt.
+ atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept)
+ }
+ }
+}
+
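
Everything gcSetTriggerRatio derives fits in a few lines of arithmetic. A sketch with made-up inputs (all variables are local stand-ins for the memstats and mheap fields):

```go
package main

import "fmt"

func main() {
	const pageSize = 8 << 10
	heapMarked := uint64(100 << 20) // live heap marked by the last cycle
	gcpercent := uint64(100)        // GOGC=100
	triggerRatio := 0.875

	trigger := uint64(float64(heapMarked) * (1 + triggerRatio)) // start the next GC here
	goal := heapMarked + heapMarked*gcpercent/100               // finish before here
	if goal < trigger {
		goal = trigger
	}

	// Sweep pacing: all in-use pages must be swept before heap_live
	// reaches the trigger, minus a 1 MB safety margin.
	heapLive := uint64(110 << 20)
	pagesInUse, pagesSwept := int64(20000), int64(5000)
	heapDistance := int64(trigger) - int64(heapLive) - 1024*1024
	if heapDistance < pageSize {
		heapDistance = pageSize
	}
	sweepPagesPerByte := float64(pagesInUse-pagesSwept) / float64(heapDistance)

	fmt.Printf("trigger=%dMB goal=%dMB sweep=%g pages/byte\n",
		trigger>>20, goal>>20, sweepPagesPerByte)
}
```
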
// gcGoalUtilization is the goal CPU utilization for background
// marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.25
@@ -783,10 +882,23 @@ const gcAssistTimeSlack = 5000
const gcOverAssistWork = 64 << 10
var work struct {
- full uint64 // lock-free list of full blocks workbuf
- empty uint64 // lock-free list of empty blocks workbuf
+ full lfstack // lock-free list of full blocks workbuf
+ empty lfstack // lock-free list of empty blocks workbuf
pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
+ wbufSpans struct {
+ lock mutex
+ // free is a list of spans dedicated to workbufs, but
+ // that don't currently contain any workbufs.
+ free mSpanList
+ // busy is a list of all spans containing workbufs on
+ // one of the workbuf lists.
+ busy mSpanList
+ }
+
+ // Restore 64-bit alignment on 32-bit.
+ _ uint32
+
// bytesMarked is the number of bytes marked this cycle. This
// includes bytes blackened in scanned objects, noscan objects
// that go straight to black, and permagrey objects scanned by
@@ -816,15 +928,13 @@ var work struct {
// should pass gcDrainBlock to gcDrain to block in the
// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
//
- // TODO: This is a temporary fallback to support
- // debug.gcrescanstacks > 0 and to work around some known
- // races. Remove this when we remove the debug option and fix
- // the races.
+ // TODO: This is a temporary fallback to work around races
+ // that cause early mark termination.
helperDrainBlock bool
// Number of roots of various root types. Set by gcMarkRootPrepare.
- nFlushCacheRoots int
- nDataRoots, nSpanRoots, nStackRoots, nRescanRoots int
+ nFlushCacheRoots int
+ nDataRoots, nSpanRoots, nStackRoots int
// markrootDone indicates that roots have been marked at least
// once during the current GC cycle. This is checked by root
@@ -860,6 +970,10 @@ var work struct {
// mode is the concurrency mode of the current GC cycle.
mode gcMode
+ // userForced indicates the current GC cycle was forced by an
+ // explicit user call.
+ userForced bool
+
// totaltime is the CPU nanoseconds spent in GC since the
// program started if debug.gctrace > 0.
totaltime int64
@@ -876,14 +990,19 @@ var work struct {
head, tail guintptr
}
- // rescan is a list of G's that need to be rescanned during
- // mark termination. A G adds itself to this list when it
- // first invalidates its stack scan.
- rescan struct {
+ // sweepWaiters is a list of blocked goroutines to wake when
+ // we transition from mark termination to sweep.
+ sweepWaiters struct {
lock mutex
- list []guintptr
+ head guintptr
}
+ // cycles is the number of completed GC cycles, where a GC
+ // cycle is sweep termination, mark, mark termination, and
+ // sweep. This differs from memstats.numgc, which is
+ // incremented at mark termination.
+ cycles uint32
+
// Timing/utilization stats for this cycle.
stwprocs, maxprocs int32
tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
@@ -899,7 +1018,94 @@ var work struct {
// garbage collection is complete. It may also block the entire
// program.
func GC() {
- gcStart(gcForceBlockMode, false)
+ // We consider a cycle to be: sweep termination, mark, mark
+ // termination, and sweep. This function shouldn't return
+ // until a full cycle has been completed, from beginning to
+ // end. Hence, we always want to finish up the current cycle
+ // and start a new one. That means:
+ //
+ // 1. In sweep termination, mark, or mark termination of cycle
+ // N, wait until mark termination N completes and transitions
+ // to sweep N.
+ //
+ // 2. In sweep N, help with sweep N.
+ //
+ // At this point we can begin a full cycle N+1.
+ //
+ // 3. Trigger cycle N+1 by starting sweep termination N+1.
+ //
+ // 4. Wait for mark termination N+1 to complete.
+ //
+ // 5. Help with sweep N+1 until it's done.
+ //
+ // This all has to be written to deal with the fact that the
+ // GC may move ahead on its own. For example, when we block
+ // until mark termination N, we may wake up in cycle N+2.
+
+ gp := getg()
+
+ // Prevent the GC phase or cycle count from changing.
+ lock(&work.sweepWaiters.lock)
+ n := atomic.Load(&work.cycles)
+ if gcphase == _GCmark {
+ // Wait until sweep termination, mark, and mark
+ // termination of cycle N complete.
+ gp.schedlink = work.sweepWaiters.head
+ work.sweepWaiters.head.set(gp)
+ goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1)
+ } else {
+ // We're in sweep N already.
+ unlock(&work.sweepWaiters.lock)
+ }
+
+ // We're now in sweep N or later. Trigger GC cycle N+1, which
+ // will first finish sweep N if necessary and then enter sweep
+ // termination N+1.
+ gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1})
+
+ // Wait for mark termination N+1 to complete.
+ lock(&work.sweepWaiters.lock)
+ if gcphase == _GCmark && atomic.Load(&work.cycles) == n+1 {
+ gp.schedlink = work.sweepWaiters.head
+ work.sweepWaiters.head.set(gp)
+ goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1)
+ } else {
+ unlock(&work.sweepWaiters.lock)
+ }
+
+ // Finish sweep N+1 before returning. We do this both to
+ // complete the cycle and because runtime.GC() is often used
+ // as part of tests and benchmarks to get the system into a
+ // relatively stable and isolated state.
+ for atomic.Load(&work.cycles) == n+1 && gosweepone() != ^uintptr(0) {
+ sweep.nbgsweep++
+ Gosched()
+ }
+
+ // Callers may assume that the heap profile reflects the
+ // just-completed cycle when this returns (historically this
+ // happened because this was a STW GC), but right now the
+ // profile still reflects mark termination N, not N+1.
+ //
+ // As soon as all of the sweep frees from cycle N+1 are done,
+ // we can go ahead and publish the heap profile.
+ //
+ // First, wait for sweeping to finish. (We know there are no
+ // more spans on the sweep queue, but we may be concurrently
+ // sweeping spans, so we have to wait.)
+ for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
+ Gosched()
+ }
+
+ // Now we're really done with sweeping, so we can publish the
+ // stable heap profile. Only do this if we haven't already hit
+ // another mark termination.
+ mp := acquirem()
+ cycle := atomic.Load(&work.cycles)
+ if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
+ mProf_PostSweep()
+ }
+ releasem(mp)
}
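
The practical consequence of the new protocol is that when runtime.GC returns, memstats and the heap profile reflect a fully completed cycle, which is why tests and benchmarks call it to reach a quiescent state. A minimal illustration:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	garbage := make([]byte, 32<<20)
	_ = garbage
	garbage = nil

	runtime.GC() // returns only after mark termination *and* sweep finish

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	fmt.Println(ms.HeapAlloc>>20, "MB live after a full cycle")
}
```
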
// gcMode indicates how concurrent a GC cycle should be.
@@ -911,24 +1117,75 @@ const (
gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user)
)
-// gcShouldStart returns true if the exit condition for the _GCoff
-// phase has been met. The exit condition should be tested when
-// allocating.
-//
-// If forceTrigger is true, it ignores the current heap size, but
-// checks all other conditions. In general this should be false.
-func gcShouldStart(forceTrigger bool) bool {
- return gcphase == _GCoff && (forceTrigger || memstats.heap_live >= memstats.gc_trigger) && memstats.enablegc && panicking == 0 && gcpercent >= 0
+// A gcTrigger is a predicate for starting a GC cycle. Specifically,
+// it is an exit condition for the _GCoff phase.
+type gcTrigger struct {
+ kind gcTriggerKind
+ now int64 // gcTriggerTime: current time
+ n uint32 // gcTriggerCycle: cycle number to start
+}
+
+type gcTriggerKind int
+
+const (
+ // gcTriggerAlways indicates that a cycle should be started
+ // unconditionally, even if GOGC is off or we're in a cycle
+ // right now. This cannot be consolidated with other cycles.
+ gcTriggerAlways gcTriggerKind = iota
+
+ // gcTriggerHeap indicates that a cycle should be started when
+ // the heap size reaches the trigger heap size computed by the
+ // controller.
+ gcTriggerHeap
+
+ // gcTriggerTime indicates that a cycle should be started when
+ // it's been more than forcegcperiod nanoseconds since the
+ // previous GC cycle.
+ gcTriggerTime
+
+ // gcTriggerCycle indicates that a cycle should be started if
+ // we have not yet started cycle number gcTrigger.n (relative
+ // to work.cycles).
+ gcTriggerCycle
+)
+
+// test returns true if the trigger condition is satisfied, meaning
+// that the exit condition for the _GCoff phase has been met. The exit
+// condition should be tested when allocating.
+func (t gcTrigger) test() bool {
+ if !memstats.enablegc || panicking != 0 {
+ return false
+ }
+ if t.kind == gcTriggerAlways {
+ return true
+ }
+ if gcphase != _GCoff || gcpercent < 0 {
+ return false
+ }
+ switch t.kind {
+ case gcTriggerHeap:
+ // Non-atomic access to heap_live for performance. If
+ // we are going to trigger on this, this thread just
+ // atomically wrote heap_live anyway and we'll see our
+ // own write.
+ return memstats.heap_live >= memstats.gc_trigger
+ case gcTriggerTime:
+ lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
+ return lastgc != 0 && t.now-lastgc > forcegcperiod
+ case gcTriggerCycle:
+ // t.n > work.cycles, but accounting for wraparound.
+ return int32(t.n-work.cycles) > 0
+ }
+ return true
}
-// gcStart transitions the GC from _GCoff to _GCmark (if mode ==
-// gcBackgroundMode) or _GCmarktermination (if mode !=
-// gcBackgroundMode) by performing sweep termination and GC
-// initialization.
+// gcStart transitions the GC from _GCoff to _GCmark (if
+// !mode.stwMark) or _GCmarktermination (if mode.stwMark) by
+// performing sweep termination and GC initialization.
//
// This may return without performing this transition in some cases,
// such as when called on a system stack or with locks held.
-func gcStart(mode gcMode, forceTrigger bool) {
+func gcStart(mode gcMode, trigger gcTrigger) {
// Since this is called from malloc and malloc is called in
// the guts of a number of libraries that might be holding
// locks, don't attempt to start GC in non-preemptible or
@@ -951,29 +1208,21 @@ func gcStart(mode gcMode, forceTrigger bool) {
//
// We check the transition condition continuously here in case
// this G gets delayed in to the next GC cycle.
- for (mode != gcBackgroundMode || gcShouldStart(forceTrigger)) && gosweepone() != ^uintptr(0) {
+ for trigger.test() && gosweepone() != ^uintptr(0) {
sweep.nbgsweep++
}
// Perform GC initialization and the sweep termination
// transition.
- //
- // If this is a forced GC, don't acquire the transition lock
- // or re-check the transition condition because we
- // specifically *don't* want to share the transition with
- // another thread.
- useStartSema := mode == gcBackgroundMode
- if useStartSema {
- semacquire(&work.startSema, 0)
- // Re-check transition condition under transition lock.
- if !gcShouldStart(forceTrigger) {
- semrelease(&work.startSema)
- return
- }
+ semacquire(&work.startSema)
+ // Re-check transition condition under transition lock.
+ if !trigger.test() {
+ semrelease(&work.startSema)
+ return
}
// For stats, check if this GC was forced by the user.
- forced := mode != gcBackgroundMode
+ work.userForced = trigger.kind == gcTriggerAlways || trigger.kind == gcTriggerCycle
// In gcstoptheworld debug mode, upgrade the mode accordingly.
// We do this after re-checking the transition condition so
@@ -988,7 +1237,7 @@ func gcStart(mode gcMode, forceTrigger bool) {
}
// Ok, we're doing it! Stop everybody else
- semacquire(&worldsema, 0)
+ semacquire(&worldsema)
if trace.enabled {
traceGCStart()
@@ -1000,13 +1249,13 @@ func gcStart(mode gcMode, forceTrigger bool) {
gcResetMarkState()
- now := nanotime()
work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
- work.tSweepTerm = now
- work.heap0 = memstats.heap_live
+ work.heap0 = atomic.Load64(&memstats.heap_live)
work.pauseNS = 0
work.mode = mode
+ now := nanotime()
+ work.tSweepTerm = now
work.pauseStart = now
systemstack(stopTheWorldWithSema)
// Finish sweep before we start concurrent scan.
@@ -1017,6 +1266,7 @@ func gcStart(mode gcMode, forceTrigger bool) {
// reclaimed until the next GC cycle.
clearpools()
+ work.cycles++
if mode == gcBackgroundMode { // Do as much work concurrently as possible
gcController.startCycle()
work.heapGoal = memstats.next_gc
@@ -1029,18 +1279,7 @@ func gcStart(mode gcMode, forceTrigger bool) {
// the time we start the world and begin
// scanning.
//
- // It's necessary to enable write barriers
- // during the scan phase for several reasons:
- //
- // They must be enabled for writes to higher
- // stack frames before we scan stacks and
- // install stack barriers because this is how
- // we track writes to inactive stack frames.
- // (Alternatively, we could not install stack
- // barriers over frame boundaries with
- // up-pointers).
- //
- // They must be enabled before assists are
+ // Write barriers must be enabled before assists are
// enabled because they must be enabled before
// any non-leaf heap objects are marked. Since
// allocations are blocked until assists can
@@ -1079,17 +1318,11 @@ func gcStart(mode gcMode, forceTrigger bool) {
work.tMark, work.tMarkTerm = t, t
work.heapGoal = work.heap0
- if forced {
- memstats.numforcedgc++
- }
-
// Perform mark termination. This will restart the world.
- gcMarkTermination()
+ gcMarkTermination(memstats.triggerRatio)
}
- if useStartSema {
- semrelease(&work.startSema)
- }
+ semrelease(&work.startSema)
}
// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
@@ -1109,7 +1342,7 @@ func gcStart(mode gcMode, forceTrigger bool) {
// by mark termination.
func gcMarkDone() {
top:
- semacquire(&work.markDoneSema, 0)
+ semacquire(&work.markDoneSema)
// Re-check transition condition under transition lock.
if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
@@ -1204,14 +1437,14 @@ top:
// endCycle depends on all gcWork cache stats being
// flushed. This is ensured by mark 2.
- gcController.endCycle()
+ nextTriggerRatio := gcController.endCycle()
// Perform mark termination. This will restart the world.
- gcMarkTermination()
+ gcMarkTermination(nextTriggerRatio)
}
}
-func gcMarkTermination() {
+func gcMarkTermination(nextTriggerRatio float64) {
// World is stopped.
// Start marktermination which includes enabling the write barrier.
atomic.Store(&gcBlackenEnabled, 0)
@@ -1293,11 +1526,17 @@ func gcMarkTermination() {
throw("gc done but gcphase != _GCoff")
}
+ // Update GC trigger and pacing for the next cycle.
+ gcSetTriggerRatio(nextTriggerRatio)
+
// Update timing memstats
- now, unixNow := nanotime(), unixnanotime()
+ now := nanotime()
+ sec, nsec, _ := time_now()
+ unixNow := sec*1e9 + int64(nsec)
work.pauseNS += now - work.pauseStart
work.tEnd = now
- atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
+ atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
+ atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
memstats.pause_total_ns += uint64(work.pauseNS)
@@ -1315,25 +1554,36 @@ func gcMarkTermination() {
totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
- memstats.numgc++
-
// Reset sweep state.
sweep.nbgsweep = 0
sweep.npausesweep = 0
+ if work.userForced {
+ memstats.numforcedgc++
+ }
+
+ // Bump GC cycle count and wake goroutines waiting on sweep.
+ lock(&work.sweepWaiters.lock)
+ memstats.numgc++
+ injectglist(work.sweepWaiters.head.ptr())
+ work.sweepWaiters.head = 0
+ unlock(&work.sweepWaiters.lock)
+
+ // Finish the current heap profiling cycle and start a new
+ // heap profiling cycle. We do this before starting the world
+ // so events don't leak into the wrong cycle.
+ mProf_NextCycle()
+
systemstack(startTheWorldWithSema)
- // Update heap profile stats if gcSweep didn't do it. This is
- // relatively expensive, so we don't want to do it while the
- // world is stopped, but it needs to happen ASAP after
- // starting the world to prevent too many allocations from the
- // next cycle leaking in. It must happen before releasing
- // worldsema since there are applications that do a
- // runtime.GC() to update the heap profile and then
- // immediately collect the profile.
- if _ConcurrentSweep && work.mode != gcForceBlockMode {
- mProf_GC()
- }
+ // Flush the heap profile so we can start a new cycle next GC.
+ // This is relatively expensive, so we don't do it with the
+ // world stopped.
+ mProf_Flush()
+
+ // Prepare workbufs for freeing by the sweeper. We do this
+ // asynchronously because it can take non-trivial time.
+ prepareFreeWorkbufs()
// Print gctrace before dropping worldsema. As soon as we drop
// worldsema another cycle could start and smash the stats
@@ -1368,7 +1618,7 @@ func gcMarkTermination() {
work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
work.heapGoal>>20, " MB goal, ",
work.maxprocs, " P")
- if work.mode != gcBackgroundMode {
+ if work.userForced {
print(" (forced)")
}
print("\n")
@@ -1521,6 +1771,25 @@ func gcBgMarkWorker(_p_ *p) {
default:
throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
case gcMarkWorkerDedicatedMode:
+ gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
+ if gp.preempt {
+ // We were preempted. This is
+ // a useful signal to kick
+ // everything out of the run
+ // queue so it can run
+ // somewhere else.
+ lock(&sched.lock)
+ for {
+ gp, _ := runqget(_p_)
+ if gp == nil {
+ break
+ }
+ globrunqput(gp)
+ }
+ unlock(&sched.lock)
+ }
+ // Go back to draining, this time
+ // without preemption.
gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
case gcMarkWorkerFractionalMode:
gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
@@ -1593,7 +1862,7 @@ func gcMarkWorkAvailable(p *p) bool {
if p != nil && !p.gcw.empty() {
return true
}
- if atomic.Load64(&work.full) != 0 {
+ if !work.full.empty() {
return true // global work available
}
if work.markrootNext < work.markrootJobs {
@@ -1623,24 +1892,22 @@ func gcMark(start_time int64) {
work.ndone = 0
work.nproc = uint32(gcprocs())
- if debug.gcrescanstacks == 0 && work.full == 0 && work.nDataRoots+work.nSpanRoots+work.nStackRoots+work.nRescanRoots == 0 {
+ if work.full == 0 && work.nDataRoots+work.nSpanRoots+work.nStackRoots == 0 {
// There's no work on the work queue and no root jobs
// that can produce work, so don't bother entering the
// getfull() barrier.
//
- // With the hybrid barrier enabled, this will be the
- // situation the vast majority of the time after
- // concurrent mark. However, we still need a fallback
- // for STW GC and because there are some known races
- // that occasionally leave work around for mark
- // termination.
+ // This will be the situation the vast majority of the
+ // time after concurrent mark. However, we still need
+ // a fallback for STW GC and because there are some
+ // known races that occasionally leave work around for
+ // mark termination.
//
// We're still hedging our bets here: if we do
// accidentally produce some work, we'll still process
// it, just not necessarily in parallel.
//
- // TODO(austin): When we eliminate
- // debug.gcrescanstacks: fix the races, and remove
+	// TODO(austin): Fix the races and remove
// work draining from mark termination so we don't
// need the fallback path.
work.helperDrainBlock = false
@@ -1704,52 +1971,14 @@ func gcMark(start_time int64) {
// Update the marked heap stat.
memstats.heap_marked = work.bytesMarked
- // Trigger the next GC cycle when the allocated heap has grown
- // by triggerRatio over the marked heap size. Assume that
- // we're in steady state, so the marked heap size is the
- // same now as it was at the beginning of the GC cycle.
- memstats.gc_trigger = uint64(float64(memstats.heap_marked) * (1 + gcController.triggerRatio))
- if memstats.gc_trigger < heapminimum {
- memstats.gc_trigger = heapminimum
- }
- if int64(memstats.gc_trigger) < 0 {
- print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
- throw("gc_trigger underflow")
- }
-
// Update other GC heap size stats. This must happen after
// cachestats (which flushes local statistics to these) and
// flushallmcaches (which modifies heap_live).
memstats.heap_live = work.bytesMarked
memstats.heap_scan = uint64(gcController.scanWork)
- minTrigger := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
- if memstats.gc_trigger < minTrigger {
- // The allocated heap is already past the trigger.
- // This can happen if the triggerRatio is very low and
- // the marked heap is less than the live heap size.
- //
- // Concurrent sweep happens in the heap growth from
- // heap_live to gc_trigger, so bump gc_trigger up to ensure
- // that concurrent sweep has some heap growth in which
- // to perform sweeping before we start the next GC
- // cycle.
- memstats.gc_trigger = minTrigger
- }
-
- // The next GC cycle should finish before the allocated heap
- // has grown by GOGC/100.
- memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
- if gcpercent < 0 {
- memstats.next_gc = ^uint64(0)
- }
- if memstats.next_gc < memstats.gc_trigger {
- memstats.next_gc = memstats.gc_trigger
- }
-
if trace.enabled {
traceHeapAlloc()
- traceNextGC()
}
}
@@ -1767,6 +1996,7 @@ func gcSweep(mode gcMode) {
// with an empty swept list.
throw("non-empty swept list")
}
+ mheap_.pagesSwept = 0
unlock(&mheap_.lock)
if !_ConcurrentSweep || mode == gcForceBlockMode {
@@ -1774,35 +2004,23 @@ func gcSweep(mode gcMode) {
// Record that no proportional sweeping has to happen.
lock(&mheap_.lock)
mheap_.sweepPagesPerByte = 0
- mheap_.pagesSwept = 0
unlock(&mheap_.lock)
// Sweep all spans eagerly.
for sweepone() != ^uintptr(0) {
sweep.npausesweep++
}
- // Do an additional mProf_GC, because all 'free' events are now real as well.
- mProf_GC()
- mProf_GC()
+ // Free workbufs eagerly.
+ prepareFreeWorkbufs()
+ for freeSomeWbufs(false) {
+ }
+ // All "free" events for this mark/sweep cycle have
+ // now happened, so we can make this profile cycle
+ // available immediately.
+ mProf_NextCycle()
+ mProf_Flush()
return
}
- // Concurrent sweep needs to sweep all of the in-use pages by
- // the time the allocated heap reaches the GC trigger. Compute
- // the ratio of in-use pages to sweep per byte allocated.
- heapDistance := int64(memstats.gc_trigger) - int64(memstats.heap_live)
- // Add a little margin so rounding errors and concurrent
- // sweep are less likely to leave pages unswept when GC starts.
- heapDistance -= 1024 * 1024
- if heapDistance < _PageSize {
- // Avoid setting the sweep ratio extremely high
- heapDistance = _PageSize
- }
- lock(&mheap_.lock)
- mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
- mheap_.pagesSwept = 0
- mheap_.spanBytesAlloc = 0
- unlock(&mheap_.lock)
-
// Background sweep.
lock(&sweep.lock)
if sweep.parked {
@@ -1820,24 +2038,16 @@ func gcSweep(mode gcMode) {
func gcResetMarkState() {
// This may be called during a concurrent phase, so make sure
// allgs doesn't change.
- if !(gcphase == _GCoff || gcphase == _GCmarktermination) {
- // Accessing gcRescan is unsafe.
- throw("bad GC phase")
- }
lock(&allglock)
for _, gp := range allgs {
gp.gcscandone = false // set to true in gcphasework
gp.gcscanvalid = false // stack has not been scanned
- gp.gcRescan = -1
gp.gcAssistBytes = 0
}
unlock(&allglock)
- // Clear rescan list.
- work.rescan.list = work.rescan.list[:0]
-
work.bytesMarked = 0
- work.initialHeapLive = memstats.heap_live
+ work.initialHeapLive = atomic.Load64(&memstats.heap_live)
work.markrootDone = false
}
diff --git a/libgo/go/runtime/mgclarge.go b/libgo/go/runtime/mgclarge.go
new file mode 100644
index 00000000000..757e88d1d9d
--- /dev/null
+++ b/libgo/go/runtime/mgclarge.go
@@ -0,0 +1,326 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Page heap.
+//
+// See malloc.go for the general overview.
+//
+// Large spans are the subject of this file. Spans consisting of less than
+// _MaxMHeapLists are held in lists of like sized spans. Larger spans
+// are held in a treap. See https://en.wikipedia.org/wiki/Treap or
+// http://faculty.washington.edu/aragon/pubs/rst89.pdf for an overview.
+// sema.go also holds an implementation of a treap.
+//
+// Each treapNode holds a single span. The treap is sorted by page size
+// and for spans of the same size a secondary sort based on start address
+// is done.
+// Spans are returned based on a best fit algorithm and for spans of the same
+// size the one at the lowest address is selected.
+//
+// The primary routines are
+// insert: adds a span to the treap
+// remove: removes the span that best fits the required size from the treap
+// removeSpan: removes a specific span from the treap
+//
+// _mheap.lock must be held when manipulating this data structure.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+//go:notinheap
+type mTreap struct {
+ treap *treapNode
+}
+
+//go:notinheap
+type treapNode struct {
+ right *treapNode // all treapNodes > this treap node
+ left *treapNode // all treapNodes < this treap node
+ parent *treapNode // direct parent of this node, nil if root
+ npagesKey uintptr // number of pages in spanKey, used as primary sort key
+ spanKey *mspan // span of size npagesKey, used as secondary sort key
+	priority uint32    // random number used by treap algorithm to keep tree probabilistically balanced
+}
+
+func (t *treapNode) init() {
+ t.right = nil
+ t.left = nil
+ t.parent = nil
+ t.spanKey = nil
+ t.npagesKey = 0
+ t.priority = 0
+}
+
+// isSpanInTreap is handy for debugging. One should hold the heap lock, usually
+// mheap_.lock().
+func (t *treapNode) isSpanInTreap(s *mspan) bool {
+ if t == nil {
+ return false
+ }
+ return t.spanKey == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s)
+}
+
+// walkTreap is handy for debugging.
+// Starting at some treapnode t, for example the root, do a depth first preorder walk of
+// the tree executing fn at each treap node. One should hold the heap lock, usually
+// mheap_.lock().
+func (t *treapNode) walkTreap(fn func(tn *treapNode)) {
+ if t == nil {
+ return
+ }
+ fn(t)
+ t.left.walkTreap(fn)
+ t.right.walkTreap(fn)
+}
+
+// checkTreapNode when used in conjunction with walkTreap can usually detect a
+// poorly formed treap.
+func checkTreapNode(t *treapNode) {
+ // lessThan is used to order the treap.
+ // npagesKey and npages are the primary keys.
+ // spanKey and span are the secondary keys.
+ // span == nil (0) will always be lessThan all
+ // spans of the same size.
+ lessThan := func(npages uintptr, s *mspan) bool {
+ if t.npagesKey != npages {
+ return t.npagesKey < npages
+ }
+ // t.npagesKey == npages
+ return uintptr(unsafe.Pointer(t.spanKey)) < uintptr(unsafe.Pointer(s))
+ }
+
+ if t == nil {
+ return
+ }
+ if t.spanKey.npages != t.npagesKey || t.spanKey.next != nil {
+ println("runtime: checkTreapNode treapNode t=", t, " t.npagesKey=", t.npagesKey,
+ "t.spanKey.npages=", t.spanKey.npages)
+ throw("why does span.npages and treap.ngagesKey do not match?")
+ }
+ if t.left != nil && lessThan(t.left.npagesKey, t.left.spanKey) {
+ throw("t.lessThan(t.left.npagesKey, t.left.spanKey) is not false")
+ }
+ if t.right != nil && !lessThan(t.right.npagesKey, t.right.spanKey) {
+ throw("!t.lessThan(t.left.npagesKey, t.left.spanKey) is not false")
+ }
+}
+
+// insert adds span to the large span treap.
+func (root *mTreap) insert(span *mspan) {
+ npages := span.npages
+ var last *treapNode
+ pt := &root.treap
+ for t := *pt; t != nil; t = *pt {
+ last = t
+ if t.npagesKey < npages {
+ pt = &t.right
+ } else if t.npagesKey > npages {
+ pt = &t.left
+ } else if uintptr(unsafe.Pointer(t.spanKey)) < uintptr(unsafe.Pointer(span)) {
+ // t.npagesKey == npages, so sort on span addresses.
+ pt = &t.right
+ } else if uintptr(unsafe.Pointer(t.spanKey)) > uintptr(unsafe.Pointer(span)) {
+ pt = &t.left
+ } else {
+ throw("inserting span already in treap")
+ }
+ }
+
+ // Add t as new leaf in tree of span size and unique addrs.
+ // The balanced tree is a treap using priority as the random heap priority.
+ // That is, it is a binary tree ordered according to the npagesKey,
+ // but then among the space of possible binary trees respecting those
+ // npagesKeys, it is kept balanced on average by maintaining a heap ordering
+// on the priority: s.priority <= both s.left.priority and s.right.priority.
+ // https://en.wikipedia.org/wiki/Treap
+ // http://faculty.washington.edu/aragon/pubs/rst89.pdf
+
+ t := (*treapNode)(mheap_.treapalloc.alloc())
+ t.init()
+ t.npagesKey = span.npages
+ t.priority = fastrand()
+ t.spanKey = span
+ t.parent = last
+ *pt = t // t now at a leaf.
+ // Rotate up into tree according to priority.
+ for t.parent != nil && t.parent.priority > t.priority {
+ if t != nil && t.spanKey.npages != t.npagesKey {
+ println("runtime: insert t=", t, "t.npagesKey=", t.npagesKey)
+ println("runtime: t.spanKey=", t.spanKey, "t.spanKey.npages=", t.spanKey.npages)
+ throw("span and treap sizes do not match?")
+ }
+ if t.parent.left == t {
+ root.rotateRight(t.parent)
+ } else {
+ if t.parent.right != t {
+ throw("treap insert finds a broken treap")
+ }
+ root.rotateLeft(t.parent)
+ }
+ }
+}
+
+func (root *mTreap) removeNode(t *treapNode) *mspan {
+ if t.spanKey.npages != t.npagesKey {
+ throw("span and treap node npages do not match")
+ }
+ result := t.spanKey
+
+ // Rotate t down to be leaf of tree for removal, respecting priorities.
+ for t.right != nil || t.left != nil {
+ if t.right == nil || t.left != nil && t.left.priority < t.right.priority {
+ root.rotateRight(t)
+ } else {
+ root.rotateLeft(t)
+ }
+ }
+ // Remove t, now a leaf.
+ if t.parent != nil {
+ if t.parent.left == t {
+ t.parent.left = nil
+ } else {
+ t.parent.right = nil
+ }
+ } else {
+ root.treap = nil
+ }
+ // Return the found treapNode's span after freeing the treapNode.
+ t.spanKey = nil
+ t.npagesKey = 0
+ mheap_.treapalloc.free(unsafe.Pointer(t))
+ return result
+}
+
+// remove searches for, finds, removes from the treap, and returns the smallest
+// span that can hold npages. If no span has at least npages return nil.
+// This is slightly more complicated than a simple binary tree search
+// since if an exact match is not found the next larger node is
+// returned.
+// The best-fit node is the first node inspected whose npagesKey is at
+// least npages and whose left subtree holds no smaller span that fits.
+func (root *mTreap) remove(npages uintptr) *mspan {
+ t := root.treap
+ for t != nil {
+ if t.spanKey == nil {
+ throw("treap node with nil spanKey found")
+ }
+ if t.npagesKey < npages {
+ t = t.right
+ } else if t.left != nil && t.left.npagesKey >= npages {
+ t = t.left
+ } else {
+ result := t.spanKey
+ root.removeNode(t)
+ return result
+ }
+ }
+ return nil
+}
+
+// removeSpan searches for, finds, and deletes span along with
+// the associated treap node. If the span is not in the treap,
+// t will eventually be set to nil and the t.spanKey dereference
+// will throw.
+func (root *mTreap) removeSpan(span *mspan) {
+ npages := span.npages
+ t := root.treap
+ for t.spanKey != span {
+ if t.npagesKey < npages {
+ t = t.right
+ } else if t.npagesKey > npages {
+ t = t.left
+ } else if uintptr(unsafe.Pointer(t.spanKey)) < uintptr(unsafe.Pointer(span)) {
+ t = t.right
+ } else if uintptr(unsafe.Pointer(t.spanKey)) > uintptr(unsafe.Pointer(span)) {
+ t = t.left
+ }
+ }
+ root.removeNode(t)
+}
+
+// scavengetreap visits each node in the treap and scavenges the
+// treapNode's span.
+func scavengetreap(treap *treapNode, now, limit uint64) uintptr {
+ if treap == nil {
+ return 0
+ }
+ return scavengeTreapNode(treap, now, limit) +
+ scavengetreap(treap.left, now, limit) +
+ scavengetreap(treap.right, now, limit)
+}
+
+// rotateLeft rotates the tree rooted at node x.
+// turning (x a (y b c)) into (y (x a b) c).
+func (root *mTreap) rotateLeft(x *treapNode) {
+ // p -> (x a (y b c))
+ p := x.parent
+ a, y := x.left, x.right
+ b, c := y.left, y.right
+
+ y.left = x
+ x.parent = y
+ y.right = c
+ if c != nil {
+ c.parent = y
+ }
+ x.left = a
+ if a != nil {
+ a.parent = x
+ }
+ x.right = b
+ if b != nil {
+ b.parent = x
+ }
+
+ y.parent = p
+ if p == nil {
+ root.treap = y
+ } else if p.left == x {
+ p.left = y
+ } else {
+ if p.right != x {
+ throw("large span treap rotateLeft")
+ }
+ p.right = y
+ }
+}
+
+// rotateRight rotates the tree rooted at node y.
+// turning (y (x a b) c) into (x a (y b c)).
+func (root *mTreap) rotateRight(y *treapNode) {
+ // p -> (y (x a b) c)
+ p := y.parent
+ x, c := y.left, y.right
+ a, b := x.left, x.right
+
+ x.left = a
+ if a != nil {
+ a.parent = x
+ }
+ x.right = y
+ y.parent = x
+ y.left = b
+ if b != nil {
+ b.parent = y
+ }
+ y.right = c
+ if c != nil {
+ c.parent = y
+ }
+
+ x.parent = p
+ if p == nil {
+ root.treap = x
+ } else if p.left == y {
+ p.left = x
+ } else {
+ if p.right != y {
+ throw("large span treap rotateRight")
+ }
+ p.right = x
+ }
+}
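As a sanity check on the pointer surgery, here is a self-contained sketch (hypothetical node type; parent links and the root update are omitted) showing that rotateLeft turns (x a (y b c)) into (y (x a b) c):

package main

import "fmt"

type node struct {
	val         string
	left, right *node
}

// rotateLeft turns (x a (y b c)) into (y (x a b) c), mirroring the
// child-pointer moves in mTreap.rotateLeft above.
func rotateLeft(x *node) *node {
	y := x.right
	x.right = y.left // b moves under x
	y.left = x       // x becomes y's left child
	return y         // y is the new subtree root
}

func sexpr(n *node) string {
	if n == nil {
		return "_"
	}
	if n.left == nil && n.right == nil {
		return n.val
	}
	return "(" + n.val + " " + sexpr(n.left) + " " + sexpr(n.right) + ")"
}

func main() {
	x := &node{val: "x",
		left:  &node{val: "a"},
		right: &node{val: "y", left: &node{val: "b"}, right: &node{val: "c"}}}
	fmt.Println(sexpr(x))             // (x a (y b c))
	fmt.Println(sexpr(rotateLeft(x))) // (y (x a b) c)
}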
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index 93252ba8a9e..998a830caa8 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -93,21 +93,24 @@ func gcMarkRootPrepare() {
// termination, allglen isn't changing, so we'll scan
// all Gs.
work.nStackRoots = int(atomic.Loaduintptr(&allglen))
- work.nRescanRoots = 0
} else {
// We've already scanned span roots and kept the scan
// up-to-date during concurrent mark.
work.nSpanRoots = 0
- // On the second pass of markroot, we're just scanning
- // dirty stacks. It's safe to access rescan since the
- // world is stopped.
+ // The hybrid barrier ensures that stacks can't
+ // contain pointers to unmarked objects, so on the
+ // second markroot, there's no need to scan stacks.
work.nStackRoots = 0
- work.nRescanRoots = len(work.rescan.list)
+
+ if debug.gcrescanstacks > 0 {
+ // Scan stacks anyway for debugging.
+ work.nStackRoots = int(atomic.Loaduintptr(&allglen))
+ }
}
work.markrootNext = 0
- work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
+ work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nSpanRoots + work.nStackRoots)
}
// gcMarkRootCheck checks that all roots have been scanned. It is
@@ -165,8 +168,7 @@ func markroot(gcw *gcWork, i uint32) {
baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
baseSpans := baseData + uint32(work.nDataRoots)
baseStacks := baseSpans + uint32(work.nSpanRoots)
- baseRescan := baseStacks + uint32(work.nStackRoots)
- end := baseRescan + uint32(work.nRescanRoots)
+ end := baseStacks + uint32(work.nStackRoots)
// Note: if you add a case here, please also update heapdump.go:dumproots.
switch {
@@ -186,6 +188,11 @@ func markroot(gcw *gcWork, i uint32) {
}
case i == fixedRootFinalizers:
+ // Only do this once per GC cycle since we don't call
+ // queuefinalizer during marking.
+ if work.markrootDone {
+ break
+ }
for fb := allfin; fb != nil; fb = fb.alllink {
cnt := uintptr(atomic.Load(&fb.cnt))
scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
@@ -201,15 +208,8 @@ func markroot(gcw *gcWork, i uint32) {
default:
// the rest is scanning goroutine stacks
var gp *g
- if baseStacks <= i && i < baseRescan {
+ if baseStacks <= i && i < end {
gp = allgs[i-baseStacks]
- } else if baseRescan <= i && i < end {
- gp = work.rescan.list[i-baseRescan].ptr()
- if gp.gcRescan != int32(i-baseRescan) {
- // Looking for issue #17099.
- println("runtime: gp", gp, "found at rescan index", i-baseRescan, "but should be at", gp.gcRescan)
- throw("bad g rescan index")
- }
} else {
throw("markroot: bad index")
}
@@ -352,6 +352,7 @@ func gcAssistAlloc(gp *g) {
return
}
+ traced := false
retry:
// Compute the amount of scan work we need to do to make the
// balance positive. When the required amount of work is low,
@@ -387,10 +388,18 @@ retry:
if scanWork == 0 {
// We were able to steal all of the credit we
// needed.
+ if traced {
+ traceGCMarkAssistDone()
+ }
return
}
}
+ if trace.enabled && !traced {
+ traced = true
+ traceGCMarkAssistStart()
+ }
+
// Perform assist work
systemstack(func() {
gcAssistAlloc1(gp, scanWork)
@@ -433,6 +442,9 @@ retry:
// At this point either background GC has satisfied
// this G's assist debt, or the GC cycle is over.
}
+ if traced {
+ traceGCMarkAssistDone()
+ }
}
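The traced flag above implements a lazy begin / guaranteed end pairing: the start event is emitted at most once, only if assist work actually happens, and the done event fires on every exit path that saw a start. A condensed sketch of the pattern, using defer for brevity where the real code spells out each exit; the trace functions and flag here are stand-ins, not the runtime's trace API:

package main

import "fmt"

var traceEnabled = true

func eventStart() { fmt.Println("assist start") }
func eventDone()  { fmt.Println("assist done") }

func assist(stoleAllCredit bool) {
	traced := false
	defer func() {
		if traced {
			eventDone() // emitted only if a start was emitted
		}
	}()
	if stoleAllCredit {
		return // fast path: no event pair at all
	}
	if traceEnabled && !traced {
		traced = true
		eventStart()
	}
	// ... perform the assist work here ...
}

func main() {
	assist(true)  // no events
	assist(false) // start + done
}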
// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
@@ -656,10 +668,6 @@ func doscanstack(*g, *gcWork)
// scanstack scans gp's stack, greying all pointers found on the stack.
//
-// During mark phase, it also installs stack barriers while traversing
-// gp's stack. During mark termination, it stops scanning when it
-// reaches an unhit stack barrier.
-//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
@@ -699,84 +707,9 @@ func scanstack(gp *g, gcw *gcWork) {
scanstackblock(uintptr(unsafe.Pointer(&gp.gcregs)), unsafe.Sizeof(gp.gcregs), gcw)
scanstackblock(uintptr(unsafe.Pointer(&gp.context)), unsafe.Sizeof(gp.context), gcw)
- if gcphase == _GCmark {
- // gp may have added itself to the rescan list between
- // when GC started and now. It's clean now, so remove
- // it. This isn't safe during mark termination because
- // mark termination is consuming this list, but it's
- // also not necessary.
- dequeueRescan(gp)
- }
gp.gcscanvalid = true
}
-// queueRescan adds gp to the stack rescan list and clears
-// gp.gcscanvalid. The caller must own gp and ensure that gp isn't
-// already on the rescan list.
-func queueRescan(gp *g) {
- if debug.gcrescanstacks == 0 {
- // Clear gcscanvalid to keep assertions happy.
- //
- // TODO: Remove gcscanvalid entirely when we remove
- // stack rescanning.
- gp.gcscanvalid = false
- return
- }
-
- if gcphase == _GCoff {
- gp.gcscanvalid = false
- return
- }
- if gp.gcRescan != -1 {
- throw("g already on rescan list")
- }
-
- lock(&work.rescan.lock)
- gp.gcscanvalid = false
-
- // Recheck gcphase under the lock in case there was a phase change.
- if gcphase == _GCoff {
- unlock(&work.rescan.lock)
- return
- }
- if len(work.rescan.list) == cap(work.rescan.list) {
- throw("rescan list overflow")
- }
- n := len(work.rescan.list)
- gp.gcRescan = int32(n)
- work.rescan.list = work.rescan.list[:n+1]
- work.rescan.list[n].set(gp)
- unlock(&work.rescan.lock)
-}
-
-// dequeueRescan removes gp from the stack rescan list, if gp is on
-// the rescan list. The caller must own gp.
-func dequeueRescan(gp *g) {
- if debug.gcrescanstacks == 0 {
- return
- }
-
- if gp.gcRescan == -1 {
- return
- }
- if gcphase == _GCoff {
- gp.gcRescan = -1
- return
- }
-
- lock(&work.rescan.lock)
- if work.rescan.list[gp.gcRescan].ptr() != gp {
- throw("bad dequeueRescan")
- }
- // Careful: gp may itself be the last G on the list.
- last := work.rescan.list[len(work.rescan.list)-1]
- work.rescan.list[gp.gcRescan] = last
- last.ptr().gcRescan = gp.gcRescan
- gp.gcRescan = -1
- work.rescan.list = work.rescan.list[:len(work.rescan.list)-1]
- unlock(&work.rescan.lock)
-}
-
type gcDrainFlags int
const (
@@ -1052,7 +985,7 @@ func scanobject(b uintptr, gcw *gcWork) {
// paths), in which case we must *not* enqueue
// oblets since their bitmaps will be
// uninitialized.
- if !hbits.hasPointers(n) {
+ if s.spanclass.noscan() {
// Bypass the whole scan.
gcw.bytesMarked += uint64(n)
return
@@ -1185,6 +1118,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
// Dump the object
gcDumpObject("obj", obj, ^uintptr(0))
+ getg().m.traceback = 2
throw("checkmark found unmarked object")
}
if hbits.isCheckmarked(span.elemsize) {
@@ -1206,6 +1140,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
gcDumpObject("base", base, off)
gcDumpObject("obj", obj, ^uintptr(0))
+ getg().m.traceback = 2
throw("marking free object")
}
@@ -1217,7 +1152,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork
atomic.Or8(mbits.bytep, mbits.mask)
// If this is a noscan object, fast-track it to black
// instead of greying it.
- if !hbits.hasPointers(span.elemsize) {
+ if span.spanclass.noscan() {
gcw.bytesMarked += uint64(span.elemsize)
return
}
@@ -1250,7 +1185,7 @@ func gcDumpObject(label string, obj, off uintptr) {
print(" s=nil\n")
return
}
- print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, " s.state=")
+ print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
print(mSpanStateNames[s.state], "\n")
} else {
@@ -1259,7 +1194,7 @@ func gcDumpObject(label string, obj, off uintptr) {
skipped := false
size := s.elemsize
- if s.state == _MSpanStack && size == 0 {
+ if s.state == _MSpanManual && size == 0 {
// We're printing something from a stack frame. We
// don't know how big it is, so just show up to and
// including off.
diff --git a/libgo/go/runtime/mgcsweep.go b/libgo/go/runtime/mgcsweep.go
index 2b698bf74a5..c60214cdaf3 100644
--- a/libgo/go/runtime/mgcsweep.go
+++ b/libgo/go/runtime/mgcsweep.go
@@ -22,10 +22,6 @@ type sweepdata struct {
nbgsweep uint32
npausesweep uint32
-
- // pacertracegen is the sweepgen at which the last pacer trace
- // "sweep finished" message was printed.
- pacertracegen uint32
}
// finishsweep_m ensures that all spans are swept.
@@ -62,6 +58,9 @@ func bgsweep(c chan int) {
sweep.nbgsweep++
Gosched()
}
+ for freeSomeWbufs(true) {
+ Gosched()
+ }
lock(&sweep.lock)
if !gosweepdone() {
// This can happen if a GC runs between
@@ -80,20 +79,24 @@ func bgsweep(c chan int) {
//go:nowritebarrier
func sweepone() uintptr {
_g_ := getg()
+ sweepRatio := mheap_.sweepPagesPerByte // For debugging
// increment locks to ensure that the goroutine is not preempted
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
_g_.m.locks++
+ if atomic.Load(&mheap_.sweepdone) != 0 {
+ _g_.m.locks--
+ return ^uintptr(0)
+ }
+ atomic.Xadd(&mheap_.sweepers, +1)
+
+ npages := ^uintptr(0)
sg := mheap_.sweepgen
for {
s := mheap_.sweepSpans[1-sg/2%2].pop()
if s == nil {
- mheap_.sweepdone = 1
- _g_.m.locks--
- if debug.gcpacertrace > 0 && atomic.Cas(&sweep.pacertracegen, sg-2, sg) {
- print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", mheap_.spanBytesAlloc>>20, "MB of spans; swept ", mheap_.pagesSwept, " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
- }
- return ^uintptr(0)
+ atomic.Store(&mheap_.sweepdone, 1)
+ break
}
if s.state != mSpanInUse {
// This can happen if direct sweeping already
@@ -108,16 +111,25 @@ func sweepone() uintptr {
if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
continue
}
- npages := s.npages
+ npages = s.npages
if !s.sweep(false) {
// Span is still in-use, so this returned no
// pages to the heap and the span needs to
// move to the swept in-use list.
npages = 0
}
- _g_.m.locks--
- return npages
+ break
+ }
+
+ // Decrement the number of active sweepers and if this is the
+ // last one print trace information.
+ if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
+ if debug.gcpacertrace > 0 {
+ print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
+ }
}
+ _g_.m.locks--
+ return npages
}
//go:nowritebarrier
@@ -180,15 +192,14 @@ func (s *mspan) sweep(preserve bool) bool {
}
if trace.enabled {
- traceGCSweepStart()
+ traceGCSweepSpan(s.npages * _PageSize)
}
atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))
- cl := s.sizeclass
+ spc := s.spanclass
size := s.elemsize
res := false
- nfree := 0
c := _g_.m.mcache
freeToHeap := false
@@ -278,12 +289,11 @@ func (s *mspan) sweep(preserve bool) bool {
}
// Count the number of free objects in this span.
- nfree = s.countFree()
- if cl == 0 && nfree != 0 {
+ nalloc := uint16(s.countAlloc())
+ if spc.sizeclass() == 0 && nalloc == 0 {
s.needzero = 1
freeToHeap = true
}
- nalloc := uint16(s.nelems) - uint16(nfree)
nfreed := s.allocCount - nalloc
// This test is not reliable with gccgo, because of
@@ -300,13 +310,16 @@ func (s *mspan) sweep(preserve bool) bool {
// unnecessarily, but provided the pointer is not really live
// it is not otherwise a problem. So we disable the test for gccgo.
if false && nalloc > s.allocCount {
- print("runtime: nelems=", s.nelems, " nfree=", nfree, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+ print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
throw("sweep increased allocation count")
}
s.allocCount = nalloc
wasempty := s.nextFreeIndex() == s.nelems
s.freeindex = 0 // reset allocation index to start of span.
+ if trace.enabled {
+ getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+ }
// gcmarkBits becomes the allocBits.
// get a fresh cleared gcmarkBits in preparation for next GC
@@ -334,9 +347,9 @@ func (s *mspan) sweep(preserve bool) bool {
atomic.Store(&s.sweepgen, sweepgen)
}
- if nfreed > 0 && cl != 0 {
- c.local_nsmallfree[cl] += uintptr(nfreed)
- res = mheap_.central[cl].mcentral.freeSpan(s, preserve, wasempty)
+ if nfreed > 0 && spc.sizeclass() != 0 {
+ c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+ res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
// MCentral_FreeSpan updates sweepgen
} else if freeToHeap {
// Free large span to heap
@@ -370,9 +383,6 @@ func (s *mspan) sweep(preserve bool) bool {
// it on the swept in-use list.
mheap_.sweepSpans[sweepgen/2%2].push(s)
}
- if trace.enabled {
- traceGCSweepDone()
- }
return res
}
@@ -385,8 +395,7 @@ func (s *mspan) sweep(preserve bool) bool {
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
-// allocation. The caller should call reimburseSweepCredit if that
-// turns out not to be the case once the span is allocated.
+// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
@@ -400,31 +409,28 @@ func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
return
}
- // Account for this span allocation.
- spanBytesAlloc := atomic.Xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))
+ if trace.enabled {
+ traceGCSweepStart()
+ }
+
+retry:
+ sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)
// Fix debt if necessary.
- pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
- for pagesOwed-int64(atomic.Load64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
+ newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
+ pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
+ for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
if gosweepone() == ^uintptr(0) {
mheap_.sweepPagesPerByte = 0
break
}
+ if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
+ // Sweep pacing changed. Recompute debt.
+ goto retry
+ }
}
-}
-// reimburseSweepCredit records that unusableBytes bytes of a
-// just-allocated span are not available for object allocation. This
-// offsets the worst-case charge performed by deductSweepCredit.
-func reimburseSweepCredit(unusableBytes uintptr) {
- if mheap_.sweepPagesPerByte == 0 {
- // Nobody cares about the credit. Avoid the atomic.
- return
- }
- nval := atomic.Xadd64(&mheap_.spanBytesAlloc, -int64(unusableBytes))
- if int64(nval) < 0 {
- // Debugging for #18043.
- print("runtime: bad spanBytesAlloc=", nval, " (was ", nval+uint64(unusableBytes), ") unusableBytes=", unusableBytes, " sweepPagesPerByte=", mheap_.sweepPagesPerByte, "\n")
- throw("spanBytesAlloc underflow")
+ if trace.enabled {
+ traceGCSweepDone()
}
}
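The rewritten pacing check keeps sweep progress, measured from the basis point, above the pacing line evaluated at the post-allocation heap size. A worked sketch of that arithmetic with made-up numbers (all constants and values are illustrative only, mirroring the expressions in deductSweepCredit):

package main

import "fmt"

func main() {
	const pageSize = 8 << 10

	sweepPagesPerByte := 0.001            // slope of the pacing line
	sweepHeapLiveBasis := uint64(1 << 20) // heap_live at the basis point
	pagesSweptBasis := uint64(100)        // pagesSwept at the basis point

	heapLive := uint64(5 << 20)    // current heap_live
	pagesSwept := uint64(3000)     // pages swept so far this cycle
	spanBytes := uintptr(64 << 10) // span about to be allocated
	callerSweepPages := spanBytes / pageSize // caller sweeps these itself

	newHeapLive := uintptr(heapLive-sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)

	debt := pagesTarget - int64(pagesSwept-pagesSweptBasis)
	fmt.Println("pagesTarget:", pagesTarget, "swept since basis:", pagesSwept-pagesSweptBasis)
	if debt > 0 {
		fmt.Println("must sweep", debt, "more pages before allocating")
	} else {
		fmt.Println("allocation is paid for")
	}
}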
diff --git a/libgo/go/runtime/mgcwork.go b/libgo/go/runtime/mgcwork.go
index 5eb05a767c0..461679b9343 100644
--- a/libgo/go/runtime/mgcwork.go
+++ b/libgo/go/runtime/mgcwork.go
@@ -12,8 +12,22 @@ import (
const (
_WorkbufSize = 2048 // in bytes; larger values result in less contention
+
+ // workbufAlloc is the number of bytes to allocate at a time
+ // for new workbufs. This must be a multiple of pageSize and
+ // should be a multiple of _WorkbufSize.
+ //
+ // Larger values reduce workbuf allocation overhead. Smaller
+ // values reduce heap fragmentation.
+ workbufAlloc = 32 << 10
)
+func init() {
+ if workbufAlloc%pageSize != 0 || workbufAlloc%_WorkbufSize != 0 {
+ throw("bad workbufAlloc")
+ }
+}
+
// Garbage collector work pool abstraction.
//
// This implements a producer/consumer model for pointers to grey
@@ -25,21 +39,6 @@ const (
// grey objects, thus blackening them, and then scans them,
// potentially producing new pointers to grey objects.
-// A wbufptr holds a workbuf*, but protects it from write barriers.
-// workbufs never live on the heap, so write barriers are unnecessary.
-// Write barriers on workbuf pointers may also be dangerous in the GC.
-//
-// TODO: Since workbuf is now go:notinheap, this isn't necessary.
-type wbufptr uintptr
-
-func wbufptrOf(w *workbuf) wbufptr {
- return wbufptr(unsafe.Pointer(w))
-}
-
-func (wp wbufptr) ptr() *workbuf {
- return (*workbuf)(unsafe.Pointer(wp))
-}
-
// A gcWork provides the interface to produce and consume work for the
// garbage collector.
//
@@ -75,7 +74,7 @@ type gcWork struct {
// next.
//
// Invariant: Both wbuf1 and wbuf2 are nil or neither are.
- wbuf1, wbuf2 wbufptr
+ wbuf1, wbuf2 *workbuf
// Bytes marked (blackened) on this gcWork. This is aggregated
// into work.bytesMarked by dispose.
@@ -87,12 +86,12 @@ type gcWork struct {
}
func (w *gcWork) init() {
- w.wbuf1 = wbufptrOf(getempty())
+ w.wbuf1 = getempty()
wbuf2 := trygetfull()
if wbuf2 == nil {
wbuf2 = getempty()
}
- w.wbuf2 = wbufptrOf(wbuf2)
+ w.wbuf2 = wbuf2
}
// put enqueues a pointer for the garbage collector to trace.
@@ -100,18 +99,18 @@ func (w *gcWork) init() {
//go:nowritebarrier
func (w *gcWork) put(obj uintptr) {
flushed := false
- wbuf := w.wbuf1.ptr()
+ wbuf := w.wbuf1
if wbuf == nil {
w.init()
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
// wbuf is empty at this point.
} else if wbuf.nobj == len(wbuf.obj) {
w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
if wbuf.nobj == len(wbuf.obj) {
putfull(wbuf)
wbuf = getempty()
- w.wbuf1 = wbufptrOf(wbuf)
+ w.wbuf1 = wbuf
flushed = true
}
}
@@ -132,7 +131,7 @@ func (w *gcWork) put(obj uintptr) {
// otherwise it returns false and the caller needs to call put.
//go:nowritebarrier
func (w *gcWork) putFast(obj uintptr) bool {
- wbuf := w.wbuf1.ptr()
+ wbuf := w.wbuf1
if wbuf == nil {
return false
} else if wbuf.nobj == len(wbuf.obj) {
@@ -151,15 +150,15 @@ func (w *gcWork) putFast(obj uintptr) bool {
// other gcWork instances or other caches.
//go:nowritebarrier
func (w *gcWork) tryGet() uintptr {
- wbuf := w.wbuf1.ptr()
+ wbuf := w.wbuf1
if wbuf == nil {
w.init()
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
// wbuf is empty at this point.
}
if wbuf.nobj == 0 {
w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
if wbuf.nobj == 0 {
owbuf := wbuf
wbuf = trygetfull()
@@ -167,7 +166,7 @@ func (w *gcWork) tryGet() uintptr {
return 0
}
putempty(owbuf)
- w.wbuf1 = wbufptrOf(wbuf)
+ w.wbuf1 = wbuf
}
}
@@ -180,7 +179,7 @@ func (w *gcWork) tryGet() uintptr {
// the caller is expected to call tryGet().
//go:nowritebarrier
func (w *gcWork) tryGetFast() uintptr {
- wbuf := w.wbuf1.ptr()
+ wbuf := w.wbuf1
if wbuf == nil {
return 0
}
@@ -197,15 +196,15 @@ func (w *gcWork) tryGetFast() uintptr {
// been retrieved. get returns 0 if there are no pointers remaining.
//go:nowritebarrier
func (w *gcWork) get() uintptr {
- wbuf := w.wbuf1.ptr()
+ wbuf := w.wbuf1
if wbuf == nil {
w.init()
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
// wbuf is empty at this point.
}
if wbuf.nobj == 0 {
w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
- wbuf = w.wbuf1.ptr()
+ wbuf = w.wbuf1
if wbuf.nobj == 0 {
owbuf := wbuf
wbuf = getfull()
@@ -213,7 +212,7 @@ func (w *gcWork) get() uintptr {
return 0
}
putempty(owbuf)
- w.wbuf1 = wbufptrOf(wbuf)
+ w.wbuf1 = wbuf
}
}
@@ -231,21 +230,21 @@ func (w *gcWork) get() uintptr {
//
//go:nowritebarrier
func (w *gcWork) dispose() {
- if wbuf := w.wbuf1.ptr(); wbuf != nil {
+ if wbuf := w.wbuf1; wbuf != nil {
if wbuf.nobj == 0 {
putempty(wbuf)
} else {
putfull(wbuf)
}
- w.wbuf1 = 0
+ w.wbuf1 = nil
- wbuf = w.wbuf2.ptr()
+ wbuf = w.wbuf2
if wbuf.nobj == 0 {
putempty(wbuf)
} else {
putfull(wbuf)
}
- w.wbuf2 = 0
+ w.wbuf2 = nil
}
if w.bytesMarked != 0 {
// dispose happens relatively infrequently. If this
@@ -265,14 +264,14 @@ func (w *gcWork) dispose() {
// global queue.
//go:nowritebarrier
func (w *gcWork) balance() {
- if w.wbuf1 == 0 {
+ if w.wbuf1 == nil {
return
}
- if wbuf := w.wbuf2.ptr(); wbuf.nobj != 0 {
+ if wbuf := w.wbuf2; wbuf.nobj != 0 {
putfull(wbuf)
- w.wbuf2 = wbufptrOf(getempty())
- } else if wbuf := w.wbuf1.ptr(); wbuf.nobj > 4 {
- w.wbuf1 = wbufptrOf(handoff(wbuf))
+ w.wbuf2 = getempty()
+ } else if wbuf := w.wbuf1; wbuf.nobj > 4 {
+ w.wbuf1 = handoff(wbuf)
} else {
return
}
@@ -285,7 +284,7 @@ func (w *gcWork) balance() {
// empty returns true if w has no mark work available.
//go:nowritebarrier
func (w *gcWork) empty() bool {
- return w.wbuf1 == 0 || (w.wbuf1.ptr().nobj == 0 && w.wbuf2.ptr().nobj == 0)
+ return w.wbuf1 == nil || (w.wbuf1.nobj == 0 && w.wbuf2.nobj == 0)
}
// Internally, the GC work pool is kept in arrays in work buffers.
@@ -327,23 +326,56 @@ func (b *workbuf) checkempty() {
func getempty() *workbuf {
var b *workbuf
if work.empty != 0 {
- b = (*workbuf)(lfstackpop(&work.empty))
+ b = (*workbuf)(work.empty.pop())
if b != nil {
b.checkempty()
}
}
if b == nil {
- b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), sys.CacheLineSize, &memstats.gc_sys))
+ // Allocate more workbufs.
+ var s *mspan
+ if work.wbufSpans.free.first != nil {
+ lock(&work.wbufSpans.lock)
+ s = work.wbufSpans.free.first
+ if s != nil {
+ work.wbufSpans.free.remove(s)
+ work.wbufSpans.busy.insert(s)
+ }
+ unlock(&work.wbufSpans.lock)
+ }
+ if s == nil {
+ systemstack(func() {
+ s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+ })
+ if s == nil {
+ throw("out of memory")
+ }
+ // Record the new span in the busy list.
+ lock(&work.wbufSpans.lock)
+ work.wbufSpans.busy.insert(s)
+ unlock(&work.wbufSpans.lock)
+ }
+ // Slice up the span into new workbufs. Return one and
+ // put the rest on the empty list.
+ for i := uintptr(0); i+_WorkbufSize <= workbufAlloc; i += _WorkbufSize {
+ newb := (*workbuf)(unsafe.Pointer(s.base() + i))
+ newb.nobj = 0
+ if i == 0 {
+ b = newb
+ } else {
+ putempty(newb)
+ }
+ }
}
return b
}
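The carve-up loop at the end of getempty is plain fixed-size slicing of one allocation: keep chunk zero, free-list the rest. A toy sketch with illustrative sizes standing in for workbufAlloc and _WorkbufSize:

package main

import "fmt"

func main() {
	const workbufAlloc = 32
	const workbufSize = 8

	base := uintptr(0x1000) // pretend span base address
	var kept uintptr
	var emptyList []uintptr
	for i := uintptr(0); i+workbufSize <= workbufAlloc; i += workbufSize {
		b := base + i
		if i == 0 {
			kept = b // returned to the caller
		} else {
			emptyList = append(emptyList, b) // the putempty(newb) case
		}
	}
	fmt.Printf("kept %#x, queued %d empties: %#x\n", kept, len(emptyList), emptyList)
}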
// putempty puts a workbuf onto the work.empty list.
-// Upon entry this go routine owns b. The lfstackpush relinquishes ownership.
+// Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
//go:nowritebarrier
func putempty(b *workbuf) {
b.checkempty()
- lfstackpush(&work.empty, &b.node)
+ work.empty.push(&b.node)
}
// putfull puts the workbuf on the work.full list for the GC.
@@ -352,14 +384,14 @@ func putempty(b *workbuf) {
//go:nowritebarrier
func putfull(b *workbuf) {
b.checknonempty()
- lfstackpush(&work.full, &b.node)
+ work.full.push(&b.node)
}
// trygetfull tries to get a full or partially empty workbuffer.
// If one is not immediately available return nil
//go:nowritebarrier
func trygetfull() *workbuf {
- b := (*workbuf)(lfstackpop(&work.full))
+ b := (*workbuf)(work.full.pop())
if b != nil {
b.checknonempty()
return b
@@ -380,7 +412,7 @@ func trygetfull() *workbuf {
// phase.
//go:nowritebarrier
func getfull() *workbuf {
- b := (*workbuf)(lfstackpop(&work.full))
+ b := (*workbuf)(work.full.pop())
if b != nil {
b.checknonempty()
return b
@@ -398,7 +430,7 @@ func getfull() *workbuf {
println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
}
- b = (*workbuf)(lfstackpop(&work.full))
+ b = (*workbuf)(work.full.pop())
if b != nil {
b.checknonempty()
return b
@@ -412,15 +444,11 @@ func getfull() *workbuf {
if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs {
return nil
}
- _g_ := getg()
if i < 10 {
- _g_.m.gcstats.nprocyield++
procyield(20)
} else if i < 20 {
- _g_.m.gcstats.nosyield++
osyield()
} else {
- _g_.m.gcstats.nsleep++
usleep(100)
}
}
@@ -434,11 +462,49 @@ func handoff(b *workbuf) *workbuf {
b.nobj -= n
b1.nobj = n
memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
- _g_ := getg()
- _g_.m.gcstats.nhandoff++
- _g_.m.gcstats.nhandoffcnt += uint64(n)
// Put b on full list - let first half of b get stolen.
putfull(b)
return b1
}
+
+// prepareFreeWorkbufs moves busy workbuf spans to the free list so they
+// can be freed to the heap. This must only be called when all
+// workbufs are on the empty list.
+func prepareFreeWorkbufs() {
+ lock(&work.wbufSpans.lock)
+ if work.full != 0 {
+ throw("cannot free workbufs when work.full != 0")
+ }
+ // Since all workbufs are on the empty list, we don't care
+ // which ones are in which spans. We can wipe the entire empty
+ // list and move all workbuf spans to the free list.
+ work.empty = 0
+ work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
+ unlock(&work.wbufSpans.lock)
+}
+
+// freeSomeWbufs frees some workbufs back to the heap and returns
+// true if it should be called again to free more.
+func freeSomeWbufs(preemptible bool) bool {
+ const batchSize = 64 // ~1-2 µs per span.
+ lock(&work.wbufSpans.lock)
+ if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
+ unlock(&work.wbufSpans.lock)
+ return false
+ }
+ systemstack(func() {
+ gp := getg().m.curg
+ for i := 0; i < batchSize && !(preemptible && gp.preempt); i++ {
+ span := work.wbufSpans.free.first
+ if span == nil {
+ break
+ }
+ work.wbufSpans.free.remove(span)
+ mheap_.freeManual(span, &memstats.gc_sys)
+ }
+ })
+ more := !work.wbufSpans.free.isEmpty()
+ unlock(&work.wbufSpans.lock)
+ return more
+}
diff --git a/libgo/go/runtime/mheap.go b/libgo/go/runtime/mheap.go
index 72627485b87..8749f971065 100644
--- a/libgo/go/runtime/mheap.go
+++ b/libgo/go/runtime/mheap.go
@@ -29,12 +29,13 @@ const minPhysPageSize = 4096
//go:notinheap
type mheap struct {
lock mutex
- free [_MaxMHeapList]mSpanList // free lists of given length
- freelarge mSpanList // free lists length >= _MaxMHeapList
- busy [_MaxMHeapList]mSpanList // busy lists of large objects of given length
- busylarge mSpanList // busy lists of large objects length >= _MaxMHeapList
+ free [_MaxMHeapList]mSpanList // free lists of given length up to _MaxMHeapList
+ freelarge mTreap // free treap of length >= _MaxMHeapList
+ busy [_MaxMHeapList]mSpanList // busy lists of large spans of given length
+ busylarge mSpanList // busy lists of large spans length >= _MaxMHeapList
sweepgen uint32 // sweep generation, see comment in mspan
sweepdone uint32 // all spans are swept
+ sweepers uint32 // number of active sweepone calls
// allspans is a slice of all mspans ever created. Each mspan
// appears exactly once.
@@ -74,37 +75,82 @@ type mheap struct {
_ uint32 // align uint64 fields on 32-bit for atomics
// Proportional sweep
- pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
- spanBytesAlloc uint64 // bytes of spans allocated this cycle; updated atomically
- pagesSwept uint64 // pages swept this cycle; updated atomically
- sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
+ //
+ // These parameters represent a linear function from heap_live
+ // to page sweep count. The proportional sweep system works to
+ // stay in the black by keeping the current page sweep count
+ // above this line at the current heap_live.
+ //
+ // The line has slope sweepPagesPerByte and passes through a
+ // basis point at (sweepHeapLiveBasis, pagesSweptBasis). At
+ // any given time, the system is at (memstats.heap_live,
+ // pagesSwept) in this space.
+ //
+ // It's important that the line pass through a point we
+ // control rather than simply starting at a (0,0) origin
+ // because that lets us adjust sweep pacing at any time while
+ // accounting for current progress. If we could only adjust
+ // the slope, it would create a discontinuity in debt if any
+ // progress has already been made.
+ pagesInUse uint64 // pages of spans in stats _MSpanInUse; R/W with mheap.lock
+ pagesSwept uint64 // pages swept this cycle; updated atomically
+ pagesSweptBasis uint64 // pagesSwept to use as the origin of the sweep ratio; updated atomically
+ sweepHeapLiveBasis uint64 // value of heap_live to use as the origin of sweep ratio; written with lock, read without
+ sweepPagesPerByte float64 // proportional sweep ratio; written with lock, read without
// TODO(austin): pagesInUse should be a uintptr, but the 386
// compiler can't 8-byte align fields.
// Malloc stats.
- largefree uint64 // bytes freed for large objects (>maxsmallsize)
- nlargefree uint64 // number of frees for large objects (>maxsmallsize)
- nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
+ largealloc uint64 // bytes allocated for large objects
+ nlargealloc uint64 // number of large object allocations
+ largefree uint64 // bytes freed for large objects (>maxsmallsize)
+ nlargefree uint64 // number of frees for large objects (>maxsmallsize)
+ nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)
// range of addresses we might see in the heap
- bitmap uintptr // Points to one byte past the end of the bitmap
- bitmap_mapped uintptr
- arena_start uintptr
- arena_used uintptr // always mHeap_Map{Bits,Spans} before updating
- arena_end uintptr
+ bitmap uintptr // Points to one byte past the end of the bitmap
+ bitmap_mapped uintptr
+
+ // The arena_* fields indicate the addresses of the Go heap.
+ //
+ // The maximum range of the Go heap is
+ // [arena_start, arena_start+_MaxMem+1).
+ //
+ // The range of the current Go heap is
+ // [arena_start, arena_used). Parts of this range may not be
+ // mapped, but the metadata structures are always mapped for
+ // the full range.
+ arena_start uintptr
+ arena_used uintptr // Set with setArenaUsed.
+
+ // The heap is grown using a linear allocator that allocates
+ // from the block [arena_alloc, arena_end). arena_alloc is
+ // often, but *not always* equal to arena_used.
+ arena_alloc uintptr
+ arena_end uintptr
+
+ // arena_reserved indicates that the memory [arena_alloc,
+ // arena_end) is reserved (e.g., mapped PROT_NONE). If this is
+ // false, we have to be careful not to clobber existing
+ // mappings here. If this is true, then we own the mapping
+ // here and *must* clobber it to use it.
arena_reserved bool
+ _ uint32 // ensure 64-bit alignment
+
// central free lists for small size classes.
// the padding makes sure that the MCentrals are
// spaced CacheLineSize bytes apart, so that each MCentral.lock
// gets its own cache line.
- central [_NumSizeClasses]struct {
+ // central is indexed by spanClass.
+ central [numSpanClasses]struct {
mcentral mcentral
- pad [sys.CacheLineSize]byte
+ pad [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
}
spanalloc fixalloc // allocator for span*
cachealloc fixalloc // allocator for mcache*
+ treapalloc fixalloc // allocator for treapNodes* used by large objects
specialfinalizeralloc fixalloc // allocator for specialfinalizer*
specialprofilealloc fixalloc // allocator for specialprofile*
speciallock mutex // lock for special record allocators.
@@ -117,7 +163,7 @@ var mheap_ mheap
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
-// When a MSpan is allocated, state == MSpanInUse or MSpanStack
+// When a MSpan is allocated, state == MSpanInUse or MSpanManual
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
// Every MSpan is in one doubly-linked list,
@@ -125,25 +171,25 @@ var mheap_ mheap
// MCentral's span lists.
// An MSpan representing actual memory has state _MSpanInUse,
-// _MSpanStack, or _MSpanFree. Transitions between these states are
+// _MSpanManual, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
-// * A span may transition from free to in-use or stack during any GC
+// * A span may transition from free to in-use or manual during any GC
// phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
-// in-use to free (as a result of sweeping) or stack to free (as a
+// in-use to free (as a result of sweeping) or manual to free (as a
// result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
-// stack or in-use to free. Because concurrent GC may read a pointer
+// manual or in-use to free. Because concurrent GC may read a pointer
// and then look up its span, the span state must be monotonic.
type mSpanState uint8
const (
- _MSpanDead mSpanState = iota
- _MSpanInUse // allocated for garbage collected heap
- _MSpanStack // allocated for use by stack allocator
+ _MSpanDead mSpanState = iota
+ _MSpanInUse // allocated for garbage collected heap
+ _MSpanManual // allocated for manual management (e.g., stack allocator)
_MSpanFree
)
@@ -152,7 +198,7 @@ const (
var mSpanStateNames = []string{
"_MSpanDead",
"_MSpanInUse",
- "_MSpanStack",
+ "_MSpanManual",
"_MSpanFree",
}
@@ -170,15 +216,16 @@ type mspan struct {
prev *mspan // previous span in list, or nil if none
list *mSpanList // For debugging. TODO: Remove.
- startAddr uintptr // address of first byte of span aka s.base()
- npages uintptr // number of pages in span
- stackfreelist gclinkptr // list of free stacks, avoids overloading freelist
+ startAddr uintptr // address of first byte of span aka s.base()
+ npages uintptr // number of pages in span
+
+ manualFreeList gclinkptr // list of free objects in _MSpanManual spans
// freeindex is the slot index between 0 and nelems at which to begin scanning
// for the next free object in this span.
// Each allocation scans allocBits starting at freeindex until it encounters a 0
// indicating a free object. freeindex is then adjusted so that subsequent scans begin
- // just past the the newly discovered free object.
+ // just past the newly discovered free object.
//
// If freeindex == nelem, this span has no free objects.
//
@@ -224,8 +271,8 @@ type mspan struct {
// The sweep will free the old allocBits and set allocBits to the
// gcmarkBits. The gcmarkBits are replaced with a fresh zeroed
// out memory.
- allocBits *uint8
- gcmarkBits *uint8
+ allocBits *gcBits
+ gcmarkBits *gcBits
// sweep generation:
// if sweepgen == h->sweepgen - 2, the span needs sweeping
@@ -236,8 +283,8 @@ type mspan struct {
sweepgen uint32
divMul uint16 // for divide by elemsize - divMagic.mul
baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base
- allocCount uint16 // capacity - number of objects in freelist
- sizeclass uint8 // size class
+ allocCount uint16 // number of allocated objects
+ spanclass spanClass // size class and noscan (uint8)
incache bool // being used by an mcache
state mSpanState // mspaninuse etc
needzero uint8 // needs to be zeroed before allocation
@@ -292,8 +339,33 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
h.allspans = append(h.allspans, s)
}
+// A spanClass represents the size class and noscan-ness of a span.
+//
+// Each size class has a noscan spanClass and a scan spanClass. The
+// noscan spanClass contains only noscan objects, which do not contain
+// pointers and thus do not need to be scanned by the garbage
+// collector.
+type spanClass uint8
+
+const (
+ numSpanClasses = _NumSizeClasses << 1
+ tinySpanClass = spanClass(tinySizeClass<<1 | 1)
+)
+
+func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
+ return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
+}
+
+func (sc spanClass) sizeclass() int8 {
+ return int8(sc >> 1)
+}
+
+func (sc spanClass) noscan() bool {
+ return sc&1 != 0
+}
+
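The spanClass encoding is a one-bit tag packed below the size class. A standalone copy of the helpers above, with the runtime's bool2int replaced by an explicit branch so the sketch compiles on its own:

package main

import "fmt"

type spanClass uint8

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	n := spanClass(0)
	if noscan {
		n = 1
	}
	return spanClass(sizeclass<<1) | n
}

func (sc spanClass) sizeclass() int8 { return int8(sc >> 1) }
func (sc spanClass) noscan() bool    { return sc&1 != 0 }

func main() {
	sc := makeSpanClass(5, true)
	fmt.Println(uint8(sc), sc.sizeclass(), sc.noscan()) // 11 5 true
}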
// inheap reports whether b is a pointer into a (potentially dead) heap object.
-// It returns false for pointers into stack spans.
+// It returns false for pointers into _MSpanManual spans.
// Non-preemptible because it is used by write barriers.
//go:nowritebarrier
//go:nosplit
@@ -309,7 +381,9 @@ func inheap(b uintptr) bool {
return true
}
-// inHeapOrStack is a variant of inheap that returns true for pointers into stack spans.
+// inHeapOrStack is a variant of inheap that returns true for pointers
+// into any allocated heap span.
+//
//go:nowritebarrier
//go:nosplit
func inHeapOrStack(b uintptr) bool {
@@ -322,10 +396,8 @@ func inHeapOrStack(b uintptr) bool {
return false
}
switch s.state {
- case mSpanInUse:
+ case mSpanInUse, _MSpanManual:
return b < s.limit
- case _MSpanStack:
- return b < s.base()+s.npages<<_PageShift
default:
return false
}
@@ -376,7 +448,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
}
p := s.base()
- if s.sizeclass == 0 {
+ if s.spanclass.sizeclass() == 0 {
// Large object.
if base != nil {
*base = p
@@ -401,6 +473,7 @@ func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
// Initialize the heap.
func (h *mheap) init(spansStart, spansBytes uintptr) {
+ h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
@@ -421,26 +494,51 @@ func (h *mheap) init(spansStart, spansBytes uintptr) {
h.busy[i].init()
}
- h.freelarge.init()
h.busylarge.init()
for i := range h.central {
- h.central[i].mcentral.init(int32(i))
+ h.central[i].mcentral.init(spanClass(i))
}
sp := (*slice)(unsafe.Pointer(&h.spans))
sp.array = unsafe.Pointer(spansStart)
sp.len = 0
sp.cap = int(spansBytes / sys.PtrSize)
+
+ // Map metadata structures. But don't map race detector memory
+ // since we're not actually growing the arena here (and TSAN
+ // gets mad if you map 0 bytes).
+ h.setArenaUsed(h.arena_used, false)
}
-// mHeap_MapSpans makes sure that the spans are mapped
+// setArenaUsed extends the usable arena to address arena_used and
+// maps auxiliary VM regions for any newly usable arena space.
+//
+// racemap indicates that this memory should be managed by the race
+// detector. racemap should be true unless this is covering a VM hole.
+func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
+ // Map auxiliary structures *before* h.arena_used is updated.
+ // Waiting to update arena_used until after the memory has been mapped
+ // avoids faults when other threads try to access these regions immediately
+ // after observing the change to arena_used.
+
+ // Map the bitmap.
+ h.mapBits(arena_used)
+
+ // Map spans array.
+ h.mapSpans(arena_used)
+
+ // Tell the race detector about the new heap memory.
+ if racemap && raceenabled {
+ racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
+ }
+
+ h.arena_used = arena_used
+}
+
+// mapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
-// It must be called with the expected new value of arena_used,
-// *before* h.arena_used has been updated.
-// Waiting to update arena_used until after the memory has been mapped
-// avoids faults when other threads try access the bitmap immediately
-// after observing the change to arena_used.
+// Don't call this directly. Call mheap.setArenaUsed.
func (h *mheap) mapSpans(arena_used uintptr) {
// Map spans array, PageSize at a time.
n := arena_used
@@ -466,7 +564,7 @@ retry:
if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
list.remove(s)
// swept spans are at the end of the list
- list.insertBack(s)
+ list.insertBack(s) // Puts it back on a busy list. s is not in the treap at this point.
unlock(&h.lock)
snpages := s.npages
if s.sweep(false) {
@@ -533,7 +631,7 @@ func (h *mheap) reclaim(npage uintptr) {
// Allocate a new span of npage pages from the heap for GC'd memory
// and record its size class in the HeapMap and HeapMapCache.
-func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
+func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
_g_ := getg()
lock(&h.lock)
@@ -547,7 +645,13 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
// If GC kept a bit for whether there were any marks
// in a span, we could release these free spans
// at the end of GC and eliminate this entirely.
+ if trace.enabled {
+ traceGCSweepStart()
+ }
h.reclaim(npage)
+ if trace.enabled {
+ traceGCSweepDone()
+ }
}
// transfer stats from cache to global
@@ -556,7 +660,7 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
_g_.m.mcache.local_tinyallocs = 0
- s := h.allocSpanLocked(npage)
+ s := h.allocSpanLocked(npage, &memstats.heap_inuse)
if s != nil {
// Record span info, because gc needs to be
// able to map interior pointer to containing span.
@@ -564,8 +668,8 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
s.state = _MSpanInUse
s.allocCount = 0
- s.sizeclass = uint8(sizeclass)
- if sizeclass == 0 {
+ s.spanclass = spanclass
+ if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
s.elemsize = s.npages << _PageShift
s.divShift = 0
s.divMul = 0
@@ -584,9 +688,11 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
h.pagesInUse += uint64(npage)
if large {
memstats.heap_objects++
+ mheap_.largealloc += uint64(s.elemsize)
+ mheap_.nlargealloc++
atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
// Swept spans are at the end of lists.
- if s.npages < uintptr(len(h.free)) {
+ if s.npages < uintptr(len(h.busy)) {
h.busy[s.npages].insertBack(s)
} else {
h.busylarge.insertBack(s)
@@ -615,13 +721,13 @@ func (h *mheap) alloc_m(npage uintptr, sizeclass int32, large bool) *mspan {
return s
}
-func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool) *mspan {
+func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
// Don't do any operations that lock the heap on the G stack.
// It might trigger stack growth, and the stack growth code needs
// to be able to allocate heap.
var s *mspan
systemstack(func() {
- s = h.alloc_m(npage, sizeclass, large)
+ s = h.alloc_m(npage, spanclass, large)
})
if s != nil {
@@ -633,29 +739,46 @@ func (h *mheap) alloc(npage uintptr, sizeclass int32, large bool, needzero bool)
return s
}
-func (h *mheap) allocStack(npage uintptr) *mspan {
- _g_ := getg()
- if _g_ != _g_.m.g0 {
- throw("mheap_allocstack not on g0 stack")
- }
+// allocManual allocates a manually-managed span of npage pages.
+// allocManual returns nil if allocation fails.
+//
+// allocManual adds the bytes used to *stat, which should be a
+// memstats in-use field. Unlike allocations in the GC'd heap, the
+// allocation does *not* count toward heap_inuse or heap_sys.
+//
+// The memory backing the returned span may not be zeroed if
+// span.needzero is set.
+//
+// allocManual must be called on the system stack to prevent stack
+// growth. Since this is used by the stack allocator, stack growth
+// during allocManual would self-deadlock.
+//
+//go:systemstack
+func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
lock(&h.lock)
- s := h.allocSpanLocked(npage)
+ s := h.allocSpanLocked(npage, stat)
if s != nil {
- s.state = _MSpanStack
- s.stackfreelist = 0
+ s.state = _MSpanManual
+ s.manualFreeList = 0
s.allocCount = 0
- memstats.stacks_inuse += uint64(s.npages << _PageShift)
+ s.spanclass = 0
+ s.nelems = 0
+ s.elemsize = 0
+ s.limit = s.base() + s.npages<<_PageShift
+ // Manually managed memory doesn't count toward heap_sys.
+ memstats.heap_sys -= uint64(s.npages << _PageShift)
}
- // This unlock acts as a release barrier. See mHeap_Alloc_m.
+ // This unlock acts as a release barrier. See mheap.alloc_m.
unlock(&h.lock)
+
return s
}
// Allocates a span of the given size. h must be locked.
// The returned span has been removed from the
// free list, but its state is still MSpanFree.
-func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
+func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
var list *mSpanList
var s *mspan
@@ -664,13 +787,12 @@ func (h *mheap) allocSpanLocked(npage uintptr) *mspan {
list = &h.free[i]
if !list.isEmpty() {
s = list.first
+ list.remove(s)
goto HaveSpan
}
}
-
// Best fit in list of large spans.
- list = &h.freelarge
- s = h.allocLarge(npage)
+ s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
if s == nil {
if !h.grow(npage) {
return nil
@@ -689,10 +811,6 @@ HaveSpan:
if s.npages < npage {
throw("MHeap_AllocLocked - bad npages")
}
- list.remove(s)
- if s.inList() {
- throw("still in list")
- }
if s.npreleased > 0 {
sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
memstats.heap_released -= uint64(s.npreleased << _PageShift)
@@ -711,8 +829,8 @@ HaveSpan:
h.spans[p] = t
h.spans[p+t.npages-1] = t
t.needzero = s.needzero
- s.state = _MSpanStack // prevent coalescing with s
- t.state = _MSpanStack
+ s.state = _MSpanManual // prevent coalescing with s
+ t.state = _MSpanManual
h.freeSpanLocked(t, false, false, s.unusedsince)
s.state = _MSpanFree
}
@@ -723,7 +841,7 @@ HaveSpan:
h.spans[p+n] = s
}
- memstats.heap_inuse += uint64(npage << _PageShift)
+ *stat += uint64(npage << _PageShift)
memstats.heap_idle -= uint64(npage << _PageShift)
//println("spanalloc", hex(s.start<<_PageShift))
@@ -733,24 +851,19 @@ HaveSpan:
return s
}
-// Allocate a span of exactly npage pages from the list of large spans.
-func (h *mheap) allocLarge(npage uintptr) *mspan {
- return bestFit(&h.freelarge, npage, nil)
+// Large spans have a minimum size of 1MByte, so a 1TByte heap holds at most
+// 1 million large spans. Experimentation using random sizes indicates that
+// the depth of the treap is less than 2x that of a perfectly balanced tree:
+// 1TByte can be covered by a perfectly balanced tree of depth 20, and twice
+// that is an acceptable 40.
+func (h *mheap) isLargeSpan(npages uintptr) bool {
+ return npages >= uintptr(len(h.free))
}
-// Search list for smallest span with >= npage pages.
-// If there are multiple smallest spans, take the one
-// with the earliest starting address.
-func bestFit(list *mSpanList, npage uintptr, best *mspan) *mspan {
- for s := list.first; s != nil; s = s.next {
- if s.npages < npage {
- continue
- }
- if best == nil || s.npages < best.npages || (s.npages == best.npages && s.base() < best.base()) {
- best = s
- }
- }
- return best
+// allocLarge allocates a span of at least npage pages from the treap of large spans.
+// Returns nil if no such span currently exists.
+func (h *mheap) allocLarge(npage uintptr) *mspan {
+ // Search treap for smallest span with >= npage pages.
+ return h.freelarge.remove(npage)
}
// Try to add at least npage pages of memory to the heap,
@@ -849,22 +962,30 @@ func (h *mheap) freeSpan(s *mspan, acct int32) {
})
}
-func (h *mheap) freeStack(s *mspan) {
- _g_ := getg()
- if _g_ != _g_.m.g0 {
- throw("mheap_freestack not on g0 stack")
- }
+// freeManual frees a manually-managed span returned by allocManual.
+// stat must be the same as the stat passed to the allocManual that
+// allocated s.
+//
+// This must only be called when gcphase == _GCoff. See mSpanState for
+// an explanation.
+//
+// freeManual must be called on the system stack to prevent stack
+// growth, just like allocManual.
+//
+//go:systemstack
+func (h *mheap) freeManual(s *mspan, stat *uint64) {
s.needzero = 1
lock(&h.lock)
- memstats.stacks_inuse -= uint64(s.npages << _PageShift)
- h.freeSpanLocked(s, true, true, 0)
+ *stat -= uint64(s.npages << _PageShift)
+ memstats.heap_sys += uint64(s.npages << _PageShift)
+ h.freeSpanLocked(s, false, true, 0)
unlock(&h.lock)
}
// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
switch s.state {
- case _MSpanStack:
+ case _MSpanManual:
if s.allocCount != 0 {
throw("MHeap_FreeSpanLocked - invalid stack free")
}
@@ -900,50 +1021,98 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
// Coalesce with earlier, later spans.
p := (s.base() - h.arena_start) >> _PageShift
if p > 0 {
- t := h.spans[p-1]
- if t != nil && t.state == _MSpanFree {
- s.startAddr = t.startAddr
- s.npages += t.npages
- s.npreleased = t.npreleased // absorb released pages
- s.needzero |= t.needzero
- p -= t.npages
+ before := h.spans[p-1]
+ if before != nil && before.state == _MSpanFree {
+ // Now adjust s.
+ s.startAddr = before.startAddr
+ s.npages += before.npages
+ s.npreleased = before.npreleased // absorb released pages
+ s.needzero |= before.needzero
+ p -= before.npages
h.spans[p] = s
- h.freeList(t.npages).remove(t)
- t.state = _MSpanDead
- h.spanalloc.free(unsafe.Pointer(t))
+ // The size is potentially changing so the treap needs to delete adjacent nodes and
+ // insert back as a combined node.
+ if h.isLargeSpan(before.npages) {
+ // before is a large span, so it must be in the treap; remove it from there.
+ h.freelarge.removeSpan(before)
+ } else {
+ h.freeList(before.npages).remove(before)
+ }
+ before.state = _MSpanDead
+ h.spanalloc.free(unsafe.Pointer(before))
}
}
+
+ // Now check to see if the next (greater addresses) span is free and can be coalesced.
if (p + s.npages) < uintptr(len(h.spans)) {
- t := h.spans[p+s.npages]
- if t != nil && t.state == _MSpanFree {
- s.npages += t.npages
- s.npreleased += t.npreleased
- s.needzero |= t.needzero
+ after := h.spans[p+s.npages]
+ if after != nil && after.state == _MSpanFree {
+ s.npages += after.npages
+ s.npreleased += after.npreleased
+ s.needzero |= after.needzero
h.spans[p+s.npages-1] = s
- h.freeList(t.npages).remove(t)
- t.state = _MSpanDead
- h.spanalloc.free(unsafe.Pointer(t))
+ if h.isLargeSpan(after.npages) {
+ h.freelarge.removeSpan(after)
+ } else {
+ h.freeList(after.npages).remove(after)
+ }
+ after.state = _MSpanDead
+ h.spanalloc.free(unsafe.Pointer(after))
}
}
- // Insert s into appropriate list.
- h.freeList(s.npages).insert(s)
+ // Insert s into appropriate list or treap.
+ if h.isLargeSpan(s.npages) {
+ h.freelarge.insert(s)
+ } else {
+ h.freeList(s.npages).insert(s)
+ }
}
func (h *mheap) freeList(npages uintptr) *mSpanList {
- if npages < uintptr(len(h.free)) {
- return &h.free[npages]
- }
- return &h.freelarge
+ return &h.free[npages]
}
func (h *mheap) busyList(npages uintptr) *mSpanList {
- if npages < uintptr(len(h.free)) {
+ if npages < uintptr(len(h.busy)) {
return &h.busy[npages]
}
return &h.busylarge
}
+func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
+ s := t.spanKey
+ var sumreleased uintptr
+ if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
+ start := s.base()
+ end := start + s.npages<<_PageShift
+ if physPageSize > _PageSize {
+ // We can only release pages in
+ // physPageSize blocks, so round start
+ // and end in. (Otherwise, madvise
+ // will round them *out* and release
+ // more memory than we want.)
+ start = (start + physPageSize - 1) &^ (physPageSize - 1)
+ end &^= physPageSize - 1
+ if end <= start {
+ // start and end don't span a
+ // whole physical page.
+ return sumreleased
+ }
+ }
+ len := end - start
+ released := len - (s.npreleased << _PageShift)
+ if physPageSize > _PageSize && released == 0 {
+ return sumreleased
+ }
+ memstats.heap_released += uint64(released)
+ sumreleased += released
+ s.npreleased = len >> _PageShift
+ sysUnused(unsafe.Pointer(start), len)
+ }
+ return sumreleased
+}
+
func scavengelist(list *mSpanList, now, limit uint64) uintptr {
if list.isEmpty() {
return 0
@@ -984,27 +1153,31 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
}
func (h *mheap) scavenge(k int32, now, limit uint64) {
+ // Disallow malloc or panic while holding the heap lock. We do
+ // this here because this is a non-mallocgc entry-point to
+ // the mheap API.
+ gp := getg()
+ gp.m.mallocing++
lock(&h.lock)
var sumreleased uintptr
for i := 0; i < len(h.free); i++ {
sumreleased += scavengelist(&h.free[i], now, limit)
}
- sumreleased += scavengelist(&h.freelarge, now, limit)
+ sumreleased += scavengetreap(h.freelarge.treap, now, limit)
unlock(&h.lock)
+ gp.m.mallocing--
if debug.gctrace > 0 {
if sumreleased > 0 {
print("scvg", k, ": ", sumreleased>>20, " MB released\n")
}
- // TODO(dvyukov): these stats are incorrect as we don't subtract stack usage from heap.
- // But we can't call ReadMemStats on g0 holding locks.
print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
}
}
//go:linkname runtime_debug_freeOSMemory runtime_debug.freeOSMemory
func runtime_debug_freeOSMemory() {
- gcStart(gcForceBlockMode, false)
+ GC()
systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}
@@ -1017,7 +1190,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
span.startAddr = base
span.npages = npages
span.allocCount = 0
- span.sizeclass = 0
+ span.spanclass = 0
span.incache = false
span.elemsize = 0
span.state = _MSpanDead
@@ -1043,7 +1216,8 @@ func (list *mSpanList) init() {
func (list *mSpanList) remove(span *mspan) {
if span.list != list {
- println("runtime: failed MSpanList_Remove", span, span.prev, span.list, list)
+ print("runtime: failed MSpanList_Remove span.npages=", span.npages,
+ " span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
throw("MSpanList_Remove")
}
if list.first == span {
@@ -1085,7 +1259,7 @@ func (list *mSpanList) insert(span *mspan) {
func (list *mSpanList) insertBack(span *mspan) {
if span.next != nil || span.prev != nil || span.list != nil {
- println("failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
+ println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
throw("MSpanList_InsertBack")
}
span.prev = list.last
@@ -1100,6 +1274,31 @@ func (list *mSpanList) insertBack(span *mspan) {
span.list = list
}
+// takeAll removes all spans from other and inserts them at the front
+// of list.
+func (list *mSpanList) takeAll(other *mSpanList) {
+ if other.isEmpty() {
+ return
+ }
+
+ // Reparent everything in other to list.
+ for s := other.first; s != nil; s = s.next {
+ s.list = list
+ }
+
+ // Concatenate the lists.
+ if list.isEmpty() {
+ *list = *other
+ } else {
+ // Neither list is empty. Put other before list.
+ other.last.next = list.first
+ list.first.prev = other.last
+ list.first = other.first
+ }
+
+ other.first, other.last = nil, nil
+}
+
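takeAll is O(n) only for the reparenting pass; the splice itself is constant-time pointer surgery. A self-contained sketch with a minimal doubly-linked list (toy types, same splice order: other's spans end up in front):

package main

import "fmt"

type span struct {
	name       string
	prev, next *span
	list       *spanList
}

type spanList struct{ first, last *span }

func (l *spanList) isEmpty() bool { return l.first == nil }

func (l *spanList) push(s *span) { // insertBack, for building test lists
	s.list = l
	s.prev = l.last
	if l.last != nil {
		l.last.next = s
	} else {
		l.first = s
	}
	l.last = s
}

// takeAll mirrors mSpanList.takeAll: reparent, then splice other in front.
func (l *spanList) takeAll(other *spanList) {
	if other.isEmpty() {
		return
	}
	for s := other.first; s != nil; s = s.next {
		s.list = l
	}
	if l.isEmpty() {
		*l = *other
	} else {
		other.last.next = l.first
		l.first.prev = other.last
		l.first = other.first
	}
	other.first, other.last = nil, nil
}

func main() {
	var busy, free spanList
	for _, n := range []string{"a", "b"} {
		busy.push(&span{name: n})
	}
	free.push(&span{name: "c"})
	free.takeAll(&busy)
	for s := free.first; s != nil; s = s.next {
		fmt.Print(s.name, " ") // a b c
	}
	fmt.Println()
}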
const (
_KindSpecialFinalizer = 1
_KindSpecialProfile = 2
@@ -1311,6 +1510,22 @@ func freespecial(s *special, p unsafe.Pointer, size uintptr) {
}
}
+// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
+//
+//go:notinheap
+type gcBits uint8
+
+// bytep returns a pointer to the n'th byte of b.
+func (b *gcBits) bytep(n uintptr) *uint8 {
+ return addb((*uint8)(b), n)
+}
+
+// bitp returns a pointer to the byte containing bit n and a mask for
+// selecting that bit from *bytep.
+func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
+ return b.bytep(n / 8), 1 << (n % 8)
+}
+
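The index arithmetic in bitp is the usual bit-vector addressing: bit n lives in byte n/8 under mask 1<<(n%8). A standalone sketch of just that arithmetic:

package main

import "fmt"

func bitp(n uintptr) (byteIndex uintptr, mask uint8) {
	return n / 8, 1 << (n % 8)
}

func main() {
	for _, n := range []uintptr{0, 7, 8, 13} {
		i, m := bitp(n)
		fmt.Printf("bit %2d -> byte %d, mask %08b\n", n, i, m)
	}
}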
const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
@@ -1320,42 +1535,87 @@ type gcBitsHeader struct {
}
//go:notinheap
-type gcBits struct {
+type gcBitsArena struct {
// gcBitsHeader // side step recursive type bug (issue 14620) by including fields by hand.
- free uintptr // free is the index into bits of the next free byte.
- next *gcBits
- bits [gcBitsChunkBytes - gcBitsHeaderBytes]uint8
+ free uintptr // free is the index into bits of the next free byte; read/write atomically
+ next *gcBitsArena
+ bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}
var gcBitsArenas struct {
lock mutex
- free *gcBits
- next *gcBits
- current *gcBits
- previous *gcBits
+ free *gcBitsArena
+ next *gcBitsArena // Read atomically. Write atomically under lock.
+ current *gcBitsArena
+ previous *gcBitsArena
+}
+
+// tryAlloc allocates from b or returns nil if b does not have enough room.
+// This is safe to call concurrently.
+func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
+ if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
+ return nil
+ }
+ // Try to allocate from this block.
+ end := atomic.Xadduintptr(&b.free, bytes)
+ if end > uintptr(len(b.bits)) {
+ return nil
+ }
+ // There was enough room.
+ start := end - bytes
+ return &b.bits[start]
}
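tryAlloc is an optimistic lock-free bump allocator: racing callers atomically advance free, and a caller whose reservation overruns the arena simply fails; the few bytes lost to a failed reservation at the end are reclaimed when the arena is recycled. A self-contained sketch of the same pattern using sync/atomic (the arena type here is a toy, not the runtime's):

package main

import (
	"fmt"
	"sync/atomic"
)

type arena struct {
	free uintptr
	bits [64]byte
}

// tryAlloc returns the start offset of a bytes-sized allocation,
// or -1 if the arena does not have enough room.
func (a *arena) tryAlloc(bytes uintptr) int {
	if a == nil || atomic.LoadUintptr(&a.free)+bytes > uintptr(len(a.bits)) {
		return -1
	}
	end := atomic.AddUintptr(&a.free, bytes)
	if end > uintptr(len(a.bits)) {
		return -1 // lost the race near the end of the arena
	}
	return int(end - bytes)
}

func main() {
	a := &arena{}
	fmt.Println(a.tryAlloc(40)) // 0
	fmt.Println(a.tryAlloc(40)) // -1: 40+40 > 64
	fmt.Println(a.tryAlloc(24)) // 40
}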
// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
-func newMarkBits(nelems uintptr) *uint8 {
- lock(&gcBitsArenas.lock)
+func newMarkBits(nelems uintptr) *gcBits {
blocksNeeded := uintptr((nelems + 63) / 64)
bytesNeeded := blocksNeeded * 8
- if gcBitsArenas.next == nil ||
- gcBitsArenas.next.free+bytesNeeded > uintptr(len(gcBits{}.bits)) {
- // Allocate a new arena.
- fresh := newArena()
- fresh.next = gcBitsArenas.next
- gcBitsArenas.next = fresh
- }
- if gcBitsArenas.next.free >= gcBitsChunkBytes {
- println("runtime: gcBitsArenas.next.free=", gcBitsArenas.next.free, gcBitsChunkBytes)
+
+ // Try directly allocating from the current head arena.
+ head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
+ if p := head.tryAlloc(bytesNeeded); p != nil {
+ return p
+ }
+
+ // There's not enough room in the head arena. We may need to
+ // allocate a new arena.
+ lock(&gcBitsArenas.lock)
+ // Try the head arena again, since it may have changed. Now
+ // that we hold the lock, the list head can't change, but its
+ // free position still can.
+ if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
+ unlock(&gcBitsArenas.lock)
+ return p
+ }
+
+ // Allocate a new arena. This may temporarily drop the lock.
+ fresh := newArenaMayUnlock()
+ // If newArenaMayUnlock dropped the lock, another thread may
+ // have put a fresh arena on the "next" list. Try allocating
+ // from next again.
+ if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
+ // Put fresh back on the free list.
+ // TODO: Mark it "already zeroed"
+ fresh.next = gcBitsArenas.free
+ gcBitsArenas.free = fresh
+ unlock(&gcBitsArenas.lock)
+ return p
+ }
+
+ // Allocate from the fresh arena. We haven't linked it in yet, so
+ // this cannot race and is guaranteed to succeed.
+ p := fresh.tryAlloc(bytesNeeded)
+ if p == nil {
throw("markBits overflow")
}
- result := &gcBitsArenas.next.bits[gcBitsArenas.next.free]
- gcBitsArenas.next.free += bytesNeeded
+
+ // Add the fresh arena to the "next" list.
+ fresh.next = gcBitsArenas.next
+ atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))
+
unlock(&gcBitsArenas.lock)
- return result
+ return p
}
// newAllocBits returns a pointer to 8 byte aligned bytes
@@ -1364,7 +1624,7 @@ func newMarkBits(nelems uintptr) *uint8 {
 // allocation bits. For spans not being initialized, the
 // mark bits are repurposed as allocation bits when
// the span is swept.
-func newAllocBits(nelems uintptr) *uint8 {
+func newAllocBits(nelems uintptr) *gcBits {
return newMarkBits(nelems)
}
@@ -1398,18 +1658,21 @@ func nextMarkBitArenaEpoch() {
}
gcBitsArenas.previous = gcBitsArenas.current
gcBitsArenas.current = gcBitsArenas.next
- gcBitsArenas.next = nil // newMarkBits calls newArena when needed
+ atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArena when needed
unlock(&gcBitsArenas.lock)
}
-// newArena allocates and zeroes a gcBits arena.
-func newArena() *gcBits {
- var result *gcBits
+// newArenaMayUnlock allocates and zeroes a gcBits arena.
+// The caller must hold gcBitsArenas.lock. This may temporarily release it.
+func newArenaMayUnlock() *gcBitsArena {
+ var result *gcBitsArena
if gcBitsArenas.free == nil {
- result = (*gcBits)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
+ unlock(&gcBitsArenas.lock)
+ result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
if result == nil {
throw("runtime: cannot allocate memory")
}
+ lock(&gcBitsArenas.lock)
} else {
result = gcBitsArenas.free
gcBitsArenas.free = gcBitsArenas.free.next
@@ -1418,7 +1681,7 @@ func newArena() *gcBits {
result.next = nil
// If result.bits is not 8 byte aligned adjust index so
// that &result.bits[result.free] is 8 byte aligned.
- if uintptr(unsafe.Offsetof(gcBits{}.bits))&7 == 0 {
+ if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
result.free = 0
} else {
result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
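
The rewritten newMarkBits follows a classic optimistic protocol: try the published head without the lock, re-check under the lock, allocate (possibly dropping the lock), re-check once more, then publish with an atomic store so the fast path stays lock-free. A compressed sketch of just that protocol, not a real allocator, assuming Go 1.19's atomic.Pointer:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    type block struct{ data []byte }

    var (
    	mu   sync.Mutex
    	head atomic.Pointer[block] // read lock-free on the fast path
    )

    func get() *block {
    	// Fast path: no lock.
    	if b := head.Load(); b != nil {
    		return b
    	}
    	mu.Lock()
    	defer mu.Unlock()
    	// Re-check: another goroutine may have published while we
    	// waited, just as newMarkBits retries gcBitsArenas.next.
    	if b := head.Load(); b != nil {
    		return b
    	}
    	fresh := &block{data: make([]byte, 1<<16)}
    	head.Store(fresh) // publish; future fast paths see it without the lock
    	return fresh
    }

    func main() {
    	fmt.Println(len(get().data)) // 65536
    }
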
diff --git a/libgo/go/runtime/mksizeclasses.go b/libgo/go/runtime/mksizeclasses.go
index 0f897ba8e69..0cb2b33a8cd 100644
--- a/libgo/go/runtime/mksizeclasses.go
+++ b/libgo/go/runtime/mksizeclasses.go
@@ -48,7 +48,7 @@ func main() {
flag.Parse()
var b bytes.Buffer
- fmt.Fprintln(&b, "// AUTO-GENERATED by mksizeclasses.go; DO NOT EDIT")
+ fmt.Fprintln(&b, "// Code generated by mksizeclasses.go; DO NOT EDIT.")
fmt.Fprintln(&b, "//go:generate go run mksizeclasses.go")
fmt.Fprintln(&b)
fmt.Fprintln(&b, "package runtime")
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index 87f84a72acd..f31c88c5cae 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -64,27 +64,70 @@ type memRecord struct {
// come only after a GC during concurrent sweeping. So if we would
// naively count them, we would get a skew toward mallocs.
//
- // Mallocs are accounted in recent stats.
- // Explicit frees are accounted in recent stats.
- // GC frees are accounted in prev stats.
- // After GC prev stats are added to final stats and
- // recent stats are moved into prev stats.
- allocs uintptr
- frees uintptr
- alloc_bytes uintptr
- free_bytes uintptr
-
- // changes between next-to-last GC and last GC
- prev_allocs uintptr
- prev_frees uintptr
- prev_alloc_bytes uintptr
- prev_free_bytes uintptr
-
- // changes since last GC
- recent_allocs uintptr
- recent_frees uintptr
- recent_alloc_bytes uintptr
- recent_free_bytes uintptr
+ // Hence, we delay information to get consistent snapshots as
+ // of mark termination. Allocations count toward the next mark
+ // termination's snapshot, while sweep frees count toward the
+ // previous mark termination's snapshot:
+ //
+ // MT MT MT MT
+ // .·| .·| .·| .·|
+ //          .·˙  |      .·˙  |      .·˙  |      .·˙  |
+ //       .·˙     |   .·˙     |   .·˙     |   .·˙     |
+ //    .·˙        |.·˙        |.·˙        |.·˙        |
+ //
+ //       alloc → ▲ ← free
+ //               ┠┅┅┅┅┅┅┅┅┅┅┅P
+ //       C+2     →    C+1    →  C
+ //
+ //                   alloc → ▲ ← free
+ //                           ┠┅┅┅┅┅┅┅┅┅┅┅P
+ //                   C+2     →    C+1    →  C
+ //
+ // Since we can't publish a consistent snapshot until all of
+ // the sweep frees are accounted for, we wait until the next
+ // mark termination ("MT" above) to publish the previous mark
+ // termination's snapshot ("P" above). To do this, allocation
+ // and free events are accounted to *future* heap profile
+ // cycles ("C+n" above) and we only publish a cycle once all
+ // of the events from that cycle are done. Specifically:
+ //
+ // Mallocs are accounted to cycle C+2.
+ // Explicit frees are accounted to cycle C+2.
+ // GC frees (done during sweeping) are accounted to cycle C+1.
+ //
+ // After mark termination, we increment the global heap
+ // profile cycle counter and accumulate the stats from cycle C
+ // into the active profile.
+
+ // active is the currently published profile. A profiling
+ // cycle can be accumulated into active once it's complete.
+ active memRecordCycle
+
+ // future records the profile events we're counting for cycles
+ // that have not yet been published. This is a ring buffer
+ // indexed by the global heap profile cycle C and stores
+ // cycles C, C+1, and C+2. Unlike active, these counts are
+ // only for a single cycle; they are not cumulative across
+ // cycles.
+ //
+ // We store cycle C here because there's a window between when
+ // C becomes the active cycle and when we've flushed it to
+ // active.
+ future [3]memRecordCycle
+}
+
+// memRecordCycle records allocation and free counts for one heap profiling cycle.
+type memRecordCycle struct {
+ allocs, frees uintptr
+ alloc_bytes, free_bytes uintptr
+}
+
+// add accumulates b into a. It does not zero b.
+func (a *memRecordCycle) add(b *memRecordCycle) {
+ a.allocs += b.allocs
+ a.frees += b.frees
+ a.alloc_bytes += b.alloc_bytes
+ a.free_bytes += b.free_bytes
}
// A blockRecord is the bucket data for a bucket of type blockProfile,
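
The future ring only needs three slots because an event can be pending for at most two cycles. A toy model of the accounting (allocs land in cycle C+2, sweep frees in C+1, flush publishes C), with plain ints standing in for the runtime's counters:

    package main

    import "fmt"

    type cycleCounts struct{ allocs, frees int }

    type record struct {
    	active cycleCounts
    	future [3]cycleCounts // ring indexed by the global cycle
    }

    var cycle uint32 // the runtime wraps this at mProfCycleWrap

    func (r *record) malloc() { r.future[(cycle+2)%3].allocs++ }
    func (r *record) free()   { r.future[(cycle+1)%3].frees++ }

    // flush publishes cycle C into active and clears it for reuse.
    func (r *record) flush() {
    	c := &r.future[cycle%3]
    	r.active.allocs += c.allocs
    	r.active.frees += c.frees
    	*c = cycleCounts{}
    }

    func main() {
    	var r record
    	r.malloc() // pending in cycle C+2
    	cycle++
    	r.flush()
    	fmt.Println(r.active) // {0 0}: not yet published
    	cycle++
    	r.flush()
    	fmt.Println(r.active) // {1 0}: visible two mark terminations later
    }
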
@@ -100,8 +143,21 @@ var (
xbuckets *bucket // mutex profile buckets
buckhash *[179999]*bucket
bucketmem uintptr
+
+ mProf struct {
+ // All fields in mProf are protected by proflock.
+
+ // cycle is the global heap profile cycle. This wraps
+ // at mProfCycleWrap.
+ cycle uint32
+ // flushed indicates that future[cycle] in all buckets
+ // has been flushed to the active profile.
+ flushed bool
+ }
)
+const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+
// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{})
@@ -212,30 +268,71 @@ func eqslice(x, y []location) bool {
return true
}
-func mprof_GC() {
+// mProf_NextCycle publishes the next heap profile cycle and creates a
+// fresh heap profile cycle. This operation is fast and can be done
+// during STW. The caller must call mProf_Flush before calling
+// mProf_NextCycle again.
+//
+// This is called by mark termination during STW so allocations and
+// frees after the world is started again count towards a new heap
+// profiling cycle.
+func mProf_NextCycle() {
+ lock(&proflock)
+ // We explicitly wrap mProf.cycle rather than depending on
+ // uint wraparound because the memRecord.future ring does not
+ // itself wrap at a power of two.
+ mProf.cycle = (mProf.cycle + 1) % mProfCycleWrap
+ mProf.flushed = false
+ unlock(&proflock)
+}
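
The explicit modulus matters because the future ring has length 3 and uint32 does not wrap at a multiple of 3. A quick arithmetic check of the wrap choice (ring length and shift taken from the patch):

    package main

    import "fmt"

    func main() {
    	const ringLen = 3 // len(memRecord{}.future)
    	// Natural uint32 wraparound would repeat a ring slot:
    	fmt.Println(uint64(1<<32-1)%ringLen, uint64(0)%ringLen) // 0 0
    	// Wrapping at a multiple of ringLen keeps the indices continuous:
    	const wrap = ringLen * (2 << 24) // mProfCycleWrap
    	fmt.Println(uint64(wrap-1)%ringLen, uint64(0)%ringLen) // 2 0
    }
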
+
+// mProf_Flush flushes the events from the current heap profiling
+// cycle into the active profile. After this it is safe to start a new
+// heap profiling cycle with mProf_NextCycle.
+//
+// This is called by GC after mark termination starts the world. In
+// contrast with mProf_NextCycle, this is somewhat expensive, but safe
+// to do concurrently.
+func mProf_Flush() {
+ lock(&proflock)
+ if !mProf.flushed {
+ mProf_FlushLocked()
+ mProf.flushed = true
+ }
+ unlock(&proflock)
+}
+
+func mProf_FlushLocked() {
+ c := mProf.cycle
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- mp.allocs += mp.prev_allocs
- mp.frees += mp.prev_frees
- mp.alloc_bytes += mp.prev_alloc_bytes
- mp.free_bytes += mp.prev_free_bytes
- mp.prev_allocs = mp.recent_allocs
- mp.prev_frees = mp.recent_frees
- mp.prev_alloc_bytes = mp.recent_alloc_bytes
- mp.prev_free_bytes = mp.recent_free_bytes
-
- mp.recent_allocs = 0
- mp.recent_frees = 0
- mp.recent_alloc_bytes = 0
- mp.recent_free_bytes = 0
+ // Flush cycle C into the published profile and clear
+ // it for reuse.
+ mpc := &mp.future[c%uint32(len(mp.future))]
+ mp.active.add(mpc)
+ *mpc = memRecordCycle{}
}
}
-// Record that a gc just happened: all the 'recent' statistics are now real.
-func mProf_GC() {
+// mProf_PostSweep records that all sweep frees for this GC cycle have
+// completed. This has the effect of publishing the heap profile
+// snapshot as of the last mark termination without advancing the heap
+// profile cycle.
+func mProf_PostSweep() {
lock(&proflock)
- mprof_GC()
+ // Flush cycle C+1 to the active profile so everything as of
+ // the last mark termination becomes visible. *Don't* advance
+ // the cycle, since we're still accumulating allocs in cycle
+ // C+2, which have to become C+1 in the next mark termination
+ // and so on.
+ c := mProf.cycle
+ for b := mbuckets; b != nil; b = b.allnext {
+ mp := b.mp()
+ mpc := &mp.future[(c+1)%uint32(len(mp.future))]
+ mp.active.add(mpc)
+ *mpc = memRecordCycle{}
+ }
unlock(&proflock)
}
@@ -245,9 +342,11 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
nstk := callers(4, stk[:])
lock(&proflock)
b := stkbucket(memProfile, size, stk[:nstk], true)
+ c := mProf.cycle
mp := b.mp()
- mp.recent_allocs++
- mp.recent_alloc_bytes += size
+ mpc := &mp.future[(c+2)%uint32(len(mp.future))]
+ mpc.allocs++
+ mpc.alloc_bytes += size
unlock(&proflock)
// Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
@@ -262,9 +361,11 @@ func mProf_Malloc(p unsafe.Pointer, size uintptr) {
// Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr) {
lock(&proflock)
+ c := mProf.cycle
mp := b.mp()
- mp.prev_frees++
- mp.prev_free_bytes += size
+ mpc := &mp.future[(c+1)%uint32(len(mp.future))]
+ mpc.frees++
+ mpc.free_bytes += size
unlock(&proflock)
}
@@ -298,7 +399,7 @@ func blockevent(cycles int64, skip int) {
cycles = 1
}
if blocksampled(cycles) {
- saveblockevent(cycles, skip+1, blockProfile, &blockprofilerate)
+ saveblockevent(cycles, skip+1, blockProfile)
}
}
@@ -310,7 +411,7 @@ func blocksampled(cycles int64) bool {
return true
}
-func saveblockevent(cycles int64, skip int, which bucketType, ratep *uint64) {
+func saveblockevent(cycles int64, skip int, which bucketType) {
gp := getg()
var nstk int
var stk [maxStack]location
@@ -355,7 +456,7 @@ func mutexevent(cycles int64, skip int) {
// TODO(pjw): measure impact of always calling fastrand vs using something
// like malloc.go:nextSample()
if rate > 0 && int64(fastrand())%rate == 0 {
- saveblockevent(cycles, skip+1, mutexProfile, &mutexprofilerate)
+ saveblockevent(cycles, skip+1, mutexProfile)
}
}
@@ -443,13 +544,17 @@ func (r *MemProfileRecord) Stack() []uintptr {
// of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
lock(&proflock)
+ // If we're between mProf_NextCycle and mProf_Flush, take care
+ // of flushing to the active profile so we only have to look
+ // at the active profile below.
+ mProf_FlushLocked()
clear := true
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
n++
}
- if mp.allocs != 0 || mp.frees != 0 {
+ if mp.active.allocs != 0 || mp.active.frees != 0 {
clear = false
}
}
@@ -457,13 +562,15 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
// Absolutely no data, suggesting that a garbage collection
// has not yet happened. In order to allow profiling when
// garbage collection is disabled from the beginning of execution,
- // accumulate stats as if a GC just happened, and recount buckets.
- mprof_GC()
- mprof_GC()
+ // accumulate all of the cycles, and recount buckets.
n = 0
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
+ for c := range mp.future {
+ mp.active.add(&mp.future[c])
+ mp.future[c] = memRecordCycle{}
+ }
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
n++
}
}
@@ -473,7 +580,7 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
idx := 0
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
+ if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
record(&p[idx], b)
idx++
}
@@ -486,10 +593,10 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
// Write b's data to r.
func record(r *MemProfileRecord, b *bucket) {
mp := b.mp()
- r.AllocBytes = int64(mp.alloc_bytes)
- r.FreeBytes = int64(mp.free_bytes)
- r.AllocObjects = int64(mp.allocs)
- r.FreeObjects = int64(mp.frees)
+ r.AllocBytes = int64(mp.active.alloc_bytes)
+ r.FreeBytes = int64(mp.active.free_bytes)
+ r.AllocObjects = int64(mp.active.allocs)
+ r.FreeObjects = int64(mp.active.frees)
for i, loc := range b.stk() {
if i >= len(r.Stack0) {
break
@@ -505,7 +612,7 @@ func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uint
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
+ fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
}
unlock(&proflock)
}
diff --git a/libgo/go/runtime/msize.go b/libgo/go/runtime/msize.go
index 438c9875135..0accb83eb89 100644
--- a/libgo/go/runtime/msize.go
+++ b/libgo/go/runtime/msize.go
@@ -9,28 +9,6 @@
package runtime
-// sizeToClass(0 <= n <= MaxSmallSize) returns the size class,
-// 1 <= sizeclass < NumSizeClasses, for n.
-// Size class 0 is reserved to mean "not small".
-//
-// The sizeToClass lookup is implemented using two arrays,
-// one mapping sizes <= 1024 to their class and one mapping
-// sizes >= 1024 and <= MaxSmallSize to their class.
-// All objects are 8-aligned, so the first array is indexed by
-// the size divided by 8 (rounded up). Objects >= 1024 bytes
-// are 128-aligned, so the second array is indexed by the
-// size divided by 128 (rounded up). The arrays are constants
-// in sizeclass.go generated by mksizeclass.go.
-func sizeToClass(size uint32) uint32 {
- if size > _MaxSmallSize {
- throw("invalid size")
- }
- if size > smallSizeMax-8 {
- return uint32(size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv])
- }
- return uint32(size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv])
-}
-
// Returns size of the memory block that mallocgc will allocate if you ask for the size.
func roundupsize(size uintptr) uintptr {
if size < _MaxSmallSize {
diff --git a/libgo/go/runtime/mstats.go b/libgo/go/runtime/mstats.go
index aa3cfef0e10..71dc2239854 100644
--- a/libgo/go/runtime/mstats.go
+++ b/libgo/go/runtime/mstats.go
@@ -33,13 +33,12 @@ type mstats struct {
// Statistics about malloc heap.
// Protected by mheap.lock
//
- // In mstats, heap_sys and heap_inuse includes stack memory,
- // while in MemStats stack memory is separated out from the
- // heap stats.
+ // Like MemStats, heap_sys and heap_inuse do not count memory
+ // in manually-managed spans.
heap_alloc uint64 // bytes allocated and not yet freed (same as alloc above)
- heap_sys uint64 // virtual address space obtained from system
+ heap_sys uint64 // virtual address space obtained from system for GC'd heap
heap_idle uint64 // bytes in idle spans
- heap_inuse uint64 // bytes in non-idle spans
+ heap_inuse uint64 // bytes in _MSpanInUse spans
heap_released uint64 // bytes released to the os
heap_objects uint64 // total number of allocated objects
@@ -59,7 +58,7 @@ type mstats struct {
// Statistics about allocation of low-level fixed-size structures.
// Protected by FixAlloc locks.
- stacks_inuse uint64 // this number is included in heap_inuse above; differs from MemStats.StackInuse
+ stacks_inuse uint64 // bytes in manually-managed stack spans
stacks_sys uint64 // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
mspan_inuse uint64 // mspan structures
mspan_sys uint64
@@ -72,7 +71,7 @@ type mstats struct {
// Statistics about garbage collector.
// Protected by mheap or stopping the world during GC.
next_gc uint64 // goal heap_live for when next GC ends; ^0 if disabled
- last_gc uint64 // last gc (in absolute time)
+ last_gc_unix uint64 // last gc (in unix time)
pause_total_ns uint64
pause_ns [256]uint64 // circular buffer of recent gc pause lengths
pause_end [256]uint64 // circular buffer of recent gc end times (nanoseconds since 1970)
@@ -92,13 +91,26 @@ type mstats struct {
// Statistics below here are not exported to MemStats directly.
- tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly
+ last_gc_nanotime uint64 // last gc (monotonic time)
+ tinyallocs uint64 // number of tiny allocations that didn't cause actual allocation; not exported to go directly
+
+ // triggerRatio is the heap growth ratio that triggers marking.
+ //
+ // E.g., if this is 0.6, then GC should start when the live
+ // heap has reached 1.6 times the heap size marked by the
+ // previous cycle. This should be ≤ GOGC/100 so the trigger
+ // heap size is less than the goal heap size. This is set
+ // during mark termination for the next cycle's trigger.
+ triggerRatio float64
// gc_trigger is the heap size that triggers marking.
//
 // When heap_live ≥ gc_trigger, the mark phase will start.
// This is also the heap size by which proportional sweeping
// must be complete.
+ //
+ // This is computed from triggerRatio during mark termination
+ // for the next cycle's trigger.
gc_trigger uint64
// heap_live is the number of bytes considered live by the GC.
@@ -121,6 +133,8 @@ type mstats struct {
// leads to a conservative GC rate rather than a GC rate that
// is potentially too low.
//
+ // Reads should likewise be atomic (or during STW).
+ //
// Whenever this is updated, call traceHeapAlloc() and
// gcController.revise().
heap_live uint64
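
As a back-of-the-envelope check of the triggerRatio comment (the numbers are invented for illustration):

    package main

    import "fmt"

    func main() {
    	const marked = 100 << 20 // bytes marked live by the previous cycle
    	const triggerRatio = 0.6
    	trigger := uint64(float64(marked) * (1 + triggerRatio))
    	fmt.Println(trigger>>20, "MB") // 160 MB
    	const gogc = 100 // goal heap is (1 + GOGC/100) x marked, i.e. 2x here
    	fmt.Println(triggerRatio <= gogc/100.0) // true: trigger fires before the goal
    }
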
@@ -451,22 +465,18 @@ func ReadMemStats(m *MemStats) {
}
func readmemstats_m(stats *MemStats) {
- updatememstats(nil)
+ updatememstats()
// The size of the trailing by_size array differs between
// mstats and MemStats. NumSizeClasses was changed, but we
// cannot change MemStats because of backward compatibility.
memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
- // Stack numbers are part of the heap numbers, separate those out for user consumption
+ // memstats.stacks_sys is only memory mapped directly for OS stacks.
+ // Add in heap-allocated stack memory for user consumption.
stats.StackSys += stats.StackInuse
- stats.HeapInuse -= stats.StackInuse
- stats.HeapSys -= stats.StackInuse
}
-// For gccgo this is in runtime/mgc0.c.
-func updatememstats(stats *gcstats)
-
//go:linkname readGCStats runtime_debug.readGCStats
func readGCStats(pauses *[]uint64) {
systemstack(func() {
@@ -500,7 +510,7 @@ func readGCStats_m(pauses *[]uint64) {
p[n+i] = memstats.pause_end[j]
}
- p[n+n] = memstats.last_gc
+ p[n+n] = memstats.last_gc_unix
p[n+n+1] = uint64(memstats.numgc)
p[n+n+2] = memstats.pause_total_ns
unlock(&mheap_.lock)
@@ -508,26 +518,15 @@ func readGCStats_m(pauses *[]uint64) {
}
//go:nowritebarrier
-func updatememstats(stats *gcstats) {
- if stats != nil {
- *stats = gcstats{}
- }
- for mp := allm; mp != nil; mp = mp.alllink {
- if stats != nil {
- src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
- dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
- for i, v := range src {
- dst[i] += v
- }
- mp.gcstats = gcstats{}
- }
- }
-
+func updatememstats() {
memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
+ // We also count stacks_inuse as sys memory.
+ memstats.sys += memstats.stacks_inuse
+
// Calculate memory allocator stats.
// During program execution we only count number of frees and amount of freed memory.
 // Current number of alive objects in the heap and amount of alive heap memory
@@ -550,45 +549,49 @@ func updatememstats(stats *gcstats) {
// Aggregate local stats.
cachestats()
- // Scan all spans and count number of alive objects.
- lock(&mheap_.lock)
- for _, s := range mheap_.allspans {
- if s.state != mSpanInUse {
+ // Collect allocation stats. This is safe and consistent
+ // because the world is stopped.
+ var smallFree, totalAlloc, totalFree uint64
+ // Collect per-spanclass stats.
+ for spc := range mheap_.central {
+ // The mcaches are now empty, so mcentral stats are
+ // up-to-date.
+ c := &mheap_.central[spc].mcentral
+ memstats.nmalloc += c.nmalloc
+ i := spanClass(spc).sizeclass()
+ memstats.by_size[i].nmalloc += c.nmalloc
+ totalAlloc += c.nmalloc * uint64(class_to_size[i])
+ }
+ // Collect per-sizeclass stats.
+ for i := 0; i < _NumSizeClasses; i++ {
+ if i == 0 {
+ memstats.nmalloc += mheap_.nlargealloc
+ totalAlloc += mheap_.largealloc
+ totalFree += mheap_.largefree
+ memstats.nfree += mheap_.nlargefree
continue
}
- if s.sizeclass == 0 {
- memstats.nmalloc++
- memstats.alloc += uint64(s.elemsize)
- } else {
- memstats.nmalloc += uint64(s.allocCount)
- memstats.by_size[s.sizeclass].nmalloc += uint64(s.allocCount)
- memstats.alloc += uint64(s.allocCount) * uint64(s.elemsize)
- }
- }
- unlock(&mheap_.lock)
- // Aggregate by size class.
- smallfree := uint64(0)
- memstats.nfree = mheap_.nlargefree
- for i := 0; i < len(memstats.by_size); i++ {
+ // The mcache stats have been flushed to mheap_.
memstats.nfree += mheap_.nsmallfree[i]
memstats.by_size[i].nfree = mheap_.nsmallfree[i]
- memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
- smallfree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
+ smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
}
+ totalFree += smallFree
+
memstats.nfree += memstats.tinyallocs
- memstats.nmalloc += memstats.nfree
+ memstats.nmalloc += memstats.tinyallocs
// Calculate derived stats.
- memstats.total_alloc = memstats.alloc + mheap_.largefree + smallfree
+ memstats.total_alloc = totalAlloc
+ memstats.alloc = totalAlloc - totalFree
memstats.heap_alloc = memstats.alloc
memstats.heap_objects = memstats.nmalloc - memstats.nfree
}
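
The derived figures are pure arithmetic over the cumulative counters; a toy illustration with made-up numbers:

    package main

    import "fmt"

    func main() {
    	var (
    		totalAlloc uint64 = 500 << 20 // bytes ever allocated
    		totalFree  uint64 = 420 << 20 // bytes ever freed
    		nmalloc    uint64 = 12000     // objects ever allocated
    		nfree      uint64 = 11000     // objects ever freed
    	)
    	alloc := totalAlloc - totalFree // bytes currently live
    	objects := nmalloc - nfree      // objects currently live
    	fmt.Println(alloc>>20, "MB live in", objects, "objects") // 80 MB live in 1000 objects
    }
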
//go:nowritebarrier
func cachestats() {
- for i := 0; ; i++ {
- p := allp[i]
+ for _, p := range &allp {
if p == nil {
break
}
diff --git a/libgo/go/runtime/mstkbar.go b/libgo/go/runtime/mstkbar.go
deleted file mode 100644
index 616c220132f..00000000000
--- a/libgo/go/runtime/mstkbar.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-// Garbage collector: stack barriers
-//
-// Stack barriers enable the garbage collector to determine how much
-// of a gorountine stack has changed between when a stack is scanned
-// during the concurrent scan phase and when it is re-scanned during
-// the stop-the-world mark termination phase. Mark termination only
-// needs to re-scan the changed part, so for deep stacks this can
-// significantly reduce GC pause time compared to the alternative of
-// re-scanning whole stacks. The deeper the stacks, the more stack
-// barriers help.
-//
-// When stacks are scanned during the concurrent scan phase, the stack
-// scan installs stack barriers by selecting stack frames and
-// overwriting the saved return PCs (or link registers) of these
-// frames with the PC of a "stack barrier trampoline". Later, when a
-// selected frame returns, it "returns" to this trampoline instead of
-// returning to its actual caller. The trampoline records that the
-// stack has unwound past this frame and jumps to the original return
-// PC recorded when the stack barrier was installed. Mark termination
-// re-scans only as far as the first frame that hasn't hit a stack
-// barrier and then removes and un-hit stack barriers.
-//
-// This scheme is very lightweight. No special code is required in the
-// mutator to record stack unwinding and the trampoline is only a few
-// assembly instructions.
-//
-// Book-keeping
-// ------------
-//
-// The primary cost of stack barriers is book-keeping: the runtime has
-// to record the locations of all stack barriers and the original
-// return PCs in order to return to the correct caller when a stack
-// barrier is hit and so it can remove un-hit stack barriers. In order
-// to minimize this cost, the Go runtime places stack barriers in
-// exponentially-spaced frames, starting 1K past the current frame.
-// The book-keeping structure hence grows logarithmically with the
-// size of the stack and mark termination re-scans at most twice as
-// much stack as necessary.
-//
-// The runtime reserves space for this book-keeping structure at the
-// top of the stack allocation itself (just above the outermost
-// frame). This is necessary because the regular memory allocator can
-// itself grow the stack, and hence can't be used when allocating
-// stack-related structures.
-//
-// For debugging, the runtime also supports installing stack barriers
-// at every frame. However, this requires significantly more
-// book-keeping space.
-//
-// Correctness
-// -----------
-//
-// The runtime and the compiler cooperate to ensure that all objects
-// reachable from the stack as of mark termination are marked.
-// Anything unchanged since the concurrent scan phase will be marked
-// because it is marked by the concurrent scan. After the concurrent
-// scan, there are three possible classes of stack modifications that
-// must be tracked:
-//
-// 1) Mutator writes below the lowest un-hit stack barrier. This
-// includes all writes performed by an executing function to its own
-// stack frame. This part of the stack will be re-scanned by mark
-// termination, which will mark any objects made reachable from
-// modifications to this part of the stack.
-//
-// 2) Mutator writes above the lowest un-hit stack barrier. It's
-// possible for a mutator to modify the stack above the lowest un-hit
-// stack barrier if a higher frame has passed down a pointer to a
-// stack variable in its frame. This is called an "up-pointer". The
-// compiler ensures that writes through up-pointers have an
-// accompanying write barrier (it simply doesn't distinguish between
-// writes through up-pointers and writes through heap pointers). This
-// write barrier marks any object made reachable from modifications to
-// this part of the stack.
-//
-// 3) Runtime writes to the stack. Various runtime operations such as
-// sends to unbuffered channels can write to arbitrary parts of the
-// stack, including above the lowest un-hit stack barrier. We solve
-// this in two ways. In many cases, the runtime can perform an
-// explicit write barrier operation like in case 2. However, in the
-// case of bulk memory move (typedmemmove), the runtime doesn't
-// necessary have ready access to a pointer bitmap for the memory
-// being copied, so it simply unwinds any stack barriers below the
-// destination.
-//
-// Gotchas
-// -------
-//
-// Anything that inspects or manipulates the stack potentially needs
-// to understand stack barriers. The most obvious case is that
-// gentraceback needs to use the original return PC when it encounters
-// the stack barrier trampoline. Anything that unwinds the stack such
-// as panic/recover must unwind stack barriers in tandem with
-// unwinding the stack.
-//
-// Stack barriers require that any goroutine whose stack has been
-// scanned must execute write barriers. Go solves this by simply
-// enabling write barriers globally during the concurrent scan phase.
-// However, traditionally, write barriers are not enabled during this
-// phase.
-//
-// Synchronization
-// ---------------
-//
-// For the most part, accessing and modifying stack barriers is
-// synchronized around GC safe points. Installing stack barriers
-// forces the G to a safe point, while all other operations that
-// modify stack barriers run on the G and prevent it from reaching a
-// safe point.
-//
-// Subtlety arises when a G may be tracebacked when *not* at a safe
-// point. This happens during sigprof. For this, each G has a "stack
-// barrier lock" (see gcLockStackBarriers, gcUnlockStackBarriers).
-// Operations that manipulate stack barriers acquire this lock, while
-// sigprof tries to acquire it and simply skips the traceback if it
-// can't acquire it. There is one exception for performance and
-// complexity reasons: hitting a stack barrier manipulates the stack
-// barrier list without acquiring the stack barrier lock. For this,
-// gentraceback performs a special fix up if the traceback starts in
-// the stack barrier function.
-
-package runtime
-
-import (
- "runtime/internal/atomic"
- "runtime/internal/sys"
- "unsafe"
-)
-
-const debugStackBarrier = false
-
-// firstStackBarrierOffset is the approximate byte offset at
-// which to place the first stack barrier from the current SP.
-// This is a lower bound on how much stack will have to be
-// re-scanned during mark termination. Subsequent barriers are
-// placed at firstStackBarrierOffset * 2^n offsets.
-//
-// For debugging, this can be set to 0, which will install a
-// stack barrier at every frame. If you do this, you may also
-// have to raise _StackMin, since the stack barrier
-// bookkeeping will use a large amount of each stack.
-var firstStackBarrierOffset = 1024
-
-// gcMaxStackBarriers returns the maximum number of stack barriers
-// that can be installed in a stack of stackSize bytes.
-func gcMaxStackBarriers(stackSize int) (n int) {
- if debug.gcstackbarrieroff > 0 {
- return 0
- }
-
- if firstStackBarrierOffset == 0 {
- // Special debugging case for inserting stack barriers
- // at every frame. Steal half of the stack for the
- // []stkbar. Technically, if the stack were to consist
- // solely of return PCs we would need two thirds of
- // the stack, but stealing that much breaks things and
- // this doesn't happen in practice.
- return stackSize / 2 / int(unsafe.Sizeof(stkbar{}))
- }
-
- offset := firstStackBarrierOffset
- for offset < stackSize {
- n++
- offset *= 2
- }
- return n + 1
-}
-
-// gcInstallStackBarrier installs a stack barrier over the return PC of frame.
-//go:nowritebarrier
-func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
- if frame.lr == 0 {
- if debugStackBarrier {
- print("not installing stack barrier with no LR, goid=", gp.goid, "\n")
- }
- return false
- }
-
- if frame.fn.entry == cgocallback_gofuncPC {
- // cgocallback_gofunc doesn't return to its LR;
- // instead, its return path puts LR in g.sched.pc and
- // switches back to the system stack on which
- // cgocallback_gofunc was originally called. We can't
- // have a stack barrier in g.sched.pc, so don't
- // install one in this frame.
- if debugStackBarrier {
- print("not installing stack barrier over LR of cgocallback_gofunc, goid=", gp.goid, "\n")
- }
- return false
- }
-
- // Save the return PC and overwrite it with stackBarrier.
- var lrUintptr uintptr
- if usesLR {
- lrUintptr = frame.sp
- } else {
- lrUintptr = frame.fp - sys.RegSize
- }
- lrPtr := (*sys.Uintreg)(unsafe.Pointer(lrUintptr))
- if debugStackBarrier {
- print("install stack barrier at ", hex(lrUintptr), " over ", hex(*lrPtr), ", goid=", gp.goid, "\n")
- if uintptr(*lrPtr) != frame.lr {
- print("frame.lr=", hex(frame.lr))
- throw("frame.lr differs from stack LR")
- }
- }
-
- gp.stkbar = gp.stkbar[:len(gp.stkbar)+1]
- stkbar := &gp.stkbar[len(gp.stkbar)-1]
- stkbar.savedLRPtr = lrUintptr
- stkbar.savedLRVal = uintptr(*lrPtr)
- *lrPtr = sys.Uintreg(stackBarrierPC)
- return true
-}
-
-// gcRemoveStackBarriers removes all stack barriers installed in gp's stack.
-//
-// gp's stack barriers must be locked.
-//
-//go:nowritebarrier
-func gcRemoveStackBarriers(gp *g) {
- if debugStackBarrier && gp.stkbarPos != 0 {
- print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
- }
-
- // Remove stack barriers that we didn't hit.
- for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
- gcRemoveStackBarrier(gp, stkbar)
- }
-
- // Clear recorded stack barriers so copystack doesn't try to
- // adjust them.
- gp.stkbarPos = 0
- gp.stkbar = gp.stkbar[:0]
-}
-
-// gcRemoveStackBarrier removes a single stack barrier. It is the
-// inverse operation of gcInstallStackBarrier.
-//
-// This is nosplit to ensure gp's stack does not move.
-//
-//go:nowritebarrier
-//go:nosplit
-func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
- if debugStackBarrier {
- print("remove stack barrier at ", hex(stkbar.savedLRPtr), " with ", hex(stkbar.savedLRVal), ", goid=", gp.goid, "\n")
- }
- lrPtr := (*sys.Uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
- if val := *lrPtr; val != sys.Uintreg(stackBarrierPC) {
- printlock()
- print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
- print("gp.stkbar=")
- gcPrintStkbars(gp, -1)
- print(", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
- throw("stack barrier lost")
- }
- *lrPtr = sys.Uintreg(stkbar.savedLRVal)
-}
-
-// gcTryRemoveAllStackBarriers tries to remove stack barriers from all
-// Gs in gps. It is best-effort and efficient. If it can't remove
-// barriers from a G immediately, it will simply skip it.
-func gcTryRemoveAllStackBarriers(gps []*g) {
- for _, gp := range gps {
- retry:
- for {
- switch s := readgstatus(gp); s {
- default:
- break retry
-
- case _Grunnable, _Gsyscall, _Gwaiting:
- if !castogscanstatus(gp, s, s|_Gscan) {
- continue
- }
- gcLockStackBarriers(gp)
- gcRemoveStackBarriers(gp)
- gcUnlockStackBarriers(gp)
- restartg(gp)
- break retry
- }
- }
- }
-}
-
-// gcPrintStkbars prints the stack barriers of gp for debugging. It
-// places a "@@@" marker at gp.stkbarPos. If marker >= 0, it will also
-// place a "==>" marker before the marker'th entry.
-func gcPrintStkbars(gp *g, marker int) {
- print("[")
- for i, s := range gp.stkbar {
- if i > 0 {
- print(" ")
- }
- if i == int(gp.stkbarPos) {
- print("@@@ ")
- }
- if i == marker {
- print("==> ")
- }
- print("*", hex(s.savedLRPtr), "=", hex(s.savedLRVal))
- }
- if int(gp.stkbarPos) == len(gp.stkbar) {
- print(" @@@")
- }
- if marker == len(gp.stkbar) {
- print(" ==>")
- }
- print("]")
-}
-
-// gcUnwindBarriers marks all stack barriers up the frame containing
-// sp as hit and removes them. This is used during stack unwinding for
-// panic/recover and by heapBitsBulkBarrier to force stack re-scanning
-// when its destination is on the stack.
-//
-// This is nosplit to ensure gp's stack does not move.
-//
-//go:nosplit
-func gcUnwindBarriers(gp *g, sp uintptr) {
- gcLockStackBarriers(gp)
- // On LR machines, if there is a stack barrier on the return
- // from the frame containing sp, this will mark it as hit even
- // though it isn't, but it's okay to be conservative.
- before := gp.stkbarPos
- for int(gp.stkbarPos) < len(gp.stkbar) && gp.stkbar[gp.stkbarPos].savedLRPtr < sp {
- gcRemoveStackBarrier(gp, gp.stkbar[gp.stkbarPos])
- gp.stkbarPos++
- }
- gcUnlockStackBarriers(gp)
- if debugStackBarrier && gp.stkbarPos != before {
- print("skip barriers below ", hex(sp), " in goid=", gp.goid, ": ")
- // We skipped barriers between the "==>" marker
- // (before) and the "@@@" marker (gp.stkbarPos).
- gcPrintStkbars(gp, int(before))
- print("\n")
- }
-}
-
-// nextBarrierPC returns the original return PC of the next stack barrier.
-// Used by getcallerpc, so it must be nosplit.
-//go:nosplit
-func nextBarrierPC() uintptr {
- gp := getg()
- return gp.stkbar[gp.stkbarPos].savedLRVal
-}
-
-// setNextBarrierPC sets the return PC of the next stack barrier.
-// Used by setcallerpc, so it must be nosplit.
-//go:nosplit
-func setNextBarrierPC(pc uintptr) {
- gp := getg()
- gcLockStackBarriers(gp)
- gp.stkbar[gp.stkbarPos].savedLRVal = pc
- gcUnlockStackBarriers(gp)
-}
-
-// gcLockStackBarriers synchronizes with tracebacks of gp's stack
-// during sigprof for installation or removal of stack barriers. It
-// blocks until any current sigprof is done tracebacking gp's stack
-// and then disallows profiling tracebacks of gp's stack.
-//
-// This is necessary because a sigprof during barrier installation or
-// removal could observe inconsistencies between the stkbar array and
-// the stack itself and crash.
-//
-//go:nosplit
-func gcLockStackBarriers(gp *g) {
- // Disable preemption so scanstack cannot run while the caller
- // is manipulating the stack barriers.
- acquirem()
- for !atomic.Cas(&gp.stackLock, 0, 1) {
- osyield()
- }
-}
-
-//go:nosplit
-func gcTryLockStackBarriers(gp *g) bool {
- mp := acquirem()
- result := atomic.Cas(&gp.stackLock, 0, 1)
- if !result {
- releasem(mp)
- }
- return result
-}
-
-func gcUnlockStackBarriers(gp *g) {
- atomic.Store(&gp.stackLock, 0)
- releasem(getg().m)
-}
diff --git a/libgo/go/runtime/net_plan9.go b/libgo/go/runtime/net_plan9.go
index 10fd089aea3..77ae8c612d7 100644
--- a/libgo/go/runtime/net_plan9.go
+++ b/libgo/go/runtime/net_plan9.go
@@ -8,12 +8,12 @@ import (
_ "unsafe"
)
-//go:linkname runtime_ignoreHangup net.runtime_ignoreHangup
+//go:linkname runtime_ignoreHangup internal_poll.runtime_ignoreHangup
func runtime_ignoreHangup() {
getg().m.ignoreHangup = true
}
-//go:linkname runtime_unignoreHangup net.runtime_unignoreHangup
+//go:linkname runtime_unignoreHangup internal_poll.runtime_unignoreHangup
func runtime_unignoreHangup(sig string) {
getg().m.ignoreHangup = false
}
diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go
index 8932455a19a..e9bbfecb5b3 100644
--- a/libgo/go/runtime/netpoll.go
+++ b/libgo/go/runtime/netpoll.go
@@ -80,12 +80,13 @@ type pollCache struct {
}
var (
- netpollInited uint32
- pollcache pollCache
+ netpollInited uint32
+ pollcache pollCache
+ netpollWaiters uint32
)
-//go:linkname net_runtime_pollServerInit net.runtime_pollServerInit
-func net_runtime_pollServerInit() {
+//go:linkname poll_runtime_pollServerInit internal_poll.runtime_pollServerInit
+func poll_runtime_pollServerInit() {
netpollinit()
atomic.Store(&netpollInited, 1)
}
@@ -94,15 +95,23 @@ func netpollinited() bool {
return atomic.Load(&netpollInited) != 0
}
-//go:linkname net_runtime_pollOpen net.runtime_pollOpen
-func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
+//go:linkname poll_runtime_pollServerDescriptor internal_poll.runtime_pollServerDescriptor
+
+// poll_runtime_pollServerDescriptor returns the descriptor being used,
+// or ^uintptr(0) if the system does not use a poll descriptor.
+func poll_runtime_pollServerDescriptor() uintptr {
+ return netpolldescriptor()
+}
+
+//go:linkname poll_runtime_pollOpen internal_poll.runtime_pollOpen
+func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
pd := pollcache.alloc()
lock(&pd.lock)
if pd.wg != 0 && pd.wg != pdReady {
- throw("netpollOpen: blocked write on free descriptor")
+ throw("runtime: blocked write on free polldesc")
}
if pd.rg != 0 && pd.rg != pdReady {
- throw("netpollOpen: blocked read on free descriptor")
+ throw("runtime: blocked read on free polldesc")
}
pd.fd = fd
pd.closing = false
@@ -118,16 +127,16 @@ func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
return pd, int(errno)
}
-//go:linkname net_runtime_pollClose net.runtime_pollClose
-func net_runtime_pollClose(pd *pollDesc) {
+//go:linkname poll_runtime_pollClose internal_poll.runtime_pollClose
+func poll_runtime_pollClose(pd *pollDesc) {
if !pd.closing {
- throw("netpollClose: close w/o unblock")
+ throw("runtime: close polldesc w/o unblock")
}
if pd.wg != 0 && pd.wg != pdReady {
- throw("netpollClose: blocked write on closing descriptor")
+ throw("runtime: blocked write on closing polldesc")
}
if pd.rg != 0 && pd.rg != pdReady {
- throw("netpollClose: blocked read on closing descriptor")
+ throw("runtime: blocked read on closing polldesc")
}
netpollclose(pd.fd)
pollcache.free(pd)
@@ -140,8 +149,8 @@ func (c *pollCache) free(pd *pollDesc) {
unlock(&c.lock)
}
-//go:linkname net_runtime_pollReset net.runtime_pollReset
-func net_runtime_pollReset(pd *pollDesc, mode int) int {
+//go:linkname poll_runtime_pollReset internal_poll.runtime_pollReset
+func poll_runtime_pollReset(pd *pollDesc, mode int) int {
err := netpollcheckerr(pd, int32(mode))
if err != 0 {
return err
@@ -154,8 +163,8 @@ func net_runtime_pollReset(pd *pollDesc, mode int) int {
return 0
}
-//go:linkname net_runtime_pollWait net.runtime_pollWait
-func net_runtime_pollWait(pd *pollDesc, mode int) int {
+//go:linkname poll_runtime_pollWait internal_poll.runtime_pollWait
+func poll_runtime_pollWait(pd *pollDesc, mode int) int {
err := netpollcheckerr(pd, int32(mode))
if err != 0 {
return err
@@ -176,16 +185,16 @@ func net_runtime_pollWait(pd *pollDesc, mode int) int {
return 0
}
-//go:linkname net_runtime_pollWaitCanceled net.runtime_pollWaitCanceled
-func net_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
+//go:linkname poll_runtime_pollWaitCanceled internal_poll.runtime_pollWaitCanceled
+func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
// This function is used only on windows after a failed attempt to cancel
// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
for !netpollblock(pd, int32(mode), true) {
}
}
-//go:linkname net_runtime_pollSetDeadline net.runtime_pollSetDeadline
-func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
+//go:linkname poll_runtime_pollSetDeadline internal_poll.runtime_pollSetDeadline
+func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
lock(&pd.lock)
if pd.closing {
unlock(&pd.lock)
@@ -247,18 +256,18 @@ func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
}
unlock(&pd.lock)
if rg != nil {
- goready(rg, 3)
+ netpollgoready(rg, 3)
}
if wg != nil {
- goready(wg, 3)
+ netpollgoready(wg, 3)
}
}
-//go:linkname net_runtime_pollUnblock net.runtime_pollUnblock
-func net_runtime_pollUnblock(pd *pollDesc) {
+//go:linkname poll_runtime_pollUnblock internal_poll.runtime_pollUnblock
+func poll_runtime_pollUnblock(pd *pollDesc) {
lock(&pd.lock)
if pd.closing {
- throw("netpollUnblock: already closing")
+ throw("runtime: unblock on closing polldesc")
}
pd.closing = true
pd.seq++
@@ -276,10 +285,10 @@ func net_runtime_pollUnblock(pd *pollDesc) {
}
unlock(&pd.lock)
if rg != nil {
- goready(rg, 3)
+ netpollgoready(rg, 3)
}
if wg != nil {
- goready(wg, 3)
+ netpollgoready(wg, 3)
}
}
@@ -315,7 +324,19 @@ func netpollcheckerr(pd *pollDesc, mode int32) int {
}
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
- return atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+ r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+ if r {
+ // Bump the count of goroutines waiting for the poller.
+ // The scheduler uses this to decide whether to block
+ // waiting for the poller if there is nothing else to do.
+ atomic.Xadd(&netpollWaiters, 1)
+ }
+ return r
+}
+
+func netpollgoready(gp *g, traceskip int) {
+ atomic.Xadd(&netpollWaiters, -1)
+ goready(gp, traceskip+1)
}
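
netpollWaiters is maintained on exactly the two transitions that matter: the CAS that parks a goroutine and the wake that makes it runnable again, so the count never drifts. A toy version of that pairing (park/wake and the state encoding are illustrative):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    var waiters int32

    // park counts a waiter only if its CAS wins, as netpollblockcommit does.
    func park(state *int32) bool {
    	if atomic.CompareAndSwapInt32(state, 0, 1) { // 1 = parked
    		atomic.AddInt32(&waiters, 1)
    		return true
    	}
    	return false
    }

    // wake undoes the count before making the waiter runnable,
    // as netpollgoready does.
    func wake(state *int32) {
    	if atomic.CompareAndSwapInt32(state, 1, 0) {
    		atomic.AddInt32(&waiters, -1)
    	}
    }

    func main() {
    	var fdState int32
    	park(&fdState)
    	fmt.Println(atomic.LoadInt32(&waiters)) // 1
    	wake(&fdState)
    	fmt.Println(atomic.LoadInt32(&waiters)) // 0
    }
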
// returns true if IO is ready, or false if timedout or closed
@@ -334,7 +355,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
return true
}
if old != 0 {
- throw("netpollblock: double wait")
+ throw("runtime: double wait")
}
if atomic.Casuintptr(gpp, 0, pdWait) {
break
@@ -350,7 +371,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
// be careful to not lose concurrent READY notification
old := atomic.Xchguintptr(gpp, 0)
if old > pdWait {
- throw("netpollblock: corrupted state")
+ throw("runtime: corrupted polldesc")
}
return old == pdReady
}
@@ -396,7 +417,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
var rg *g
if read {
if pd.rd <= 0 || pd.rt.f == nil {
- throw("netpolldeadlineimpl: inconsistent read deadline")
+ throw("runtime: inconsistent read deadline")
}
pd.rd = -1
atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
@@ -405,7 +426,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
var wg *g
if write {
if pd.wd <= 0 || pd.wt.f == nil && !read {
- throw("netpolldeadlineimpl: inconsistent write deadline")
+ throw("runtime: inconsistent write deadline")
}
pd.wd = -1
atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
@@ -413,10 +434,10 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
}
unlock(&pd.lock)
if rg != nil {
- goready(rg, 0)
+ netpollgoready(rg, 0)
}
if wg != nil {
- goready(wg, 0)
+ netpollgoready(wg, 0)
}
}
diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go
index 247692ef042..ced399d781e 100644
--- a/libgo/go/runtime/netpoll_epoll.go
+++ b/libgo/go/runtime/netpoll_epoll.go
@@ -47,6 +47,10 @@ func netpollinit() {
throw("netpollinit: failed to create descriptor")
}
+func netpolldescriptor() uintptr {
+ return uintptr(epfd)
+}
+
func netpollopen(fd uintptr, pd *pollDesc) int32 {
var ev epollevent
ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLETpos
@@ -66,7 +70,7 @@ func netpollclose(fd uintptr) int32 {
}
func netpollarm(pd *pollDesc, mode int) {
- throw("unused")
+ throw("runtime: unused")
}
// polls for ready network connections
@@ -86,7 +90,7 @@ retry:
e := errno()
if e != _EINTR {
println("runtime: epollwait on fd", epfd, "failed with", e)
- throw("epollwait failed")
+ throw("runtime: netpoll failed")
}
goto retry
}
diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go
index eae4f21d1df..47927fe7c37 100644
--- a/libgo/go/runtime/netpoll_kqueue.go
+++ b/libgo/go/runtime/netpoll_kqueue.go
@@ -32,11 +32,15 @@ func netpollinit() {
kq = kqueue()
if kq < 0 {
println("netpollinit: kqueue failed with", errno())
- throw("netpollinit: kqueue failed")
+ throw("runtime: netpollinit failed")
}
closeonexec(kq)
}
+func netpolldescriptor() uintptr {
+ return uintptr(kq)
+}
+
func netpollopen(fd uintptr, pd *pollDesc) int32 {
// Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
// for the whole fd lifetime. The notifications are automatically unregistered
@@ -64,7 +68,7 @@ func netpollclose(fd uintptr) int32 {
}
func netpollarm(pd *pollDesc, mode int) {
- throw("unused")
+ throw("runtime: unused")
}
// Polls for ready network connections.
@@ -85,7 +89,7 @@ retry:
e := errno()
if e != _EINTR {
println("runtime: kevent on fd", kq, "failed with", e)
- throw("kevent failed")
+ throw("runtime: netpoll failed")
}
goto retry
}
diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go
index 5cbc3003214..dc5a55ec846 100644
--- a/libgo/go/runtime/netpoll_nacl.go
+++ b/libgo/go/runtime/netpoll_nacl.go
@@ -10,6 +10,10 @@ package runtime
func netpollinit() {
}
+func netpolldescriptor() uintptr {
+ return ^uintptr(0)
+}
+
func netpollopen(fd uintptr, pd *pollDesc) int32 {
return 0
}
diff --git a/libgo/go/runtime/netpoll_solaris.go b/libgo/go/runtime/netpoll_solaris.go
index cc6754cd2eb..e1e73857f40 100644
--- a/libgo/go/runtime/netpoll_solaris.go
+++ b/libgo/go/runtime/netpoll_solaris.go
@@ -96,8 +96,12 @@ func netpollinit() {
return
}
- print("netpollinit: failed to create port (", errno(), ")\n")
- throw("netpollinit: failed to create port")
+ print("runtime: port_create failed (errno=", errno(), ")\n")
+ throw("runtime: netpollinit failed")
+}
+
+func netpolldescriptor() uintptr {
+ return uintptr(portfd)
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
@@ -139,8 +143,8 @@ func netpollupdate(pd *pollDesc, set, clear uint32) {
}
if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
- print("netpollupdate: failed to associate (", errno(), ")\n")
- throw("netpollupdate: failed to associate")
+ print("runtime: port_associate failed (errno=", errno(), ")\n")
+ throw("runtime: netpollupdate failed")
}
pd.user = events
}
@@ -154,7 +158,7 @@ func netpollarm(pd *pollDesc, mode int) {
case 'w':
netpollupdate(pd, _POLLOUT, 0)
default:
- throw("netpollarm: bad mode")
+ throw("runtime: bad mode")
}
unlock(&pd.lock)
}
@@ -177,8 +181,8 @@ retry:
var n uint32 = 1
if port_getn(portfd, &events[0], uint32(len(events)), &n, wait) < 0 {
if e := errno(); e != _EINTR {
- print("runtime: port_getn on fd ", portfd, " failed with ", e, "\n")
- throw("port_getn failed")
+ print("runtime: port_getn on fd ", portfd, " failed (errno=", e, ")\n")
+ throw("runtime: netpoll failed")
}
goto retry
}
diff --git a/libgo/go/runtime/netpoll_stub.go b/libgo/go/runtime/netpoll_stub.go
index 09f64ad9b5b..a4d6b4608ac 100644
--- a/libgo/go/runtime/netpoll_stub.go
+++ b/libgo/go/runtime/netpoll_stub.go
@@ -6,6 +6,8 @@
package runtime
+var netpollWaiters uint32
+
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
func netpoll(block bool) (gp *g) {
diff --git a/libgo/go/runtime/netpoll_windows.go b/libgo/go/runtime/netpoll_windows.go
index 7ad115850d5..79dafb02796 100644
--- a/libgo/go/runtime/netpoll_windows.go
+++ b/libgo/go/runtime/netpoll_windows.go
@@ -12,7 +12,8 @@ const _DWORD_MAX = 0xffffffff
const _INVALID_HANDLE_VALUE = ^uintptr(0)
-// net_op must be the same as beginning of net.operation. Keep these in sync.
+// net_op must be the same as beginning of internal/poll.operation.
+// Keep these in sync.
type net_op struct {
// used by windows
o overlapped
@@ -35,11 +36,15 @@ var iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
func netpollinit() {
iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
if iocphandle == 0 {
- println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
- throw("netpoll: failed to create iocp handle")
+ println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")")
+ throw("runtime: netpollinit failed")
}
}
+func netpolldescriptor() uintptr {
+ return iocphandle
+}
+
func netpollopen(fd uintptr, pd *pollDesc) int32 {
if stdcall4(_CreateIoCompletionPort, fd, iocphandle, 0, 0) == 0 {
return -int32(getlasterror())
@@ -53,7 +58,7 @@ func netpollclose(fd uintptr) int32 {
}
func netpollarm(pd *pollDesc, mode int) {
- throw("unused")
+ throw("runtime: unused")
}
// Polls for completed network IO.
@@ -89,8 +94,8 @@ retry:
if !block && errno == _WAIT_TIMEOUT {
return nil
}
- println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
- throw("netpoll: GetQueuedCompletionStatusEx failed")
+ println("runtime: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
+ throw("runtime: netpoll failed")
}
mp.blocked = false
for i = 0; i < n; i++ {
@@ -116,8 +121,8 @@ retry:
return nil
}
if op == nil {
- println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
- throw("netpoll: GetQueuedCompletionStatus failed")
+ println("runtime: GetQueuedCompletionStatus failed (errno=", errno, ")")
+ throw("runtime: netpoll failed")
}
// dequeued failed IO packet, so report that
}
@@ -132,12 +137,13 @@ retry:
func handlecompletion(gpp *guintptr, op *net_op, errno int32, qty uint32) {
if op == nil {
- throw("netpoll: GetQueuedCompletionStatus returned op == nil")
+ println("runtime: GetQueuedCompletionStatus returned op == nil")
+ throw("runtime: netpoll failed")
}
mode := op.mode
if mode != 'r' && mode != 'w' {
- println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
- throw("netpoll: GetQueuedCompletionStatus returned invalid mode")
+ println("runtime: GetQueuedCompletionStatus returned invalid mode=", mode)
+ throw("runtime: netpoll failed")
}
op.errno = errno
op.qty = qty
diff --git a/libgo/go/runtime/numcpu_freebsd_test.go b/libgo/go/runtime/numcpu_freebsd_test.go
new file mode 100644
index 00000000000..e78890a6a4b
--- /dev/null
+++ b/libgo/go/runtime/numcpu_freebsd_test.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import "testing"
+
+func TestFreeBSDNumCPU(t *testing.T) {
+ got := runTestProg(t, "testprog", "FreeBSDNumCPU")
+ want := "OK\n"
+ if got != want {
+ t.Fatalf("expected %q, but got:\n%s", want, got)
+ }
+}
diff --git a/libgo/go/runtime/os_gccgo.go b/libgo/go/runtime/os_gccgo.go
index db3ea48ef48..5709555acdb 100644
--- a/libgo/go/runtime/os_gccgo.go
+++ b/libgo/go/runtime/os_gccgo.go
@@ -8,7 +8,7 @@ import (
"unsafe"
)
-// Temporary for C code to call:
+// For C code to call:
//go:linkname minit runtime.minit
func goenvs() {
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 43d595f667e..2f656038a9e 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -450,6 +450,8 @@ func gopanic(e interface{}) {
}
gp._panic = p
+ atomic.Xadd(&runningPanicDefers, 1)
+
for {
d := gp._defer
if d == nil {
@@ -486,8 +488,8 @@ func gopanic(e interface{}) {
d._panic = nil
if p.recovered {
- // Some deferred function called recover.
- // Stop running this panic.
+ atomic.Xadd(&runningPanicDefers, -1)
+
gp._panic = p.link
// Aborted panics are marked but remain on the g.panic list.
@@ -531,6 +533,11 @@ func gopanic(e interface{}) {
// and String methods to prepare the panic strings before startpanic.
preprintpanics(gp._panic)
startpanic()
+
+ // startpanic set panicking, which will block main from exiting,
+ // so now OK to decrement runningPanicDefers.
+ atomic.Xadd(&runningPanicDefers, -1)
+
printpanics(gp._panic)
dopanic(0) // should not return
*(*int)(nil) = 0 // not reached
@@ -801,7 +808,17 @@ func throw(s string) {
*(*int)(nil) = 0 // not reached
}
-//uint32 runtime·panicking;
+// runningPanicDefers is non-zero while running deferred functions for panic.
+// runningPanicDefers is incremented and decremented atomically.
+// This is used to try hard to get a panic stack trace out when exiting.
+var runningPanicDefers uint32
+
+// panicking is non-zero when crashing the program for an unrecovered panic.
+// panicking is incremented and decremented atomically.
+var panicking uint32
+
+// paniclk is held while printing the panic information and stack trace,
+// so that two concurrent panics don't overlap their output.
var paniclk mutex
func startpanic() {
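
The counter is only useful because exit paths poll it before terminating the process. A userland analogy of the same idea (the sleeps and timing are for illustration only; the real runtime spins in its exit path):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    	"time"
    )

    var runningPanicDefers uint32

    func main() {
    	go func() {
    		atomic.AddUint32(&runningPanicDefers, 1)
    		defer atomic.AddUint32(&runningPanicDefers, ^uint32(0)) // -1
    		defer fmt.Println("deferred cleanup during panic")
    		time.Sleep(10 * time.Millisecond)
    	}()
    	time.Sleep(time.Millisecond) // let the goroutine start
    	// Delay exit while panic defers are still running, so their
    	// output is not lost.
    	for atomic.LoadUint32(&runningPanicDefers) != 0 {
    		time.Sleep(time.Millisecond)
    	}
    }
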
diff --git a/libgo/go/runtime/pprof/elf.go b/libgo/go/runtime/pprof/elf.go
new file mode 100644
index 00000000000..a8b5ea68175
--- /dev/null
+++ b/libgo/go/runtime/pprof/elf.go
@@ -0,0 +1,109 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+)
+
+var (
+ errBadELF = errors.New("malformed ELF binary")
+ errNoBuildID = errors.New("no NT_GNU_BUILD_ID found in ELF binary")
+)
+
+// elfBuildID returns the GNU build ID of the named ELF binary,
+// without introducing a dependency on debug/elf and its dependencies.
+func elfBuildID(file string) (string, error) {
+ buf := make([]byte, 256)
+ f, err := os.Open(file)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ if _, err := f.ReadAt(buf[:64], 0); err != nil {
+ return "", err
+ }
+
+ // ELF file begins with \x7F E L F.
+ if buf[0] != 0x7F || buf[1] != 'E' || buf[2] != 'L' || buf[3] != 'F' {
+ return "", errBadELF
+ }
+
+ var byteOrder binary.ByteOrder
+ switch buf[5] {
+ default:
+ return "", errBadELF
+ case 1: // little-endian
+ byteOrder = binary.LittleEndian
+ case 2: // big-endian
+ byteOrder = binary.BigEndian
+ }
+
+ var shnum int
+ var shoff, shentsize int64
+ switch buf[4] {
+ default:
+ return "", errBadELF
+ case 1: // 32-bit file header
+ shoff = int64(byteOrder.Uint32(buf[32:]))
+ shentsize = int64(byteOrder.Uint16(buf[46:]))
+ if shentsize != 40 {
+ return "", errBadELF
+ }
+ shnum = int(byteOrder.Uint16(buf[48:]))
+ case 2: // 64-bit file header
+ shoff = int64(byteOrder.Uint64(buf[40:]))
+ shentsize = int64(byteOrder.Uint16(buf[58:]))
+ if shentsize != 64 {
+ return "", errBadELF
+ }
+ shnum = int(byteOrder.Uint16(buf[60:]))
+ }
+
+ for i := 0; i < shnum; i++ {
+ if _, err := f.ReadAt(buf[:shentsize], shoff+int64(i)*shentsize); err != nil {
+ return "", err
+ }
+ if typ := byteOrder.Uint32(buf[4:]); typ != 7 { // SHT_NOTE
+ continue
+ }
+ var off, size int64
+ if shentsize == 40 {
+ // 32-bit section header
+ off = int64(byteOrder.Uint32(buf[16:]))
+ size = int64(byteOrder.Uint32(buf[20:]))
+ } else {
+ // 64-bit section header
+ off = int64(byteOrder.Uint64(buf[24:]))
+ size = int64(byteOrder.Uint64(buf[32:]))
+ }
+ size += off
+ for off < size {
+ if _, err := f.ReadAt(buf[:16], off); err != nil { // room for header + name GNU\x00
+ return "", err
+ }
+ nameSize := int(byteOrder.Uint32(buf[0:]))
+ descSize := int(byteOrder.Uint32(buf[4:]))
+ noteType := int(byteOrder.Uint32(buf[8:]))
+ descOff := off + int64(12+(nameSize+3)&^3)
+ off = descOff + int64((descSize+3)&^3)
+ if nameSize != 4 || noteType != 3 || buf[12] != 'G' || buf[13] != 'N' || buf[14] != 'U' || buf[15] != '\x00' { // want name GNU\x00 type 3 (NT_GNU_BUILD_ID)
+ continue
+ }
+ if descSize > len(buf) {
+ return "", errBadELF
+ }
+ if _, err := f.ReadAt(buf[:descSize], descOff); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%x", buf[:descSize]), nil
+ }
+ }
+ return "", errNoBuildID
+}
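+
+// A minimal usage sketch for elfBuildID; the path is illustrative only:
+//
+//	id, err := elfBuildID("/usr/bin/true")
+//	if err != nil {
+//		log.Fatal(err) // errNoBuildID if the binary lacks an NT_GNU_BUILD_ID note
+//	}
+//	fmt.Printf("build ID: %s\n", id)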
diff --git a/libgo/go/runtime/pprof/internal/profile/encode.go b/libgo/go/runtime/pprof/internal/profile/encode.go
new file mode 100644
index 00000000000..6b879a84acd
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/encode.go
@@ -0,0 +1,470 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ Label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ vs := s.NumLabel[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ Label{
+ keyX: addString(strings, k),
+ numX: v,
+ },
+ )
+ }
+ }
+ s.locationIDX = nil
+ for _, l := range s.Location {
+ s.locationIDX = append(s.locationIDX, l.ID)
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+		if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+	// optional int64 drop_frames = 7
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+	// optional int64 keep_frames = 8
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+	// optional int64 time_nanos = 9
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).TimeNanos) },
+	// optional int64 duration_nanos = 10
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+	// optional ValueType period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+	// optional int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+
+ mappings := make(map[uint64]*Mapping)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ mappings[m.ID] = m
+ }
+
+ functions := make(map[uint64]*Function)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ functions[f.ID] = f
+ }
+
+ locations := make(map[uint64]*Location)
+ for _, l := range p.Location {
+ l.Mapping = mappings[l.mappingIDX]
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].Function = functions[id]
+ if l.Line[i].Function == nil {
+ return fmt.Errorf("Function ID %d not found", id)
+ }
+ l.Line[i].functionIDX = 0
+ }
+ }
+ locations[l.ID] = l
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ for _, s := range p.Sample {
+ labels := make(map[string][]string)
+ numLabels := make(map[string][]int64)
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else {
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ }
+ s.Location = nil
+ for _, lid := range s.locationIDX {
+ s.Location = append(s.Location, locations[lid])
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+ p.stringTable = nil
+ return nil
+}
+
+func (p *ValueType) decoder() []decoder {
+ return valueTypeDecoder
+}
+
+func (p *ValueType) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.typeX)
+ encodeInt64Opt(b, 2, p.unitX)
+}
+
+var valueTypeDecoder = []decoder{
+ nil, // 0
+ // optional int64 type = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+ // optional int64 unit = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+func (p *Sample) decoder() []decoder {
+ return sampleDecoder
+}
+
+func (p *Sample) encode(b *buffer) {
+ encodeUint64s(b, 1, p.locationIDX)
+ for _, x := range p.Value {
+ encodeInt64(b, 2, x)
+ }
+ for _, x := range p.labelX {
+ encodeMessage(b, 3, x)
+ }
+}
+
+var sampleDecoder = []decoder{
+ nil, // 0
+ // repeated uint64 location = 1
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+ // repeated int64 value = 2
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+ // repeated Label label = 3
+ func(b *buffer, m message) error {
+ s := m.(*Sample)
+ n := len(s.labelX)
+ s.labelX = append(s.labelX, Label{})
+ return decodeMessage(b, &s.labelX[n])
+ },
+}
+
+func (p Label) decoder() []decoder {
+ return labelDecoder
+}
+
+func (p Label) encode(b *buffer) {
+ encodeInt64Opt(b, 1, p.keyX)
+ encodeInt64Opt(b, 2, p.strX)
+ encodeInt64Opt(b, 3, p.numX)
+}
+
+var labelDecoder = []decoder{
+ nil, // 0
+ // optional int64 key = 1
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).keyX) },
+ // optional int64 str = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).strX) },
+ // optional int64 num = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Label).numX) },
+}
+
+func (p *Mapping) decoder() []decoder {
+ return mappingDecoder
+}
+
+func (p *Mapping) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.Start)
+ encodeUint64Opt(b, 3, p.Limit)
+ encodeUint64Opt(b, 4, p.Offset)
+ encodeInt64Opt(b, 5, p.fileX)
+ encodeInt64Opt(b, 6, p.buildIDX)
+ encodeBoolOpt(b, 7, p.HasFunctions)
+ encodeBoolOpt(b, 8, p.HasFilenames)
+ encodeBoolOpt(b, 9, p.HasLineNumbers)
+ encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+var mappingDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_start = 2
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
+ func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+func (p *Location) decoder() []decoder {
+ return locationDecoder
+}
+
+func (p *Location) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeUint64Opt(b, 2, p.mappingIDX)
+ encodeUint64Opt(b, 3, p.Address)
+ for i := range p.Line {
+ encodeMessage(b, 4, &p.Line[i])
+ }
+}
+
+var locationDecoder = []decoder{
+ nil, // 0
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
+ func(b *buffer, m message) error { // repeated Line line = 4
+ pp := m.(*Location)
+ n := len(pp.Line)
+ pp.Line = append(pp.Line, Line{})
+ return decodeMessage(b, &pp.Line[n])
+ },
+}
+
+func (p *Line) decoder() []decoder {
+ return lineDecoder
+}
+
+func (p *Line) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.functionIDX)
+ encodeInt64Opt(b, 2, p.Line)
+}
+
+var lineDecoder = []decoder{
+ nil, // 0
+ // optional uint64 function_id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+ // optional int64 line = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+func (p *Function) decoder() []decoder {
+ return functionDecoder
+}
+
+func (p *Function) encode(b *buffer) {
+ encodeUint64Opt(b, 1, p.ID)
+ encodeInt64Opt(b, 2, p.nameX)
+ encodeInt64Opt(b, 3, p.systemNameX)
+ encodeInt64Opt(b, 4, p.filenameX)
+ encodeInt64Opt(b, 5, p.StartLine)
+}
+
+var functionDecoder = []decoder{
+ nil, // 0
+ // optional uint64 id = 1
+ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+ // optional int64 function_name = 2
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+ // optional int64 function_system_name = 3
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+ // optional int64 start_line = 5
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+func addString(strings map[string]int, s string) int64 {
+ i, ok := strings[s]
+ if !ok {
+ i = len(strings)
+ strings[s] = i
+ }
+ return int64(i)
+}
+
+func getString(strings []string, strng *int64, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ s := int(*strng)
+ if s < 0 || s >= len(strings) {
+ return "", errMalformed
+ }
+ *strng = 0
+ return strings[s], nil
+}
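+
+// A small sketch of the string-table round trip implemented by the two
+// helpers above; indices are deterministic given insertion order:
+//
+//	tbl := make(map[string]int)
+//	addString(tbl, "")           // the empty string is always index 0
+//	cpu := addString(tbl, "cpu") // new entry, returns 1
+//	dup := addString(tbl, "cpu") // deduplicated, also returns 1
+//	_ = cpu == dup               // true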
diff --git a/libgo/go/runtime/pprof/internal/profile/filter.go b/libgo/go/runtime/pprof/internal/profile/filter.go
new file mode 100644
index 00000000000..1baa096a49c
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/filter.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implements methods to filter samples from profiles.
+
+package profile
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Frames matching hide are dropped from their locations, and samples
+// left with no locations are removed. The returned booleans report
+// whether focus, ignore and hide each matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, im, hm bool) {
+ focusOrIgnore := make(map[uint64]bool)
+ hidden := make(map[uint64]bool)
+ for _, l := range p.Location {
+ if ignore != nil && l.matchesName(ignore) {
+ im = true
+ focusOrIgnore[l.ID] = false
+ } else if focus == nil || l.matchesName(focus) {
+ fm = true
+ focusOrIgnore[l.ID] = true
+ }
+ if hide != nil && l.matchesName(hide) {
+ hm = true
+ l.Line = l.unmatchedLines(hide)
+ if len(l.Line) == 0 {
+ hidden[l.ID] = true
+ }
+ }
+ }
+
+ s := make([]*Sample, 0, len(p.Sample))
+ for _, sample := range p.Sample {
+ if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+ if len(hidden) > 0 {
+ var locs []*Location
+ for _, loc := range sample.Location {
+ if !hidden[loc.ID] {
+ locs = append(locs, loc)
+ }
+ }
+ if len(locs) == 0 {
+ // Remove sample with no locations (by not adding it to s).
+ continue
+ }
+ sample.Location = locs
+ }
+ s = append(s, sample)
+ }
+ }
+ p.Sample = s
+
+ return
+}
+
+// matchesName returns whether the function name or file in the
+// location matches the regular expression.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) {
+ return true
+ }
+ if re.MatchString(fn.Filename) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+ var lines []Line
+ for _, ln := range loc.Line {
+ if fn := ln.Function; fn != nil {
+ if re.MatchString(fn.Name) {
+ continue
+ }
+ if re.MatchString(fn.Filename) {
+ continue
+ }
+ }
+ lines = append(lines, ln)
+ }
+ return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+ var f bool
+ for _, loc := range locs {
+ if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+ if focus {
+ // Found focused location. Must keep searching in case there
+ // is an ignored one as well.
+ f = true
+ } else {
+ // Found ignored location. Can return false right away.
+ return false
+ }
+ }
+ }
+ return f
+}
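+
+// A minimal usage sketch for FilterSamplesByName; the regexps are
+// illustrative:
+//
+//	focus := regexp.MustCompile(`runtime\.mallocgc`)
+//	ignore := regexp.MustCompile(`testing\.`)
+//	fm, im, hm := p.FilterSamplesByName(focus, ignore, nil)
+//	// p.Sample now holds only samples with a frame matching focus and
+//	// none matching ignore; fm, im and hm report whether each regexp
+//	// matched at least one sample.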
+
+// TagMatch selects tags for filtering
+type TagMatch func(key, val string, nval int64) bool
+
+// FilterSamplesByTag removes all samples from the profile except
+// those for which the focus matcher returns true and the ignore
+// matcher does not.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+ samples := make([]*Sample, 0, len(p.Sample))
+ for _, s := range p.Sample {
+ focused, ignored := focusedSample(s, focus, ignore)
+ fm = fm || focused
+ im = im || ignored
+ if focused && !ignored {
+ samples = append(samples, s)
+ }
+ }
+ p.Sample = samples
+ return
+}
+
+// focusedSample checks a sample against the focus and ignore tag matchers.
+// Returns whether the focus/ignore matchers matched any tags.
+func focusedSample(s *Sample, focus, ignore TagMatch) (fm, im bool) {
+ fm = focus == nil
+ for key, vals := range s.Label {
+ for _, val := range vals {
+ if ignore != nil && ignore(key, val, 0) {
+ im = true
+ }
+ if !fm && focus(key, val, 0) {
+ fm = true
+ }
+ }
+ }
+ for key, vals := range s.NumLabel {
+ for _, val := range vals {
+ if ignore != nil && ignore(key, "", val) {
+ im = true
+ }
+ if !fm && focus(key, "", val) {
+ fm = true
+ }
+ }
+ }
+ return fm, im
+}
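+
+// A minimal usage sketch for FilterSamplesByTag; the "bytes" key is the
+// NumLabel set by the heap profile parser in this package:
+//
+//	keepBig := func(key, val string, nval int64) bool {
+//		return key == "bytes" && nval >= 1024
+//	}
+//	fm, im := p.FilterSamplesByTag(keepBig, nil)
+//	// Only samples tagged with a block size of at least 1 KiB remain.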
diff --git a/libgo/go/runtime/pprof/internal/profile/legacy_profile.go b/libgo/go/runtime/pprof/internal/profile/legacy_profile.go
new file mode 100644
index 00000000000..d69f8deee7c
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/legacy_profile.go
@@ -0,0 +1,1266 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ countStartRE = regexp.MustCompile(`\A(\w+) profile: total \d+\n\z`)
+ countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\n\z`)
+
+ heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+ heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+ contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+ hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+ growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz`)
+
+ fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz`)
+
+ threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+ threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+ procMapsRE = regexp.MustCompile(`([[:xdigit:]]+)-([[:xdigit:]]+)\s+([-rwxp]+)\s+([[:xdigit:]]+)\s+([[:xdigit:]]+):([[:xdigit:]]+)\s+([[:digit:]]+)\s*(\S+)?`)
+
+ briefMapsRE = regexp.MustCompile(`\s*([[:xdigit:]]+)-([[:xdigit:]]+):\s*(\S+)(\s.*@)?([[:xdigit:]]+)?`)
+
+ // LegacyHeapAllocated instructs the heapz parsers to use the
+ // allocated memory stats instead of the default in-use memory. Note
+ // that tcmalloc doesn't provide all allocated memory, only in-use
+ // stats.
+ LegacyHeapAllocated bool
+)
+
+func isSpaceOrComment(line string) bool {
+ trimmed := strings.TrimSpace(line)
+ return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ var line string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(line) {
+ break
+ }
+ }
+
+ m := countStartRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errUnrecognized
+ }
+ profileType := m[1]
+ p := &Profile{
+ PeriodType: &ValueType{Type: profileType, Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+ }
+ locations := make(map[uint64]*Location)
+ for {
+ line, err = r.ReadString('\n')
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+ if isSpaceOrComment(line) {
+ continue
+ }
+ if strings.HasPrefix(line, "---") {
+ break
+ }
+ m := countRE.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errMalformed
+ }
+ n, err := strconv.ParseInt(m[1], 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ fields := strings.Fields(m[2])
+ locs := make([]*Location, 0, len(fields))
+ for _, stk := range fields {
+ addr, err := strconv.ParseUint(stk, 0, 64)
+ if err != nil {
+ return nil, errMalformed
+ }
+ // Adjust all frames by -1 to land on the call instruction.
+ addr--
+ loc := locations[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locations[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ locs = append(locs, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Location: locs,
+ Value: []int64{n},
+ })
+ }
+
+ if err = parseAdditionalSections(strings.TrimSpace(line), r, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
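+
+// Input accepted by parseGoCount, per countStartRE and countRE; the
+// addresses are illustrative:
+//
+//	goroutine profile: total 3
+//	2 @ 0x42e15a 0x42e1b0 0x45a543
+//	1 @ 0x42e15a 0x471fa2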
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+ seen := make(map[*Location]bool, len(p.Location))
+ var locs []*Location
+
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ if seen[l] {
+ continue
+ }
+ l.ID = uint64(len(locs) + 1)
+ locs = append(locs, l)
+ seen[l] = true
+ }
+ }
+ p.Location = locs
+}
+
+func (p *Profile) remapFunctionIDs() {
+ seen := make(map[*Function]bool, len(p.Function))
+ var fns []*Function
+
+ for _, l := range p.Location {
+ for _, ln := range l.Line {
+ fn := ln.Function
+ if fn == nil || seen[fn] {
+ continue
+ }
+ fn.ID = uint64(len(fns) + 1)
+ fns = append(fns, fn)
+ seen[fn] = true
+ }
+ }
+ p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M); if it ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+ if len(p.Mapping) == 0 {
+ return
+ }
+
+ // Some profile handlers will incorrectly set regions for the main
+ // executable if its section is remapped. Fix them through heuristics.
+
+ // Remove the initial mapping if named '/anon_hugepage' and has a
+ // consecutive adjacent mapping.
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+ if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+ p.Mapping = p.Mapping[1:]
+ }
+ }
+
+ // Subtract the offset from the start of the main mapping if it
+ // ends up at a recognizable start address.
+ const expectedStart = 0x400000
+ if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+ m.Start = expectedStart
+ m.Offset = 0
+ }
+
+ for _, l := range p.Location {
+ if a := l.Address; a != 0 {
+ for _, m := range p.Mapping {
+ if m.Start <= a && a < m.Limit {
+ l.Mapping = m
+ break
+ }
+ }
+ }
+ }
+
+ // Reset all mapping IDs.
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+var cpuInts = []func([]byte) (uint64, []byte){
+ get32l,
+ get32b,
+ get64l,
+ get64b,
+}
+
+func get32l(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+func get32b(b []byte) (uint64, []byte) {
+ if len(b) < 4 {
+ return 0, nil
+ }
+ return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+func get64l(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+func get64b(b []byte) (uint64, []byte) {
+ if len(b) < 8 {
+ return 0, nil
+ }
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// ParseTracebacks parses a set of tracebacks and returns a newly
+// populated profile. It will accept any text file and generate a
+// Profile out of it with any hex addresses it can identify, including
+// a process map if it can recognize one. Each sample will include a
+// tag "source" with the addresses recognized in string format.
+func ParseTracebacks(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ p := &Profile{
+ PeriodType: &ValueType{Type: "trace", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "trace", Unit: "count"},
+ },
+ }
+
+ var sources []string
+ var sloc []*Location
+
+ locs := make(map[uint64]*Location)
+ for {
+ l, err := r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if sectionTrigger(l) == memoryMapSection {
+ break
+ }
+ if s, addrs := extractHexAddresses(l); len(s) > 0 {
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ sources = append(sources, s...)
+ } else {
+ if len(sources) > 0 || len(sloc) > 0 {
+ addTracebackSample(sloc, sources, p)
+ sloc, sources = nil, nil
+ }
+ }
+ }
+
+ // Add final sample to save any leftover data.
+ if len(sources) > 0 || len(sloc) > 0 {
+ addTracebackSample(sloc, sources, p)
+ }
+
+ if err := p.ParseMemoryMap(r); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
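+
+// ParseTracebacks input is free-form; lines containing hex addresses
+// extend the current sample and any other line flushes it (addresses
+// illustrative):
+//
+//	worker goroutine stack:
+//	0x45a543 0x42e1b0
+//	0x471fa2
+//
+// A line with no hex addresses ends the sample being accumulated.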
+
+func addTracebackSample(l []*Location, s []string, p *Profile) {
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{1},
+ Location: l,
+ Label: map[string][]string{"source": s},
+ })
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+ var parse func([]byte) (uint64, []byte)
+ var n1, n2, n3, n4, n5 uint64
+ for _, parse = range cpuInts {
+ var tmp []byte
+ n1, tmp = parse(b)
+ n2, tmp = parse(tmp)
+ n3, tmp = parse(tmp)
+ n4, tmp = parse(tmp)
+ n5, tmp = parse(tmp)
+
+ if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+ b = tmp
+ return cpuProfile(b, int64(n4), parse)
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+ p := &Profile{
+ Period: period * 1000,
+ PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+ SampleType: []*ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ },
+ }
+ var err error
+ if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+ return nil, err
+ }
+
+ // If all samples have the same second-to-the-bottom frame, it
+ // strongly suggests that it is an uninteresting artifact of
+ // measurement -- a stack frame pushed by the signal handler. The
+ // bottom frame is always correct as it is picked up from the signal
+ // structure, not the stack. Check if this is the case and if so,
+ // remove.
+ if len(p.Sample) > 1 && len(p.Sample[0].Location) > 1 {
+ allSame := true
+ id1 := p.Sample[0].Location[1].Address
+ for _, s := range p.Sample {
+ if len(s.Location) < 2 || id1 != s.Location[1].Address {
+ allSame = false
+ break
+ }
+ }
+ if allSame {
+ for _, s := range p.Sample {
+ s.Location = append(s.Location[:1], s.Location[2:]...)
+ }
+ }
+ }
+
+ if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+// The last stack trace is of the form:
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+ locs := make(map[uint64]*Location)
+ for len(b) > 0 {
+ var count, nstk uint64
+ count, b = parse(b)
+ nstk, b = parse(b)
+ if b == nil || nstk > uint64(len(b)/4) {
+ return nil, nil, errUnrecognized
+ }
+ var sloc []*Location
+ addrs := make([]uint64, nstk)
+ for i := 0; i < int(nstk); i++ {
+ addrs[i], b = parse(b)
+ }
+
+ if count == 0 && nstk == 1 && addrs[0] == 0 {
+ // End of data marker
+ break
+ }
+ for i, addr := range addrs {
+ if adjust && i > 0 {
+ addr--
+ }
+ loc := locs[addr]
+ if loc == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ locs[addr] = loc
+ p.Location = append(p.Location, loc)
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample,
+ &Sample{
+ Value: []int64{int64(count), int64(count) * p.Period},
+ Location: sloc,
+ })
+ }
+ // Reached the end without finding the EOD marker.
+ return b, locs, nil
+}
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+ r := bytes.NewBuffer(b)
+ l, err := r.ReadString('\n')
+ if err != nil {
+ return nil, errUnrecognized
+ }
+
+ sampling := ""
+
+ if header := heapHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "objects", Unit: "bytes"},
+ }
+
+ var period int64
+ if len(header[6]) > 0 {
+ if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+
+ switch header[5] {
+ case "heapz_v2", "heap_v2":
+ sampling, p.Period = "v2", period
+ case "heapprofile":
+ sampling, p.Period = "", 1
+ case "heap":
+ sampling, p.Period = "v2", period/2
+ default:
+ return nil, errUnrecognized
+ }
+ } else if header = growthHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "heapgrowth", Unit: "count"},
+ Period: 1,
+ }
+ } else if header = fragmentationHeaderRE.FindStringSubmatch(l); header != nil {
+ p = &Profile{
+ SampleType: []*ValueType{
+ {Type: "objects", Unit: "count"},
+ {Type: "space", Unit: "bytes"},
+ },
+ PeriodType: &ValueType{Type: "allocations", Unit: "count"},
+ Period: 1,
+ }
+ } else {
+ return nil, errUnrecognized
+ }
+
+ if LegacyHeapAllocated {
+ for _, st := range p.SampleType {
+ st.Type = "alloc_" + st.Type
+ }
+ } else {
+ for _, st := range p.SampleType {
+ st.Type = "inuse_" + st.Type
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ l, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if l == "" {
+ break
+ }
+ }
+
+ if isSpaceOrComment(l) {
+ continue
+ }
+ l = strings.TrimSpace(l)
+
+ if sectionTrigger(l) != unrecognizedSection {
+ break
+ }
+
+ value, blocksize, addrs, err := parseHeapSample(l, p.Period, sampling)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ NumLabel: map[string][]int64{"bytes": {blocksize}},
+ })
+ }
+
+ if err = parseAdditionalSections(l, r, p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+func parseHeapSample(line string, rate int64, sampling string) (value []int64, blocksize int64, addrs []uint64, err error) {
+ sampleData := heapSampleRE.FindStringSubmatch(line)
+ if len(sampleData) != 6 {
+ return value, blocksize, addrs, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+ }
+
+	// Use the first two values by default; tcmalloc sampling generates the
+	// same value for both, and only the older heap-profile format collects
+	// separate stats for in-use and allocated objects.
+ valueIndex := 1
+ if LegacyHeapAllocated {
+ valueIndex = 3
+ }
+
+ var v1, v2 int64
+ if v1, err = strconv.ParseInt(sampleData[valueIndex], 10, 64); err != nil {
+ return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ if v2, err = strconv.ParseInt(sampleData[valueIndex+1], 10, 64); err != nil {
+ return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ if v1 == 0 {
+ if v2 != 0 {
+ return value, blocksize, addrs, fmt.Errorf("allocation count was 0 but allocation bytes was %d", v2)
+ }
+ } else {
+ blocksize = v2 / v1
+ if sampling == "v2" {
+ v1, v2 = scaleHeapSample(v1, v2, rate)
+ }
+ }
+
+ value = []int64{v1, v2}
+ addrs = parseHexAddresses(sampleData[5])
+
+ return value, blocksize, addrs, nil
+}
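+
+// A heap sample row as matched by heapSampleRE; the numbers are
+// illustrative:
+//
+//	1: 262144 [4: 376832] @ 0x42e15a 0x42e1b0
+//
+// It reads as in-use objects and bytes, total allocated objects and
+// bytes in brackets, then the stack addresses.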
+
+// extractHexAddresses extracts hex numbers from a string and returns
+// them, together with their numeric value, in a slice.
+func extractHexAddresses(s string) ([]string, []uint64) {
+ hexStrings := hexNumberRE.FindAllString(s, -1)
+ var ids []uint64
+ for _, s := range hexStrings {
+ if id, err := strconv.ParseUint(s, 0, 64); err == nil {
+ ids = append(ids, id)
+ } else {
+ // Do not expect any parsing failures due to the regexp matching.
+ panic("failed to parse hex value:" + s)
+ }
+ }
+ return hexStrings, ids
+}
+
+// parseHexAddresses parses hex numbers from a string and returns them
+// in a slice.
+func parseHexAddresses(s string) []uint64 {
+ _, ids := extractHexAddresses(s)
+ return ids
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability that a sample of size S appears in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+ if count == 0 || size == 0 {
+ return 0, 0
+ }
+
+ if rate <= 1 {
+ // if rate==1 all samples were collected so no adjustment is needed.
+ // if rate<1 treat as unknown and skip scaling.
+ return count, size
+ }
+
+ avgSize := float64(size) / float64(count)
+ scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+ return int64(float64(count) * scale), int64(float64(size) * scale)
+}
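+
+// A worked example of the formula above, assuming rate R = 524288: a
+// sample of 2 objects totalling 1048576 bytes has avgSize equal to R,
+// so its collection probability is 1-exp(-1) ≈ 0.632 and both values
+// scale by ≈ 1.582:
+//
+//	n, sz := scaleHeapSample(2, 1048576, 524288)
+//	// n == 3, sz ≈ 1.66e6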
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+// This code converts the text output from the runtime into a *Profile. (In the
+// future the runtime might write a serialized Profile directly, making this
+// unnecessary.)
+func parseContention(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+ var l string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ l, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(l) {
+ break
+ }
+ }
+
+ if strings.HasPrefix(l, "--- contentionz ") {
+ return parseCppContention(r)
+ } else if strings.HasPrefix(l, "--- mutex:") {
+ return parseCppContention(r)
+ } else if strings.HasPrefix(l, "--- contention:") {
+ return parseCppContention(r)
+ }
+ return nil, errUnrecognized
+}
+
+// parseCppContention parses the output from synchronization_profiling.cc
+// for backward compatibility, and the compatible (non-debug) block profile
+// output from the Go runtime.
+func parseCppContention(r *bytes.Buffer) (*Profile, error) {
+ p := &Profile{
+ PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+ Period: 1,
+ SampleType: []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: "nanoseconds"},
+ },
+ }
+
+ var cpuHz int64
+ var l string
+ var err error
+ // Parse text of the form "attribute = value" before the samples.
+ const delimiter = "="
+ for {
+ l, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if l == "" {
+ break
+ }
+ }
+ if isSpaceOrComment(l) {
+ continue
+ }
+
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if strings.HasPrefix(l, "---") {
+ break
+ }
+
+ attr := strings.SplitN(l, delimiter, 2)
+ if len(attr) != 2 {
+ break
+ }
+ key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
+ var err error
+ switch key {
+ case "cycles/second":
+ if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "sampling period":
+ if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ case "ms since reset":
+ ms, err := strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ p.DurationNanos = ms * 1000 * 1000
+ case "format":
+ // CPP contentionz profiles don't have format.
+ return nil, errUnrecognized
+ case "resolution":
+ // CPP contentionz profiles don't have resolution.
+ return nil, errUnrecognized
+ case "discarded samples":
+ default:
+ return nil, errUnrecognized
+ }
+ }
+
+ locs := make(map[uint64]*Location)
+ for {
+ if !isSpaceOrComment(l) {
+ if l = strings.TrimSpace(l); strings.HasPrefix(l, "---") {
+ break
+ }
+ value, addrs, err := parseContentionSample(l, p.Period, cpuHz)
+ if err != nil {
+ return nil, err
+ }
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ p.Sample = append(p.Sample, &Sample{
+ Value: value,
+ Location: sloc,
+ })
+ }
+
+ if l, err = r.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ }
+
+ if err = parseAdditionalSections(l, r, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+ sampleData := contentionSampleRE.FindStringSubmatch(line)
+ if sampleData == nil {
+ return value, addrs, errUnrecognized
+ }
+
+ v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+ if err != nil {
+ return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+ v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+ if err != nil {
+ return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ // Unsample values if period and cpuHz are available.
+ // - Delays are scaled to cycles and then to nanoseconds.
+ // - Contentions are scaled to cycles.
+ if period > 0 {
+ if cpuHz > 0 {
+ cpuGHz := float64(cpuHz) / 1e9
+ v1 = int64(float64(v1) * float64(period) / cpuGHz)
+ }
+ v2 = v2 * period
+ }
+
+ value = []int64{v2, v1}
+ addrs = parseHexAddresses(sampleData[3])
+
+ return value, addrs, nil
+}
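+
+// A contention sample row as matched by contentionSampleRE; the numbers
+// are illustrative:
+//
+//	72 4096 @ 0x42e15a 0x42e1b0
+//
+// It reads as cycles of delay, then the contention count, then the
+// stack; given period and cpuHz, the delay is converted to nanoseconds
+// and both values are unsampled.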
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+ r := bytes.NewBuffer(b)
+
+ var line string
+ var err error
+ for {
+ // Skip past comments and empty lines seeking a real header.
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+ if !isSpaceOrComment(line) {
+ break
+ }
+ }
+
+ if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+ // Advance over initial comments until first stack trace.
+ for {
+ line, err = r.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return nil, err
+ }
+
+ if line == "" {
+ break
+ }
+ }
+ if sectionTrigger(line) != unrecognizedSection || line[0] == '-' {
+ break
+ }
+ }
+ } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ p := &Profile{
+ SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+ PeriodType: &ValueType{Type: "thread", Unit: "count"},
+ Period: 1,
+ }
+
+ locs := make(map[uint64]*Location)
+ // Recognize each thread and populate profile samples.
+ for sectionTrigger(line) == unrecognizedSection {
+ if strings.HasPrefix(line, "---- no stack trace for") {
+ line = ""
+ break
+ }
+ if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+ return nil, errUnrecognized
+ }
+
+ var addrs []uint64
+ line, addrs, err = parseThreadSample(r)
+ if err != nil {
+ return nil, errUnrecognized
+ }
+ if len(addrs) == 0 {
+ // We got a --same as previous threads--. Bump counters.
+ if len(p.Sample) > 0 {
+ s := p.Sample[len(p.Sample)-1]
+ s.Value[0]++
+ }
+ continue
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ // Addresses from stack traces point to the next instruction after
+ // each call. Adjust by -1 to land somewhere on the actual call.
+ addr--
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+
+ p.Sample = append(p.Sample, &Sample{
+ Value: []int64{1},
+ Location: sloc,
+ })
+ }
+
+ if err = parseAdditionalSections(line, r, p); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the addresses in the
+// sample (or nil if it hit a 'same as previous thread' marker) and an
+// error.
+func parseThreadSample(b *bytes.Buffer) (nextl string, addrs []uint64, err error) {
+ var l string
+ sameAsPrevious := false
+ for {
+ if l, err = b.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return "", nil, err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if strings.HasPrefix(l, "---") {
+ break
+ }
+ if strings.Contains(l, "same as previous thread") {
+ sameAsPrevious = true
+ continue
+ }
+
+ addrs = append(addrs, parseHexAddresses(l)...)
+ }
+
+ if sameAsPrevious {
+ return l, nil, nil
+ }
+ return l, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(l string, b *bytes.Buffer, p *Profile) (err error) {
+ for {
+ if sectionTrigger(l) == memoryMapSection {
+ break
+ }
+ // Ignore any unrecognized sections.
+		if l, err = b.ReadString('\n'); err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if l == "" {
+ break
+ }
+ }
+ }
+ return p.ParseMemoryMap(b)
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+ b := bufio.NewReader(rd)
+
+ var attrs []string
+ var r *strings.Replacer
+ const delimiter = "="
+ for {
+ l, err := b.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return err
+ }
+ if l == "" {
+ break
+ }
+ }
+ if l = strings.TrimSpace(l); l == "" {
+ continue
+ }
+
+ if r != nil {
+ l = r.Replace(l)
+ }
+ m, err := parseMappingEntry(l)
+ if err != nil {
+ if err == errUnrecognized {
+ // Recognize assignments of the form: attr=value, and replace
+ // $attr with value on subsequent mappings.
+ if attr := strings.SplitN(l, delimiter, 2); len(attr) == 2 {
+ attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+ r = strings.NewReplacer(attrs...)
+ }
+ // Ignore any unrecognized entries
+ continue
+ }
+ return err
+ }
+ if m == nil || (m.File == "" && len(p.Mapping) != 0) {
+ // In some cases the first entry may include the address range
+ // but not the name of the file. It should be followed by
+ // another entry with the name.
+ continue
+ }
+ if len(p.Mapping) == 1 && p.Mapping[0].File == "" {
+ // Update the name if this is the entry following that empty one.
+ p.Mapping[0].File = m.File
+ continue
+ }
+ p.Mapping = append(p.Mapping, m)
+ }
+ p.remapLocationIDs()
+ p.remapFunctionIDs()
+ p.remapMappingIDs()
+ return nil
+}
+
+func parseMappingEntry(l string) (*Mapping, error) {
+ mapping := &Mapping{}
+ var err error
+ if me := procMapsRE.FindStringSubmatch(l); len(me) == 9 {
+ if !strings.Contains(me[3], "x") {
+ // Skip non-executable entries.
+ return nil, nil
+ }
+ if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if me[4] != "" {
+ if mapping.Offset, err = strconv.ParseUint(me[4], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ mapping.File = me[8]
+ return mapping, nil
+ }
+
+ if me := briefMapsRE.FindStringSubmatch(l); len(me) == 6 {
+ if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ mapping.File = me[3]
+ if me[5] != "" {
+ if mapping.Offset, err = strconv.ParseUint(me[5], 16, 64); err != nil {
+ return nil, errUnrecognized
+ }
+ }
+ return mapping, nil
+ }
+
+ return nil, errUnrecognized
+}
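+
+// The two mapping formats accepted above, per procMapsRE and
+// briefMapsRE; the values are illustrative:
+//
+//	00400000-00d07000 r-xp 00000000 fd:01 1572746    /usr/bin/app
+//	00400000-00d07000: /usr/bin/app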
+
+type sectionType int
+
+const (
+ unrecognizedSection sectionType = iota
+ memoryMapSection
+)
+
+var memoryMapTriggers = []string{
+ "--- Memory map: ---",
+ "MAPPED_LIBRARIES:",
+}
+
+func sectionTrigger(line string) sectionType {
+ for _, trigger := range memoryMapTriggers {
+ if strings.Contains(line, trigger) {
+ return memoryMapSection
+ }
+ }
+ return unrecognizedSection
+}
+
+func (p *Profile) addLegacyFrameInfo() {
+ switch {
+ case isProfileType(p, heapzSampleTypes) ||
+ isProfileType(p, heapzInUseSampleTypes) ||
+ isProfileType(p, heapzAllocSampleTypes):
+ p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+ case isProfileType(p, contentionzSampleTypes):
+ p.DropFrames, p.KeepFrames = lockRxStr, ""
+ default:
+ p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+ }
+}
+
+var heapzSampleTypes = []string{"allocations", "size"} // early Go pprof profiles
+var heapzInUseSampleTypes = []string{"inuse_objects", "inuse_space"}
+var heapzAllocSampleTypes = []string{"alloc_objects", "alloc_space"}
+var contentionzSampleTypes = []string{"contentions", "delay"}
+
+func isProfileType(p *Profile, t []string) bool {
+ st := p.SampleType
+ if len(st) != len(t) {
+ return false
+ }
+
+ for i := range st {
+ if st[i].Type != t[i] {
+ return false
+ }
+ }
+ return true
+}
+
+var allocRxStr = strings.Join([]string{
+ // POSIX entry points.
+ `calloc`,
+ `cfree`,
+ `malloc`,
+ `free`,
+ `memalign`,
+ `do_memalign`,
+ `(__)?posix_memalign`,
+ `pvalloc`,
+ `valloc`,
+ `realloc`,
+
+ // TC malloc.
+ `tcmalloc::.*`,
+ `tc_calloc`,
+ `tc_cfree`,
+ `tc_malloc`,
+ `tc_free`,
+ `tc_memalign`,
+ `tc_posix_memalign`,
+ `tc_pvalloc`,
+ `tc_valloc`,
+ `tc_realloc`,
+ `tc_new`,
+ `tc_delete`,
+ `tc_newarray`,
+ `tc_deletearray`,
+ `tc_new_nothrow`,
+ `tc_newarray_nothrow`,
+
+ // Memory-allocation routines on OS X.
+ `malloc_zone_malloc`,
+ `malloc_zone_calloc`,
+ `malloc_zone_valloc`,
+ `malloc_zone_realloc`,
+ `malloc_zone_memalign`,
+ `malloc_zone_free`,
+
+ // Go runtime
+ `runtime\..*`,
+
+ // Other misc. memory allocation routines
+ `BaseArena::.*`,
+ `(::)?do_malloc_no_errno`,
+ `(::)?do_malloc_pages`,
+ `(::)?do_malloc`,
+ `DoSampledAllocation`,
+ `MallocedMemBlock::MallocedMemBlock`,
+ `_M_allocate`,
+ `__builtin_(vec_)?delete`,
+ `__builtin_(vec_)?new`,
+ `__gnu_cxx::new_allocator::allocate`,
+ `__libc_malloc`,
+ `__malloc_alloc_template::allocate`,
+ `allocate`,
+ `cpp_alloc`,
+ `operator new(\[\])?`,
+ `simple_alloc::allocate`,
+}, `|`)
+
+var allocSkipRxStr = strings.Join([]string{
+ // Preserve Go runtime frames that appear in the middle/bottom of
+ // the stack.
+ `runtime\.panic`,
+ `runtime\.reflectcall`,
+ `runtime\.call[0-9]*`,
+}, `|`)
+
+var cpuProfilerRxStr = strings.Join([]string{
+ `ProfileData::Add`,
+ `ProfileData::prof_handler`,
+ `CpuProfiler::prof_handler`,
+ `__pthread_sighandler`,
+ `__restore`,
+}, `|`)
+
+var lockRxStr = strings.Join([]string{
+ `RecordLockProfileData`,
+ `(base::)?RecordLockProfileData.*`,
+ `(base::)?SubmitMutexProfileData.*`,
+ `(base::)?SubmitSpinLockProfileData.*`,
+ `(Mutex::)?AwaitCommon.*`,
+ `(Mutex::)?Unlock.*`,
+ `(Mutex::)?UnlockSlow.*`,
+ `(Mutex::)?ReaderUnlock.*`,
+ `(MutexLock::)?~MutexLock.*`,
+ `(SpinLock::)?Unlock.*`,
+ `(SpinLock::)?SlowUnlock.*`,
+ `(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)
diff --git a/libgo/go/runtime/pprof/internal/profile/profile.go b/libgo/go/runtime/pprof/internal/profile/profile.go
new file mode 100644
index 00000000000..9b6a6f9aa9b
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/profile.go
@@ -0,0 +1,575 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+//
+// This package is only for testing runtime/pprof.
+// It is not used by production Go programs.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "regexp"
+ "strings"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ Label map[string][]string
+ NumLabel map[string][]int64
+
+ locationIDX []uint64
+ labelX []Label
+}
+
+// Label corresponds to Profile.Label
+type Label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ fileX int64
+ buildIDX int64
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ orig, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ var p *Profile
+ if len(orig) >= 2 && orig[0] == 0x1f && orig[1] == 0x8b {
+ gz, err := gzip.NewReader(bytes.NewBuffer(orig))
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ data, err := ioutil.ReadAll(gz)
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ orig = data
+ }
+ if p, err = parseUncompressed(orig); err != nil {
+ if p, err = parseLegacy(orig); err != nil {
+ return nil, fmt.Errorf("parsing profile: %v", err)
+ }
+ }
+
+ if err := p.CheckValid(); err != nil {
+ return nil, fmt.Errorf("malformed profile: %v", err)
+ }
+ return p, nil
+}
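+
+// A minimal usage sketch for Parse; the file name is illustrative, and
+// since this package is internal such a caller must live inside
+// runtime/pprof:
+//
+//	f, err := os.Open("cpu.pprof")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//	p, err := Parse(f)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(len(p.Sample), "samples")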
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.setMain()
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
+func parseUncompressed(data []byte) (*Profile, error) {
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// setMain scans Mapping entries and guesses which entry is main
+// because legacy profiles don't obey the convention of putting main
+// first.
+func (p *Profile) setMain() {
+ for i := 0; i < len(p.Mapping); i++ {
+ file := strings.TrimSpace(strings.Replace(p.Mapping[i].File, "(deleted)", "", -1))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ if strings.HasPrefix(file, "[") {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ tmp := p.Mapping[i]
+ p.Mapping[i] = p.Mapping[0]
+ p.Mapping[0] = tmp
+ break
+ }
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ p.preEncode()
+ b := marshal(p)
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(b)
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+// - len(Profile.Sample[n].Value) == len(Profile.SampleType)
+// - every Mapping, Function, and Location ID is unique, nonzero, and
+//   refers to an entry in the corresponding table
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ if f := ln.Function; f != nil {
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber {
+ for _, l := range p.Location {
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// String returns a text representation of a profile. It is intended
+// mainly for debugging purposes.
+func (p *Profile) String() string {
+
+ ss := make([]string, 0, len(p.Sample)+len(p.Mapping)+len(p.Location))
+ if pt := p.PeriodType; pt != nil {
+ ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+ }
+ ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+ if p.TimeNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+ }
+ if p.DurationNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Duration: %v", time.Duration(p.DurationNanos)))
+ }
+
+ ss = append(ss, "Samples:")
+ var sh1 string
+ for _, s := range p.SampleType {
+ sh1 = sh1 + fmt.Sprintf("%s/%s ", s.Type, s.Unit)
+ }
+ ss = append(ss, strings.TrimSpace(sh1))
+ for _, s := range p.Sample {
+ var sv string
+ for _, v := range s.Value {
+ sv = fmt.Sprintf("%s %10d", sv, v)
+ }
+ sv = sv + ": "
+ for _, l := range s.Location {
+ sv = sv + fmt.Sprintf("%d ", l.ID)
+ }
+ ss = append(ss, sv)
+ const labelHeader = " "
+ if len(s.Label) > 0 {
+ ls := labelHeader
+ for k, v := range s.Label {
+ ls = ls + fmt.Sprintf("%s:%v ", k, v)
+ }
+ ss = append(ss, ls)
+ }
+ if len(s.NumLabel) > 0 {
+ ls := labelHeader
+ for k, v := range s.NumLabel {
+ ls = ls + fmt.Sprintf("%s:%v ", k, v)
+ }
+ ss = append(ss, ls)
+ }
+ }
+
+ ss = append(ss, "Locations")
+ for _, l := range p.Location {
+ locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+ if m := l.Mapping; m != nil {
+ locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+ }
+ if len(l.Line) == 0 {
+ ss = append(ss, locStr)
+ }
+ for li := range l.Line {
+ lnStr := "??"
+ if fn := l.Line[li].Function; fn != nil {
+ lnStr = fmt.Sprintf("%s %s:%d s=%d",
+ fn.Name,
+ fn.Filename,
+ l.Line[li].Line,
+ fn.StartLine)
+ if fn.Name != fn.SystemName {
+ lnStr = lnStr + "(" + fn.SystemName + ")"
+ }
+ }
+ ss = append(ss, locStr+lnStr)
+ // Do not print location details past the first line
+ locStr = " "
+ }
+ }
+
+ ss = append(ss, "Mappings")
+ for _, m := range p.Mapping {
+ bits := ""
+ if m.HasFunctions {
+ bits = bits + "[FN]"
+ }
+ if m.HasFilenames {
+ bits = bits + "[FL]"
+ }
+ if m.HasLineNumbers {
+ bits = bits + "[LN]"
+ }
+ if m.HasInlineFrames {
+ bits = bits + "[IN]"
+ }
+ ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+ m.ID,
+ m.Start, m.Limit, m.Offset,
+ m.File,
+ m.BuildID,
+ bits))
+ }
+
+ return strings.Join(ss, "\n") + "\n"
+}
+
+// Merge adds profile pb, adjusted by ratio r, into profile p. Profiles
+// must be compatible (same Type and SampleType).
+// TODO(rsilvera): consider normalizing the profiles based on the
+// total samples collected.
+func (p *Profile) Merge(pb *Profile, r float64) error {
+ if err := p.Compatible(pb); err != nil {
+ return err
+ }
+
+ pb = pb.Copy()
+
+ // Keep the largest of the two periods.
+ if pb.Period > p.Period {
+ p.Period = pb.Period
+ }
+
+ p.DurationNanos += pb.DurationNanos
+
+ p.Mapping = append(p.Mapping, pb.Mapping...)
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+ p.Location = append(p.Location, pb.Location...)
+ for i, l := range p.Location {
+ l.ID = uint64(i + 1)
+ }
+ p.Function = append(p.Function, pb.Function...)
+ for i, f := range p.Function {
+ f.ID = uint64(i + 1)
+ }
+
+ if r != 1.0 {
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ s.Value[i] = int64((float64(v) * r))
+ }
+ }
+ }
+ p.Sample = append(p.Sample, pb.Sample...)
+ return p.CheckValid()
+}
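+
+// A minimal sketch of use, assuming p1 and p2 are compatible profiles
+// already obtained from Parse: fold p2 into p1 at full weight.
+//
+//	if err := p1.Merge(p2, 1.0); err != nil {
+//		log.Fatal(err)
+//	}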
+
+// Compatible determines if two profiles can be compared/merged.
+// It returns nil if the profiles are compatible; otherwise it returns
+// an error with details on the incompatibility.
+func (p *Profile) Compatible(pb *Profile) error {
+ if !compatibleValueTypes(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !compatibleValueTypes(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+
+ return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping == nil || !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping == nil || (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+func compatibleValueTypes(v1, v2 *ValueType) bool {
+ if v1 == nil || v2 == nil {
+ return true // No grounds to disqualify.
+ }
+ return v1.Type == v2.Type && v1.Unit == v2.Unit
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+ p.preEncode()
+ b := marshal(p)
+
+ pp := &Profile{}
+ if err := unmarshal(b, pp); err != nil {
+ panic(err)
+ }
+ if err := pp.postDecode(); err != nil {
+ panic(err)
+ }
+
+ return pp
+}
+
+// Demangler maps symbol names to a human-readable form. This may
+// include C++ demangling and additional simplification. Names that
+// are not demangled may be missing from the resulting map.
+type Demangler func(name []string) (map[string]string, error)
+
+// Demangle attempts to demangle and optionally simplify any function
+// names referenced in the profile. It works on a best-effort basis:
+// it will silently preserve the original names in case of any errors.
+func (p *Profile) Demangle(d Demangler) error {
+ // Collect names to demangle.
+ var names []string
+ for _, fn := range p.Function {
+ names = append(names, fn.SystemName)
+ }
+
+ // Update profile with demangled names.
+ demangled, err := d(names)
+ if err != nil {
+ return err
+ }
+ for _, fn := range p.Function {
+ if dd, ok := demangled[fn.SystemName]; ok {
+ fn.Name = dd
+ }
+ }
+ return nil
+}
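+
+// A minimal Demangler sketch, assuming a hypothetical "mangled_" name
+// prefix; a real demangler (e.g. for C++) would follow the same shape:
+//
+//	d := Demangler(func(names []string) (map[string]string, error) {
+//		m := make(map[string]string)
+//		for _, n := range names {
+//			if strings.HasPrefix(n, "mangled_") {
+//				m[n] = strings.TrimPrefix(n, "mangled_")
+//			}
+//		}
+//		return m, nil
+//	})
+//	err := p.Demangle(d)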
+
+// Empty returns true if the profile contains no samples.
+func (p *Profile) Empty() bool {
+ return len(p.Sample) == 0
+}
diff --git a/libgo/go/runtime/pprof/internal/profile/profile_test.go b/libgo/go/runtime/pprof/internal/profile/profile_test.go
new file mode 100644
index 00000000000..e1963f33515
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/profile_test.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package profile
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestEmptyProfile(t *testing.T) {
+ var buf bytes.Buffer
+ p, err := Parse(&buf)
+ if err != nil {
+ t.Error("Want no error, got", err)
+ }
+ if p == nil {
+ t.Fatal("Want a valid profile, got ")
+ }
+ if !p.Empty() {
+ t.Errorf("Profile should be empty, got %#v", p)
+ }
+}
+
+func TestParseContention(t *testing.T) {
+ tests := []struct {
+ name string
+ in string
+ wantErr bool
+ }{
+ {
+ name: "valid",
+ in: `--- mutex:
+cycles/second=3491920901
+sampling period=1
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31
+34035731690 15760 @ 0x45e851 0x45f764 0x4a2b17 0x44ea31
+`,
+ },
+ {
+ name: "valid with comment",
+ in: `--- mutex:
+cycles/second=3491920901
+sampling period=1
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31
+# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126
+# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125
+# 0x4a2be0 main.main.func3+0x70 /go/src/internal/pprof/profile/a_binary.go:58
+
+34035731690 15760 @ 0x45e851 0x45f764 0x4a2b17 0x44ea31
+# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126
+# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125
+# 0x4a2b16 main.main.func2+0xd6 /go/src/internal/pprof/profile/a_binary.go:48
+`,
+ },
+ {
+ name: "empty",
+ in: `--- mutex:`,
+ wantErr: true,
+ },
+ {
+ name: "invalid header",
+ in: `--- channel:
+43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31`,
+ wantErr: true,
+ },
+ }
+ for _, tc := range tests {
+ _, err := parseContention([]byte(tc.in))
+ if tc.wantErr && err == nil {
+ t.Errorf("parseContention(%q) succeeded unexpectedly", tc.name)
+ }
+ if !tc.wantErr && err != nil {
+ t.Errorf("parseContention(%q) failed unexpectedly: %v", tc.name, err)
+ }
+ }
+
+}
diff --git a/libgo/go/runtime/pprof/internal/profile/proto.go b/libgo/go/runtime/pprof/internal/profile/proto.go
new file mode 100644
index 00000000000..11d7f9ff9b3
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/proto.go
@@ -0,0 +1,360 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a simple protocol buffer encoder and decoder.
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import "errors"
+
+type buffer struct {
+ field int
+ typ int
+ u64 uint64
+ data []byte
+ tmp [16]byte
+}
+
+type decoder func(*buffer, message) error
+
+type message interface {
+ decoder() []decoder
+ encode(*buffer)
+}
+
+func marshal(m message) []byte {
+ var b buffer
+ m.encode(&b)
+ return b.data
+}
+
+func encodeVarint(b *buffer, x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
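+
+// For example, encodeVarint emits 300 (binary 100101100) as the two
+// bytes 0xac 0x02: the low seven bits (0101100) with the continuation
+// bit set, followed by the remaining bits (10).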
+
+func encodeLength(b *buffer, tag int, len int) {
+ encodeVarint(b, uint64(tag)<<3|2)
+ encodeVarint(b, uint64(len))
+}
+
+func encodeUint64(b *buffer, tag int, x uint64) {
+ // append varint to b.data
+ encodeVarint(b, uint64(tag)<<3|0)
+ encodeVarint(b, x)
+}
+
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, u)
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeUint64(b, tag, u)
+ }
+}
+
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ encodeUint64(b, tag, x)
+}
+
+func encodeInt64(b *buffer, tag int, x int64) {
+ u := uint64(x)
+ encodeUint64(b, tag, u)
+}
+
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ encodeInt64(b, tag, x)
+}
+
+func encodeInt64s(b *buffer, tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, uint64(u))
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeInt64(b, tag, u)
+ }
+}
+
+func encodeString(b *buffer, tag int, x string) {
+ encodeLength(b, tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func encodeStrings(b *buffer, tag int, x []string) {
+ for _, s := range x {
+ encodeString(b, tag, s)
+ }
+}
+
+func encodeStringOpt(b *buffer, tag int, x string) {
+ if x == "" {
+ return
+ }
+ encodeString(b, tag, x)
+}
+
+func encodeBool(b *buffer, tag int, x bool) {
+ if x {
+ encodeUint64(b, tag, 1)
+ } else {
+ encodeUint64(b, tag, 0)
+ }
+}
+
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+ if !x {
+ return
+ }
+ encodeBool(b, tag, x)
+}
+
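+// encodeMessage appends m as a length-delimited field: the body is
+// encoded first, the tag/length header is appended after it, and the
+// bytes are then rotated through b.tmp so the header lands in front of
+// the body. encodeUint64s and encodeInt64s use the same splice for
+// packed encodings.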
+func encodeMessage(b *buffer, tag int, m message) {
+ n1 := len(b.data)
+ m.encode(b)
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+func unmarshal(data []byte, m message) (err error) {
+ b := buffer{data: data, typ: 2}
+ return decodeMessage(&b, m)
+}
+
+func le64(p []byte) uint64 {
+ return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func le32(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+func decodeVarint(data []byte) (uint64, []byte, error) {
+ var i int
+ var u uint64
+ for i = 0; ; i++ {
+ if i >= 10 || i >= len(data) {
+ return 0, nil, errors.New("bad varint")
+ }
+ u |= uint64(data[i]&0x7F) << uint(7*i)
+ if data[i]&0x80 == 0 {
+ return u, data[i+1:], nil
+ }
+ }
+}
+
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+ x, data, err := decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ b.field = int(x >> 3)
+ b.typ = int(x & 7)
+ b.data = nil
+ b.u64 = 0
+ switch b.typ {
+ case 0:
+ b.u64, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ case 1:
+ if len(data) < 8 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = le64(data[:8])
+ data = data[8:]
+ case 2:
+ var n uint64
+ n, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ if n > uint64(len(data)) {
+ return nil, errors.New("too much data")
+ }
+ b.data = data[:n]
+ data = data[n:]
+ case 5:
+ if len(data) < 4 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = uint64(le32(data[:4]))
+ data = data[4:]
+ default:
+ return nil, errors.New("unknown type: " + strconv.Itoa(b.typ))
+ }
+
+ return data, nil
+}
+
+func checkType(b *buffer, typ int) error {
+ if b.typ != typ {
+ return errors.New("type mismatch")
+ }
+ return nil
+}
+
+func decodeMessage(b *buffer, m message) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ dec := m.decoder()
+ data := b.data
+ for len(data) > 0 {
+ // pull varint field# + type
+ var err error
+ data, err = decodeField(b, data)
+ if err != nil {
+ return err
+ }
+ if b.field >= len(dec) || dec[b.field] == nil {
+ continue
+ }
+ if err := dec[b.field](b, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeInt64(b *buffer, x *int64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = int64(b.u64)
+ return nil
+}
+
+func decodeInt64s(b *buffer, x *[]int64) error {
+ if b.typ == 2 {
+ // Packed encoding
+ data := b.data
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, int64(u))
+ }
+ return nil
+ }
+ var i int64
+ if err := decodeInt64(b, &i); err != nil {
+ return err
+ }
+ *x = append(*x, i)
+ return nil
+}
+
+func decodeUint64(b *buffer, x *uint64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64
+ return nil
+}
+
+func decodeUint64s(b *buffer, x *[]uint64) error {
+ if b.typ == 2 {
+ data := b.data
+ // Packed encoding
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ }
+ return nil
+ }
+ var u uint64
+ if err := decodeUint64(b, &u); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ *x = string(b.data)
+ return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+ var s string
+ if err := decodeString(b, &s); err != nil {
+ return err
+ }
+ *x = append(*x, s)
+ return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64 != 0
+ return nil
+}
diff --git a/libgo/go/runtime/pprof/internal/profile/proto_test.go b/libgo/go/runtime/pprof/internal/profile/proto_test.go
new file mode 100644
index 00000000000..c2613fc375a
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/proto_test.go
@@ -0,0 +1,67 @@
+package profile
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestPackedEncoding(t *testing.T) {
+
+ type testcase struct {
+ uint64s []uint64
+ int64s []int64
+ encoded []byte
+ }
+ for i, tc := range []testcase{
+ {
+ []uint64{0, 1, 10, 100, 1000, 10000},
+ []int64{1000, 0, 1000},
+ []byte{10, 8, 0, 1, 10, 100, 232, 7, 144, 78, 18, 5, 232, 7, 0, 232, 7},
+ },
+ {
+ []uint64{10000},
+ nil,
+ []byte{8, 144, 78},
+ },
+ {
+ nil,
+ []int64{-10000},
+ []byte{16, 240, 177, 255, 255, 255, 255, 255, 255, 255, 1},
+ },
+ } {
+ source := &packedInts{tc.uint64s, tc.int64s}
+ if got, want := marshal(source), tc.encoded; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed encode %d, got %v, want %v", i, got, want)
+ }
+
+ dest := new(packedInts)
+ if err := unmarshal(tc.encoded, dest); err != nil {
+ t.Errorf("failed decode %d: %v", i, err)
+ continue
+ }
+ if got, want := dest.uint64s, tc.uint64s; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed decode uint64s %d, got %v, want %v", i, got, want)
+ }
+ if got, want := dest.int64s, tc.int64s; !reflect.DeepEqual(got, want) {
+ t.Errorf("failed decode int64s %d, got %v, want %v", i, got, want)
+ }
+ }
+}
+
+type packedInts struct {
+ uint64s []uint64
+ int64s []int64
+}
+
+func (u *packedInts) decoder() []decoder {
+ return []decoder{
+ nil,
+ func(b *buffer, m message) error { return decodeUint64s(b, &m.(*packedInts).uint64s) },
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*packedInts).int64s) },
+ }
+}
+
+func (u *packedInts) encode(b *buffer) {
+ encodeUint64s(b, 1, u.uint64s)
+ encodeInt64s(b, 2, u.int64s)
+}
diff --git a/libgo/go/runtime/pprof/internal/profile/prune.go b/libgo/go/runtime/pprof/internal/profile/prune.go
new file mode 100644
index 00000000000..1924fada7a5
--- /dev/null
+++ b/libgo/go/runtime/pprof/internal/profile/prune.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+)
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+ prune := make(map[uint64]bool)
+ pruneBeneath := make(map[uint64]bool)
+
+ for _, loc := range p.Location {
+ var i int
+ for i = len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ funcName := fn.Name
+ // Account for leading '.' on the PPC ELF v1 ABI.
+ if funcName[0] == '.' {
+ funcName = funcName[1:]
+ }
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ break
+ }
+ }
+ }
+ }
+
+ if i >= 0 {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+
+ // Remove the matching location.
+ if i == len(loc.Line)-1 {
+ // Matched the top entry: prune the whole location.
+ prune[loc.ID] = true
+ } else {
+ loc.Line = loc.Line[i+1:]
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the root to the leaves to find the prune location.
+ // Do not prune frames before the first user frame, to avoid
+ // pruning everything.
+ foundUser := false
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ id := sample.Location[i].ID
+ if !prune[id] && !pruneBeneath[id] {
+ foundUser = true
+ continue
+ }
+ if !foundUser {
+ continue
+ }
+ if prune[id] {
+ sample.Location = sample.Location[i+1:]
+ break
+ }
+ if pruneBeneath[id] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+func (p *Profile) RemoveUninteresting() error {
+ var keep, drop *regexp.Regexp
+ var err error
+
+ if p.DropFrames != "" {
+ if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+ }
+ if p.KeepFrames != "" {
+ if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+ }
+ }
+ p.Prune(drop, keep)
+ }
+ return nil
+}
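+
+// A hedged sketch of use, with hypothetical frame-name patterns: drop
+// all runtime frames from each stack except runtime.mallocgc.
+//
+//	p.DropFrames = `runtime\..*`
+//	p.KeepFrames = `runtime\.mallocgc`
+//	if err := p.RemoveUninteresting(); err != nil {
+//		log.Fatal(err)
+//	}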
diff --git a/libgo/go/runtime/pprof/internal/protopprof/protomemprofile.go b/libgo/go/runtime/pprof/internal/protopprof/protomemprofile.go
deleted file mode 100644
index c2ab5b57025..00000000000
--- a/libgo/go/runtime/pprof/internal/protopprof/protomemprofile.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protopprof
-
-import (
- "internal/pprof/profile"
- "math"
- "runtime"
- "time"
-)
-
-// EncodeMemProfile converts MemProfileRecords to a Profile.
-func EncodeMemProfile(mr []runtime.MemProfileRecord, rate int64, t time.Time) *profile.Profile {
- p := &profile.Profile{
- Period: rate,
- PeriodType: &profile.ValueType{Type: "space", Unit: "bytes"},
- SampleType: []*profile.ValueType{
- {Type: "alloc_objects", Unit: "count"},
- {Type: "alloc_space", Unit: "bytes"},
- {Type: "inuse_objects", Unit: "count"},
- {Type: "inuse_space", Unit: "bytes"},
- },
- TimeNanos: int64(t.UnixNano()),
- }
-
- locs := make(map[uintptr]*profile.Location)
- for _, r := range mr {
- stack := r.Stack()
- sloc := make([]*profile.Location, len(stack))
- for i, addr := range stack {
- loc := locs[addr]
- if loc == nil {
- loc = &profile.Location{
- ID: uint64(len(p.Location) + 1),
- Address: uint64(addr),
- }
- locs[addr] = loc
- p.Location = append(p.Location, loc)
- }
- sloc[i] = loc
- }
-
- ao, ab := scaleHeapSample(r.AllocObjects, r.AllocBytes, rate)
- uo, ub := scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate)
-
- p.Sample = append(p.Sample, &profile.Sample{
- Value: []int64{ao, ab, uo, ub},
- Location: sloc,
- })
- }
- if runtime.GOOS == "linux" {
- addMappings(p)
- }
- return p
-}
-
-// scaleHeapSample adjusts the data from a heap Sample to
-// account for its probability of appearing in the collected
-// data. heap profiles are a sampling of the memory allocations
-// requests in a program. We estimate the unsampled value by dividing
-// each collected sample by its probability of appearing in the
-// profile. heap profiles rely on a poisson process to determine
-// which samples to collect, based on the desired average collection
-// rate R. The probability of a sample of size S to appear in that
-// profile is 1-exp(-S/R).
-func scaleHeapSample(count, size, rate int64) (int64, int64) {
- if count == 0 || size == 0 {
- return 0, 0
- }
-
- if rate <= 1 {
- // if rate==1 all samples were collected so no adjustment is needed.
- // if rate<1 treat as unknown and skip scaling.
- return count, size
- }
-
- avgSize := float64(size) / float64(count)
- scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
-
- return int64(float64(count) * scale), int64(float64(size) * scale)
-}
diff --git a/libgo/go/runtime/pprof/internal/protopprof/protomemprofile_test.go b/libgo/go/runtime/pprof/internal/protopprof/protomemprofile_test.go
deleted file mode 100644
index a10fe772ccf..00000000000
--- a/libgo/go/runtime/pprof/internal/protopprof/protomemprofile_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protopprof
-
-import (
- "bytes"
- "internal/pprof/profile"
- "io/ioutil"
- "reflect"
- "runtime"
- "testing"
- "time"
-)
-
-// TestSampledHeapAllocProfile tests encoding of a memory profile from
-// runtime.MemProfileRecord data.
-func TestSampledHeapAllocProfile(t *testing.T) {
- if runtime.GOOS != "linux" {
- t.Skip("Test requires a system with /proc/self/maps")
- }
-
- // Figure out two addresses from /proc/self/maps.
- mmap, err := ioutil.ReadFile("/proc/self/maps")
- if err != nil {
- t.Fatal("Cannot read /proc/self/maps")
- }
- rd := bytes.NewReader(mmap)
- mprof := &profile.Profile{}
- if err = mprof.ParseMemoryMap(rd); err != nil {
- t.Fatalf("Cannot parse /proc/self/maps")
- }
- if len(mprof.Mapping) < 2 {
- // It is possible for a binary to only have 1 executable
- // region of memory.
- t.Skipf("need 2 or more mappings, got %v", len(mprof.Mapping))
- }
- address1 := mprof.Mapping[0].Start
- address2 := mprof.Mapping[1].Start
-
- var buf bytes.Buffer
-
- rec, rate := testMemRecords(address1, address2)
- p := EncodeMemProfile(rec, rate, time.Now())
- if err := p.Write(&buf); err != nil {
- t.Fatalf("Failed to write profile: %v", err)
- }
-
- p, err = profile.Parse(&buf)
- if err != nil {
- t.Fatalf("Could not parse Profile profile: %v", err)
- }
-
- // Expected PeriodType, SampleType and Sample.
- expectedPeriodType := &profile.ValueType{Type: "space", Unit: "bytes"}
- expectedSampleType := []*profile.ValueType{
- {Type: "alloc_objects", Unit: "count"},
- {Type: "alloc_space", Unit: "bytes"},
- {Type: "inuse_objects", Unit: "count"},
- {Type: "inuse_space", Unit: "bytes"},
- }
- // Expected samples, with values unsampled according to the profiling rate.
- expectedSample := []*profile.Sample{
- {Value: []int64{2050, 2099200, 1537, 1574400}, Location: []*profile.Location{
- {ID: 1, Mapping: mprof.Mapping[0], Address: address1},
- {ID: 2, Mapping: mprof.Mapping[1], Address: address2},
- }},
- {Value: []int64{1, 829411, 1, 829411}, Location: []*profile.Location{
- {ID: 3, Mapping: mprof.Mapping[1], Address: address2 + 1},
- {ID: 4, Mapping: mprof.Mapping[1], Address: address2 + 2},
- }},
- {Value: []int64{1, 829411, 0, 0}, Location: []*profile.Location{
- {ID: 5, Mapping: mprof.Mapping[0], Address: address1 + 1},
- {ID: 6, Mapping: mprof.Mapping[0], Address: address1 + 2},
- {ID: 7, Mapping: mprof.Mapping[1], Address: address2 + 3},
- }},
- }
-
- if p.Period != 512*1024 {
- t.Fatalf("Sampling periods do not match")
- }
- if !reflect.DeepEqual(p.PeriodType, expectedPeriodType) {
- t.Fatalf("Period types do not match")
- }
- if !reflect.DeepEqual(p.SampleType, expectedSampleType) {
- t.Fatalf("Sample types do not match")
- }
- if !reflect.DeepEqual(p.Sample, expectedSample) {
- t.Fatalf("Samples do not match: Expected: %v, Got:%v", getSampleAsString(expectedSample),
- getSampleAsString(p.Sample))
- }
-}
-
-func testMemRecords(a1, a2 uint64) ([]runtime.MemProfileRecord, int64) {
- addr1, addr2 := uintptr(a1), uintptr(a2)
- rate := int64(512 * 1024)
- rec := []runtime.MemProfileRecord{
- {4096, 1024, 4, 1, [32]uintptr{addr1, addr2}},
- {512 * 1024, 0, 1, 0, [32]uintptr{addr2 + 1, addr2 + 2}},
- {512 * 1024, 512 * 1024, 1, 1, [32]uintptr{addr1 + 1, addr1 + 2, addr2 + 3}},
- }
- return rec, rate
-}
diff --git a/libgo/go/runtime/pprof/internal/protopprof/protopprof.go b/libgo/go/runtime/pprof/internal/protopprof/protopprof.go
deleted file mode 100644
index 5d269c4f652..00000000000
--- a/libgo/go/runtime/pprof/internal/protopprof/protopprof.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package protopprof converts the runtime's raw profile logs
-// to Profile structs containing a representation of the pprof
-// protocol buffer profile format.
-package protopprof
-
-import (
- "fmt"
- "os"
- "runtime"
- "time"
- "unsafe"
-
- "internal/pprof/profile"
-)
-
-// TranslateCPUProfile parses binary CPU profiling stack trace data
-// generated by runtime.CPUProfile() into a profile struct.
-func TranslateCPUProfile(b []byte, startTime time.Time) (*profile.Profile, error) {
- const wordSize = unsafe.Sizeof(uintptr(0))
- const minRawProfile = 5 * wordSize // Need a minimum of 5 words.
- if uintptr(len(b)) < minRawProfile {
- return nil, fmt.Errorf("truncated profile")
- }
- n := int(uintptr(len(b)) / wordSize)
- data := ((*[1 << 28]uintptr)(unsafe.Pointer(&b[0])))[:n:n]
- period := data[3]
- data = data[5:] // skip header
-
- // profile initialization taken from pprof tool
- p := &profile.Profile{
- Period: int64(period) * 1000,
- PeriodType: &profile.ValueType{Type: "cpu", Unit: "nanoseconds"},
- SampleType: []*profile.ValueType{
- {Type: "samples", Unit: "count"},
- {Type: "cpu", Unit: "nanoseconds"},
- },
- TimeNanos: int64(startTime.UnixNano()),
- DurationNanos: time.Since(startTime).Nanoseconds(),
- }
- // Parse CPU samples from the profile.
- locs := make(map[uint64]*profile.Location)
- for len(b) > 0 {
- if len(data) < 2 || uintptr(len(data)) < 2+data[1] {
- return nil, fmt.Errorf("truncated profile")
- }
- count := data[0]
- nstk := data[1]
- if uintptr(len(data)) < 2+nstk {
- return nil, fmt.Errorf("truncated profile")
- }
- stk := data[2 : 2+nstk]
- data = data[2+nstk:]
-
- if count == 0 && nstk == 1 && stk[0] == 0 {
- // end of data marker
- break
- }
-
- sloc := make([]*profile.Location, len(stk))
- for i, addr := range stk {
- addr := uint64(addr)
- // Addresses from stack traces point to the next instruction after
- // each call. Adjust by -1 to land somewhere on the actual call
- // (except for the leaf, which is not a call).
- if i > 0 {
- addr--
- }
- loc := locs[addr]
- if loc == nil {
- loc = &profile.Location{
- ID: uint64(len(p.Location) + 1),
- Address: addr,
- }
- locs[addr] = loc
- p.Location = append(p.Location, loc)
- }
- sloc[i] = loc
- }
- p.Sample = append(p.Sample, &profile.Sample{
- Value: []int64{int64(count), int64(count) * int64(p.Period)},
- Location: sloc,
- })
- }
-
- if runtime.GOOS == "linux" {
- if err := addMappings(p); err != nil {
- return nil, err
- }
- }
- return p, nil
-}
-
-func addMappings(p *profile.Profile) error {
- // Parse memory map from /proc/self/maps
- f, err := os.Open("/proc/self/maps")
- if err != nil {
- return err
- }
- defer f.Close()
- return p.ParseMemoryMap(f)
-}
diff --git a/libgo/go/runtime/pprof/internal/protopprof/protopprof_test.go b/libgo/go/runtime/pprof/internal/protopprof/protopprof_test.go
deleted file mode 100644
index f1937b5bd06..00000000000
--- a/libgo/go/runtime/pprof/internal/protopprof/protopprof_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protopprof
-
-import (
- "bytes"
- "fmt"
- "internal/pprof/profile"
- "io/ioutil"
- "reflect"
- "runtime"
- "testing"
- "time"
- "unsafe"
-)
-
-// Helper function to initialize empty cpu profile with sampling period provided.
-func createEmptyProfileWithPeriod(t *testing.T, periodMs uint64) bytes.Buffer {
- // Mock the sample header produced by cpu profiler. Write a sample
- // period of 2000 microseconds, followed by no samples.
- buf := new(bytes.Buffer)
- // Profile header is as follows:
- // The first, third and fifth words are 0. The second word is 3.
- // The fourth word is the period.
- // EOD marker:
- // The sixth word -- count is initialized to 0 above.
- // The code below sets the seventh word -- nstk to 1
- // The eighth word -- addr is initialized to 0 above.
- words := []int{0, 3, 0, int(periodMs), 0, 0, 1, 0}
- n := int(unsafe.Sizeof(0)) * len(words)
- data := ((*[1 << 29]byte)(unsafe.Pointer(&words[0])))[:n:n]
- if _, err := buf.Write(data); err != nil {
- t.Fatalf("createEmptyProfileWithPeriod failed: %v", err)
- }
- return *buf
-}
-
-// Helper function to initialize cpu profile with two sample values.
-func createProfileWithTwoSamples(t *testing.T, periodMs uintptr, count1 uintptr, count2 uintptr,
- address1 uintptr, address2 uintptr) bytes.Buffer {
- // Mock the sample header produced by cpu profiler. Write a sample
- // period of 2000 microseconds, followed by no samples.
- buf := new(bytes.Buffer)
- words := []uintptr{0, 3, 0, uintptr(periodMs), 0, uintptr(count1), 2,
- uintptr(address1), uintptr(address1 + 2),
- uintptr(count2), 2, uintptr(address2), uintptr(address2 + 2),
- 0, 1, 0}
- for _, n := range words {
- var err error
- switch unsafe.Sizeof(int(0)) {
- case 8:
- _, err = buf.Write((*[8]byte)(unsafe.Pointer(&n))[:8:8])
- case 4:
- _, err = buf.Write((*[4]byte)(unsafe.Pointer(&n))[:4:4])
- }
- if err != nil {
- t.Fatalf("createProfileWithTwoSamples failed: %v", err)
- }
- }
- return *buf
-}
-
-// Tests TranslateCPUProfile parses correct sampling period in an otherwise empty cpu profile.
-func TestTranlateCPUProfileSamplingPeriod(t *testing.T) {
- // A test server with mock cpu profile data.
- var buf bytes.Buffer
-
- startTime := time.Now()
- b := createEmptyProfileWithPeriod(t, 2000)
- p, err := TranslateCPUProfile(b.Bytes(), startTime)
- if err != nil {
- t.Fatalf("translate failed: %v", err)
- }
- if err := p.Write(&buf); err != nil {
- t.Fatalf("write failed: %v", err)
- }
-
- p, err = profile.Parse(&buf)
- if err != nil {
- t.Fatalf("Could not parse Profile profile: %v", err)
- }
-
- // Expected PeriodType and SampleType.
- expectedPeriodType := &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}
- expectedSampleType := []*profile.ValueType{
- {Type: "samples", Unit: "count"},
- {Type: "cpu", Unit: "nanoseconds"},
- }
- if p.Period != 2000*1000 || !reflect.DeepEqual(p.PeriodType, expectedPeriodType) ||
- !reflect.DeepEqual(p.SampleType, expectedSampleType) || p.Sample != nil {
- t.Fatalf("Unexpected Profile fields")
- }
-}
-
-func getSampleAsString(sample []*profile.Sample) string {
- var str string
- for _, x := range sample {
- for _, y := range x.Location {
- if y.Mapping != nil {
- str += fmt.Sprintf("Mapping:%v\n", *y.Mapping)
- }
- str += fmt.Sprintf("Location:%v\n", y)
- }
- str += fmt.Sprintf("Sample:%v\n", *x)
- }
- return str
-}
-
-// Tests TranslateCPUProfile parses a cpu profile with sample values present.
-func TestTranslateCPUProfileWithSamples(t *testing.T) {
- if runtime.GOOS != "linux" {
- t.Skip("test requires a system with /proc/self/maps")
- }
- // Figure out two addresses from /proc/self/maps.
- mmap, err := ioutil.ReadFile("/proc/self/maps")
- if err != nil {
- t.Fatal("Cannot read /proc/self/maps")
- }
- rd := bytes.NewReader(mmap)
- mprof := &profile.Profile{}
- if err = mprof.ParseMemoryMap(rd); err != nil {
- t.Fatalf("Cannot parse /proc/self/maps")
- }
- if len(mprof.Mapping) < 2 {
- // It is possible for a binary to only have 1 executable
- // region of memory.
- t.Skipf("need 2 or more mappings, got %v", len(mprof.Mapping))
- }
- address1 := mprof.Mapping[0].Start
- address2 := mprof.Mapping[1].Start
- // A test server with mock cpu profile data.
-
- startTime := time.Now()
- b := createProfileWithTwoSamples(t, 2000, 20, 40, uintptr(address1), uintptr(address2))
- p, err := TranslateCPUProfile(b.Bytes(), startTime)
-
- if err != nil {
- t.Fatalf("Could not parse Profile profile: %v", err)
- }
- // Expected PeriodType, SampleType and Sample.
- expectedPeriodType := &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}
- expectedSampleType := []*profile.ValueType{
- {Type: "samples", Unit: "count"},
- {Type: "cpu", Unit: "nanoseconds"},
- }
- expectedSample := []*profile.Sample{
- {Value: []int64{20, 20 * 2000 * 1000}, Location: []*profile.Location{
- {ID: 1, Mapping: mprof.Mapping[0], Address: address1},
- {ID: 2, Mapping: mprof.Mapping[0], Address: address1 + 1},
- }},
- {Value: []int64{40, 40 * 2000 * 1000}, Location: []*profile.Location{
- {ID: 3, Mapping: mprof.Mapping[1], Address: address2},
- {ID: 4, Mapping: mprof.Mapping[1], Address: address2 + 1},
- }},
- }
- if p.Period != 2000*1000 {
- t.Fatalf("Sampling periods do not match")
- }
- if !reflect.DeepEqual(p.PeriodType, expectedPeriodType) {
- t.Fatalf("Period types do not match")
- }
- if !reflect.DeepEqual(p.SampleType, expectedSampleType) {
- t.Fatalf("Sample types do not match")
- }
- if !reflect.DeepEqual(p.Sample, expectedSample) {
- t.Fatalf("Samples do not match: Expected: %v, Got:%v", getSampleAsString(expectedSample),
- getSampleAsString(p.Sample))
- }
-}
diff --git a/libgo/go/runtime/pprof/label.go b/libgo/go/runtime/pprof/label.go
new file mode 100644
index 00000000000..35647ee3ce1
--- /dev/null
+++ b/libgo/go/runtime/pprof/label.go
@@ -0,0 +1,85 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "context"
+)
+
+type label struct {
+ key string
+ value string
+}
+
+// LabelSet is a set of labels.
+type LabelSet struct {
+ list []label
+}
+
+// labelContextKey is the type of contextKeys used for profiler labels.
+type labelContextKey struct{}
+
+func labelValue(ctx context.Context) labelMap {
+ labels, _ := ctx.Value(labelContextKey{}).(*labelMap)
+ if labels == nil {
+ return labelMap(nil)
+ }
+ return *labels
+}
+
+// labelMap is the representation of the label set held in the context type.
+// This is an initial implementation, but it will be replaced with something
+// that admits incremental immutable modification more efficiently.
+type labelMap map[string]string
+
+// WithLabels returns a new context.Context with the given labels added.
+// A label overwrites a prior label with the same key.
+func WithLabels(ctx context.Context, labels LabelSet) context.Context {
+ childLabels := make(labelMap)
+ parentLabels := labelValue(ctx)
+ // TODO(matloob): replace the map implementation with something
+ // more efficient so creating a child context WithLabels doesn't need
+ // to clone the map.
+ for k, v := range parentLabels {
+ childLabels[k] = v
+ }
+ for _, label := range labels.list {
+ childLabels[label.key] = label.value
+ }
+ return context.WithValue(ctx, labelContextKey{}, &childLabels)
+}
+
+// Labels takes an even number of strings representing key-value pairs
+// and makes a LabelSet containing them.
+// A label overwrites a prior label with the same key.
+func Labels(args ...string) LabelSet {
+ if len(args)%2 != 0 {
+ panic("uneven number of arguments to pprof.Labels")
+ }
+ labels := LabelSet{}
+ for i := 0; i+1 < len(args); i += 2 {
+ labels.list = append(labels.list, label{key: args[i], value: args[i+1]})
+ }
+ return labels
+}
+
+// Label returns the value of the label with the given key on ctx, and a boolean indicating
+// whether that label exists.
+func Label(ctx context.Context, key string) (string, bool) {
+ ctxLabels := labelValue(ctx)
+ v, ok := ctxLabels[key]
+ return v, ok
+}
+
+// ForLabels invokes f with each label set on the context.
+// The function f should return true to continue iteration or false to stop iteration early.
+func ForLabels(ctx context.Context, f func(key, value string) bool) {
+ ctxLabels := labelValue(ctx)
+ for k, v := range ctxLabels {
+ if !f(k, v) {
+ break
+ }
+ }
+}
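+
+// A typical flow (a sketch; the key and value are placeholders, and
+// fmt is assumed to be imported by the caller): attach labels to a
+// context, then read them back.
+//
+//	ctx := WithLabels(context.Background(), Labels("worker", "42"))
+//	if v, ok := Label(ctx, "worker"); ok {
+//		fmt.Println(v) // prints "42"
+//	}
+//	ForLabels(ctx, func(k, v string) bool {
+//		fmt.Println(k, "=", v)
+//		return true
+//	})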
diff --git a/libgo/go/runtime/pprof/label_test.go b/libgo/go/runtime/pprof/label_test.go
new file mode 100644
index 00000000000..240445f098e
--- /dev/null
+++ b/libgo/go/runtime/pprof/label_test.go
@@ -0,0 +1,82 @@
+package pprof
+
+import (
+ "context"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func labelsSorted(ctx context.Context) []label {
+ ls := []label{}
+ ForLabels(ctx, func(key, value string) bool {
+ ls = append(ls, label{key, value})
+ return true
+ })
+ sort.Sort(labelSorter(ls))
+ return ls
+}
+
+type labelSorter []label
+
+func (s labelSorter) Len() int { return len(s) }
+func (s labelSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s labelSorter) Less(i, j int) bool { return s[i].key < s[j].key }
+
+func TestContextLabels(t *testing.T) {
+ // Background context starts with no labels.
+ ctx := context.Background()
+ labels := labelsSorted(ctx)
+ if len(labels) != 0 {
+ t.Errorf("labels on background context: want [], got %v ", labels)
+ }
+
+ // Add a single label.
+ ctx = WithLabels(ctx, Labels("key", "value"))
+ // Retrieve it with Label.
+ v, ok := Label(ctx, "key")
+ if !ok || v != "value" {
+ t.Errorf(`Label(ctx, "key"): got %v, %v; want "value", ok`, v, ok)
+ }
+ gotLabels := labelsSorted(ctx)
+ wantLabels := []label{{"key", "value"}}
+ if !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("(sorted) labels on context: got %v, want %v", gotLabels, wantLabels)
+ }
+
+ // Add a label with a different key.
+ ctx = WithLabels(ctx, Labels("key2", "value2"))
+ v, ok = Label(ctx, "key2")
+ if !ok || v != "value2" {
+ t.Errorf(`Label(ctx, "key2"): got %v, %v; want "value2", ok`, v, ok)
+ }
+ gotLabels = labelsSorted(ctx)
+ wantLabels = []label{{"key", "value"}, {"key2", "value2"}}
+ if !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("(sorted) labels on context: got %v, want %v", gotLabels, wantLabels)
+ }
+
+ // Add label with first key to test label replacement.
+ ctx = WithLabels(ctx, Labels("key", "value3"))
+ v, ok = Label(ctx, "key")
+ if !ok || v != "value3" {
+ t.Errorf(`Label(ctx, "key"): got %v, %v; want "value3", ok`, v, ok)
+ }
+ gotLabels = labelsSorted(ctx)
+ wantLabels = []label{{"key", "value3"}, {"key2", "value2"}}
+ if !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("(sorted) labels on context: got %v, want %v", gotLabels, wantLabels)
+ }
+
+ // Labels called with two labels with the same key should pick the second.
+ ctx = WithLabels(ctx, Labels("key4", "value4a", "key4", "value4b"))
+ v, ok = Label(ctx, "key4")
+ if !ok || v != "value4b" {
+ t.Errorf(`Label(ctx, "key4"): got %v, %v; want "value4b", ok`, v, ok)
+ }
+ gotLabels = labelsSorted(ctx)
+ wantLabels = []label{{"key", "value3"}, {"key2", "value2"}, {"key4", "value4b"}}
+ if !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("(sorted) labels on context: got %v, want %v", gotLabels, wantLabels)
+ }
+}
diff --git a/libgo/go/runtime/pprof/map.go b/libgo/go/runtime/pprof/map.go
new file mode 100644
index 00000000000..a271ad022e7
--- /dev/null
+++ b/libgo/go/runtime/pprof/map.go
@@ -0,0 +1,89 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import "unsafe"
+
+// A profMap is a map from (stack, tag) to mapEntry.
+// It grows without bound, but that's assumed to be OK.
+type profMap struct {
+ hash map[uintptr]*profMapEntry
+ all *profMapEntry
+ last *profMapEntry
+ free []profMapEntry
+ freeStk []uintptr
+}
+
+// A profMapEntry is a single entry in the profMap.
+type profMapEntry struct {
+ nextHash *profMapEntry // next in hash list
+ nextAll *profMapEntry // next in list of all entries
+ stk []uintptr
+ tag unsafe.Pointer
+ count int64
+}
+
+func (m *profMap) lookup(stk []uint64, tag unsafe.Pointer) *profMapEntry {
+ // Compute hash of (stk, tag).
+ h := uintptr(0)
+ for _, x := range stk {
+ h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
+ h += uintptr(x) * 41
+ }
+ h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
+ h += uintptr(tag) * 41
+
+ // Find entry if present.
+ var last *profMapEntry
+Search:
+ for e := m.hash[h]; e != nil; last, e = e, e.nextHash {
+ if len(e.stk) != len(stk) || e.tag != tag {
+ continue
+ }
+ for j := range stk {
+ if e.stk[j] != uintptr(stk[j]) {
+ continue Search
+ }
+ }
+ // Move to front.
+ if last != nil {
+ last.nextHash = e.nextHash
+ e.nextHash = m.hash[h]
+ m.hash[h] = e
+ }
+ return e
+ }
+
+ // Add new entry.
+ if len(m.free) < 1 {
+ m.free = make([]profMapEntry, 128)
+ }
+ e := &m.free[0]
+ m.free = m.free[1:]
+ e.nextHash = m.hash[h]
+ e.tag = tag
+
+ if len(m.freeStk) < len(stk) {
+ m.freeStk = make([]uintptr, 1024)
+ }
+ e.stk = m.freeStk[:len(stk)]
+ m.freeStk = m.freeStk[len(stk):]
+
+ for j := range stk {
+ e.stk[j] = uintptr(stk[j])
+ }
+ if m.hash == nil {
+ m.hash = make(map[uintptr]*profMapEntry)
+ }
+ m.hash[h] = e
+ if m.all == nil {
+ m.all = e
+ m.last = e
+ } else {
+ m.last.nextAll = e
+ m.last = e
+ }
+ return e
+}
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index 5ebd46b198f..5d77a1d8986 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package pprof_test
+package pprof
import (
"bytes"
@@ -10,7 +10,6 @@ import (
"reflect"
"regexp"
"runtime"
- . "runtime/pprof"
"testing"
"unsafe"
)
@@ -87,26 +86,26 @@ func TestMemoryProfiler(t *testing.T) {
tests := []string{
fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+
-# 0x[0-9,a-f]+ pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:41
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:75
+# 0x[0-9,a-f]+ pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:40
+# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:74
`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+
-# 0x[0-9,a-f]+ pprof_test\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:22
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73
+# 0x[0-9,a-f]+ pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21
+# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:72
`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
// This should start with "0: 0" but gccgo's imprecise
// GC means that sometimes the value is not collected.
fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-# 0x[0-9,a-f]+ pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:28
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:74
+# 0x[0-9,a-f]+ pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:27
+# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73
`, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
// This should start with "0: 0" but gccgo's imprecise
// GC means that sometimes the value is not collected.
fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @( 0x[0-9,a-f]+)+
-# 0x[0-9,a-f]+ pprof_test\.allocateReflectTransient\+0x[0-9,a-f]+ .*/mprof_test.go:49
+# 0x[0-9,a-f]+ pprof\.allocateReflectTransient\+0x[0-9,a-f]+ .*/mprof_test.go:48
`, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 0db1dedd7ac..a57b69dca35 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -33,7 +33,9 @@
// }
// defer pprof.StopCPUProfile()
// }
-// ...
+//
+// // ... rest of the program ...
+//
// if *memprofile != "" {
// f, err := os.Create(*memprofile)
// if err != nil {
@@ -73,15 +75,14 @@ import (
"bufio"
"bytes"
"fmt"
- "internal/pprof/profile"
"io"
"runtime"
- "runtime/pprof/internal/protopprof"
"sort"
"strings"
"sync"
"text/tabwriter"
"time"
+ "unsafe"
)
// BUG(rsc): Profiles are only as good as the kernel support used to generate them.
@@ -183,6 +184,8 @@ func unlockProfiles() {
// If a profile with that name already exists, NewProfile panics.
// The convention is to use an 'import/path.' prefix to create
// separate namespaces for each package.
+// For compatibility with various tools that read pprof data,
+// profile names should not contain spaces.
func NewProfile(name string) *Profile {
lockProfiles()
defer unlockProfiles()
@@ -264,13 +267,18 @@ func (p *Profile) Add(value interface{}, skip int) {
stk := make([]uintptr, 32)
n := runtime.Callers(skip+1, stk[:])
+ stk = stk[:n]
+ if len(stk) == 0 {
+ // The value for skip is too large, and there's no stack trace to record.
+ stk = []uintptr{funcPC(lostProfileEvent) + 1}
+ }
p.mu.Lock()
defer p.mu.Unlock()
if p.m[value] != nil {
panic("pprof: Profile.Add of duplicate value")
}
- p.m[value] = stk[:n]
+ p.m[value] = stk
}
// Remove removes the execution stack associated with value from the profile.
@@ -303,8 +311,8 @@ func (p *Profile) WriteTo(w io.Writer, debug int) error {
}
// Obtain consistent snapshot under lock; then process without lock.
- all := make([][]uintptr, 0, len(p.m))
p.mu.Lock()
+ all := make([][]uintptr, 0, len(p.m))
for _, stk := range p.m {
all = append(all, stk)
}
@@ -380,35 +388,29 @@ func printCountProfile(w io.Writer, debug int, name string, p countProfile) erro
}
// Output profile in protobuf form.
- prof := &profile.Profile{
- PeriodType: &profile.ValueType{Type: name, Unit: "count"},
- Period: 1,
- Sample: make([]*profile.Sample, 0, len(keys)),
- SampleType: []*profile.ValueType{{Type: name, Unit: "count"}},
- }
- locMap := make(map[uintptr]*profile.Location)
+ b := newProfileBuilder(w)
+ b.pbValueType(tagProfile_PeriodType, name, "count")
+ b.pb.int64Opt(tagProfile_Period, 1)
+ b.pbValueType(tagProfile_SampleType, name, "count")
+
+ values := []int64{0}
+ var locs []uint64
for _, k := range keys {
- stk := p.Stack(index[k])
- c := count[k]
- locs := make([]*profile.Location, len(stk))
- for i, addr := range stk {
- loc := locMap[addr]
- if loc == nil {
- loc = &profile.Location{
- ID: uint64(len(locMap) + 1),
- Address: uint64(addr - 1),
- }
- prof.Location = append(prof.Location, loc)
- locMap[addr] = loc
+ values[0] = int64(count[k])
+ locs = locs[:0]
+ for _, addr := range p.Stack(index[k]) {
+ // For count profiles, all stack addresses are
+ // return PCs, which is what locForPC expects.
+ l := b.locForPC(addr)
+ if l == 0 { // runtime.goexit
+ continue
}
- locs[i] = loc
+ locs = append(locs, l)
}
- prof.Sample = append(prof.Sample, &profile.Sample{
- Location: locs,
- Value: []int64{int64(c)},
- })
+ b.pbSample(values, locs, nil)
}
- return prof.Write(w)
+ b.build()
+ return nil
}
// keysByCount sorts keys with higher counts first, breaking ties by key string order.
@@ -510,8 +512,7 @@ func writeHeap(w io.Writer, debug int) error {
}
if debug == 0 {
- pp := protopprof.EncodeMemProfile(p, int64(runtime.MemProfileRate), time.Now())
- return pp.Write(w)
+ return writeHeapProto(w, p, int64(runtime.MemProfileRate))
}
sort.Slice(p, func(i, j int) bool { return p[i].InUseBytes() > p[j].InUseBytes() })
@@ -576,8 +577,12 @@ func writeHeap(w io.Writer, debug int) error {
fmt.Fprintf(w, "# OtherSys = %d\n", s.OtherSys)
fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(w, "# LastGC = %d\n", s.LastGC)
fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(w, "# PauseEnd = %d\n", s.PauseEnd)
fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(w, "# NumForcedGC = %d\n", s.NumForcedGC)
+ fmt.Fprintf(w, "# GCCPUFraction = %v\n", s.GCCPUFraction)
fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
tw.Flush()
@@ -703,30 +708,32 @@ func StartCPUProfile(w io.Writer) error {
return nil
}
+// readProfile, provided by the runtime, returns the next chunk of
+// binary CPU profiling stack trace data, blocking until data is available.
+// If profiling is turned off and all the profile data accumulated while it was
+// on has been returned, readProfile returns eof=true.
+// The caller must save the returned data and tags before calling readProfile again.
+func readProfile() (data []uint64, tags []unsafe.Pointer, eof bool)
+
func profileWriter(w io.Writer) {
- startTime := time.Now()
- // This will buffer the entire profile into buf and then
- // translate it into a profile.Profile structure. This will
- // create two copies of all the data in the profile in memory.
- // TODO(matloob): Convert each chunk of the proto output and
- // stream it out instead of converting the entire profile.
- var buf bytes.Buffer
+ b := newProfileBuilder(w)
+ var err error
for {
- data := runtime.CPUProfile()
- if data == nil {
+ time.Sleep(100 * time.Millisecond)
+ data, tags, eof := readProfile()
+ if e := b.addCPUData(data, tags); e != nil && err == nil {
+ err = e
+ }
+ if eof {
break
}
- buf.Write(data)
}
-
- profile, err := protopprof.TranslateCPUProfile(buf.Bytes(), startTime)
if err != nil {
// The runtime should never produce an invalid or truncated profile.
// It drops records that can't fit into its log buffers.
- panic(fmt.Errorf("could not translate binary profile to proto format: %v", err))
+ panic("runtime/pprof: converting profile: " + err.Error())
}
-
- profile.Write(w)
+ b.build()
cpu.done <- true
}
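For context, a minimal self-contained sketch of how this writer is driven from
user code (the file name cpu.prof and the busy loop are illustrative, not part
of this patch):

    package main

    import (
        "log"
        "os"
        "runtime/pprof"
    )

    func main() {
        f, err := os.Create("cpu.prof")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()
        if err := pprof.StartCPUProfile(f); err != nil { // starts profileWriter(f)
            log.Fatal(err)
        }
        defer pprof.StopCPUProfile() // readProfile reports eof; b.build() flushes the proto
        x := 0
        for i := 0; i < 1e8; i++ { // burn CPU so the profiler collects samples
            x += i
        }
        _ = x
    }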
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index 60340582d5f..9e5e403b741 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -4,13 +4,12 @@
// +build !nacl
-package pprof_test
+package pprof
import (
"bytes"
- "compress/gzip"
+ "context"
"fmt"
- "internal/pprof/profile"
"internal/testenv"
"io"
"io/ioutil"
@@ -19,14 +18,15 @@ import (
"os/exec"
"regexp"
"runtime"
- . "runtime/pprof"
+ "runtime/pprof/internal/profile"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
)
-func cpuHogger(f func(), dur time.Duration) {
+func cpuHogger(f func() int, dur time.Duration) {
// We only need to get one 100 Hz clock tick, so we've got
// a large safety buffer.
// But do at least 500 iterations (which should take about 100ms),
@@ -46,7 +46,7 @@ var (
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
-func cpuHog1() {
+func cpuHog1() int {
foo := salt1
for i := 0; i < 1e5; i++ {
if foo > 0 {
@@ -55,10 +55,10 @@ func cpuHog1() {
foo *= foo + 1
}
}
- salt1 = foo
+ return foo
}
-func cpuHog2() {
+func cpuHog2() int {
foo := salt2
for i := 0; i < 1e5; i++ {
if foo > 0 {
@@ -67,18 +67,18 @@ func cpuHog2() {
foo *= foo + 2
}
}
- salt2 = foo
+ return foo
}
func TestCPUProfile(t *testing.T) {
- testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHog1"}, func(dur time.Duration) {
cpuHogger(cpuHog1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
- testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func(dur time.Duration) {
+ testCPUProfile(t, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, func(dur time.Duration) {
c := make(chan int)
go func() {
cpuHogger(cpuHog1, dur)
@@ -89,18 +89,42 @@ func TestCPUProfileMultithreaded(t *testing.T) {
})
}
-func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []uintptr)) {
+func TestCPUProfileInlining(t *testing.T) {
+ testCPUProfile(t, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, func(dur time.Duration) {
+ cpuHogger(inlinedCaller, dur)
+ })
+}
+
+func inlinedCaller() int {
+ inlinedCallee()
+ return 0
+}
+
+func inlinedCallee() {
+ // We could just use cpuHog1, but for loops prevent inlining
+ // right now. :(
+ foo := salt1
+ i := 0
+loop:
+ if foo > 0 {
+ foo *= foo
+ } else {
+ foo *= foo + 1
+ }
+ if i++; i < 1e5 {
+ goto loop
+ }
+ salt1 = foo
+}
+
+func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
p, err := profile.Parse(bytes.NewReader(valBytes))
if err != nil {
t.Fatal(err)
}
for _, sample := range p.Sample {
count := uintptr(sample.Value[0])
- stk := make([]uintptr, len(sample.Location))
- for i := range sample.Location {
- stk[i] = uintptr(sample.Location[i].Address)
- }
- f(count, stk)
+ f(count, sample.Location, sample.Label)
}
}
@@ -124,8 +148,7 @@ func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
const maxDuration = 5 * time.Second
// If we're running a long test, start with a long duration
- // because some of the tests (e.g., TestStackBarrierProfiling)
- // are trying to make sure something *doesn't* happen.
+ // for tests that try to make sure something *doesn't* happen.
duration := 5 * time.Second
if testing.Short() {
duration = 200 * time.Millisecond
@@ -169,32 +192,45 @@ func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
t.FailNow()
}
+func contains(slice []string, s string) bool {
+ for i := range slice {
+ if slice[i] == s {
+ return true
+ }
+ }
+ return false
+}
+
func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
ok = true
// Check that profile is well formed and contains need.
have := make([]uintptr, len(need))
var samples uintptr
- parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
+ var buf bytes.Buffer
+ parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
+ fmt.Fprintf(&buf, "%d:", count)
+ fprintStack(&buf, stk)
samples += count
- for _, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- continue
- }
- t.Log(f.Name(), count)
- for i, name := range need {
- if strings.Contains(f.Name(), name) {
- have[i] += count
+ for i, name := range need {
+ if semi := strings.Index(name, ";"); semi > -1 {
+ kv := strings.SplitN(name[semi+1:], "=", 2)
+ if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
+ continue
}
+ name = name[:semi]
}
- if strings.Contains(f.Name(), "stackBarrier") {
- // The runtime should have unwound this.
- t.Fatalf("profile includes stackBarrier")
+ for _, loc := range stk {
+ for _, line := range loc.Line {
+ if strings.Contains(line.Function.Name, name) {
+ have[i] += count
+ }
+ }
}
}
+ fmt.Fprintf(&buf, "\n")
})
- t.Logf("total %d CPU profile samples collected", samples)
+ t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
if samples < 10 && runtime.GOOS == "windows" {
// On some windows machines we end up with
@@ -301,36 +337,43 @@ func TestGoroutineSwitch(t *testing.T) {
// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
// The special entry
- parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
+ parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
// An entry with two frames with 'System' in its top frame
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
- f := runtime.FuncForPC(stk[1])
- if f != nil && (f.Name() == "runtime._System" || f.Name() == "runtime._ExternalCode" || f.Name() == "runtime._GC") {
+ name := stk[1].Line[0].Function.Name
+ if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
return
}
}
// Otherwise, should not see runtime.gogo.
// The place we'd see it would be the inner most frame.
- f := runtime.FuncForPC(stk[0])
- if f != nil && f.Name() == "runtime.gogo" {
+ name := stk[0].Line[0].Function.Name
+ if name == "runtime.gogo" {
var buf bytes.Buffer
- for _, pc := range stk {
- f := runtime.FuncForPC(pc)
- if f == nil {
- fmt.Fprintf(&buf, "%#x ?:0\n", pc)
- } else {
- file, line := f.FileLine(pc)
- fmt.Fprintf(&buf, "%#x %s:%d\n", pc, file, line)
- }
- }
+ fprintStack(&buf, stk)
t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
}
})
}
}
+func fprintStack(w io.Writer, stk []*profile.Location) {
+ for _, loc := range stk {
+ fmt.Fprintf(w, " %#x", loc.Address)
+ fmt.Fprintf(w, " (")
+ for i, line := range loc.Line {
+ if i > 0 {
+ fmt.Fprintf(w, " ")
+ }
+ fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
+ }
+ fmt.Fprintf(w, ")")
+ }
+ fmt.Fprintf(w, "\n")
+}
+
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
testCPUProfile(t, nil, func(duration time.Duration) {
@@ -351,111 +394,6 @@ func TestMathBigDivide(t *testing.T) {
})
}
-func slurpString(r io.Reader) string {
- slurp, _ := ioutil.ReadAll(r)
- return string(slurp)
-}
-
-func getLinuxKernelConfig() string {
- if f, err := os.Open("/proc/config"); err == nil {
- defer f.Close()
- return slurpString(f)
- }
- if f, err := os.Open("/proc/config.gz"); err == nil {
- defer f.Close()
- r, err := gzip.NewReader(f)
- if err != nil {
- return ""
- }
- return slurpString(r)
- }
- if f, err := os.Open("/boot/config"); err == nil {
- defer f.Close()
- return slurpString(f)
- }
- uname, _ := exec.Command("uname", "-r").Output()
- if len(uname) > 0 {
- if f, err := os.Open("/boot/config-" + strings.TrimSpace(string(uname))); err == nil {
- defer f.Close()
- return slurpString(f)
- }
- }
- return ""
-}
-
-func haveLinuxHiresTimers() bool {
- config := getLinuxKernelConfig()
- return strings.Contains(config, "CONFIG_HIGH_RES_TIMERS=y")
-}
-
-func TestStackBarrierProfiling(t *testing.T) {
- if (runtime.GOOS == "linux" && runtime.GOARCH == "arm") ||
- runtime.GOOS == "openbsd" ||
- runtime.GOOS == "solaris" ||
- runtime.GOOS == "dragonfly" ||
- runtime.GOOS == "freebsd" {
- // This test currently triggers a large number of
- // usleep(100)s. These kernels/arches have poor
- // resolution timers, so this gives up a whole
- // scheduling quantum. On Linux and the BSDs (and
- // probably Solaris), profiling signals are only
- // generated when a process completes a whole
- // scheduling quantum, so this test often gets zero
- // profiling signals and fails.
- t.Skipf("low resolution timers inhibit profiling signals (golang.org/issue/13405)")
- return
- }
-
- if runtime.GOOS == "linux" && strings.HasPrefix(runtime.GOARCH, "mips") {
- if !haveLinuxHiresTimers() {
- t.Skipf("low resolution timers inhibit profiling signals (golang.org/issue/13405, golang.org/issue/17936)")
- }
- }
-
- if !strings.Contains(os.Getenv("GODEBUG"), "gcstackbarrierall=1") {
- // Re-execute this test with constant GC and stack
- // barriers at every frame.
- testenv.MustHaveExec(t)
- if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" {
- t.Skip("gcstackbarrierall doesn't work on ppc64")
- }
- args := []string{"-test.run=TestStackBarrierProfiling"}
- if testing.Short() {
- args = append(args, "-test.short")
- }
- cmd := exec.Command(os.Args[0], args...)
- cmd.Env = append([]string{"GODEBUG=gcstackbarrierall=1", "GOGC=1", "GOTRACEBACK=system"}, os.Environ()...)
- if out, err := cmd.CombinedOutput(); err != nil {
- t.Fatalf("subprocess failed with %v:\n%s", err, out)
- }
- return
- }
-
- testCPUProfile(t, nil, func(duration time.Duration) {
- // In long mode, we're likely to get one or two
- // samples in stackBarrier.
- t := time.After(duration)
- for {
- deepStack(1000)
- select {
- case <-t:
- return
- default:
- }
- }
- })
-}
-
-var x []byte
-
-func deepStack(depth int) int {
- if depth == 0 {
- return 0
- }
- x = make([]byte, 1024)
- return deepStack(depth-1) + 1
-}
-
// Operating systems that are expected to fail the tests. See issue 13841.
var badOS = map[string]bool{
"darwin": true,
@@ -474,46 +412,46 @@ func TestBlockProfile(t *testing.T) {
}
tests := [...]TestCase{
{"chan recv", blockChanRecv, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan send", blockChanSend, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"chan close", blockChanClose, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select recv async", blockSelectRecvAsync, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"select send sync", blockSelectSendSync, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"mutex", blockMutex, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/sync/mutex\.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*/src/sync/mutex\.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
{"cond", blockCond, `
-[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
-# 0x[0-9,a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9,a-f]+ .*/src/sync/cond\.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.blockCond\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
-# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
+# 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*/src/sync/cond\.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
}
@@ -608,6 +546,10 @@ func blockMutex() {
time.Sleep(blockDelay)
mu.Unlock()
}()
+ // Note: Unlock releases mu before recording the mutex event,
+ // so it's theoretically possible for this to proceed and
+ // capture the profile before the event is recorded. As long
+ // as this is blocked before the unlock happens, it's okay.
mu.Lock()
}
@@ -656,7 +598,7 @@ func TestMutexProfile(t *testing.T) {
if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
t.Errorf("%q didn't match %q", lines[3], r2)
}
- r3 := "^#.*pprof_test.\\$nested.*$"
+ r3 := "^#.*pprof.\\$nested.*$"
match := false
for _, i := range []int{5, 6} {
if ok, _ := regexp.MatchString(r3, lines[i]); ok {
@@ -678,22 +620,26 @@ func TestGoroutineCounts(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("goroutine stacks not supported on gccgo")
}
- if runtime.GOOS == "openbsd" {
- testenv.SkipFlaky(t, 15156)
- }
+
+ // Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
+ // desired blocking point.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+
c := make(chan int)
for i := 0; i < 100; i++ {
- if i%10 == 0 {
+ switch {
+ case i%10 == 0:
go func1(c)
- continue
- }
- if i%2 == 0 {
+ case i%2 == 0:
go func2(c)
- continue
+ default:
+ go func3(c)
+ }
+ // Let goroutines block on channel
+ for j := 0; j < 5; j++ {
+ runtime.Gosched()
}
- go func3(c)
}
- time.Sleep(10 * time.Millisecond) // let goroutines block on channel
var w bytes.Buffer
goroutineProf := Lookup("goroutine")
@@ -756,3 +702,81 @@ func containsCounts(prof *profile.Profile, counts []int64) bool {
}
return true
}
+
+// Issue 18836.
+func TestEmptyCallStack(t *testing.T) {
+ t.Parallel()
+ var buf bytes.Buffer
+ p := NewProfile("test18836")
+ p.Add("foo", 47674)
+ p.WriteTo(&buf, 1)
+ p.Remove("foo")
+ got := buf.String()
+ prefix := "test18836 profile: total 1\n"
+ if !strings.HasPrefix(got, prefix) {
+ t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
+ }
+ lostevent := "lostProfileEvent"
+ if !strings.Contains(got, lostevent) {
+ t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
+ }
+}
+
+func TestCPUProfileLabel(t *testing.T) {
+ testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
+ Do(context.Background(), Labels("key", "value"), func(context.Context) {
+ cpuHogger(cpuHog1, dur)
+ })
+ })
+}
+
+func TestLabelRace(t *testing.T) {
+ // Test the race detector annotations for synchronization
+ // between settings labels and consuming them from the
+ // profile.
+ testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
+ start := time.Now()
+ var wg sync.WaitGroup
+ for time.Since(start) < dur {
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ Do(context.Background(), Labels("key", "value"), func(context.Context) {
+ cpuHogger(cpuHog1, time.Millisecond)
+ })
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ }
+ })
+}
+
+// Check that there is no deadlock when the program receives SIGPROF while in
+// 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
+func TestAtomicLoadStore64(t *testing.T) {
+ f, err := ioutil.TempFile("", "profatomic")
+ if err != nil {
+ t.Fatalf("TempFile: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+
+ if err := StartCPUProfile(f); err != nil {
+ t.Fatal(err)
+ }
+ defer StopCPUProfile()
+
+ var flag uint64
+ done := make(chan bool, 1)
+
+ go func() {
+ for atomic.LoadUint64(&flag) == 0 {
+ runtime.Gosched()
+ }
+ done <- true
+ }()
+ time.Sleep(50 * time.Millisecond)
+ atomic.StoreUint64(&flag, 1)
+ <-done
+}
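Since the tests now live in package pprof itself, a need entry can combine a
function-name substring with a label filter, as profileOk implements above. A
hypothetical combined test (the test name is invented for illustration; every
identifier it uses exists in this file):

    func TestCPUProfileLabelAndFunc(t *testing.T) {
        testCPUProfile(t, []string{"pprof.cpuHog1", "pprof.cpuHogger;key=value"}, func(dur time.Duration) {
            Do(context.Background(), Labels("key", "value"), func(context.Context) {
                cpuHogger(cpuHog1, dur)
            })
        })
    }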
diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go
new file mode 100644
index 00000000000..5e1d71c7e72
--- /dev/null
+++ b/libgo/go/runtime/pprof/proto.go
@@ -0,0 +1,515 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "runtime"
+ "sort"
+ "strconv"
+ "time"
+ "unsafe"
+)
+
+// lostProfileEvent is the function to which lost profiling
+// events are attributed.
+// (The name shows up in the pprof graphs.)
+func lostProfileEvent() { lostProfileEvent() }
+
+// funcPC returns the PC for the func value f.
+func funcPC(f interface{}) uintptr {
+ type iface struct {
+ tab unsafe.Pointer
+ data unsafe.Pointer
+ }
+ i := (*iface)(unsafe.Pointer(&f))
+ return **(**uintptr)(i.data)
+}
+
+// A profileBuilder writes a profile incrementally from a
+// stream of profile samples delivered by the runtime.
+type profileBuilder struct {
+ start time.Time
+ end time.Time
+ havePeriod bool
+ period int64
+ m profMap
+
+ // encoding state
+ w io.Writer
+ zw *gzip.Writer
+ pb protobuf
+ strings []string
+ stringMap map[string]int
+ locs map[uintptr]int
+ funcs map[string]int // Package path-qualified function name to Function.ID
+ mem []memMap
+}
+
+type memMap struct {
+ start uintptr
+ end uintptr
+}
+
+const (
+ // message Profile
+ tagProfile_SampleType = 1 // repeated ValueType
+ tagProfile_Sample = 2 // repeated Sample
+ tagProfile_Mapping = 3 // repeated Mapping
+ tagProfile_Location = 4 // repeated Location
+ tagProfile_Function = 5 // repeated Function
+ tagProfile_StringTable = 6 // repeated string
+ tagProfile_DropFrames = 7 // int64 (string table index)
+ tagProfile_KeepFrames = 8 // int64 (string table index)
+ tagProfile_TimeNanos = 9 // int64
+ tagProfile_DurationNanos = 10 // int64
+ tagProfile_PeriodType = 11 // ValueType (really optional string???)
+ tagProfile_Period = 12 // int64
+
+ // message ValueType
+ tagValueType_Type = 1 // int64 (string table index)
+ tagValueType_Unit = 2 // int64 (string table index)
+
+ // message Sample
+ tagSample_Location = 1 // repeated uint64
+ tagSample_Value = 2 // repeated int64
+ tagSample_Label = 3 // repeated Label
+
+ // message Label
+ tagLabel_Key = 1 // int64 (string table index)
+ tagLabel_Str = 2 // int64 (string table index)
+ tagLabel_Num = 3 // int64
+
+ // message Mapping
+ tagMapping_ID = 1 // uint64
+ tagMapping_Start = 2 // uint64
+ tagMapping_Limit = 3 // uint64
+ tagMapping_Offset = 4 // uint64
+ tagMapping_Filename = 5 // int64 (string table index)
+ tagMapping_BuildID = 6 // int64 (string table index)
+ tagMapping_HasFunctions = 7 // bool
+ tagMapping_HasFilenames = 8 // bool
+ tagMapping_HasLineNumbers = 9 // bool
+ tagMapping_HasInlineFrames = 10 // bool
+
+ // message Location
+ tagLocation_ID = 1 // uint64
+ tagLocation_MappingID = 2 // uint64
+ tagLocation_Address = 3 // uint64
+ tagLocation_Line = 4 // repeated Line
+
+ // message Line
+ tagLine_FunctionID = 1 // uint64
+ tagLine_Line = 2 // int64
+
+ // message Function
+ tagFunction_ID = 1 // uint64
+ tagFunction_Name = 2 // int64 (string table index)
+ tagFunction_SystemName = 3 // int64 (string table index)
+ tagFunction_Filename = 4 // int64 (string table index)
+ tagFunction_StartLine = 5 // int64
+)
+
+// stringIndex adds s to the string table if not already present
+// and returns the index of s in the string table.
+func (b *profileBuilder) stringIndex(s string) int64 {
+ id, ok := b.stringMap[s]
+ if !ok {
+ id = len(b.strings)
+ b.strings = append(b.strings, s)
+ b.stringMap[s] = id
+ }
+ return int64(id)
+}
+
+func (b *profileBuilder) flush() {
+ const dataFlush = 4096
+ if b.pb.nest == 0 && len(b.pb.data) > dataFlush {
+ b.zw.Write(b.pb.data)
+ b.pb.data = b.pb.data[:0]
+ }
+}
+
+// pbValueType encodes a ValueType message to b.pb.
+func (b *profileBuilder) pbValueType(tag int, typ, unit string) {
+ start := b.pb.startMessage()
+ b.pb.int64(tagValueType_Type, b.stringIndex(typ))
+ b.pb.int64(tagValueType_Unit, b.stringIndex(unit))
+ b.pb.endMessage(tag, start)
+}
+
+// pbSample encodes a Sample message to b.pb.
+func (b *profileBuilder) pbSample(values []int64, locs []uint64, labels func()) {
+ start := b.pb.startMessage()
+ b.pb.int64s(tagSample_Value, values)
+ b.pb.uint64s(tagSample_Location, locs)
+ if labels != nil {
+ labels()
+ }
+ b.pb.endMessage(tagProfile_Sample, start)
+ b.flush()
+}
+
+// pbLabel encodes a Label message to b.pb.
+func (b *profileBuilder) pbLabel(tag int, key, str string, num int64) {
+ start := b.pb.startMessage()
+ b.pb.int64Opt(tagLabel_Key, b.stringIndex(key))
+ b.pb.int64Opt(tagLabel_Str, b.stringIndex(str))
+ b.pb.int64Opt(tagLabel_Num, num)
+ b.pb.endMessage(tag, start)
+}
+
+// pbLine encodes a Line message to b.pb.
+func (b *profileBuilder) pbLine(tag int, funcID uint64, line int64) {
+ start := b.pb.startMessage()
+ b.pb.uint64Opt(tagLine_FunctionID, funcID)
+ b.pb.int64Opt(tagLine_Line, line)
+ b.pb.endMessage(tag, start)
+}
+
+// pbMapping encodes a Mapping message to b.pb.
+func (b *profileBuilder) pbMapping(tag int, id, base, limit, offset uint64, file, buildID string) {
+ start := b.pb.startMessage()
+ b.pb.uint64Opt(tagMapping_ID, id)
+ b.pb.uint64Opt(tagMapping_Start, base)
+ b.pb.uint64Opt(tagMapping_Limit, limit)
+ b.pb.uint64Opt(tagMapping_Offset, offset)
+ b.pb.int64Opt(tagMapping_Filename, b.stringIndex(file))
+ b.pb.int64Opt(tagMapping_BuildID, b.stringIndex(buildID))
+ // TODO: Set any of HasInlineFrames, HasFunctions, HasFilenames, HasLineNumbers?
+ // It seems like they should all be true, but they've never been set.
+ b.pb.endMessage(tag, start)
+}
+
+// locForPC returns the location ID for addr.
+// addr must be a return PC. This returns the location of the call.
+// It may emit to b.pb, so there must be no message encoding in progress.
+func (b *profileBuilder) locForPC(addr uintptr) uint64 {
+ id := uint64(b.locs[addr])
+ if id != 0 {
+ return id
+ }
+
+ // Expand this one address using CallersFrames so we can cache
+ // each expansion. In general, CallersFrames takes a whole
+ // stack, but in this case we know there will be no skips in
+ // the stack and we have return PCs anyway.
+ frames := runtime.CallersFrames([]uintptr{addr})
+ frame, more := frames.Next()
+ if frame.Function == "runtime.goexit" {
+ // Short-circuit if we see runtime.goexit so the loop
+ // below doesn't allocate a useless empty location.
+ return 0
+ }
+
+ if frame.PC == 0 {
+ // If we failed to resolve the frame, at least make up
+ // a reasonable call PC. This mostly happens in tests.
+ frame.PC = addr - 1
+ }
+
+ // We can't write out functions while in the middle of the
+ // Location message, so record new functions we encounter and
+ // write them out after the Location.
+ type newFunc struct {
+ id uint64
+ name, file string
+ }
+ newFuncs := make([]newFunc, 0, 8)
+
+ id = uint64(len(b.locs)) + 1
+ b.locs[addr] = int(id)
+ start := b.pb.startMessage()
+ b.pb.uint64Opt(tagLocation_ID, id)
+ b.pb.uint64Opt(tagLocation_Address, uint64(frame.PC))
+ for frame.Function != "runtime.goexit" {
+ // Write out each line in frame expansion.
+ funcID := uint64(b.funcs[frame.Function])
+ if funcID == 0 {
+ funcID = uint64(len(b.funcs)) + 1
+ b.funcs[frame.Function] = int(funcID)
+ newFuncs = append(newFuncs, newFunc{funcID, frame.Function, frame.File})
+ }
+ b.pbLine(tagLocation_Line, funcID, int64(frame.Line))
+ if !more {
+ break
+ }
+ frame, more = frames.Next()
+ }
+ if len(b.mem) > 0 {
+ i := sort.Search(len(b.mem), func(i int) bool {
+ return b.mem[i].end > addr
+ })
+ if i < len(b.mem) && b.mem[i].start <= addr && addr < b.mem[i].end {
+ b.pb.uint64Opt(tagLocation_MappingID, uint64(i+1))
+ }
+ }
+ b.pb.endMessage(tagProfile_Location, start)
+
+ // Write out functions we found during frame expansion.
+ for _, fn := range newFuncs {
+ start := b.pb.startMessage()
+ b.pb.uint64Opt(tagFunction_ID, fn.id)
+ b.pb.int64Opt(tagFunction_Name, b.stringIndex(fn.name))
+ b.pb.int64Opt(tagFunction_SystemName, b.stringIndex(fn.name))
+ b.pb.int64Opt(tagFunction_Filename, b.stringIndex(fn.file))
+ b.pb.endMessage(tagProfile_Function, start)
+ }
+
+ b.flush()
+ return id
+}
+
+// newProfileBuilder returns a new profileBuilder.
+// CPU profiling data obtained from the runtime can be added
+// by calling b.addCPUData, and then the eventual profile
+// can be obtained by calling b.build.
+func newProfileBuilder(w io.Writer) *profileBuilder {
+ zw, _ := gzip.NewWriterLevel(w, gzip.BestSpeed)
+ b := &profileBuilder{
+ w: w,
+ zw: zw,
+ start: time.Now(),
+ strings: []string{""},
+ stringMap: map[string]int{"": 0},
+ locs: map[uintptr]int{},
+ funcs: map[string]int{},
+ }
+ b.readMapping()
+ return b
+}
+
+// addCPUData adds the CPU profiling data to the profile.
+// The data must be a whole number of records,
+// as delivered by the runtime.
+func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error {
+ if !b.havePeriod {
+ // first record is period
+ if len(data) < 3 {
+ return fmt.Errorf("truncated profile")
+ }
+ if data[0] != 3 || data[2] == 0 {
+ return fmt.Errorf("malformed profile")
+ }
+ // data[2] is sampling rate in Hz. Convert to sampling
+ // period in nanoseconds.
+ b.period = 1e9 / int64(data[2])
+ b.havePeriod = true
+ data = data[3:]
+ }
+
+ // Parse CPU samples from the profile.
+ // Each sample is 3+n uint64s:
+ // data[0] = 3+n
+ // data[1] = time stamp (ignored)
+ // data[2] = count
+ // data[3:3+n] = stack
+ // If the count is 0 and the stack has length 1,
+ // that's an overflow record inserted by the runtime
+ // to indicate that stack[0] samples were lost.
+ // Otherwise the count is usually 1,
+ // but in a few special cases like lost non-Go samples
+ // there can be larger counts.
+ // Because many samples with the same stack arrive,
+ // we want to deduplicate immediately, which we do
+ // using the b.m profMap.
+ for len(data) > 0 {
+ if len(data) < 3 || data[0] > uint64(len(data)) {
+ return fmt.Errorf("truncated profile")
+ }
+ if data[0] < 3 || tags != nil && len(tags) < 1 {
+ return fmt.Errorf("malformed profile")
+ }
+ count := data[2]
+ stk := data[3:data[0]]
+ data = data[data[0]:]
+ var tag unsafe.Pointer
+ if tags != nil {
+ tag = tags[0]
+ tags = tags[1:]
+ }
+
+ if count == 0 && len(stk) == 1 {
+ // overflow record
+ count = uint64(stk[0])
+ stk = []uint64{
+ uint64(funcPC(lostProfileEvent)),
+ }
+ }
+ b.m.lookup(stk, tag).count += int64(count)
+ }
+ return nil
+}
+
+// build completes and returns the constructed profile.
+func (b *profileBuilder) build() error {
+ b.end = time.Now()
+
+ b.pb.int64Opt(tagProfile_TimeNanos, b.start.UnixNano())
+ if b.havePeriod { // must be CPU profile
+ b.pbValueType(tagProfile_SampleType, "samples", "count")
+ b.pbValueType(tagProfile_SampleType, "cpu", "nanoseconds")
+ b.pb.int64Opt(tagProfile_DurationNanos, b.end.Sub(b.start).Nanoseconds())
+ b.pbValueType(tagProfile_PeriodType, "cpu", "nanoseconds")
+ b.pb.int64Opt(tagProfile_Period, b.period)
+ }
+
+ values := []int64{0, 0}
+ var locs []uint64
+ for e := b.m.all; e != nil; e = e.nextAll {
+ values[0] = e.count
+ values[1] = e.count * b.period
+
+ var labels func()
+ if e.tag != nil {
+ labels = func() {
+ for k, v := range *(*labelMap)(e.tag) {
+ b.pbLabel(tagSample_Label, k, v, 0)
+ }
+ }
+ }
+
+ locs = locs[:0]
+ for i, addr := range e.stk {
+ // Addresses from stack traces point to the
+ // next instruction after each call, except
+ // for the leaf, which points to where the
+ // signal occurred. locForPC expects return
+ // PCs, so increment the leaf address to look
+ // like a return PC.
+ if i == 0 {
+ addr++
+ }
+ l := b.locForPC(addr)
+ if l == 0 { // runtime.goexit
+ continue
+ }
+ locs = append(locs, l)
+ }
+ b.pbSample(values, locs, labels)
+ }
+
+ // TODO: Anything for tagProfile_DropFrames?
+ // TODO: Anything for tagProfile_KeepFrames?
+
+ b.pb.strings(tagProfile_StringTable, b.strings)
+ b.zw.Write(b.pb.data)
+ b.zw.Close()
+ return nil
+}
+
+// readMapping reads /proc/self/maps and writes mappings to b.pb.
+// It saves the address ranges of the mappings in b.mem for use
+// when emitting locations.
+func (b *profileBuilder) readMapping() {
+ data, _ := ioutil.ReadFile("/proc/self/maps")
+ parseProcSelfMaps(data, b.addMapping)
+}
+
+func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) {
+ // $ cat /proc/self/maps
+ // 00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat
+ // 0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat
+ // 0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat
+ // 014ab000-014cc000 rw-p 00000000 00:00 0 [heap]
+ // 7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive
+ // 7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+ // 7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+ // 7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+ // 7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+ // 7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0
+ // 7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+ // 7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0
+ // 7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0
+ // 7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+ // 7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+ // 7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0
+ // 7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack]
+ // 7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso]
+ // ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
+
+ var line []byte
+ // next removes and returns the next field in the line.
+ // It also removes from line any spaces following the field.
+ next := func() []byte {
+ j := bytes.IndexByte(line, ' ')
+ if j < 0 {
+ f := line
+ line = nil
+ return f
+ }
+ f := line[:j]
+ line = line[j+1:]
+ for len(line) > 0 && line[0] == ' ' {
+ line = line[1:]
+ }
+ return f
+ }
+
+ for len(data) > 0 {
+ i := bytes.IndexByte(data, '\n')
+ if i < 0 {
+ line, data = data, nil
+ } else {
+ line, data = data[:i], data[i+1:]
+ }
+ addr := next()
+ i = bytes.IndexByte(addr, '-')
+ if i < 0 {
+ continue
+ }
+ lo, err := strconv.ParseUint(string(addr[:i]), 16, 64)
+ if err != nil {
+ continue
+ }
+ hi, err := strconv.ParseUint(string(addr[i+1:]), 16, 64)
+ if err != nil {
+ continue
+ }
+ perm := next()
+ if len(perm) < 4 || perm[2] != 'x' {
+ // Only interested in executable mappings.
+ continue
+ }
+ offset, err := strconv.ParseUint(string(next()), 16, 64)
+ if err != nil {
+ continue
+ }
+ next() // dev
+ inode := next() // inode
+ if line == nil {
+ continue
+ }
+ file := string(line)
+ if len(inode) == 1 && inode[0] == '0' && file == "" {
+ // Huge-page text mappings list the initial fragment of
+ // mapped but unpopulated memory as being inode 0.
+ // Don't report that part.
+ // But [vdso] and [vsyscall] are inode 0, so let non-empty file names through.
+ continue
+ }
+
+ // TODO: pprof's remapMappingIDs makes two adjustments:
+ // 1. If there is an /anon_hugepage mapping first and it is
+ // consecutive to a next mapping, drop the /anon_hugepage.
+ // 2. If start-offset = 0x400000, change start to 0x400000 and offset to 0.
+ // There's no indication why either of these is needed.
+ // Let's try not doing these and see what breaks.
+ // If we do need them, they would go here, before we
+ // enter the mappings into b.mem in the first place.
+
+ buildID, _ := elfBuildID(file)
+ addMapping(lo, hi, offset, file, buildID)
+ }
+}
+
+func (b *profileBuilder) addMapping(lo, hi, offset uint64, file, buildID string) {
+ b.mem = append(b.mem, memMap{uintptr(lo), uintptr(hi)})
+ b.pbMapping(tagProfile_Mapping, uint64(len(b.mem)), lo, hi, offset, file, buildID)
+}
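To make the record layout concrete, here is a hedged, hand-built input for
addCPUData (pc1 and pc2 are placeholder program counters and w is any
io.Writer; neither appears in the patch):

    pc1, pc2 := uint64(0x401000), uint64(0x401020)
    data := []uint64{
        3, 0, 500,         // header: data[2] = 500 Hz, so period = 1e9/500 = 2ms
        5, 0, 1, pc1, pc2, // sample: len 5, timestamp (ignored), count 1, stack {pc1, pc2}
        4, 0, 0, 25,       // overflow record: count 0, 1-frame stack => 25 samples lost
    }
    b := newProfileBuilder(w)
    if err := b.addCPUData(data, nil); err != nil {
        log.Fatal(err) // "truncated profile" or "malformed profile"
    }
    b.build()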
diff --git a/libgo/go/runtime/pprof/proto_test.go b/libgo/go/runtime/pprof/proto_test.go
new file mode 100644
index 00000000000..a268c3a95d9
--- /dev/null
+++ b/libgo/go/runtime/pprof/proto_test.go
@@ -0,0 +1,224 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "runtime"
+ "runtime/pprof/internal/profile"
+ "strings"
+ "testing"
+)
+
+// translateCPUProfile parses binary CPU profiling stack trace data
+// generated by runtime.CPUProfile() into a profile struct.
+// This is only used for testing. Real conversions stream the
+// data into the profileBuilder as it becomes available.
+func translateCPUProfile(data []uint64) (*profile.Profile, error) {
+ var buf bytes.Buffer
+ b := newProfileBuilder(&buf)
+ if err := b.addCPUData(data, nil); err != nil {
+ return nil, err
+ }
+ b.build()
+ return profile.Parse(&buf)
+}
+
+// fmtJSON returns a pretty-printed JSON form for x.
+// It works reasonably well for printing protocol-buffer
+// data structures like profile.Profile.
+func fmtJSON(x interface{}) string {
+ js, _ := json.MarshalIndent(x, "", "\t")
+ return string(js)
+}
+
+func TestConvertCPUProfileEmpty(t *testing.T) {
+	// A buffer to collect the generated profile output.
+ var buf bytes.Buffer
+
+ b := []uint64{3, 0, 500} // empty profile at 500 Hz (2ms sample period)
+ p, err := translateCPUProfile(b)
+ if err != nil {
+ t.Fatalf("translateCPUProfile: %v", err)
+ }
+ if err := p.Write(&buf); err != nil {
+ t.Fatalf("writing profile: %v", err)
+ }
+
+ p, err = profile.Parse(&buf)
+ if err != nil {
+ t.Fatalf("profile.Parse: %v", err)
+ }
+
+ // Expected PeriodType and SampleType.
+ periodType := &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}
+ sampleType := []*profile.ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ }
+
+ checkProfile(t, p, 2000*1000, periodType, sampleType, nil)
+}
+
+// For gccgo make these functions different so that gccgo doesn't
+// merge them with each other and with lostProfileEvent.
+func f1(i int) { f1(i + 1) }
+func f2(i int) { f2(i + 2) }
+
+// testPCs returns two PCs and two corresponding memory mappings
+// to use in test profiles.
+func testPCs(t *testing.T) (addr1, addr2 uint64, map1, map2 *profile.Mapping) {
+ switch runtime.GOOS {
+ case "linux", "android", "netbsd":
+ // Figure out two addresses from /proc/self/maps.
+ mmap, err := ioutil.ReadFile("/proc/self/maps")
+ if err != nil {
+ t.Fatal(err)
+ }
+ mprof := &profile.Profile{}
+ if err = mprof.ParseMemoryMap(bytes.NewReader(mmap)); err != nil {
+ t.Fatalf("parsing /proc/self/maps: %v", err)
+ }
+ if len(mprof.Mapping) < 2 {
+ // It is possible for a binary to only have 1 executable
+ // region of memory.
+ t.Skipf("need 2 or more mappings, got %v", len(mprof.Mapping))
+ }
+ addr1 = mprof.Mapping[0].Start
+ map1 = mprof.Mapping[0]
+ map1.BuildID, _ = elfBuildID(map1.File)
+ addr2 = mprof.Mapping[1].Start
+ map2 = mprof.Mapping[1]
+ map2.BuildID, _ = elfBuildID(map2.File)
+ default:
+ addr1 = uint64(funcPC(f1))
+ addr2 = uint64(funcPC(f2))
+ }
+ return
+}
+
+func TestConvertCPUProfile(t *testing.T) {
+ addr1, addr2, map1, map2 := testPCs(t)
+
+ b := []uint64{
+ 3, 0, 500, // hz = 500
+ 5, 0, 10, uint64(addr1), uint64(addr1 + 2), // 10 samples in addr1
+ 5, 0, 40, uint64(addr2), uint64(addr2 + 2), // 40 samples in addr2
+ 5, 0, 10, uint64(addr1), uint64(addr1 + 2), // 10 samples in addr1
+ }
+ p, err := translateCPUProfile(b)
+ if err != nil {
+ t.Fatalf("translating profile: %v", err)
+ }
+ period := int64(2000 * 1000)
+ periodType := &profile.ValueType{Type: "cpu", Unit: "nanoseconds"}
+ sampleType := []*profile.ValueType{
+ {Type: "samples", Unit: "count"},
+ {Type: "cpu", Unit: "nanoseconds"},
+ }
+ samples := []*profile.Sample{
+ {Value: []int64{20, 20 * 2000 * 1000}, Location: []*profile.Location{
+ {ID: 1, Mapping: map1, Address: addr1},
+ {ID: 2, Mapping: map1, Address: addr1 + 1},
+ }},
+ {Value: []int64{40, 40 * 2000 * 1000}, Location: []*profile.Location{
+ {ID: 3, Mapping: map2, Address: addr2},
+ {ID: 4, Mapping: map2, Address: addr2 + 1},
+ }},
+ }
+ checkProfile(t, p, period, periodType, sampleType, samples)
+}
+
+func checkProfile(t *testing.T, p *profile.Profile, period int64, periodType *profile.ValueType, sampleType []*profile.ValueType, samples []*profile.Sample) {
+ if p.Period != period {
+ t.Fatalf("p.Period = %d, want %d", p.Period, period)
+ }
+ if !reflect.DeepEqual(p.PeriodType, periodType) {
+ t.Fatalf("p.PeriodType = %v\nwant = %v", fmtJSON(p.PeriodType), fmtJSON(periodType))
+ }
+ if !reflect.DeepEqual(p.SampleType, sampleType) {
+ t.Fatalf("p.SampleType = %v\nwant = %v", fmtJSON(p.SampleType), fmtJSON(sampleType))
+ }
+ // Clear line info since it is not in the expected samples.
+ // If we used f1 and f2 above, then the samples will have line info.
+ for _, s := range p.Sample {
+ for _, l := range s.Location {
+ l.Line = nil
+ }
+ }
+ if fmtJSON(p.Sample) != fmtJSON(samples) { // ignore unexported fields
+ if len(p.Sample) == len(samples) {
+ for i := range p.Sample {
+ if !reflect.DeepEqual(p.Sample[i], samples[i]) {
+ t.Errorf("sample %d = %v\nwant = %v\n", i, fmtJSON(p.Sample[i]), fmtJSON(samples[i]))
+ }
+ }
+ if t.Failed() {
+ t.FailNow()
+ }
+ }
+ t.Fatalf("p.Sample = %v\nwant = %v", fmtJSON(p.Sample), fmtJSON(samples))
+ }
+}
+
+var profSelfMapsTests = `
+00400000-0040b000 r-xp 00000000 fc:01 787766 /bin/cat
+0060a000-0060b000 r--p 0000a000 fc:01 787766 /bin/cat
+0060b000-0060c000 rw-p 0000b000 fc:01 787766 /bin/cat
+014ab000-014cc000 rw-p 00000000 00:00 0 [heap]
+7f7d76af8000-7f7d7797c000 r--p 00000000 fc:01 1318064 /usr/lib/locale/locale-archive
+7f7d7797c000-7f7d77b36000 r-xp 00000000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+7f7d77b36000-7f7d77d36000 ---p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+7f7d77d36000-7f7d77d3a000 r--p 001ba000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+7f7d77d3a000-7f7d77d3c000 rw-p 001be000 fc:01 1180226 /lib/x86_64-linux-gnu/libc-2.19.so
+7f7d77d3c000-7f7d77d41000 rw-p 00000000 00:00 0
+7f7d77d41000-7f7d77d64000 r-xp 00000000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+7f7d77f3f000-7f7d77f42000 rw-p 00000000 00:00 0
+7f7d77f61000-7f7d77f63000 rw-p 00000000 00:00 0
+7f7d77f63000-7f7d77f64000 r--p 00022000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+7f7d77f64000-7f7d77f65000 rw-p 00023000 fc:01 1180217 /lib/x86_64-linux-gnu/ld-2.19.so
+7f7d77f65000-7f7d77f66000 rw-p 00000000 00:00 0
+7ffc342a2000-7ffc342c3000 rw-p 00000000 00:00 0 [stack]
+7ffc34343000-7ffc34345000 r-xp 00000000 00:00 0 [vdso]
+ffffffffff600000-ffffffffff601000 r-xp 00000090 00:00 0 [vsyscall]
+->
+00400000 0040b000 00000000 /bin/cat
+7f7d7797c000 7f7d77b36000 00000000 /lib/x86_64-linux-gnu/libc-2.19.so
+7f7d77d41000 7f7d77d64000 00000000 /lib/x86_64-linux-gnu/ld-2.19.so
+7ffc34343000 7ffc34345000 00000000 [vdso]
+ffffffffff600000 ffffffffff601000 00000090 [vsyscall]
+
+00400000-07000000 r-xp 00000000 00:00 0
+07000000-07093000 r-xp 06c00000 00:2e 536754 /path/to/gobench_server_main
+07093000-0722d000 rw-p 06c92000 00:2e 536754 /path/to/gobench_server_main
+0722d000-07b21000 rw-p 00000000 00:00 0
+c000000000-c000036000 rw-p 00000000 00:00 0
+->
+07000000 07093000 06c00000 /path/to/gobench_server_main
+`
+
+func TestProcSelfMaps(t *testing.T) {
+ for tx, tt := range strings.Split(profSelfMapsTests, "\n\n") {
+ i := strings.Index(tt, "->\n")
+ if i < 0 {
+ t.Fatal("malformed test case")
+ }
+ in, out := tt[:i], tt[i+len("->\n"):]
+ if len(out) > 0 && out[len(out)-1] != '\n' {
+ out += "\n"
+ }
+ var buf bytes.Buffer
+ parseProcSelfMaps([]byte(in), func(lo, hi, offset uint64, file, buildID string) {
+ fmt.Fprintf(&buf, "%08x %08x %08x %s\n", lo, hi, offset, file)
+ })
+ if buf.String() != out {
+ t.Errorf("#%d: have:\n%s\nwant:\n%s\n%q\n%q", tx, buf.String(), out, buf.String(), out)
+ }
+ }
+}
diff --git a/libgo/go/runtime/pprof/protobuf.go b/libgo/go/runtime/pprof/protobuf.go
new file mode 100644
index 00000000000..7b99095a13a
--- /dev/null
+++ b/libgo/go/runtime/pprof/protobuf.go
@@ -0,0 +1,141 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+// A protobuf is a simple protocol buffer encoder.
+type protobuf struct {
+ data []byte
+ tmp [16]byte
+ nest int
+}
+
+func (b *protobuf) varint(x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
+
+func (b *protobuf) length(tag int, len int) {
+ b.varint(uint64(tag)<<3 | 2)
+ b.varint(uint64(len))
+}
+
+func (b *protobuf) uint64(tag int, x uint64) {
+	// append the key varint (tag<<3 | wire type 0), then the value varint, to b.data
+ b.varint(uint64(tag)<<3 | 0)
+ b.varint(x)
+}
+
+func (b *protobuf) uint64s(tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ b.varint(u)
+ }
+ n2 := len(b.data)
+ b.length(tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ b.uint64(tag, u)
+ }
+}
+
+func (b *protobuf) uint64Opt(tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ b.uint64(tag, x)
+}
+
+func (b *protobuf) int64(tag int, x int64) {
+ u := uint64(x)
+ b.uint64(tag, u)
+}
+
+func (b *protobuf) int64Opt(tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ b.int64(tag, x)
+}
+
+func (b *protobuf) int64s(tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ b.varint(uint64(u))
+ }
+ n2 := len(b.data)
+ b.length(tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ b.int64(tag, u)
+ }
+}
+
+func (b *protobuf) string(tag int, x string) {
+ b.length(tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func (b *protobuf) strings(tag int, x []string) {
+ for _, s := range x {
+ b.string(tag, s)
+ }
+}
+
+func (b *protobuf) stringOpt(tag int, x string) {
+ if x == "" {
+ return
+ }
+ b.string(tag, x)
+}
+
+func (b *protobuf) bool(tag int, x bool) {
+ if x {
+ b.uint64(tag, 1)
+ } else {
+ b.uint64(tag, 0)
+ }
+}
+
+func (b *protobuf) boolOpt(tag int, x bool) {
+	if !x {
+ return
+ }
+ b.bool(tag, x)
+}
+
+type msgOffset int
+
+func (b *protobuf) startMessage() msgOffset {
+ b.nest++
+ return msgOffset(len(b.data))
+}
+
+func (b *protobuf) endMessage(tag int, start msgOffset) {
+ n1 := int(start)
+ n2 := len(b.data)
+ b.length(tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ b.nest--
+}
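Two details are worth calling out. First, uint64s and int64s switch to packed
encoding once a slice has more than two elements. Second, because lengths are
not known up front, the payload is emitted first, the tag-and-length varints
are appended after it, and the three copies through b.tmp rotate them into
place (b.tmp being [16]byte bounds that header); endMessage uses the same
trick for nested messages. A standalone check of the varint wire format (a
sketch, not part of the patch):

    // 300 = 0b1_0010_1100: low 7 bits with the continuation bit set => 0xac,
    // then the remaining bits => 0x02.
    var b protobuf
    b.varint(300)
    fmt.Printf("% x\n", b.data) // prints: ac 02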
diff --git a/libgo/go/runtime/pprof/protomem.go b/libgo/go/runtime/pprof/protomem.go
new file mode 100644
index 00000000000..2756cfd28d8
--- /dev/null
+++ b/libgo/go/runtime/pprof/protomem.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "io"
+ "math"
+ "runtime"
+ "strings"
+)
+
+// writeHeapProto writes the current heap profile in protobuf format to w.
+func writeHeapProto(w io.Writer, p []runtime.MemProfileRecord, rate int64) error {
+ b := newProfileBuilder(w)
+ b.pbValueType(tagProfile_PeriodType, "space", "bytes")
+ b.pb.int64Opt(tagProfile_Period, rate)
+ b.pbValueType(tagProfile_SampleType, "alloc_objects", "count")
+ b.pbValueType(tagProfile_SampleType, "alloc_space", "bytes")
+ b.pbValueType(tagProfile_SampleType, "inuse_objects", "count")
+ b.pbValueType(tagProfile_SampleType, "inuse_space", "bytes")
+
+ values := []int64{0, 0, 0, 0}
+ var locs []uint64
+ for _, r := range p {
+ locs = locs[:0]
+ hideRuntime := true
+ for tries := 0; tries < 2; tries++ {
+ for _, addr := range r.Stack() {
+ // For heap profiles, all stack
+ // addresses are return PCs, which is
+ // what locForPC expects.
+ if hideRuntime {
+ if f := runtime.FuncForPC(addr); f != nil && strings.HasPrefix(f.Name(), "runtime.") {
+ continue
+ }
+ // Found non-runtime. Show any runtime uses above it.
+ hideRuntime = false
+ }
+ l := b.locForPC(addr)
+ if l == 0 { // runtime.goexit
+ continue
+ }
+ locs = append(locs, l)
+ }
+ if len(locs) > 0 {
+ break
+ }
+ hideRuntime = false // try again, and show all frames
+ }
+
+ values[0], values[1] = scaleHeapSample(r.AllocObjects, r.AllocBytes, rate)
+ values[2], values[3] = scaleHeapSample(r.InUseObjects(), r.InUseBytes(), rate)
+ var blockSize int64
+ if values[0] > 0 {
+ blockSize = values[1] / values[0]
+ }
+ b.pbSample(values, locs, func() {
+ if blockSize != 0 {
+ b.pbLabel(tagSample_Label, "bytes", "", blockSize)
+ }
+ })
+ }
+ b.build()
+ return nil
+}
+
+// scaleHeapSample adjusts the data from a heap Sample to
+// account for its probability of appearing in the collected
+// data. Heap profiles are a sampling of the memory allocation
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. Heap profiles rely on a Poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability of a sample of size S appearing in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+ if count == 0 || size == 0 {
+ return 0, 0
+ }
+
+ if rate <= 1 {
+ // if rate==1 all samples were collected so no adjustment is needed.
+ // if rate<1 treat as unknown and skip scaling.
+ return count, size
+ }
+
+ avgSize := float64(size) / float64(count)
+ scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+ return int64(float64(count) * scale), int64(float64(size) * scale)
+}
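A worked instance, using exactly the numbers from TestConvertMemProfile below:
one allocation of 512 KiB sampled at rate = 512 KiB gives avgSize/rate = 1, so
scale = 1/(1-e^-1) ≈ 1.58198:

    count, size := scaleHeapSample(1, 512*1024, 512*1024)
    fmt.Println(count, size) // 1 829411 (the float results truncate toward zero)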
diff --git a/libgo/go/runtime/pprof/protomem_test.go b/libgo/go/runtime/pprof/protomem_test.go
new file mode 100644
index 00000000000..1e30ed93a36
--- /dev/null
+++ b/libgo/go/runtime/pprof/protomem_test.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "bytes"
+ "runtime"
+ "runtime/pprof/internal/profile"
+ "testing"
+)
+
+func TestConvertMemProfile(t *testing.T) {
+ addr1, addr2, map1, map2 := testPCs(t)
+
+ var buf bytes.Buffer
+ // MemProfileRecord stacks are return PCs, so add one to the
+ // addresses recorded in the "profile". The proto profile
+ // locations are call PCs, so conversion will subtract one
+ // from these and get back to addr1 and addr2.
+ a1, a2 := uintptr(addr1)+1, uintptr(addr2)+1
+ rate := int64(512 * 1024)
+ rec := []runtime.MemProfileRecord{
+ {AllocBytes: 4096, FreeBytes: 1024, AllocObjects: 4, FreeObjects: 1, Stack0: [32]uintptr{a1, a2}},
+ {AllocBytes: 512 * 1024, FreeBytes: 0, AllocObjects: 1, FreeObjects: 0, Stack0: [32]uintptr{a2 + 1, a2 + 2}},
+ {AllocBytes: 512 * 1024, FreeBytes: 512 * 1024, AllocObjects: 1, FreeObjects: 1, Stack0: [32]uintptr{a1 + 1, a1 + 2, a2 + 3}},
+ }
+
+ if err := writeHeapProto(&buf, rec, rate); err != nil {
+ t.Fatalf("writing profile: %v", err)
+ }
+
+ p, err := profile.Parse(&buf)
+ if err != nil {
+ t.Fatalf("profile.Parse: %v", err)
+ }
+
+ periodType := &profile.ValueType{Type: "space", Unit: "bytes"}
+ sampleType := []*profile.ValueType{
+ {Type: "alloc_objects", Unit: "count"},
+ {Type: "alloc_space", Unit: "bytes"},
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: "bytes"},
+ }
+ samples := []*profile.Sample{
+ {
+ Value: []int64{2050, 2099200, 1537, 1574400},
+ Location: []*profile.Location{
+ {ID: 1, Mapping: map1, Address: addr1},
+ {ID: 2, Mapping: map2, Address: addr2},
+ },
+ NumLabel: map[string][]int64{"bytes": {1024}},
+ },
+ {
+ Value: []int64{1, 829411, 1, 829411},
+ Location: []*profile.Location{
+ {ID: 3, Mapping: map2, Address: addr2 + 1},
+ {ID: 4, Mapping: map2, Address: addr2 + 2},
+ },
+ NumLabel: map[string][]int64{"bytes": {829411}},
+ },
+ {
+ Value: []int64{1, 829411, 0, 0},
+ Location: []*profile.Location{
+ {ID: 5, Mapping: map1, Address: addr1 + 1},
+ {ID: 6, Mapping: map1, Address: addr1 + 2},
+ {ID: 7, Mapping: map2, Address: addr2 + 3},
+ },
+ NumLabel: map[string][]int64{"bytes": {829411}},
+ },
+ }
+ checkProfile(t, p, rate, periodType, sampleType, samples)
+}
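Cross-checking the first expected sample against scaleHeapSample: AllocObjects=4
and AllocBytes=4096 give avgSize = 1024, so scale = 1/(1-exp(-1024/524288)) ≈
512.5002, hence:

    // alloc:  4*512.5002 -> 2050 objects, 4096*512.5002 -> 2099200 bytes
    // in use: 3 objects / 3072 bytes live -> 1537 and 1574400 after scaling
    // NumLabel "bytes" = 2099200/2050 = 1024, the average block size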
diff --git a/libgo/go/runtime/pprof/runtime.go b/libgo/go/runtime/pprof/runtime.go
new file mode 100644
index 00000000000..e6aace83e26
--- /dev/null
+++ b/libgo/go/runtime/pprof/runtime.go
@@ -0,0 +1,36 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "context"
+ "unsafe"
+)
+
+// runtime_setProfLabel is defined in runtime/proflabel.go.
+func runtime_setProfLabel(labels unsafe.Pointer)
+
+// runtime_getProfLabel is defined in runtime/proflabel.go.
+func runtime_getProfLabel() unsafe.Pointer
+
+// SetGoroutineLabels sets the current goroutine's labels to match ctx.
+// This is a lower-level API than Do, which should be used instead when possible.
+func SetGoroutineLabels(ctx context.Context) {
+ ctxLabels, _ := ctx.Value(labelContextKey{}).(*labelMap)
+ runtime_setProfLabel(unsafe.Pointer(ctxLabels))
+}
+
+// Do calls f with a copy of the parent context with the
+// given labels added to the parent's label map.
+// Each key/value pair in labels is inserted into the label map in the
+// order provided, overriding any previous value for the same key.
+// The augmented label map will be set for the duration of the call to f
+// and restored once f returns.
+func Do(ctx context.Context, labels LabelSet, f func(context.Context)) {
+ defer SetGoroutineLabels(ctx)
+ ctx = WithLabels(ctx, labels)
+ SetGoroutineLabels(ctx)
+ f(ctx)
+}
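A short usage sketch of Do (handleRequest is hypothetical): CPU samples taken
while f runs carry the given labels, and the goroutine's previous label set is
restored when Do returns:

    pprof.Do(ctx, pprof.Labels("request", "index"), func(ctx context.Context) {
        handleRequest(ctx) // samples here are tagged request=index
    })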
diff --git a/libgo/go/runtime/pprof/runtime_test.go b/libgo/go/runtime/pprof/runtime_test.go
new file mode 100644
index 00000000000..0dd5324b42e
--- /dev/null
+++ b/libgo/go/runtime/pprof/runtime_test.go
@@ -0,0 +1,96 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func TestSetGoroutineLabels(t *testing.T) {
+ sync := make(chan struct{})
+
+ wantLabels := map[string]string{}
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("Expected parent goroutine's profile labels to be empty before test, got %v", gotLabels)
+ }
+ go func() {
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("Expected child goroutine's profile labels to be empty before test, got %v", gotLabels)
+ }
+ sync <- struct{}{}
+ }()
+ <-sync
+
+ wantLabels = map[string]string{"key": "value"}
+ ctx := WithLabels(context.Background(), Labels("key", "value"))
+ SetGoroutineLabels(ctx)
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("parent goroutine's profile labels: got %v, want %v", gotLabels, wantLabels)
+ }
+ go func() {
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("child goroutine's profile labels: got %v, want %v", gotLabels, wantLabels)
+ }
+ sync <- struct{}{}
+ }()
+ <-sync
+
+ wantLabels = map[string]string{}
+ ctx = context.Background()
+ SetGoroutineLabels(ctx)
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("Expected parent goroutine's profile labels to be empty, got %v", gotLabels)
+ }
+ go func() {
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("Expected child goroutine's profile labels to be empty, got %v", gotLabels)
+ }
+ sync <- struct{}{}
+ }()
+ <-sync
+}
+
+func TestDo(t *testing.T) {
+ wantLabels := map[string]string{}
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("Expected parent goroutine's profile labels to be empty before Do, got %v", gotLabels)
+ }
+
+ Do(context.Background(), Labels("key1", "value1", "key2", "value2"), func(ctx context.Context) {
+ wantLabels := map[string]string{"key1": "value1", "key2": "value2"}
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("parent goroutine's profile labels: got %v, want %v", gotLabels, wantLabels)
+ }
+
+ sync := make(chan struct{})
+ go func() {
+ wantLabels := map[string]string{"key1": "value1", "key2": "value2"}
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ t.Errorf("child goroutine's profile labels: got %v, want %v", gotLabels, wantLabels)
+ }
+ sync <- struct{}{}
+ }()
+ <-sync
+
+ })
+
+ wantLabels = map[string]string{}
+ if gotLabels := getProfLabel(); !reflect.DeepEqual(gotLabels, wantLabels) {
+ fmt.Printf("%#v", gotLabels)
+ fmt.Printf("%#v", wantLabels)
+ t.Errorf("Expected parent goroutine's profile labels to be empty after Do, got %v", gotLabels)
+ }
+}
+
+func getProfLabel() map[string]string {
+ l := (*labelMap)(runtime_getProfLabel())
+ if l == nil {
+ return map[string]string{}
+ }
+ return *l
+}
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 6b6f9c9c379..345f57b6875 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -214,8 +214,17 @@ func main() {
// Make racy client program work: if panicking on
// another goroutine at the same time as main returns,
// let the other goroutine finish printing the panic trace.
- // Once it does, it will exit. See issue 3934.
- if panicking != 0 {
+ // Once it does, it will exit. See issues 3934 and 20018.
+ if atomic.Load(&runningPanicDefers) != 0 {
+ // Running deferred functions should not take long.
+ for c := 0; c < 1000; c++ {
+ if atomic.Load(&runningPanicDefers) == 0 {
+ break
+ }
+ Gosched()
+ }
+ }
+ if atomic.Load(&panicking) != 0 {
gopark(nil, nil, "panicwait", traceEvGoStop, 1)
}
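
The bounded loop above is a general pattern: yield a limited number of times while another goroutine drains a counter, instead of blocking indefinitely. A minimal sketch of the same idea outside the runtime, using the public runtime.Gosched and sync/atomic (the counter and bound are arbitrary stand-ins):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

var pendingDefers int32 = 3 // stand-in for runningPanicDefers

func main() {
	go func() {
		for atomic.LoadInt32(&pendingDefers) > 0 {
			atomic.AddInt32(&pendingDefers, -1) // "run" one deferred function
		}
	}()
	// Yield up to 1000 times; give up rather than wait forever.
	for c := 0; c < 1000; c++ {
		if atomic.LoadInt32(&pendingDefers) == 0 {
			break
		}
		runtime.Gosched()
	}
	fmt.Println("pending defers:", atomic.LoadInt32(&pendingDefers))
}
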
@@ -255,18 +264,25 @@ func forcegchelper() {
if debug.gctrace > 0 {
println("GC forced")
}
- gcStart(gcBackgroundMode, true)
+ // Time-triggered, fully concurrent.
+ gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
}
}
-//go:nosplit
-
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
+//go:nosplit
func Gosched() {
mcall(gosched_m)
}
+// goschedguarded yields the processor like gosched, but also checks
+// for forbidden states and opts out of the yield in those cases.
+//go:nosplit
+func goschedguarded() {
+ mcall(goschedguarded_m)
+}
+
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
@@ -419,16 +435,6 @@ func allgadd(gp *g) {
lock(&allglock)
allgs = append(allgs, gp)
allglen = uintptr(len(allgs))
-
- // Grow GC rescan list if necessary.
- if len(allgs) > cap(work.rescan.list) {
- lock(&work.rescan.lock)
- l := work.rescan.list
- // Let append do the heavy lifting, but keep the
- // length the same.
- work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
- unlock(&work.rescan.lock)
- }
unlock(&allglock)
}
@@ -765,9 +771,8 @@ func casgstatus(gp *g, oldval, newval uint32) {
nextYield = nanotime() + yieldDelay/2
}
}
- if newval == _Grunning && gp.gcscanvalid {
- // Run queueRescan on the system stack so it has more space.
- systemstack(func() { queueRescan(gp) })
+ if newval == _Grunning {
+ gp.gcscanvalid = false
}
}
@@ -779,8 +784,6 @@ func scang(gp *g, gcw *gcWork) {
// Nothing is racing with us now, but gcscandone might be set to true left over
// from an earlier round of stack scanning (we scan twice per GC).
// We use gcscandone to record whether the scan has been done during this round.
- // It is important that the scan happens exactly once: if called twice,
- // the installation of stack barriers will detect the double scan and die.
gp.gcscandone = false
@@ -902,7 +905,7 @@ func restartg(gp *g) {
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
- semacquire(&worldsema, 0)
+ semacquire(&worldsema)
getg().m.preemptoff = reason
systemstack(stopTheWorldWithSema)
}
@@ -1129,10 +1132,10 @@ func mstart1() {
}
asminit()
- minit()
// Install signal handlers; after minit so that minit can
// prepare the thread to be able to handle the signals.
+ // For gccgo minit was called by C code.
if _g_.m == &m0 {
// Create an extra M for callbacks on threads not created by Go.
if iscgo && !cgoHasExtraM {
@@ -1363,6 +1366,7 @@ func needm(x byte) {
// running at all (that is, there's no garbage collection
// running right now).
mp.needextram = mp.schedlink == 0
+ extraMCount--
unlockextra(mp.schedlink.ptr())
// Save and block signals before installing g.
@@ -1376,12 +1380,16 @@ func needm(x byte) {
// Install g (= m->curg).
setg(mp.curg)
- atomic.Store(&mp.curg.atomicstatus, _Gsyscall)
- setGContext()
// Initialize this thread to use the m.
asminit()
minit()
+
+ setGContext()
+
+ // mp.curg is now a real goroutine.
+ casgstatus(mp.curg, _Gdead, _Gsyscall)
+ atomic.Xadd(&sched.ngsys, -1)
}
var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
@@ -1414,14 +1422,12 @@ func oneNewExtraM() {
// the goroutine stack ends.
mp, g0SP, g0SPSize := allocm(nil, nil, true)
gp := malg(true, false, nil, nil)
- gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
+ gp.gcscanvalid = true
gp.gcscandone = true
- gp.gcRescan = -1
-
- // malg returns status as Gidle, change to Gdead before adding to allg
- // where GC will see it.
- // gccgo uses Gdead here, not Gsyscall, because the split
- // stack context is not initialized.
+ // malg returns status as _Gidle. Change to _Gdead before
+ // adding to allg where GC can see it. We use _Gdead to hide
+ // this from tracebacks and stack scans since it isn't a
+ // "real" goroutine until needm grabs it.
casgstatus(gp, _Gidle, _Gdead)
gp.m = mp
mp.curg = gp
@@ -1436,9 +1442,16 @@ func oneNewExtraM() {
// Here we need to set the context for g0.
makeGContext(mp.g0, g0SP, g0SPSize)
+ // gp is now on the allg list, but we don't want it to be
+ // counted by gcount. It would be more "proper" to increment
+ // sched.ngfree, but that requires locking. Incrementing ngsys
+ // has the same effect.
+ atomic.Xadd(&sched.ngsys, +1)
+
// Add m to the extra list.
mnext := lockextra(true)
mp.schedlink.set(mnext)
+ extraMCount++
unlockextra(mp)
}
@@ -1474,6 +1487,10 @@ func dropm() {
// with no pointer manipulation.
mp := getg().m
+ // Return mp.curg to dead state.
+ casgstatus(mp.curg, _Gsyscall, _Gdead)
+ atomic.Xadd(&sched.ngsys, +1)
+
// Block signals before unminit.
// Unminit unregisters the signal handling stack (but needs g on some systems).
// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
@@ -1489,6 +1506,7 @@ func dropm() {
mp.curg.gcnextsp = 0
mnext := lockextra(true)
+ extraMCount++
mp.schedlink.set(mnext)
setg(nil)
@@ -1505,6 +1523,7 @@ func getm() uintptr {
}
var extram uintptr
+var extraMCount uint32 // Protected by lockextra
var extraMWaiters uint32
// lockextra locks the extra list and returns the list head.
@@ -1551,6 +1570,10 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
+// execLock serializes exec and clone to avoid bugs or unspecified behaviour
+// around exec'ing while creating/destroying threads. See issue #19546.
+var execLock rwmutex
+
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
@@ -1559,7 +1582,9 @@ func newm(fn func(), _p_ *p) {
mp, _, _ := allocm(_p_, fn, false)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
+ execLock.rlock() // Prevent process clone.
newosproc(mp)
+ execLock.runlock()
}
// Stops execution of the current m until new work is available.
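
The execLock choreography above is a classic readers/writer split: many thread creations may overlap with each other, but exec must exclude them all. A sketch of the same pattern in ordinary Go with sync.RWMutex (startThread and doExec are hypothetical stand-ins for newosproc and the exec path):

package main

import (
	"fmt"
	"sync"
)

var execLock sync.RWMutex

func startThread(id int, wg *sync.WaitGroup) {
	defer wg.Done()
	execLock.RLock() // prevent a concurrent exec while cloning
	fmt.Printf("thread %d created\n", id)
	execLock.RUnlock()
}

func doExec() {
	execLock.Lock() // prevent thread creation during exec
	fmt.Println("exec: no threads being created right now")
	execLock.Unlock()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go startThread(i, &wg)
	}
	doExec()
	wg.Wait()
}
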
@@ -1812,7 +1837,7 @@ func execute(gp *g, inheritTime bool) {
// Check whether the profiler needs to be turned on or off.
hz := sched.profilehz
if _g_.m.profilehz != hz {
- resetcpuprofiler(hz)
+ setThreadCPUProfiler(hz)
}
if trace.enabled {
@@ -1850,6 +1875,9 @@ top:
ready(gp, 0, true)
}
}
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
// local runq
if gp, inheritTime := runqget(_p_); gp != nil {
@@ -2007,7 +2035,7 @@ stop:
}
// poll network
- if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
+ if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
if _g_.m.p != 0 {
throw("findrunnable: netpoll with p")
}
@@ -2048,7 +2076,7 @@ func pollWork() bool {
if !runqempty(p) {
return true
}
- if netpollinited() && sched.lastpoll != 0 {
+ if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
if gp := netpoll(false); gp != nil {
injectglist(gp)
return true
@@ -2211,7 +2239,7 @@ func park_m(gp *g) {
_g_ := getg()
if trace.enabled {
- traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
+ traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
}
casgstatus(gp, _Grunning, _Gwaiting)
@@ -2256,6 +2284,19 @@ func gosched_m(gp *g) {
goschedImpl(gp)
}
+// goschedguarded_m is like gosched_m, but it returns to the goroutine
+// immediately instead of yielding when the m is in a forbidden state.
+func goschedguarded_m(gp *g) {
+ if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
+ gogo(gp) // never return
+ }
+
+ if trace.enabled {
+ traceGoSched()
+ }
+ goschedImpl(gp)
+}
+
func gopreempt_m(gp *g) {
if trace.enabled {
traceGoPreempt()
@@ -2290,10 +2331,11 @@ func goexit0(gp *g) {
gp.writebuf = nil
gp.waitreason = ""
gp.param = nil
+ gp.labels = nil
+ gp.timer = nil
// Note that gp's stack scan is now "valid" because it has no
- // stack. We could dequeueRescan, but that takes a lock and
- // isn't really necessary.
+ // stack.
gp.gcscanvalid = true
dropg()
@@ -2641,12 +2683,12 @@ func syscall_exitsyscall() {
func beforefork() {
gp := getg().m.curg
- // Fork can hang if preempted with signals frequently enough (see issue 5517).
- // Ensure that we stay on the same M where we disable profiling.
+ // Block signals during a fork, so that the child does not run
+ // a signal handler before exec if a signal is sent to the process
+ // group. See issue #18600.
gp.m.locks++
- if gp.m.profilehz != 0 {
- resetcpuprofiler(0)
- }
+ msigsave(gp.m)
+ sigblock()
}
// Called from syscall package before fork.
@@ -2659,10 +2701,8 @@ func syscall_runtime_BeforeFork() {
func afterfork() {
gp := getg().m.curg
- hz := sched.profilehz
- if hz != 0 {
- resetcpuprofiler(hz)
- }
+ msigrestore(gp.m.sigmask)
+
gp.m.locks--
}
@@ -2673,6 +2713,50 @@ func syscall_runtime_AfterFork() {
systemstack(afterfork)
}
+// inForkedChild is true while manipulating signals in the child process.
+// This is used to avoid calling libc functions in case we are using vfork.
+var inForkedChild bool
+
+// Called from syscall package after fork in child.
+// It resets non-sigignored signals to the default handler, and
+// restores the signal mask in preparation for the exec.
+//
+// Because this might be called during a vfork, and therefore may be
+// temporarily sharing address space with the parent process, this must
+// not change any global variables or call into C code that may do so.
+//
+//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
+//go:nosplit
+//go:nowritebarrierrec
+func syscall_runtime_AfterForkInChild() {
+ // It's OK to change the global variable inForkedChild here
+ // because we are going to change it back. There is no race here,
+ // because if we are sharing address space with the parent process,
+ // then the parent process can not be running concurrently.
+ inForkedChild = true
+
+ clearSignalHandlers()
+
+ // When we are the child we are the only thread running,
+ // so we know that nothing else has changed gp.m.sigmask.
+ msigrestore(getg().m.sigmask)
+
+ inForkedChild = false
+}
+
+// Called from syscall package before Exec.
+//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
+func syscall_runtime_BeforeExec() {
+ // Prevent thread creation during exec.
+ execLock.lock()
+}
+
+// Called from syscall package after Exec.
+//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
+func syscall_runtime_AfterExec() {
+ execLock.unlock()
+}
+
// Create a new g running fn passing arg as the single argument.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
@@ -2695,7 +2779,6 @@ func newproc(fn uintptr, arg unsafe.Pointer) *g {
if newg == nil {
newg = malg(true, false, &sp, &spsize)
casgstatus(newg, _Gidle, _Gdead)
- newg.gcRescan = -1
allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
} else {
resetNewG(newg, &sp, &spsize)
@@ -2717,17 +2800,13 @@ func newproc(fn uintptr, arg unsafe.Pointer) *g {
newg.param = arg
newg.gopc = getcallerpc(unsafe.Pointer(&fn))
newg.startpc = fn
- // The stack is dirty from the argument frame, so queue it for
- // scanning. Do this before setting it to runnable so we still
- // own the G. If we're recycling a G, it may already be on the
- // rescan list.
- if newg.gcRescan == -1 {
- queueRescan(newg)
- } else {
- // The recycled G is already on the rescan list. Just
- // mark the stack dirty.
- newg.gcscanvalid = false
+ if _g_.m.curg != nil {
+ newg.labels = _g_.m.curg.labels
+ }
+ if isSystemGoroutine(newg) {
+ atomic.Xadd(&sched.ngsys, +1)
}
+ newg.gcscanvalid = false
casgstatus(newg, _Gdead, _Grunnable)
if _p_.goidcache == _p_.goidcacheend {
@@ -2926,8 +3005,7 @@ func badunlockosthread() {
func gcount() int32 {
n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
- for i := 0; ; i++ {
- _p_ := allp[i]
+ for _, _p_ := range &allp {
if _p_ == nil {
break
}
@@ -2947,13 +3025,18 @@ func mcount() int32 {
}
var prof struct {
- lock uint32
- hz int32
+ signalLock uint32
+ hz int32
}
-func _System() { _System() }
-func _ExternalCode() { _ExternalCode() }
-func _GC() { _GC() }
+func _System() { _System() }
+func _ExternalCode() { _ExternalCode() }
+func _LostExternalCode() { _LostExternalCode() }
+func _GC() { _GC() }
+func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
+
+// Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
+var lostAtomic64Count uint64
var _SystemPC = funcPC(_System)
var _ExternalCodePC = funcPC(_ExternalCode)
@@ -3009,14 +3092,11 @@ func sigprof(pc uintptr, gp *g, mp *m) {
}
if prof.hz != 0 {
- // Simple cas-lock to coordinate with setcpuprofilerate.
- for !atomic.Cas(&prof.lock, 0, 1) {
- osyield()
+ if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
+ cpuprof.addLostAtomic64(lostAtomic64Count)
+ lostAtomic64Count = 0
}
- if prof.hz != 0 {
- cpuprof.add(stk[:n])
- }
- atomic.Store(&prof.lock, 0)
+ cpuprof.add(gp, stk[:n])
}
getg().m.mallocing--
}
@@ -3047,19 +3127,28 @@ func sigprofNonGo(pc uintptr) {
nonprofGoStk[1] = _ExternalCodePC + sys.PCQuantum
}
- // Simple cas-lock to coordinate with setcpuprofilerate.
- for !atomic.Cas(&prof.lock, 0, 1) {
- osyield()
- }
- if prof.hz != 0 {
- cpuprof.addNonGo(nonprofGoStk[:n])
+ cpuprof.addNonGo(nonprofGoStk[:n])
+ }
+}
+
+// sigprofNonGoPC is called when a profiling signal arrived on a
+// non-Go thread and we have a single PC value, not a stack trace.
+// g is nil, and what we can do is very limited.
+//go:nosplit
+//go:nowritebarrierrec
+func sigprofNonGoPC(pc uintptr) {
+ if prof.hz != 0 {
+ stk := []uintptr{
+ pc,
+ funcPC(_ExternalCode) + sys.PCQuantum,
}
- atomic.Store(&prof.lock, 0)
+ cpuprof.addNonGo(stk)
}
}
-// Arrange to call fn with a traceback hz times a second.
-func setcpuprofilerate_m(hz int32) {
+// setcpuprofilerate sets the CPU profiling rate to hz times per second.
+// If hz <= 0, setcpuprofilerate turns off CPU profiling.
+func setcpuprofilerate(hz int32) {
// Force sane arguments.
if hz < 0 {
hz = 0
@@ -3073,20 +3162,23 @@ func setcpuprofilerate_m(hz int32) {
// Stop profiler on this thread so that it is safe to lock prof.
// If a profiling signal came in while we had prof locked,
// it would deadlock.
- resetcpuprofiler(0)
+ setThreadCPUProfiler(0)
- for !atomic.Cas(&prof.lock, 0, 1) {
+ for !atomic.Cas(&prof.signalLock, 0, 1) {
osyield()
}
- prof.hz = hz
- atomic.Store(&prof.lock, 0)
+ if prof.hz != hz {
+ setProcessCPUProfiler(hz)
+ prof.hz = hz
+ }
+ atomic.Store(&prof.signalLock, 0)
lock(&sched.lock)
sched.profilehz = hz
unlock(&sched.lock)
if hz != 0 {
- resetcpuprofiler(hz)
+ setThreadCPUProfiler(hz)
}
_g_.m.locks--
@@ -3424,7 +3516,25 @@ func sysmon() {
if scavengelimit < forcegcperiod {
maxsleep = scavengelimit / 2
}
+ shouldRelax := true
+ if osRelaxMinNS > 0 {
+ lock(&timers.lock)
+ if timers.sleeping {
+ now := nanotime()
+ next := timers.sleepUntil
+ if next-now < osRelaxMinNS {
+ shouldRelax = false
+ }
+ }
+ unlock(&timers.lock)
+ }
+ if shouldRelax {
+ osRelax(true)
+ }
notetsleep(&sched.sysmonnote, maxsleep)
+ if shouldRelax {
+ osRelax(false)
+ }
lock(&sched.lock)
atomic.Store(&sched.sysmonwait, 0)
noteclear(&sched.sysmonnote)
@@ -3433,10 +3543,13 @@ func sysmon() {
}
unlock(&sched.lock)
}
+ // trigger libc interceptors if needed
+ if *cgo_yield != nil {
+ asmcgocall(*cgo_yield, nil)
+ }
// poll network if not polled for more than 10ms
lastpoll := int64(atomic.Load64(&sched.lastpoll))
now := nanotime()
- unixnow := unixnanotime()
if lastpoll != 0 && lastpoll+10*1000*1000 < now {
atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
gp := netpoll(false) // non-blocking - returns list of goroutines
@@ -3461,8 +3574,7 @@ func sysmon() {
idle++
}
// check if we need to force a GC
- lastgc := int64(atomic.Load64(&memstats.last_gc))
- if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
+ if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
lock(&forcegc.lock)
forcegc.idle = 0
forcegc.g.schedlink = 0
@@ -3482,7 +3594,7 @@ func sysmon() {
}
}
-var pdesc [_MaxGomaxprocs]struct {
+type sysmontick struct {
schedtick uint32
schedwhen int64
syscalltick uint32
@@ -3500,7 +3612,7 @@ func retake(now int64) uint32 {
if _p_ == nil {
continue
}
- pd := &pdesc[i]
+ pd := &_p_.sysmontick
s := _p_.status
if s == _Psyscall {
// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
@@ -3905,7 +4017,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
if randomizeScheduler {
for i := uint32(1); i <= n; i++ {
- j := fastrand() % (i + 1)
+ j := fastrandn(i + 1)
batch[i], batch[j] = batch[j], batch[i]
}
}
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 813c92912b9..313a9610e0e 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -53,14 +53,14 @@ func TestStopTheWorldDeadlock(t *testing.T) {
}
func TestYieldProgress(t *testing.T) {
- testYieldProgress(t, false)
+ testYieldProgress(false)
}
func TestYieldLockedProgress(t *testing.T) {
- testYieldProgress(t, true)
+ testYieldProgress(true)
}
-func testYieldProgress(t *testing.T, locked bool) {
+func testYieldProgress(locked bool) {
c := make(chan bool)
cack := make(chan bool)
go func() {
@@ -430,10 +430,13 @@ func TestPingPongHog(t *testing.T) {
<-lightChan
// Check that hogCount and lightCount are within a factor of
- // 2, which indicates that both pairs of goroutines handed off
- // the P within a time-slice to their buddy.
- if hogCount > lightCount*2 || lightCount > hogCount*2 {
- t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
+ // 5, which indicates that both pairs of goroutines handed off
+ // the P within a time-slice to their buddy. We can use a
+ // fairly large factor here to make this robust: if the
+ // scheduler isn't working right, the gap should be ~1000X.
+ const factor = 5
+ if hogCount > lightCount*factor || lightCount > hogCount*factor {
+ t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
}
}
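
The widened tolerance works because the assertion is multiplicative rather than absolute: a ratio bound stays meaningful across machines of very different speeds. A sketch of the check in isolation:

package main

import "fmt"

// withinFactor reports whether a and b agree within a multiplicative
// factor, the shape of the hogCount/lightCount assertion above.
func withinFactor(a, b, factor int64) bool {
	return a <= b*factor && b <= a*factor
}

func main() {
	fmt.Println(withinFactor(1000, 400, 5)) // true: ratio 2.5 is inside [1/5, 5]
	fmt.Println(withinFactor(1000, 100, 5)) // false: ratio 10 is outside
}
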
diff --git a/libgo/go/runtime/profbuf.go b/libgo/go/runtime/profbuf.go
new file mode 100644
index 00000000000..f40881aed51
--- /dev/null
+++ b/libgo/go/runtime/profbuf.go
@@ -0,0 +1,561 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
+
+// A profBuf is a lock-free buffer for profiling events,
+// safe for concurrent use by one reader and one writer.
+// The writer may be a signal handler running without a user g.
+// The reader is assumed to be a user g.
+//
+// Each logged event corresponds to a fixed size header, a list of
+// uintptrs (typically a stack), and exactly one unsafe.Pointer tag.
+// The header and uintptrs are stored in the circular buffer data and the
+// tag is stored in a circular buffer tags, running in parallel.
+// In the circular buffer data, each event takes 2+hdrsize+len(stk)
+// words: the value 2+hdrsize+len(stk), then the time of the event, then
+// hdrsize words giving the fixed-size header, and then len(stk) words
+// for the stack.
+//
+// The current effective offsets into the tags and data circular buffers
+// for reading and writing are stored in the high 30 and low 32 bits of r and w.
+// The bottom bits of the high 32 are additional flag bits in w, unused in r.
+// "Effective" offsets means the total number of reads or writes, mod 2^length.
+// The offset in the buffer is the effective offset mod the length of the buffer.
+// To make wraparound mod 2^length match wraparound mod length of the buffer,
+// the length of the buffer must be a power of two.
+//
+// If the reader catches up to the writer, a flag passed to read controls
+// whether the read blocks until more data is available. A read returns a
+// pointer to the buffer data itself; the caller is assumed to be done with
+// that data at the next read. The read offset rNext tracks the next offset to
+// be returned by read. By definition, r ≤ rNext ≤ w (before wraparound),
+// and rNext is only used by the reader, so it can be accessed without atomics.
+//
+// If the writer gets ahead of the reader, so that the buffer fills,
+// future writes are discarded and replaced in the output stream by an
+// overflow entry, which has size 2+hdrsize+1, time set to the time of
+// the first discarded write, a header of all zeroed words, and a "stack"
+// containing one word, the number of discarded writes.
+//
+// Between the time the buffer fills and the buffer becomes empty enough
+// to hold more data, the overflow entry is stored as a pending overflow
+// entry in the fields overflow and overflowTime. The pending overflow
+// entry can be turned into a real record by either the writer or the
+// reader. If the writer is called to write a new record and finds that
+// the output buffer has room for both the pending overflow entry and the
+// new record, the writer emits the pending overflow entry and the new
+// record into the buffer. If the reader is called to read data and finds
+// that the output buffer is empty but that there is a pending overflow
+// entry, the reader will return a synthesized record for the pending
+// overflow entry.
+//
+// Only the writer can create or add to a pending overflow entry, but
+// either the reader or the writer can clear the pending overflow entry.
+// A pending overflow entry is indicated by the low 32 bits of 'overflow'
+// holding the number of discarded writes, and overflowTime holding the
+// time of the first discarded write. The high 32 bits of 'overflow'
+// increment each time the low 32 bits transition from zero to non-zero
+// or vice versa. This sequence number avoids ABA problems in the use of
+// compare-and-swap to coordinate between reader and writer.
+// The overflowTime is only written when the low 32 bits of overflow are
+// zero, that is, only when there is no pending overflow entry, in
+// preparation for creating a new one. The reader can therefore fetch and
+// clear the entry atomically using
+//
+// for {
+// overflow = load(&b.overflow)
+// if uint32(overflow) == 0 {
+// // no pending entry
+// break
+// }
+// time = load(&b.overflowTime)
+// if cas(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
+// // pending entry cleared
+// break
+// }
+// }
+// if uint32(overflow) > 0 {
+// emit entry for uint32(overflow), time
+// }
+//
+type profBuf struct {
+ // accessed atomically
+ r, w profAtomic
+ overflow uint64
+ overflowTime uint64
+ eof uint32
+
+ // immutable (excluding slice content)
+ hdrsize uintptr
+ data []uint64
+ tags []unsafe.Pointer
+
+ // owned by reader
+ rNext profIndex
+ overflowBuf []uint64 // for use by reader to return overflow record
+ wait note
+}
+
+// A profAtomic is the atomically-accessed word holding a profIndex.
+type profAtomic uint64
+
+// A profIndex is the packed tag and data counts and flag bits, described above.
+type profIndex uint64
+
+const (
+ profReaderSleeping profIndex = 1 << 32 // reader is sleeping and must be woken up
+ profWriteExtra profIndex = 1 << 33 // overflow or eof waiting
+)
+
+func (x *profAtomic) load() profIndex {
+ return profIndex(atomic.Load64((*uint64)(x)))
+}
+
+func (x *profAtomic) store(new profIndex) {
+ atomic.Store64((*uint64)(x), uint64(new))
+}
+
+func (x *profAtomic) cas(old, new profIndex) bool {
+ return atomic.Cas64((*uint64)(x), uint64(old), uint64(new))
+}
+
+func (x profIndex) dataCount() uint32 {
+ return uint32(x)
+}
+
+func (x profIndex) tagCount() uint32 {
+ return uint32(x >> 34)
+}
+
+// countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
+// assuming that they are no more than 2^29 apart (guaranteed since they are never more than
+// len(data) or len(tags) apart, respectively).
+// tagCount wraps at 2^30, while dataCount wraps at 2^32.
+// This function works for both.
+func countSub(x, y uint32) int {
+ // x-y is 32-bit signed or 30-bit signed; sign-extend to 32 bits and convert to int.
+ return int(int32(x-y) << 2 >> 2)
+}
+
+// addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
+func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {
+ return profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))
+}
+
+// hasOverflow reports whether b has any overflow records pending.
+func (b *profBuf) hasOverflow() bool {
+ return uint32(atomic.Load64(&b.overflow)) > 0
+}
+
+// takeOverflow consumes the pending overflow records, returning the overflow count
+// and the time of the first overflow.
+// When called by the reader, it is racing against incrementOverflow.
+func (b *profBuf) takeOverflow() (count uint32, time uint64) {
+ overflow := atomic.Load64(&b.overflow)
+ time = atomic.Load64(&b.overflowTime)
+ for {
+ count = uint32(overflow)
+ if count == 0 {
+ time = 0
+ break
+ }
+ // Increment generation, clear overflow count in low bits.
+ if atomic.Cas64(&b.overflow, overflow, ((overflow>>32)+1)<<32) {
+ break
+ }
+ overflow = atomic.Load64(&b.overflow)
+ time = atomic.Load64(&b.overflowTime)
+ }
+ return uint32(overflow), time
+}
+
+// incrementOverflow records a single overflow at time now.
+// It is racing against a possible takeOverflow in the reader.
+func (b *profBuf) incrementOverflow(now int64) {
+ for {
+ overflow := atomic.Load64(&b.overflow)
+
+ // Once we see b.overflow reach 0, it's stable: no one else is changing it underfoot.
+ // We need to set overflowTime if we're incrementing b.overflow from 0.
+ if uint32(overflow) == 0 {
+ // Store overflowTime first so it's always available when overflow != 0.
+ atomic.Store64(&b.overflowTime, uint64(now))
+ atomic.Store64(&b.overflow, (((overflow>>32)+1)<<32)+1)
+ break
+ }
+ // Otherwise we're racing to increment against reader
+ // who wants to set b.overflow to 0.
+ // Out of paranoia, leave 2³²-1 a sticky overflow value,
+ // to avoid wrapping around. Extremely unlikely.
+ if int32(overflow) == -1 {
+ break
+ }
+ if atomic.Cas64(&b.overflow, overflow, overflow+1) {
+ break
+ }
+ }
+}
+
+// newProfBuf returns a new profiling buffer with room for
+// a header of hdrsize words per entry, a data buffer of at least
+// bufwords words, and at least tags tag slots.
+func newProfBuf(hdrsize, bufwords, tags int) *profBuf {
+ if min := 2 + hdrsize + 1; bufwords < min {
+ bufwords = min
+ }
+
+ // Buffer sizes must be power of two, so that we don't have to
+ // worry about uint32 wraparound changing the effective position
+ // within the buffers. We store 30 bits of count; limiting to 28
+ // gives us some room for intermediate calculations.
+ if bufwords >= 1<<28 || tags >= 1<<28 {
+ throw("newProfBuf: buffer too large")
+ }
+ var i int
+ for i = 1; i < bufwords; i <<= 1 {
+ }
+ bufwords = i
+ for i = 1; i < tags; i <<= 1 {
+ }
+ tags = i
+
+ b := new(profBuf)
+ b.hdrsize = uintptr(hdrsize)
+ b.data = make([]uint64, bufwords)
+ b.tags = make([]unsafe.Pointer, tags)
+ b.overflowBuf = make([]uint64, 2+b.hdrsize+1)
+ return b
+}
+
+// canWriteRecord reports whether the buffer has room
+// for a single contiguous record with a stack of length nstk.
+func (b *profBuf) canWriteRecord(nstk int) bool {
+ br := b.r.load()
+ bw := b.w.load()
+
+ // room for tag?
+ if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 1 {
+ return false
+ }
+
+ // room for data?
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+ want := 2 + int(b.hdrsize) + nstk
+ i := int(bw.dataCount() % uint32(len(b.data)))
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ }
+ return nd >= want
+}
+
+// canWriteTwoRecords reports whether the buffer has room
+// for two records with stack lengths nstk1, nstk2, in that order.
+// Each record must be contiguous on its own, but the two
+// records need not be contiguous (one can be at the end of the buffer
+// and the other can wrap around and start at the beginning of the buffer).
+func (b *profBuf) canWriteTwoRecords(nstk1, nstk2 int) bool {
+ br := b.r.load()
+ bw := b.w.load()
+
+ // room for tag?
+ if countSub(br.tagCount(), bw.tagCount())+len(b.tags) < 2 {
+ return false
+ }
+
+ // room for data?
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+
+ // first record
+ want := 2 + int(b.hdrsize) + nstk1
+ i := int(bw.dataCount() % uint32(len(b.data)))
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ i = 0
+ }
+ i += want
+ nd -= want
+
+ // second record
+ want = 2 + int(b.hdrsize) + nstk2
+ if i+want > len(b.data) {
+ // Can't fit in trailing fragment of slice.
+ // Skip over that and start over at beginning of slice.
+ nd -= len(b.data) - i
+ i = 0
+ }
+ return nd >= want
+}
+
+// write writes an entry to the profiling buffer b.
+// The entry begins with a fixed hdr, which must have
+// length b.hdrsize, followed by a variable-sized stack
+// and a single tag pointer *tagPtr (or nil if tagPtr is nil).
+// No write barriers allowed because this might be called from a signal handler.
+func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
+ if b == nil {
+ return
+ }
+ if len(hdr) > int(b.hdrsize) {
+ throw("misuse of profBuf.write")
+ }
+
+ if hasOverflow := b.hasOverflow(); hasOverflow && b.canWriteTwoRecords(1, len(stk)) {
+ // Room for both an overflow record and the one being written.
+ // Write the overflow record if the reader hasn't gotten to it yet.
+ // Only racing against reader, not other writers.
+ count, time := b.takeOverflow()
+ if count > 0 {
+ var stk [1]uintptr
+ stk[0] = uintptr(count)
+ b.write(nil, int64(time), nil, stk[:])
+ }
+ } else if hasOverflow || !b.canWriteRecord(len(stk)) {
+ // Pending overflow without room to write overflow and new records
+ // or no overflow but also no room for new record.
+ b.incrementOverflow(now)
+ b.wakeupExtra()
+ return
+ }
+
+ // There's room: write the record.
+ br := b.r.load()
+ bw := b.w.load()
+
+ // Profiling tag
+ //
+ // The tag is a pointer, but we can't run a write barrier here.
+ // We have interrupted the OS-level execution of gp, but the
+ // runtime still sees gp as executing. In effect, we are running
+ // in place of the real gp. Since gp is the only goroutine that
+ // can overwrite gp.labels, the value of gp.labels is stable during
+ // this signal handler: it will still be reachable from gp when
+ // we finish executing. If a GC is in progress right now, it must
+ // keep gp.labels alive, because gp.labels is reachable from gp.
+ // If gp were to overwrite gp.labels, the deletion barrier would
+ // still shade that pointer, which would preserve it for the
+ // in-progress GC, so all is well. Any future GC will see the
+ // value we copied when scanning b.tags (heap-allocated).
+ // We arrange that the store here is always overwriting a nil,
+ // so there is no need for a deletion barrier on b.tags[wt].
+ wt := int(bw.tagCount() % uint32(len(b.tags)))
+ if tagPtr != nil {
+ *(*uintptr)(unsafe.Pointer(&b.tags[wt])) = uintptr(unsafe.Pointer(*tagPtr))
+ }
+
+ // Main record.
+ // It has to fit in a contiguous section of the slice, so if it doesn't fit at the end,
+ // leave a rewind marker (0) and start over at the beginning of the slice.
+ wd := int(bw.dataCount() % uint32(len(b.data)))
+ nd := countSub(br.dataCount(), bw.dataCount()) + len(b.data)
+ skip := 0
+ if wd+2+int(b.hdrsize)+len(stk) > len(b.data) {
+ b.data[wd] = 0
+ skip = len(b.data) - wd
+ nd -= skip
+ wd = 0
+ }
+ data := b.data[wd:]
+ data[0] = uint64(2 + b.hdrsize + uintptr(len(stk))) // length
+ data[1] = uint64(now) // time stamp
+ // header, zero-padded
+ i := uintptr(copy(data[2:2+b.hdrsize], hdr))
+ for ; i < b.hdrsize; i++ {
+ data[2+i] = 0
+ }
+ for i, pc := range stk {
+ data[2+b.hdrsize+uintptr(i)] = uint64(pc)
+ }
+
+ for {
+ // Commit write.
+ // Racing with reader setting flag bits in b.w, to avoid lost wakeups.
+ old := b.w.load()
+ new := old.addCountsAndClearFlags(skip+2+len(stk)+int(b.hdrsize), 1)
+ if !b.w.cas(old, new) {
+ continue
+ }
+ // If there was a reader, wake it up.
+ if old&profReaderSleeping != 0 {
+ notewakeup(&b.wait)
+ }
+ break
+ }
+}
+
+// close signals that there will be no more writes on the buffer.
+// Once all the data has been read from the buffer, reads will return eof=true.
+func (b *profBuf) close() {
+ if atomic.Load(&b.eof) > 0 {
+ throw("runtime: profBuf already closed")
+ }
+ atomic.Store(&b.eof, 1)
+ b.wakeupExtra()
+}
+
+// wakeupExtra must be called after setting one of the "extra"
+// atomic fields b.overflow or b.eof.
+// It records the change in b.w and wakes up the reader if needed.
+func (b *profBuf) wakeupExtra() {
+ for {
+ old := b.w.load()
+ new := old | profWriteExtra
+ if !b.w.cas(old, new) {
+ continue
+ }
+ if old&profReaderSleeping != 0 {
+ notewakeup(&b.wait)
+ }
+ break
+ }
+}
+
+// profBufReadMode specifies whether to block when no data is available to read.
+type profBufReadMode int
+
+const (
+ profBufBlocking profBufReadMode = iota
+ profBufNonBlocking
+)
+
+var overflowTag [1]unsafe.Pointer // always nil
+
+func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool) {
+ if b == nil {
+ return nil, nil, true
+ }
+
+ br := b.rNext
+
+ // Commit previous read, returning that part of the ring to the writer.
+ // First clear tags that have now been read, both to avoid holding
+ // up the memory they point at for longer than necessary
+ // and so that b.write can assume it is always overwriting
+ // nil tag entries (see comment in b.write).
+ rPrev := b.r.load()
+ if rPrev != br {
+ ntag := countSub(br.tagCount(), rPrev.tagCount())
+ ti := int(rPrev.tagCount() % uint32(len(b.tags)))
+ for i := 0; i < ntag; i++ {
+ b.tags[ti] = nil
+ if ti++; ti == len(b.tags) {
+ ti = 0
+ }
+ }
+ b.r.store(br)
+ }
+
+Read:
+ bw := b.w.load()
+ numData := countSub(bw.dataCount(), br.dataCount())
+ if numData == 0 {
+ if b.hasOverflow() {
+ // No data to read, but there is overflow to report.
+ // Racing with writer flushing b.overflow into a real record.
+ count, time := b.takeOverflow()
+ if count == 0 {
+ // Lost the race, go around again.
+ goto Read
+ }
+ // Won the race, report overflow.
+ dst := b.overflowBuf
+ dst[0] = uint64(2 + b.hdrsize + 1)
+ dst[1] = uint64(time)
+ for i := uintptr(0); i < b.hdrsize; i++ {
+ dst[2+i] = 0
+ }
+ dst[2+b.hdrsize] = uint64(count)
+ return dst[:2+b.hdrsize+1], overflowTag[:1], false
+ }
+ if atomic.Load(&b.eof) > 0 {
+ // No data, no overflow, EOF set: done.
+ return nil, nil, true
+ }
+ if bw&profWriteExtra != 0 {
+ // Writer claims to have published extra information (overflow or eof).
+ // Attempt to clear notification and then check again.
+ // If we fail to clear the notification it means b.w changed,
+ // so we still need to check again.
+ b.w.cas(bw, bw&^profWriteExtra)
+ goto Read
+ }
+
+ // Nothing to read right now.
+ // Return or sleep according to mode.
+ if mode == profBufNonBlocking {
+ return nil, nil, false
+ }
+ if !b.w.cas(bw, bw|profReaderSleeping) {
+ goto Read
+ }
+ // Committed to sleeping.
+ notetsleepg(&b.wait, -1)
+ noteclear(&b.wait)
+ goto Read
+ }
+ data = b.data[br.dataCount()%uint32(len(b.data)):]
+ if len(data) > numData {
+ data = data[:numData]
+ } else {
+ numData -= len(data) // available in case of wraparound
+ }
+ skip := 0
+ if data[0] == 0 {
+ // Wraparound record. Go back to the beginning of the ring.
+ skip = len(data)
+ data = b.data
+ if len(data) > numData {
+ data = data[:numData]
+ }
+ }
+
+ ntag := countSub(bw.tagCount(), br.tagCount())
+ if ntag == 0 {
+ throw("runtime: malformed profBuf buffer - tag and data out of sync")
+ }
+ tags = b.tags[br.tagCount()%uint32(len(b.tags)):]
+ if len(tags) > ntag {
+ tags = tags[:ntag]
+ }
+
+ // Count out whole data records until either data or tags is done.
+ // They are always in sync in the buffer, but due to an end-of-slice
+ // wraparound we might need to stop early and return the rest
+ // in the next call.
+ di := 0
+ ti := 0
+ for di < len(data) && data[di] != 0 && ti < len(tags) {
+ if uintptr(di)+uintptr(data[di]) > uintptr(len(data)) {
+ throw("runtime: malformed profBuf buffer - invalid size")
+ }
+ di += int(data[di])
+ ti++
+ }
+
+ // Remember how much we returned, to commit read on next call.
+ b.rNext = br.addCountsAndClearFlags(skip+di, ti)
+
+ if raceenabled {
+ // Match racereleasemerge in runtime_setProfLabel,
+ // so that the setting of the labels in runtime_setProfLabel
+ // is treated as happening before any use of the labels
+ // by our caller. The synchronization on labelSync itself is a fiction
+ // for the race detector. The actual synchronization is handled
+ // by the fact that the signal handler only reads from the current
+ // goroutine and uses atomics to write the updated queue indices,
+ // and then the read-out from the signal handler buffer uses
+ // atomics to read those queue indices.
+ raceacquire(unsafe.Pointer(&labelSync))
+ }
+
+ return data[:di], tags[:ti], false
+}
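
The index arithmetic above leans on countSub handling wraparound correctly; a worked standalone example, with values chosen so the 32-bit writer counter has wrapped past zero while the reader counter has not:

package main

import "fmt"

// countSub as defined in profbuf.go: the shift pair sign-extends a
// 30-bit difference, so it is correct for both the 30-bit tag counts
// and the 32-bit data counts as long as they are less than 2^29 apart.
func countSub(x, y uint32) int {
	return int(int32(x-y) << 2 >> 2)
}

func main() {
	w := uint32(5)              // writer count, already wrapped past 2^32
	r := uint32(0xFFFFFFF0)     // reader count, not yet wrapped
	fmt.Println(countSub(w, r)) // 21: 16 writes before the wrap, 5 after
}
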
diff --git a/libgo/go/runtime/profbuf_test.go b/libgo/go/runtime/profbuf_test.go
new file mode 100644
index 00000000000..d9c5264b732
--- /dev/null
+++ b/libgo/go/runtime/profbuf_test.go
@@ -0,0 +1,182 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "reflect"
+ . "runtime"
+ "testing"
+ "time"
+ "unsafe"
+)
+
+func TestProfBuf(t *testing.T) {
+ const hdrSize = 2
+
+ write := func(t *testing.T, b *ProfBuf, tag unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
+ b.Write(&tag, now, hdr, stk)
+ }
+ read := func(t *testing.T, b *ProfBuf, data []uint64, tags []unsafe.Pointer) {
+ rdata, rtags, eof := b.Read(ProfBufNonBlocking)
+ if !reflect.DeepEqual(rdata, data) || !reflect.DeepEqual(rtags, tags) {
+ t.Fatalf("unexpected profile read:\nhave data %#x\nwant data %#x\nhave tags %#x\nwant tags %#x", rdata, data, rtags, tags)
+ }
+ if eof {
+ t.Fatalf("unexpected eof")
+ }
+ }
+ readBlock := func(t *testing.T, b *ProfBuf, data []uint64, tags []unsafe.Pointer) func() {
+ c := make(chan int)
+ go func() {
+ eof := data == nil
+ rdata, rtags, reof := b.Read(ProfBufBlocking)
+ if !reflect.DeepEqual(rdata, data) || !reflect.DeepEqual(rtags, tags) || reof != eof {
+ // Errorf, not Fatalf, because called in goroutine.
+ t.Errorf("unexpected profile read:\nhave data %#x\nwant data %#x\nhave tags %#x\nwant tags %#x\nhave eof=%v, want %v", rdata, data, rtags, tags, reof, eof)
+ }
+ c <- 1
+ }()
+ time.Sleep(10 * time.Millisecond) // let goroutine run and block
+ return func() {
+ select {
+ case <-c:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("timeout waiting for blocked read")
+ }
+ }
+ }
+ readEOF := func(t *testing.T, b *ProfBuf) {
+ rdata, rtags, eof := b.Read(ProfBufBlocking)
+ if rdata != nil || rtags != nil || !eof {
+ t.Errorf("unexpected profile read: %#x, %#x, eof=%v; want nil, nil, eof=true", rdata, rtags, eof)
+ }
+ rdata, rtags, eof = b.Read(ProfBufNonBlocking)
+ if rdata != nil || rtags != nil || !eof {
+ t.Errorf("unexpected profile read (non-blocking): %#x, %#x, eof=%v; want nil, nil, eof=true", rdata, rtags, eof)
+ }
+ }
+
+ myTags := make([]byte, 100)
+ t.Logf("myTags is %p", &myTags[0])
+
+ t.Run("BasicWriteRead", func(t *testing.T) {
+ b := NewProfBuf(2, 11, 1)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ read(t, b, nil, nil) // release data returned by previous read
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ read(t, b, []uint64{8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[2])})
+ })
+
+ t.Run("ReadMany", func(t *testing.T) {
+ b := NewProfBuf(2, 50, 50)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204, 5, 500, 502, 504, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2]), unsafe.Pointer(&myTags[1])})
+ })
+
+ t.Run("ReadManyShortData", func(t *testing.T) {
+ b := NewProfBuf(2, 50, 50)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2])})
+ })
+
+ t.Run("ReadManyShortTags", func(t *testing.T) {
+ b := NewProfBuf(2, 50, 50)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[0]), unsafe.Pointer(&myTags[2])})
+ })
+
+ t.Run("ReadAfterOverflow1", func(t *testing.T) {
+ // overflow record synthesized by write
+ b := NewProfBuf(2, 16, 5)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9}) // uses 10
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])}) // reads 10 but still in use until next read
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5}) // uses 6
+ read(t, b, []uint64{6, 1, 2, 3, 4, 5}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])}) // reads 6 but still in use until next read
+ // now 10 available
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204, 205, 206, 207, 208, 209}) // no room
+ for i := 0; i < 299; i++ {
+ write(t, b, unsafe.Pointer(&myTags[3]), int64(100+i), []uint64{101, 102}, []uintptr{201, 202, 203, 204}) // no room for overflow+this record
+ }
+ write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506}) // room for overflow+this record
+ read(t, b, []uint64{5, 99, 0, 0, 300, 5, 500, 502, 504, 506}, []unsafe.Pointer{nil, unsafe.Pointer(&myTags[1])})
+ })
+
+ t.Run("ReadAfterOverflow2", func(t *testing.T) {
+ // overflow record synthesized by read
+ b := NewProfBuf(2, 16, 5)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213})
+ for i := 0; i < 299; i++ {
+ write(t, b, unsafe.Pointer(&myTags[3]), 100, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ }
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])}) // reads 10 but still in use until next read
+ write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{}) // still overflow
+ read(t, b, []uint64{5, 99, 0, 0, 301}, []unsafe.Pointer{nil}) // overflow synthesized by read
+ write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 505}, []uintptr{506}) // written
+ read(t, b, []uint64{5, 500, 502, 505, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[1])})
+ })
+
+ t.Run("ReadAtEndAfterOverflow", func(t *testing.T) {
+ b := NewProfBuf(2, 12, 5)
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ for i := 0; i < 299; i++ {
+ write(t, b, unsafe.Pointer(&myTags[3]), 100, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ }
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ read(t, b, []uint64{5, 99, 0, 0, 300}, []unsafe.Pointer{nil})
+ write(t, b, unsafe.Pointer(&myTags[1]), 500, []uint64{502, 504}, []uintptr{506})
+ read(t, b, []uint64{5, 500, 502, 504, 506}, []unsafe.Pointer{unsafe.Pointer(&myTags[1])})
+ })
+
+ t.Run("BlockingWriteRead", func(t *testing.T) {
+ b := NewProfBuf(2, 11, 1)
+ wait := readBlock(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ wait()
+ wait = readBlock(t, b, []uint64{8, 99, 101, 102, 201, 202, 203, 204}, []unsafe.Pointer{unsafe.Pointer(&myTags[2])})
+ time.Sleep(10 * time.Millisecond)
+ write(t, b, unsafe.Pointer(&myTags[2]), 99, []uint64{101, 102}, []uintptr{201, 202, 203, 204})
+ wait()
+ wait = readBlock(t, b, nil, nil)
+ b.Close()
+ wait()
+ wait = readBlock(t, b, nil, nil)
+ wait()
+ readEOF(t, b)
+ })
+
+ t.Run("DataWraparound", func(t *testing.T) {
+ b := NewProfBuf(2, 16, 1024)
+ for i := 0; i < 10; i++ {
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ read(t, b, nil, nil) // release data returned by previous read
+ }
+ })
+
+ t.Run("TagWraparound", func(t *testing.T) {
+ b := NewProfBuf(2, 1024, 2)
+ for i := 0; i < 10; i++ {
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ read(t, b, nil, nil) // release data returned by previous read
+ }
+ })
+
+ t.Run("BothWraparound", func(t *testing.T) {
+ b := NewProfBuf(2, 16, 2)
+ for i := 0; i < 10; i++ {
+ write(t, b, unsafe.Pointer(&myTags[0]), 1, []uint64{2, 3}, []uintptr{4, 5, 6, 7, 8, 9})
+ read(t, b, []uint64{10, 1, 2, 3, 4, 5, 6, 7, 8, 9}, []unsafe.Pointer{unsafe.Pointer(&myTags[0])})
+ read(t, b, nil, nil) // release data returned by previous read
+ }
+ })
+}
diff --git a/libgo/go/runtime/proflabel.go b/libgo/go/runtime/proflabel.go
new file mode 100644
index 00000000000..ff73fe4856f
--- /dev/null
+++ b/libgo/go/runtime/proflabel.go
@@ -0,0 +1,40 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var labelSync uintptr
+
+//go:linkname runtime_setProfLabel runtime_pprof.runtime_setProfLabel
+func runtime_setProfLabel(labels unsafe.Pointer) {
+ // Introduce race edge for read-back via profile.
+ // This would more properly use &getg().labels as the sync address,
+ // but we do the read in a signal handler and can't call the race runtime then.
+ //
+ // This uses racereleasemerge rather than just racerelease so
+ // the acquire in profBuf.read synchronizes with *all* prior
+ // setProfLabel operations, not just the most recent one. This
+ // is important because profBuf.read will observe different
+ // labels set by different setProfLabel operations on
+ // different goroutines, so it needs to synchronize with all
+ // of them (this wouldn't be an issue if we could synchronize
+ // on &getg().labels since we would synchronize with each
+ // most-recent labels write separately.)
+ //
+ // racereleasemerge is like a full read-modify-write on
+ // labelSync, rather than just a store-release, so it carries
+ // a dependency on the previous racereleasemerge, which
+ // ultimately carries forward to the acquire in profBuf.read.
+ if raceenabled {
+ racereleasemerge(unsafe.Pointer(&labelSync))
+ }
+ getg().labels = labels
+}
+
+//go:linkname runtime_getProfLabel runtime_pprof.runtime_getProfLabel
+func runtime_getProfLabel() unsafe.Pointer {
+ return getg().labels
+}
diff --git a/libgo/go/runtime/rand_test.go b/libgo/go/runtime/rand_test.go
new file mode 100644
index 00000000000..f8831b05f9f
--- /dev/null
+++ b/libgo/go/runtime/rand_test.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ . "runtime"
+ "strconv"
+ "testing"
+)
+
+func BenchmarkFastrand(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Fastrand()
+ }
+ })
+}
+
+func BenchmarkFastrandHashiter(b *testing.B) {
+ var m = make(map[int]int, 10)
+ for i := 0; i < 10; i++ {
+ m[i] = i
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for range m {
+ break
+ }
+ }
+ })
+}
+
+var sink32 uint32
+
+func BenchmarkFastrandn(b *testing.B) {
+ for n := uint32(2); n <= 5; n++ {
+ b.Run(strconv.Itoa(int(n)), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ sink32 = Fastrandn(n)
+ }
+ })
+ }
+}
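
fastrandn, used by runqputslow above in place of fastrand() % (i+1), bounds a random value with one multiply and a shift instead of a division. A sketch of the technique, which is slightly biased when n is not a power of two (acceptable for scheduling decisions):

package main

import (
	"fmt"
	"math/rand"
)

// boundedRand maps a uniform 32-bit value x into [0, n) by taking the
// high 32 bits of the 64-bit product x*n, the multiply-shift trick
// behind the runtime's fastrandn.
func boundedRand(x, n uint32) uint32 {
	return uint32(uint64(x) * uint64(n) >> 32)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(boundedRand(rand.Uint32(), 10)) // values in [0, 10)
	}
}
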
diff --git a/libgo/go/runtime/relax_stub.go b/libgo/go/runtime/relax_stub.go
new file mode 100644
index 00000000000..81ed1291b8b
--- /dev/null
+++ b/libgo/go/runtime/relax_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package runtime
+
+// osRelaxMinNS is the number of nanoseconds of idleness to tolerate
+// without performing an osRelax. Since osRelax may reduce the
+// precision of timers, this should be sufficiently larger than the
+// relaxed timer precision to keep the timer error acceptable.
+const osRelaxMinNS = 0
+
+// osRelax is called by the scheduler when transitioning to and from
+// all Ps being idle.
+func osRelax(relax bool) {}
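
On platforms where osRelax does something (Windows lowers the system timer resolution), sysmon's decision above hinges on one comparison: skip relaxing when the next timer is due sooner than osRelaxMinNS. A sketch of that predicate, assuming a hypothetical 60ms floor:

package main

import (
	"fmt"
	"time"
)

// Hypothetical floor: don't relax if a timer fires within 60ms,
// mirroring the sysmon check next-now < osRelaxMinNS.
const osRelaxMinNS = int64(60 * time.Millisecond)

func shouldRelax(now, sleepUntil int64) bool {
	return sleepUntil-now >= osRelaxMinNS
}

func main() {
	now := time.Now().UnixNano()
	fmt.Println(shouldRelax(now, now+int64(100*time.Millisecond))) // true: next timer is far off
	fmt.Println(shouldRelax(now, now+int64(10*time.Millisecond)))  // false: timer due soon
}
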
diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go
index dd3f7b277a6..627adf74765 100644
--- a/libgo/go/runtime/runtime1.go
+++ b/libgo/go/runtime/runtime1.go
@@ -47,15 +47,14 @@ var traceback_env uint32
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
_g_ := getg()
- all = _g_.m.throwing > 0
+ t := atomic.Load(&traceback_cache)
+ crash = t&tracebackCrash != 0
+ all = _g_.m.throwing > 0 || t&tracebackAll != 0
if _g_.m.traceback != 0 {
level = int32(_g_.m.traceback)
- return
+ } else {
+ level = int32(t >> tracebackShift)
}
- t := atomic.Load(&traceback_cache)
- crash = t&tracebackCrash != 0
- all = all || t&tracebackAll != 0
- level = int32(t >> tracebackShift)
return
}
@@ -330,35 +329,23 @@ type dbgVar struct {
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
-
-// For gccgo we use a named type so that the C code can see the
-// definition.
-type debugVars struct {
- allocfreetrace int32
- cgocheck int32
- efence int32
- gccheckmark int32
- gcpacertrace int32
- gcshrinkstackoff int32
- gcstackbarrieroff int32
- gcstackbarrierall int32
- gcrescanstacks int32
- gcstoptheworld int32
- gctrace int32
- invalidptr int32
- sbrk int32
- scavenge int32
- scheddetail int32
- schedtrace int32
- wbshadow int32
+var debug struct {
+ allocfreetrace int32
+ cgocheck int32
+ efence int32
+ gccheckmark int32
+ gcpacertrace int32
+ gcshrinkstackoff int32
+ gcrescanstacks int32
+ gcstoptheworld int32
+ gctrace int32
+ invalidptr int32
+ sbrk int32
+ scavenge int32
+ scheddetail int32
+ schedtrace int32
}
-var debug debugVars
-
-// For gccgo's C code.
-//extern runtime_setdebug
-func runtime_setdebug(*debugVars)
-
var dbgvars = []dbgVar{
{"allocfreetrace", &debug.allocfreetrace},
{"cgocheck", &debug.cgocheck},
@@ -366,8 +353,6 @@ var dbgvars = []dbgVar{
{"gccheckmark", &debug.gccheckmark},
{"gcpacertrace", &debug.gcpacertrace},
{"gcshrinkstackoff", &debug.gcshrinkstackoff},
- {"gcstackbarrieroff", &debug.gcstackbarrieroff},
- {"gcstackbarrierall", &debug.gcstackbarrierall},
{"gcrescanstacks", &debug.gcrescanstacks},
{"gcstoptheworld", &debug.gcstoptheworld},
{"gctrace", &debug.gctrace},
@@ -376,7 +361,6 @@ var dbgvars = []dbgVar{
{"scavenge", &debug.scavenge},
{"scheddetail", &debug.scheddetail},
{"schedtrace", &debug.schedtrace},
- {"wbshadow", &debug.wbshadow},
}
func parsedebugvars() {
@@ -430,26 +414,12 @@ func parsedebugvars() {
setTraceback(gogetenv("GOTRACEBACK"))
traceback_env = traceback_cache
- if debug.gcrescanstacks == 0 {
- // Without rescanning, there's no need for stack
- // barriers.
- debug.gcstackbarrieroff = 1
- debug.gcstackbarrierall = 0
- }
-
- // if debug.gcstackbarrierall > 0 {
- // firstStackBarrierOffset = 0
- // }
-
// For cgocheck > 1, we turn on the write barrier at all times
// and check all pointer writes.
if debug.cgocheck > 1 {
writeBarrier.cgo = true
writeBarrier.enabled = true
}
-
- // Tell the C code what the value is.
- runtime_setdebug(&debug)
}
//go:linkname setTraceback runtime_debug.SetTraceback
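
The dbgvars slice keeps GODEBUG parsing table-driven: each entry pairs a name with a pointer to the int32 it sets, so adding a knob is a one-line change. A minimal standalone sketch of the same scheme (parsedebugvars here takes the setting string directly rather than reading the environment):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

var debug struct {
	gctrace    int32
	schedtrace int32
}

// Name -> destination table, as in dbgvars above.
var dbgvars = []struct {
	name  string
	value *int32
}{
	{"gctrace", &debug.gctrace},
	{"schedtrace", &debug.schedtrace},
}

func parsedebugvars(s string) {
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			continue
		}
		for _, v := range dbgvars {
			if v.name == kv[0] {
				if n, err := strconv.Atoi(kv[1]); err == nil {
					*v.value = int32(n)
				}
			}
		}
	}
}

func main() {
	parsedebugvars("gctrace=1,schedtrace=1000")
	fmt.Println(debug.gctrace, debug.schedtrace) // 1 1000
}
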
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index cdd3fcc7911..045e76ff4df 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -254,7 +254,7 @@ func setMNoWB(mp **m, new *m) {
type sudog struct {
// The following fields are protected by the hchan.lock of the
// channel this sudog is blocking on. shrinkstack depends on
- // this.
+ // this for sudogs involved in channel ops.
g *g
selectdone *uint32 // CAS to 1 to win select race (may point to stack)
@@ -263,25 +263,19 @@ type sudog struct {
elem unsafe.Pointer // data element (may point to stack)
// The following fields are never accessed concurrently.
- // waitlink is only accessed by g.
+ // For channels, waitlink is only accessed by g.
+ // For semaphores, all fields (including the ones above)
+ // are only accessed when holding a semaRoot lock.
acquiretime int64
releasetime int64
ticket uint32
- waitlink *sudog // g.waiting list
+ parent *sudog // semaRoot binary tree
+ waitlink *sudog // g.waiting list or semaRoot
+ waittail *sudog // semaRoot
c *hchan // channel
}
-type gcstats struct {
- // the struct must consist of only uint64's,
- // because it is casted to uint64[].
- nhandoff uint64
- nhandoffcnt uint64
- nprocyield uint64
- nosyield uint64
- nsleep uint64
-}
-
/*
Not used by gccgo.
@@ -318,12 +312,6 @@ type stack struct {
lo uintptr
hi uintptr
}
-
-// stkbar records the state of a G's stack barrier.
-type stkbar struct {
- savedLRPtr uintptr // location overwritten by stack barrier PC
- savedLRVal uintptr // value overwritten at savedLRPtr
-}
*/
type g struct {
@@ -341,12 +329,9 @@ type g struct {
_panic *_panic // innermost panic - offset known to liblink
_defer *_defer // innermost defer
m *m // current m; offset known to arm liblink
- // Not for gccgo: stackAlloc uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
// Not for gccgo: sched gobuf
syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
- // Not for gccgo: stkbar []stkbar // stack barriers, from low to high (see top of mstkbar.go)
- // Not for gccgo: stkbarPos uintptr // index of lowest stack barrier not hit
// Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
param unsafe.Pointer // passed parameter on wakeup
atomicstatus uint32
@@ -359,7 +344,7 @@ type g struct {
paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc
gcscandone bool // g has scanned stack; protected by _Gscan bit in status
- gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan; transition from true to false by calling queueRescan and false to true by calling dequeueRescan
+ gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
throwsplit bool // must not split stack
raceignore int8 // ignore race detection events
sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
@@ -376,17 +361,12 @@ type g struct {
startpc uintptr // pc of goroutine function
// Not for gccgo: racectx uintptr
waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
- // Not for gccgo: cgoCtxt []uintptr // cgo traceback context
+ // Not for gccgo: cgoCtxt []uintptr // cgo traceback context
+ labels unsafe.Pointer // profiler labels
+ timer *timer // cached timer for time.Sleep
// Per-G GC state
- // gcRescan is this G's index in work.rescan.list. If this is
- // -1, this G is not on the rescan list.
- //
- // If gcphase != _GCoff and this G is visible to the garbage
- // collector, writes to this are protected by work.rescan.lock.
- gcRescan int32
-
// gcAssistBytes is this G's GC assist credit in terms of
// bytes allocated. If this is positive, then the G has credit
// to allocate gcAssistBytes bytes without assisting. If this
@@ -452,6 +432,7 @@ type m struct {
inwb bool // m is executing a write barrier
newSigstack bool // minit on C thread called sigaltstack
printlock int8
+ incgo bool // m is executing a cgo call
fastrand uint32
ncgocall uint64 // number of cgo calls in total
ncgo int32 // number of cgo calls currently in progress
@@ -468,7 +449,6 @@ type m struct {
// Not for gccgo: fflag uint32 // floating point compare flags
locked uint32 // tracking for lockosthread
nextwaitm uintptr // next m waiting for lock
- gcstats gcstats
needextram bool
traceback uint8
waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
@@ -505,9 +485,10 @@ type p struct {
id int32
status uint32 // one of pidle/prunning/...
link puintptr
- schedtick uint32 // incremented on every scheduler call
- syscalltick uint32 // incremented on every system call
- m muintptr // back-link to associated m (nil if idle)
+ schedtick uint32 // incremented on every scheduler call
+ syscalltick uint32 // incremented on every system call
+ sysmontick sysmontick // last tick observed by sysmon
+ m muintptr // back-link to associated m (nil if idle)
mcache *mcache
// Not for gccgo: racectx uintptr
@@ -543,6 +524,14 @@ type p struct {
tracebuf traceBufPtr
+ // traceSweep indicates the sweep events should be traced.
+ // This is used to defer the sweep start event until a span
+ // has actually been swept.
+ traceSweep bool
+ // traceSwept and traceReclaimed track the number of bytes
+ // swept and reclaimed by sweeping in the current sweep loop.
+ traceSwept, traceReclaimed uintptr
+
palloc persistentAlloc // per-P to avoid mutex
// Per-P GC state
@@ -563,7 +552,7 @@ type p struct {
const (
// The max value of GOMAXPROCS.
// There are no fundamental restrictions on the value.
- _MaxGomaxprocs = 1 << 8
+ _MaxGomaxprocs = 1 << 10
)
type schedt struct {
@@ -639,7 +628,6 @@ const (
_SigThrow // if signal.Notify doesn't take it, exit loudly
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
- _SigHandling // our signal handler is registered
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
_SigSetStack // add SA_ONSTACK to libc handler
_SigUnblock // unblocked in minit
@@ -753,13 +741,10 @@ const (
const _TracebackMaxFrames = 100
var (
- // emptystring string
-
allglen uintptr
allm *m
allp [_MaxGomaxprocs + 1]*p
gomaxprocs int32
- panicking uint32
ncpu int32
forcegc forcegcstate
sched schedt
@@ -767,6 +752,8 @@ var (
// Information about what cpu features are available.
// Set on startup in asm_{x86,amd64}.s.
+ // Packages outside the runtime should not use these
+ // as they are not an external api.
cpuid_ecx uint32
support_aes bool
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index 1f403a17056..b8f6ac2aed4 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -50,6 +50,23 @@ func BenchmarkIfaceCmpNil100(b *testing.B) {
}
}
+var efaceCmp1 interface{}
+var efaceCmp2 interface{}
+
+func BenchmarkEfaceCmpDiff(b *testing.B) {
+ x := 5
+ efaceCmp1 = &x
+ y := 6
+ efaceCmp2 = &y
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < 100; j++ {
+ if efaceCmp1 == efaceCmp2 {
+ b.Fatal("bad comparison")
+ }
+ }
+ }
+}
+
func BenchmarkDefer(b *testing.B) {
for i := 0; i < b.N; i++ {
defer1()
@@ -62,7 +79,6 @@ func defer1() {
panic("bad recover")
}
}(1, 2, 3)
- return
}
func BenchmarkDefer10(b *testing.B) {
diff --git a/libgo/go/runtime/rwmutex.go b/libgo/go/runtime/rwmutex.go
new file mode 100644
index 00000000000..7eeb559adb5
--- /dev/null
+++ b/libgo/go/runtime/rwmutex.go
@@ -0,0 +1,125 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+)
+
+// This is a copy of sync/rwmutex.go rewritten to work in the runtime.
+
+// An rwmutex is a reader/writer mutual exclusion lock.
+// The lock can be held by an arbitrary number of readers or a single writer.
+// This is a variant of sync.RWMutex, for the runtime package.
+// Like mutex, rwmutex blocks the calling M.
+// It does not interact with the goroutine scheduler.
+type rwmutex struct {
+ rLock mutex // protects readers, readerPass, writer
+ readers muintptr // list of pending readers
+ readerPass uint32 // number of pending readers to skip readers list
+
+ wLock mutex // serializes writers
+ writer muintptr // pending writer waiting for completing readers
+
+ readerCount uint32 // number of pending readers
+ readerWait uint32 // number of departing readers
+}
+
+const rwmutexMaxReaders = 1 << 30
+
+// rlock locks rw for reading.
+func (rw *rwmutex) rlock() {
+ // The reader must not be allowed to lose its P or else other
+ // things blocking on the lock may consume all of the Ps and
+ // deadlock (issue #20903). Alternatively, we could drop the P
+ // while sleeping.
+ acquirem()
+ if int32(atomic.Xadd(&rw.readerCount, 1)) < 0 {
+ // A writer is pending. Park on the reader queue.
+ systemstack(func() {
+ lock(&rw.rLock)
+ if rw.readerPass > 0 {
+ // Writer finished.
+ rw.readerPass -= 1
+ unlock(&rw.rLock)
+ } else {
+ // Queue this reader to be woken by
+ // the writer.
+ m := getg().m
+ m.schedlink = rw.readers
+ rw.readers.set(m)
+ unlock(&rw.rLock)
+ notesleep(&m.park)
+ noteclear(&m.park)
+ }
+ })
+ }
+}
+
+// runlock undoes a single rlock call on rw.
+func (rw *rwmutex) runlock() {
+ if r := int32(atomic.Xadd(&rw.readerCount, -1)); r < 0 {
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+ throw("runlock of unlocked rwmutex")
+ }
+ // A writer is pending.
+ if atomic.Xadd(&rw.readerWait, -1) == 0 {
+ // The last reader unblocks the writer.
+ lock(&rw.rLock)
+ w := rw.writer.ptr()
+ if w != nil {
+ notewakeup(&w.park)
+ }
+ unlock(&rw.rLock)
+ }
+ }
+ releasem(getg().m)
+}
+
+// lock locks rw for writing.
+func (rw *rwmutex) lock() {
+ // Resolve competition with other writers and stick to our P.
+ lock(&rw.wLock)
+ m := getg().m
+ // Announce that there is a pending writer.
+ r := int32(atomic.Xadd(&rw.readerCount, -rwmutexMaxReaders)) + rwmutexMaxReaders
+ // Wait for any active readers to complete.
+ lock(&rw.rLock)
+ if r != 0 && atomic.Xadd(&rw.readerWait, r) != 0 {
+ // Wait for reader to wake us up.
+ systemstack(func() {
+ rw.writer.set(m)
+ unlock(&rw.rLock)
+ notesleep(&m.park)
+ noteclear(&m.park)
+ })
+ } else {
+ unlock(&rw.rLock)
+ }
+}
+
+// unlock unlocks rw for writing.
+func (rw *rwmutex) unlock() {
+ // Announce to readers that there is no active writer.
+ r := int32(atomic.Xadd(&rw.readerCount, rwmutexMaxReaders))
+ if r >= rwmutexMaxReaders {
+ throw("unlock of unlocked rwmutex")
+ }
+ // Unblock blocked readers.
+ lock(&rw.rLock)
+ for rw.readers.ptr() != nil {
+ reader := rw.readers.ptr()
+ rw.readers = reader.schedlink
+ reader.schedlink.set(nil)
+ notewakeup(&reader.park)
+ r -= 1
+ }
+ // If r > 0, there are pending readers that aren't on the
+ // queue. Tell them to skip waiting.
+ rw.readerPass += uint32(r)
+ unlock(&rw.rLock)
+ // Allow other writers to proceed.
+ unlock(&rw.wLock)
+}
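
The readerCount convention above is easy to miss in diff form: a writer subtracts rwmutexMaxReaders so the count goes negative, and any reader that then observes a negative count knows a writer is pending. A minimal user-space sketch of just that arithmetic (illustration only: the real rwmutex parks Ms with notesleep rather than spinning, and nothing else about the runtime is assumed here):

package main

import (
	"fmt"
	"sync/atomic"
)

const rwmutexMaxReaders = 1 << 30

func main() {
	var readerCount int32

	// Three readers take the read lock.
	for i := 0; i < 3; i++ {
		atomic.AddInt32(&readerCount, 1)
	}

	// A writer announces itself. r recovers the number of active
	// readers it must wait for before proceeding.
	r := atomic.AddInt32(&readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	fmt.Println("writer waits for", r, "readers") // 3

	// A reader arriving after the writer sees a negative count
	// and knows to queue behind the writer.
	if atomic.AddInt32(&readerCount, 1) < 0 {
		fmt.Println("late reader sees pending writer")
	}
}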
diff --git a/libgo/go/runtime/rwmutex_test.go b/libgo/go/runtime/rwmutex_test.go
new file mode 100644
index 00000000000..a69eca1511f
--- /dev/null
+++ b/libgo/go/runtime/rwmutex_test.go
@@ -0,0 +1,178 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GOMAXPROCS=10 go test
+
+// This is a copy of sync/rwmutex_test.go rewritten to test the
+// runtime rwmutex.
+
+package runtime_test
+
+import (
+ "fmt"
+ . "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
+ m.RLock()
+ clocked <- true
+ for atomic.LoadUint32(cunlock) == 0 {
+ }
+ m.RUnlock()
+ cdone <- true
+}
+
+func doTestParallelReaders(numReaders int) {
+ GOMAXPROCS(numReaders + 1)
+ var m RWMutex
+ clocked := make(chan bool, numReaders)
+ var cunlock uint32
+ cdone := make(chan bool)
+ for i := 0; i < numReaders; i++ {
+ go parallelReader(&m, clocked, &cunlock, cdone)
+ }
+ // Wait for all parallel RLock()s to succeed.
+ for i := 0; i < numReaders; i++ {
+ <-clocked
+ }
+ atomic.StoreUint32(&cunlock, 1)
+ // Wait for the goroutines to finish.
+ for i := 0; i < numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestParallelRWMutexReaders(t *testing.T) {
+ defer GOMAXPROCS(GOMAXPROCS(-1))
+ doTestParallelReaders(1)
+ doTestParallelReaders(3)
+ doTestParallelReaders(4)
+}
+
+func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < num_iterations; i++ {
+ rwm.RLock()
+ n := atomic.AddInt32(activity, 1)
+ if n < 1 || n >= 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -1)
+ rwm.RUnlock()
+ }
+ cdone <- true
+}
+
+func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < num_iterations; i++ {
+ rwm.Lock()
+ n := atomic.AddInt32(activity, 10000)
+ if n != 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -10000)
+ rwm.Unlock()
+ }
+ cdone <- true
+}
+
+func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
+ GOMAXPROCS(gomaxprocs)
+ // Number of active readers + 10000 * number of active writers.
+ var activity int32
+ var rwm RWMutex
+ cdone := make(chan bool)
+ go writer(&rwm, num_iterations, &activity, cdone)
+ var i int
+ for i = 0; i < numReaders/2; i++ {
+ go reader(&rwm, num_iterations, &activity, cdone)
+ }
+ go writer(&rwm, num_iterations, &activity, cdone)
+ for ; i < numReaders; i++ {
+ go reader(&rwm, num_iterations, &activity, cdone)
+ }
+ // Wait for the 2 writers and all readers to finish.
+ for i := 0; i < 2+numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestRWMutex(t *testing.T) {
+ defer GOMAXPROCS(GOMAXPROCS(-1))
+ n := 1000
+ if testing.Short() {
+ n = 5
+ }
+ HammerRWMutex(1, 1, n)
+ HammerRWMutex(1, 3, n)
+ HammerRWMutex(1, 10, n)
+ HammerRWMutex(4, 1, n)
+ HammerRWMutex(4, 3, n)
+ HammerRWMutex(4, 10, n)
+ HammerRWMutex(10, 1, n)
+ HammerRWMutex(10, 3, n)
+ HammerRWMutex(10, 10, n)
+ HammerRWMutex(10, 5, n)
+}
+
+func BenchmarkRWMutexUncontended(b *testing.B) {
+ type PaddedRWMutex struct {
+ RWMutex
+ pad [32]uint32
+ }
+ b.RunParallel(func(pb *testing.PB) {
+ var rwm PaddedRWMutex
+ for pb.Next() {
+ rwm.RLock()
+ rwm.RLock()
+ rwm.RUnlock()
+ rwm.RUnlock()
+ rwm.Lock()
+ rwm.Unlock()
+ }
+ })
+}
+
+func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
+ var rwm RWMutex
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 0
+ for pb.Next() {
+ foo++
+ if foo%writeRatio == 0 {
+ rwm.Lock()
+ rwm.Unlock()
+ } else {
+ rwm.RLock()
+ for i := 0; i != localWork; i += 1 {
+ foo *= 2
+ foo /= 2
+ }
+ rwm.RUnlock()
+ }
+ }
+ _ = foo
+ })
+}
+
+func BenchmarkRWMutexWrite100(b *testing.B) {
+ benchmarkRWMutex(b, 0, 100)
+}
+
+func BenchmarkRWMutexWrite10(b *testing.B) {
+ benchmarkRWMutex(b, 0, 10)
+}
+
+func BenchmarkRWMutexWorkWrite100(b *testing.B) {
+ benchmarkRWMutex(b, 100, 100)
+}
+
+func BenchmarkRWMutexWorkWrite10(b *testing.B) {
+ benchmarkRWMutex(b, 100, 10)
+}
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
index f0cad1bf990..9f8ac49d972 100644
--- a/libgo/go/runtime/select.go
+++ b/libgo/go/runtime/select.go
@@ -98,7 +98,6 @@ func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) {
return
}
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
-
cas.pc = pc
cas.c = c
cas.kind = caseSend
@@ -246,7 +245,7 @@ func selectgo(sel *hselect) int {
pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
for i := 1; i < int(sel.ncase); i++ {
- j := int(fastrand()) % (i + 1)
+ j := fastrandn(uint32(i + 1))
pollorder[i] = pollorder[j]
pollorder[j] = uint16(i)
}
@@ -408,7 +407,7 @@ loop:
// wait for someone to wake us up
gp.param = nil
- gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)
+ gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 1)
// While we were asleep, some goroutine came along and completed
// one of the cases in the select and woke us up (called ready).
@@ -602,7 +601,7 @@ bufsend:
recv:
// can receive from sleeping sender (sg)
- recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
+ recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncrecv: sel=", sel, " c=", c, "\n")
}
@@ -633,7 +632,7 @@ send:
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
- send(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
+ send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncsend: sel=", sel, " c=", c, "\n")
}
@@ -641,7 +640,7 @@ send:
retc:
if cas.releasetime > 0 {
- blockevent(cas.releasetime-t0, 2)
+ blockevent(cas.releasetime-t0, 1)
}
return casi
diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go
index 37318ff9d55..d04e6f592fc 100644
--- a/libgo/go/runtime/sema.go
+++ b/libgo/go/runtime/sema.go
@@ -27,10 +27,19 @@ import (
// Asynchronous semaphore for sync.Mutex.
+// A semaRoot holds a balanced tree of sudog with distinct addresses (s.elem).
+// Each of those sudog may in turn point (through s.waitlink) to a list
+// of other sudogs waiting on the same address.
+// The operations on the inner lists of sudogs with the same address
+// are all O(1). The scanning of the top-level semaRoot list is O(log n),
+// where n is the number of distinct addresses with goroutines blocked
+// on them that hash to the given semaRoot.
+// See golang.org/issue/17953 for a program that worked badly
+// before we introduced the second level of list, and test/locklinear.go
+// for a test that exercises this.
type semaRoot struct {
lock mutex
- head *sudog
- tail *sudog
+ treap *sudog // root of balanced tree of unique waiters.
nwait uint32 // Number of waiters. Read w/o the lock.
}
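
The two-level structure the comment describes can be pictured with an ordinary map in user space: one top-level entry per distinct address, each holding a list of waiters. This is only an analogy for the queueing discipline; the runtime uses a treap rather than a map so the top level stays O(log n) without hashing, and the lifo flag below models the sync.Mutex handoff path:

package main

import "fmt"

type waiter struct{ id int }

// queues maps a semaphore address to its waiters, mirroring the
// per-address second-level list of semaRoot (FIFO by default; LIFO
// insertion models the handoff used for sync.Mutex starvation).
type queues map[uintptr][]*waiter

func (q queues) enqueue(addr uintptr, w *waiter, lifo bool) {
	if lifo {
		q[addr] = append([]*waiter{w}, q[addr]...)
		return
	}
	q[addr] = append(q[addr], w)
}

func (q queues) dequeue(addr uintptr) *waiter {
	l := q[addr]
	if len(l) == 0 {
		return nil
	}
	w := l[0]
	q[addr] = l[1:]
	return w
}

func main() {
	q := queues{}
	q.enqueue(0x100, &waiter{1}, false)
	q.enqueue(0x100, &waiter{2}, false)
	q.enqueue(0x100, &waiter{3}, true) // lifo: jumps the queue
	fmt.Println(q.dequeue(0x100).id)   // 3
	fmt.Println(q.dequeue(0x100).id)   // 1
}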
@@ -44,26 +53,26 @@ var semtable [semTabSize]struct {
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
- semacquire(addr, semaBlockProfile)
+ semacquire1(addr, false, semaBlockProfile)
}
-//go:linkname net_runtime_Semacquire net.runtime_Semacquire
-func net_runtime_Semacquire(addr *uint32) {
- semacquire(addr, semaBlockProfile)
+//go:linkname poll_runtime_Semacquire internal_poll.runtime_Semacquire
+func poll_runtime_Semacquire(addr *uint32) {
+ semacquire1(addr, false, semaBlockProfile)
}
//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
-func sync_runtime_Semrelease(addr *uint32) {
- semrelease(addr)
+func sync_runtime_Semrelease(addr *uint32, handoff bool) {
+ semrelease1(addr, handoff)
}
//go:linkname sync_runtime_SemacquireMutex sync.runtime_SemacquireMutex
-func sync_runtime_SemacquireMutex(addr *uint32) {
- semacquire(addr, semaBlockProfile|semaMutexProfile)
+func sync_runtime_SemacquireMutex(addr *uint32, lifo bool) {
+ semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile)
}
-//go:linkname net_runtime_Semrelease net.runtime_Semrelease
-func net_runtime_Semrelease(addr *uint32) {
+//go:linkname poll_runtime_Semrelease internal_poll.runtime_Semrelease
+func poll_runtime_Semrelease(addr *uint32) {
semrelease(addr)
}
@@ -82,7 +91,11 @@ const (
)
// Called from runtime.
-func semacquire(addr *uint32, profile semaProfileFlags) {
+func semacquire(addr *uint32) {
+ semacquire1(addr, false, 0)
+}
+
+func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags) {
gp := getg()
if gp != gp.m.curg {
throw("semacquire not on the G stack")
@@ -104,6 +117,7 @@ func semacquire(addr *uint32, profile semaProfileFlags) {
t0 := int64(0)
s.releasetime = 0
s.acquiretime = 0
+ s.ticket = 0
if profile&semaBlockProfile != 0 && blockprofilerate > 0 {
t0 = cputicks()
s.releasetime = -1
@@ -126,9 +140,9 @@ func semacquire(addr *uint32, profile semaProfileFlags) {
}
// Any semrelease after the cansemacquire knows we're waiting
// (we set nwait above), so go to sleep.
- root.queue(addr, s)
+ root.queue(addr, s, lifo)
goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync, 4)
- if cansemacquire(addr) {
+ if s.ticket != 0 || cansemacquire(addr) {
break
}
}
@@ -139,6 +153,10 @@ func semacquire(addr *uint32, profile semaProfileFlags) {
}
func semrelease(addr *uint32) {
+ semrelease1(addr, false)
+}
+
+func semrelease1(addr *uint32, handoff bool) {
root := semroot(addr)
atomic.Xadd(addr, 1)
@@ -157,28 +175,22 @@ func semrelease(addr *uint32) {
unlock(&root.lock)
return
}
- s := root.head
- for ; s != nil; s = s.next {
- if s.elem == unsafe.Pointer(addr) {
- atomic.Xadd(&root.nwait, -1)
- root.dequeue(s)
- break
- }
- }
+ s, t0 := root.dequeue(addr)
if s != nil {
- if s.acquiretime != 0 {
- t0 := cputicks()
- for x := root.head; x != nil; x = x.next {
- if x.elem == unsafe.Pointer(addr) {
- x.acquiretime = t0
- break
- }
- }
- mutexevent(t0-s.acquiretime, 3)
- }
+ atomic.Xadd(&root.nwait, -1)
}
unlock(&root.lock)
if s != nil { // May be slow, so unlock first
+ acquiretime := s.acquiretime
+ if acquiretime != 0 {
+ mutexevent(t0-acquiretime, 3)
+ }
+ if s.ticket != 0 {
+ throw("corrupted semaphore ticket")
+ }
+ if handoff && cansemacquire(addr) {
+ s.ticket = 1
+ }
readyWithTime(s, 5)
}
}
@@ -199,33 +211,230 @@ func cansemacquire(addr *uint32) bool {
}
}
-func (root *semaRoot) queue(addr *uint32, s *sudog) {
+// queue adds s to the blocked goroutines in semaRoot.
+func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) {
s.g = getg()
s.elem = unsafe.Pointer(addr)
s.next = nil
- s.prev = root.tail
- if root.tail != nil {
- root.tail.next = s
- } else {
- root.head = s
+ s.prev = nil
+
+ var last *sudog
+ pt := &root.treap
+ for t := *pt; t != nil; t = *pt {
+ if t.elem == unsafe.Pointer(addr) {
+ // Already have addr in list.
+ if lifo {
+ // Substitute s in t's place in treap.
+ *pt = s
+ s.ticket = t.ticket
+ s.acquiretime = t.acquiretime
+ s.parent = t.parent
+ s.prev = t.prev
+ s.next = t.next
+ if s.prev != nil {
+ s.prev.parent = s
+ }
+ if s.next != nil {
+ s.next.parent = s
+ }
+ // Add t first in s's wait list.
+ s.waitlink = t
+ s.waittail = t.waittail
+ if s.waittail == nil {
+ s.waittail = t
+ }
+ t.parent = nil
+ t.prev = nil
+ t.next = nil
+ t.waittail = nil
+ } else {
+ // Add s to end of t's wait list.
+ if t.waittail == nil {
+ t.waitlink = s
+ } else {
+ t.waittail.waitlink = s
+ }
+ t.waittail = s
+ s.waitlink = nil
+ }
+ return
+ }
+ last = t
+ if uintptr(unsafe.Pointer(addr)) < uintptr(t.elem) {
+ pt = &t.prev
+ } else {
+ pt = &t.next
+ }
+ }
+
+ // Add s as new leaf in tree of unique addrs.
+ // The balanced tree is a treap using ticket as the random heap priority.
+ // That is, it is a binary tree ordered according to the elem addresses,
+ // but then among the space of possible binary trees respecting those
+ // addresses, it is kept balanced on average by maintaining a heap ordering
+ // on the ticket: s.ticket <= both s.prev.ticket and s.next.ticket.
+ // https://en.wikipedia.org/wiki/Treap
+ // http://faculty.washington.edu/aragon/pubs/rst89.pdf
+ s.ticket = fastrand()
+ s.parent = last
+ *pt = s
+
+ // Rotate up into tree according to ticket (priority).
+ for s.parent != nil && s.parent.ticket > s.ticket {
+ if s.parent.prev == s {
+ root.rotateRight(s.parent)
+ } else {
+ if s.parent.next != s {
+ panic("semaRoot queue")
+ }
+ root.rotateLeft(s.parent)
+ }
}
- root.tail = s
}
-func (root *semaRoot) dequeue(s *sudog) {
- if s.next != nil {
- s.next.prev = s.prev
- } else {
- root.tail = s.prev
+// dequeue searches for and finds the first goroutine
+// in semaRoot blocked on addr.
+// If the sudog was being profiled, dequeue returns the time
+// at which it was woken up as now. Otherwise now is 0.
+func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64) {
+ ps := &root.treap
+ s := *ps
+ for ; s != nil; s = *ps {
+ if s.elem == unsafe.Pointer(addr) {
+ goto Found
+ }
+ if uintptr(unsafe.Pointer(addr)) < uintptr(s.elem) {
+ ps = &s.prev
+ } else {
+ ps = &s.next
+ }
}
- if s.prev != nil {
- s.prev.next = s.next
+ return nil, 0
+
+Found:
+ now = int64(0)
+ if s.acquiretime != 0 {
+ now = cputicks()
+ }
+ if t := s.waitlink; t != nil {
+ // Substitute t, also waiting on addr, for s in root tree of unique addrs.
+ *ps = t
+ t.ticket = s.ticket
+ t.parent = s.parent
+ t.prev = s.prev
+ if t.prev != nil {
+ t.prev.parent = t
+ }
+ t.next = s.next
+ if t.next != nil {
+ t.next.parent = t
+ }
+ if t.waitlink != nil {
+ t.waittail = s.waittail
+ } else {
+ t.waittail = nil
+ }
+ t.acquiretime = now
+ s.waitlink = nil
+ s.waittail = nil
} else {
- root.head = s.next
+ // Rotate s down to be leaf of tree for removal, respecting priorities.
+ for s.next != nil || s.prev != nil {
+ if s.next == nil || s.prev != nil && s.prev.ticket < s.next.ticket {
+ root.rotateRight(s)
+ } else {
+ root.rotateLeft(s)
+ }
+ }
+ // Remove s, now a leaf.
+ if s.parent != nil {
+ if s.parent.prev == s {
+ s.parent.prev = nil
+ } else {
+ s.parent.next = nil
+ }
+ } else {
+ root.treap = nil
+ }
}
+ s.parent = nil
s.elem = nil
s.next = nil
s.prev = nil
+ s.ticket = 0
+ return s, now
+}
+
+// rotateLeft rotates the tree rooted at node x.
+// turning (x a (y b c)) into (y (x a b) c).
+func (root *semaRoot) rotateLeft(x *sudog) {
+ // p -> (x a (y b c))
+ p := x.parent
+ a, y := x.prev, x.next
+ b, c := y.prev, y.next
+
+ y.prev = x
+ x.parent = y
+ y.next = c
+ if c != nil {
+ c.parent = y
+ }
+ x.prev = a
+ if a != nil {
+ a.parent = x
+ }
+ x.next = b
+ if b != nil {
+ b.parent = x
+ }
+
+ y.parent = p
+ if p == nil {
+ root.treap = y
+ } else if p.prev == x {
+ p.prev = y
+ } else {
+ if p.next != x {
+ throw("semaRoot rotateLeft")
+ }
+ p.next = y
+ }
+}
+
+// rotateRight rotates the tree rooted at node y.
+// turning (y (x a b) c) into (x a (y b c)).
+func (root *semaRoot) rotateRight(y *sudog) {
+ // p -> (y (x a b) c)
+ p := y.parent
+ x, c := y.prev, y.next
+ a, b := x.prev, x.next
+
+ x.prev = a
+ if a != nil {
+ a.parent = x
+ }
+ x.next = y
+ y.parent = x
+ y.prev = b
+ if b != nil {
+ b.parent = y
+ }
+ y.next = c
+ if c != nil {
+ c.parent = y
+ }
+
+ x.parent = p
+ if p == nil {
+ root.treap = x
+ } else if p.prev == y {
+ p.prev = x
+ } else {
+ if p.next != y {
+ throw("semaRoot rotateRight")
+ }
+ p.next = x
+ }
}
// notifyList is a ticket-based notification list used to implement sync.Cond.
@@ -352,10 +561,22 @@ func notifyListNotifyOne(l *notifyList) {
return
}
- // Update the next notify ticket number, and try to find the G that
- // needs to be notified. If it hasn't made it to the list yet we won't
- // find it, but it won't park itself once it sees the new notify number.
+ // Update the next notify ticket number.
atomic.Store(&l.notify, t+1)
+
+ // Try to find the g that needs to be notified.
+ // If it hasn't made it to the list yet we won't find it,
+ // but it won't park itself once it sees the new notify number.
+ //
+ // This scan looks linear but essentially always stops quickly.
+ // Because g's queue separately from taking numbers,
+ // there may be minor reorderings in the list, but we
+ // expect the g we're looking for to be near the front.
+ // The g has others in front of it on the list only to the
+ // extent that it lost the race, so the iteration will not
+ // be too long. This applies even when the g is missing:
+ // it hasn't yet gotten to sleep and has lost the race to
+ // the (few) other g's that we find on the list.
for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
if s.ticket == t {
n := s.next
@@ -383,3 +604,8 @@ func notifyListCheck(sz uintptr) {
throw("bad notifyList size")
}
}
+
+//go:linkname sync_nanotime sync.runtime_nanotime
+func sync_nanotime() int64 {
+ return nanotime()
+}
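
The notifyList scan rewritten above is ticket-based: waiters take increasing ticket numbers, and notifyOne wakes whichever waiter holds the oldest un-notified ticket, even if it enqueued slightly out of order. A toy single-threaded model of the ticket accounting (channels stand in for parking; none of the runtime's concurrency is reproduced):

package main

import "fmt"

type notifyList struct {
	wait   uint32 // next ticket to hand out
	notify uint32 // next ticket to notify
	ch     map[uint32]chan struct{}
}

func newList() *notifyList {
	return &notifyList{ch: make(map[uint32]chan struct{})}
}

func (l *notifyList) add() uint32 {
	t := l.wait
	l.wait++
	l.ch[t] = make(chan struct{}, 1)
	return t
}

func (l *notifyList) notifyOne() {
	if l.notify == l.wait {
		return // nobody to notify
	}
	t := l.notify
	l.notify++
	l.ch[t] <- struct{}{}
}

func main() {
	l := newList()
	t0, t1 := l.add(), l.add()
	l.notifyOne()
	<-l.ch[t0]
	fmt.Println("ticket", t0, "notified before ticket", t1)
}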
diff --git a/libgo/go/runtime/signal_sighandler.go b/libgo/go/runtime/signal_sighandler.go
index b71b21e1d5c..378c68e1d90 100644
--- a/libgo/go/runtime/signal_sighandler.go
+++ b/libgo/go/runtime/signal_sighandler.go
@@ -111,7 +111,7 @@ func sighandler(sig uint32, info *_siginfo_t, ctxt unsafe.Pointer, gp *g) {
if docrash {
crashing++
- if crashing < sched.mcount {
+ if crashing < sched.mcount-int32(extraMCount) {
// There are other m's that need to dump their stacks.
// Relay SIGQUIT to the next m by sending it to the current process.
// All m's that have already received SIGQUIT have signal masks blocking
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
index e2642ee25fc..3237e18765f 100644
--- a/libgo/go/runtime/signal_unix.go
+++ b/libgo/go/runtime/signal_unix.go
@@ -7,14 +7,13 @@
package runtime
import (
+ "runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// For gccgo's C code to call:
//go:linkname initsig runtime.initsig
-//go:linkname crash runtime.crash
-//go:linkname resetcpuprofiler runtime.resetcpuprofiler
//go:linkname sigtrampgo runtime.sigtrampgo
//go:linkname os_sigpipe os.sigpipe
@@ -37,11 +36,18 @@ const (
// Stores the signal handlers registered before Go installed its own.
// These signal handlers will be invoked in cases where Go doesn't want to
// handle a particular signal (e.g., signal occurred on a non-Go thread).
-// See sigfwdgo() for more information on when the signals are forwarded.
+// See sigfwdgo for more information on when the signals are forwarded.
//
-// Signal forwarding is currently available only on Darwin and Linux.
+// This is read by the signal handler; accesses should use
+// atomic.Loaduintptr and atomic.Storeuintptr.
var fwdSig [_NSIG]uintptr
+// handlingSig is indexed by signal number and is non-zero if we are
+// currently handling the signal. Or, to put it another way, whether
+// the signal handler is currently set to the Go signal handler or not.
+// This is uint32 rather than bool so that we can use atomic instructions.
+var handlingSig [_NSIG]uint32
+
// channels for synchronizing signal mask updates with the signal mask
// thread
var (
@@ -87,6 +93,9 @@ func initsig(preinit bool) {
if t.flags == 0 || t.flags&_SigDefault != 0 {
continue
}
+
+ // We don't need to use atomic operations here because
+ // there shouldn't be any other goroutines running yet.
fwdSig[i] = getsig(i)
if !sigInstallGoHandler(i) {
@@ -98,7 +107,7 @@ func initsig(preinit bool) {
continue
}
- t.flags |= _SigHandling
+ handlingSig[i] = 1
setsig(i, getSigtramp())
}
}
@@ -111,7 +120,7 @@ func sigInstallGoHandler(sig uint32) bool {
// Even these signals can be fetched using the os/signal package.
switch sig {
case _SIGHUP, _SIGINT:
- if fwdSig[sig] == _SIG_IGN {
+ if atomic.Loaduintptr(&fwdSig[sig]) == _SIG_IGN {
return false
}
}
@@ -122,37 +131,52 @@ func sigInstallGoHandler(sig uint32) bool {
}
// When built using c-archive or c-shared, only install signal
- // handlers for synchronous signals.
- if (isarchive || islibrary) && t.flags&_SigPanic == 0 {
+ // handlers for synchronous signals and SIGPIPE.
+ if (isarchive || islibrary) && t.flags&_SigPanic == 0 && sig != _SIGPIPE {
return false
}
return true
}
+// sigenable enables the Go signal handler to catch the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32) {
if sig >= uint32(len(sigtable)) {
return
}
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
t := &sigtable[sig]
if t.flags&_SigNotify != 0 {
ensureSigM()
enableSigChan <- sig
<-maskUpdatedChan
- if t.flags&_SigHandling == 0 {
- t.flags |= _SigHandling
- fwdSig[sig] = getsig(sig)
+ if atomic.Cas(&handlingSig[sig], 0, 1) {
+ atomic.Storeuintptr(&fwdSig[sig], getsig(sig))
setsig(sig, getSigtramp())
}
}
}
+// sigdisable disables the Go signal handler for the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32) {
if sig >= uint32(len(sigtable)) {
return
}
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
t := &sigtable[sig]
if t.flags&_SigNotify != 0 {
ensureSigM()
@@ -163,25 +187,71 @@ func sigdisable(sig uint32) {
// signal, then to go back to the state before Notify
// we should remove the one we installed.
if !sigInstallGoHandler(sig) {
- t.flags &^= _SigHandling
- setsig(sig, fwdSig[sig])
+ atomic.Store(&handlingSig[sig], 0)
+ setsig(sig, atomic.Loaduintptr(&fwdSig[sig]))
}
}
}
+// sigignore ignores the signal sig.
+// It is only called while holding the os/signal.handlers lock,
+// via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32) {
if sig >= uint32(len(sigtable)) {
return
}
+ // SIGPROF is handled specially for profiling.
+ if sig == _SIGPROF {
+ return
+ }
+
t := &sigtable[sig]
if t.flags&_SigNotify != 0 {
- t.flags &^= _SigHandling
+ atomic.Store(&handlingSig[sig], 0)
setsig(sig, _SIG_IGN)
}
}
-func resetcpuprofiler(hz int32) {
+// clearSignalHandlers clears all signal handlers that are not ignored
+// back to the default. This is called by the child after a fork, so that
+// we can enable the signal mask for the exec without worrying about
+// running a signal handler in the child.
+//go:nosplit
+//go:nowritebarrierrec
+func clearSignalHandlers() {
+ for i := uint32(0); i < _NSIG; i++ {
+ if atomic.Load(&handlingSig[i]) != 0 {
+ setsig(i, _SIG_DFL)
+ }
+ }
+}
+
+// setProcessCPUProfiler is called when the profiling timer changes.
+// It is called with prof.lock held. hz is the new timer, and is 0 if
+// profiling is being disabled. Enable or disable the signal as
+// required for -buildmode=c-archive.
+func setProcessCPUProfiler(hz int32) {
+ if hz != 0 {
+ // Enable the Go signal handler if not enabled.
+ if atomic.Cas(&handlingSig[_SIGPROF], 0, 1) {
+ atomic.Storeuintptr(&fwdSig[_SIGPROF], getsig(_SIGPROF))
+ setsig(_SIGPROF, funcPC(sighandler))
+ }
+ } else {
+ // If the Go signal handler should be disabled by default,
+ // disable it if it is enabled.
+ if !sigInstallGoHandler(_SIGPROF) {
+ if atomic.Cas(&handlingSig[_SIGPROF], 1, 0) {
+ setsig(_SIGPROF, atomic.Loaduintptr(&fwdSig[_SIGPROF]))
+ }
+ }
+ }
+}
+
+// setThreadCPUProfiler makes any thread-specific changes required to
+// implement profiling at a rate of hz.
+func setThreadCPUProfiler(hz int32) {
var it _itimerval
if hz == 0 {
setitimer(_ITIMER_PROF, &it, nil)
@@ -315,7 +385,7 @@ func raisebadsignal(sig uint32, c *sigctxt) {
if sig >= _NSIG {
handler = _SIG_DFL
} else {
- handler = fwdSig[sig]
+ handler = atomic.Loaduintptr(&fwdSig[sig])
}
// Reset the signal handler and raise the signal.
@@ -431,6 +501,16 @@ func sigNotOnStack(sig uint32) {
throw("non-Go code set up signal handler without SA_ONSTACK flag")
}
+// signalDuringFork is called if we receive a signal while doing a fork.
+// We do not want signals at that time, as a signal sent to the process
+// group may be delivered to the child process, causing confusion.
+// This should never be called, because we block signals across the fork;
+// this function is just a safety check. See issue 18600 for background.
+func signalDuringFork(sig uint32) {
+ println("signal", sig, "received during fork")
+ throw("signal received during fork")
+}
+
// This runs on a foreign stack, without an m or a g. No stack split.
//go:nosplit
//go:norace
@@ -455,7 +535,7 @@ func sigfwdgo(sig uint32, info *_siginfo_t, ctx unsafe.Pointer) bool {
if sig >= uint32(len(sigtable)) {
return false
}
- fwdFn := fwdSig[sig]
+ fwdFn := atomic.Loaduintptr(&fwdSig[sig])
if !signalsOK {
// The only way we can get here is if we are in a
@@ -470,35 +550,44 @@ func sigfwdgo(sig uint32, info *_siginfo_t, ctx unsafe.Pointer) bool {
return true
}
- flags := sigtable[sig].flags
-
// If there is no handler to forward to, no need to forward.
if fwdFn == _SIG_DFL {
return false
}
// If we aren't handling the signal, forward it.
- if flags&_SigHandling == 0 {
+ // Really if we aren't handling the signal, we shouldn't get here,
+ // but on Darwin setsigstack can lead us here because it sets
+ // the sa_tramp field. The sa_tramp field is not returned by
+ // sigaction, so the fix for that is non-obvious.
+ if atomic.Load(&handlingSig[sig]) == 0 {
sigfwd(fwdFn, sig, info, ctx)
return true
}
- // Only forward synchronous signals.
+ flags := sigtable[sig].flags
+
c := sigctxt{info, ctx}
- if c.sigcode() == _SI_USER || flags&_SigPanic == 0 {
+ // Only forward synchronous signals and SIGPIPE.
+ // Unfortunately, user generated SIGPIPEs will also be forwarded, because si_code
+ // is set to _SI_USER even for a SIGPIPE raised from a write to a closed socket
+ // or pipe.
+ if (c.sigcode() == _SI_USER || flags&_SigPanic == 0) && sig != _SIGPIPE {
return false
}
// Determine if the signal occurred inside Go code. We test that:
// (1) we were in a goroutine (i.e., m.curg != nil), and
- // (2) we weren't in CGO (i.e., m.curg.syscallsp == 0).
+ // (2) we weren't in CGO.
g := getg()
- if g != nil && g.m != nil && g.m.curg != nil && g.m.curg.syscallsp == 0 {
+ if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
return false
}
+
// Signal not handled by Go, forward it.
if fwdFn != _SIG_IGN {
sigfwd(fwdFn, sig, info, ctx)
}
+
return true
}
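
The handlingSig/fwdSig discipline used throughout this file, CAS to claim ownership, save the old handler with an atomic store, and only then install the new one, is a general install-exactly-once pattern. A user-space sketch with plain function values standing in for signal handlers (the names here are illustrative, not runtime APIs):

package main

import (
	"fmt"
	"sync/atomic"
)

var (
	handling uint32       // 0 or 1, playing the role of handlingSig[sig]
	fwd      atomic.Value // saved previous handler, playing the role of fwdSig[sig]
)

func install(old, next func()) {
	// Claim ownership exactly once, even under concurrent callers.
	if atomic.CompareAndSwapUint32(&handling, 0, 1) {
		fwd.Store(old) // remember what to restore or forward to
		fmt.Println("installed new handler")
		_ = next // a real implementation would register next here
	}
}

func uninstall() {
	if atomic.CompareAndSwapUint32(&handling, 1, 0) {
		prev := fwd.Load().(func())
		fmt.Println("restoring previous handler")
		prev()
	}
}

func main() {
	install(func() { fmt.Println("previous handler runs") }, func() {})
	install(func() { panic("unreachable: second install is a no-op") }, func() {})
	uninstall()
}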
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
index a6d498f9b03..cd036ce364c 100644
--- a/libgo/go/runtime/sigqueue.go
+++ b/libgo/go/runtime/sigqueue.go
@@ -33,6 +33,17 @@ import (
_ "unsafe" // for go:linkname
)
+// sig handles communication between the signal handler and os/signal.
+// Other than the inuse and recv fields, the fields are accessed atomically.
+//
+// The wanted and ignored fields are only written by one goroutine at
+// a time; access is controlled by the handlers Mutex in os/signal.
+// The fields are only read by that one goroutine and by the signal handler.
+// We access them atomically to minimize the race between setting them
+// in the goroutine calling os/signal and the signal handler,
+// which may be running in a different thread. That race is unavoidable,
+// as there is no connection between handling a signal and receiving one,
+// but atomic instructions should minimize it.
var sig struct {
note note
mask [(_NSIG + 31) / 32]uint32
@@ -53,7 +64,11 @@ const (
// Reports whether the signal was sent. If not, the caller typically crashes the program.
func sigsend(s uint32) bool {
bit := uint32(1) << uint(s&31)
- if !sig.inuse || s >= uint32(32*len(sig.wanted)) || sig.wanted[s/32]&bit == 0 {
+ if !sig.inuse || s >= uint32(32*len(sig.wanted)) {
+ return false
+ }
+
+ if w := atomic.Load(&sig.wanted[s/32]); w&bit == 0 {
return false
}
@@ -131,6 +146,23 @@ func signal_recv() uint32 {
}
}
+// signalWaitUntilIdle waits until the signal delivery mechanism is idle.
+// This is used to ensure that we do not drop a signal notification due
+// to a race between disabling a signal and receiving a signal.
+// This assumes that signal delivery has already been disabled for
+// the signal(s) in question, and here we are just waiting to make sure
+// that all the signals have been delivered to the user channels
+// by the os/signal package.
+//go:linkname signalWaitUntilIdle os_signal.signalWaitUntilIdle
+func signalWaitUntilIdle() {
+ // Although WaitUntilIdle seems like the right name for this
+ // function, the state we are looking for is sigReceiving, not
+ // sigIdle. The sigIdle state is really more like sigProcessing.
+ for atomic.Load(&sig.state) != sigReceiving {
+ Gosched()
+ }
+}
+
// Must only be called from a single goroutine at a time.
//go:linkname signal_enable os_signal.signal_enable
func signal_enable(s uint32) {
@@ -146,8 +178,15 @@ func signal_enable(s uint32) {
if s >= uint32(len(sig.wanted)*32) {
return
}
- sig.wanted[s/32] |= 1 << (s & 31)
- sig.ignored[s/32] &^= 1 << (s & 31)
+
+ w := sig.wanted[s/32]
+ w |= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
+
+ i := sig.ignored[s/32]
+ i &^= 1 << (s & 31)
+ atomic.Store(&sig.ignored[s/32], i)
+
sigenable(s)
}
@@ -157,8 +196,11 @@ func signal_disable(s uint32) {
if s >= uint32(len(sig.wanted)*32) {
return
}
- sig.wanted[s/32] &^= 1 << (s & 31)
sigdisable(s)
+
+ w := sig.wanted[s/32]
+ w &^= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
}
// Must only be called from a single goroutine at a time.
@@ -167,12 +209,19 @@ func signal_ignore(s uint32) {
if s >= uint32(len(sig.wanted)*32) {
return
}
- sig.wanted[s/32] &^= 1 << (s & 31)
- sig.ignored[s/32] |= 1 << (s & 31)
sigignore(s)
+
+ w := sig.wanted[s/32]
+ w &^= 1 << (s & 31)
+ atomic.Store(&sig.wanted[s/32], w)
+
+ i := sig.ignored[s/32]
+ i |= 1 << (s & 31)
+ atomic.Store(&sig.ignored[s/32], i)
}
// Checked by signal handlers.
func signal_ignored(s uint32) bool {
- return sig.ignored[s/32]&(1<<(s&31)) != 0
+ i := atomic.Load(&sig.ignored[s/32])
+ return i&(1<<(s&31)) != 0
}
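
The wanted and ignored updates above follow a read-modify-write, then atomic.Store pattern. It is race-free because writers are serialized by the os/signal handlers lock, while the concurrent reader (the signal handler) only performs atomic loads. Condensed to its essentials, assuming a 64-signal space:

package main

import (
	"fmt"
	"sync/atomic"
)

var wanted [2]uint32 // bit s set => signal s is wanted

// setWanted and clearWanted are called by one goroutine at a time
// (in the runtime, under the os/signal handlers lock).
func setWanted(s uint32) {
	w := wanted[s/32]
	w |= 1 << (s & 31)
	atomic.StoreUint32(&wanted[s/32], w)
}

func clearWanted(s uint32) {
	w := wanted[s/32]
	w &^= 1 << (s & 31)
	atomic.StoreUint32(&wanted[s/32], w)
}

// isWanted may run concurrently (in the runtime: the signal handler),
// so it must load the word atomically.
func isWanted(s uint32) bool {
	return atomic.LoadUint32(&wanted[s/32])&(1<<(s&31)) != 0
}

func main() {
	setWanted(34)
	fmt.Println(isWanted(34), isWanted(2)) // true false
	clearWanted(34)
	fmt.Println(isWanted(34)) // false
}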
diff --git a/libgo/go/runtime/sizeclasses.go b/libgo/go/runtime/sizeclasses.go
index e616e95148c..5366564afda 100644
--- a/libgo/go/runtime/sizeclasses.go
+++ b/libgo/go/runtime/sizeclasses.go
@@ -1,4 +1,4 @@
-// AUTO-GENERATED by mksizeclasses.go; DO NOT EDIT
+// Code generated by mksizeclasses.go; DO NOT EDIT.
//go:generate go run mksizeclasses.go
package runtime
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go
index 9cc28737434..7436ddfdf4b 100644
--- a/libgo/go/runtime/string.go
+++ b/libgo/go/runtime/string.go
@@ -88,7 +88,7 @@ func concatstring5(buf *tmpBuf, a [5]string) string {
// Buf is a fixed-size buffer for the result,
// it is not nil if the result does not escape.
-func slicebytetostring(buf *tmpBuf, b []byte) string {
+func slicebytetostring(buf *tmpBuf, b []byte) (str string) {
l := len(b)
if l == 0 {
// Turns out to be a relatively common case.
@@ -96,18 +96,26 @@ func slicebytetostring(buf *tmpBuf, b []byte) string {
// you find the indices and convert the subslice to string.
return ""
}
- if raceenabled && l > 0 {
+ if raceenabled {
racereadrangepc(unsafe.Pointer(&b[0]),
uintptr(l),
getcallerpc(unsafe.Pointer(&buf)),
funcPC(slicebytetostring))
}
- if msanenabled && l > 0 {
+ if msanenabled {
msanread(unsafe.Pointer(&b[0]), uintptr(l))
}
- s, c := rawstringtmp(buf, l)
- copy(c, b)
- return s
+
+ var p unsafe.Pointer
+ if buf != nil && len(b) <= len(buf) {
+ p = unsafe.Pointer(buf)
+ } else {
+ p = mallocgc(uintptr(len(b)), nil, false)
+ }
+ stringStructOf(&str).str = p
+ stringStructOf(&str).len = len(b)
+ memmove(p, (*(*slice)(unsafe.Pointer(&b))).array, uintptr(len(b)))
+ return
}
func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte) {
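
The buf parameter is a small compiler-provided stack buffer: when the resulting string does not escape and fits in tmpBuf, slicebytetostring copies into the stack buffer and skips mallocgc entirely. The effect is observable from ordinary code, for example in map lookups keyed by string(b), which the gc compiler treats as non-escaping (an illustrative measurement; the result depends on the compiler and optimization level):

package main

import (
	"fmt"
	"testing"
)

func main() {
	m := map[string]int{"hello": 1}
	b := []byte("hello")
	// The string(b) key does not escape the lookup, so the compiler
	// can pass a stack tmpBuf and the conversion need not allocate.
	allocs := testing.AllocsPerRun(1000, func() {
		_ = m[string(b)]
	})
	fmt.Println("allocs per lookup:", allocs) // expect 0
}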
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
index ee306999e2e..555a7fc7ef9 100644
--- a/libgo/go/runtime/string_test.go
+++ b/libgo/go/runtime/string_test.go
@@ -6,6 +6,7 @@ package runtime_test
import (
"runtime"
+ "strconv"
"strings"
"testing"
)
@@ -89,6 +90,20 @@ func BenchmarkConcatStringAndBytes(b *testing.B) {
}
}
+var escapeString string
+
+func BenchmarkSliceByteToString(b *testing.B) {
+ buf := []byte{'!'}
+ for n := 0; n < 8; n++ {
+ b.Run(strconv.Itoa(len(buf)), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ escapeString = string(buf)
+ }
+ })
+ buf = append(buf, buf...)
+ }
+}
+
var stringdata = []struct{ name, data string }{
{"ASCII", "01234567890"},
{"Japanese", "æ¥æ¬èªæ¥æ¬èªæ¥æ¬èª"},
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 30d87c4121b..84fa1c79689 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -109,8 +109,22 @@ func memcmp(a, b unsafe.Pointer, size uintptr) int32
// exported value for testing
var hashLoad = loadFactor
-// in asm_*.s
-func fastrand() uint32
+//go:nosplit
+func fastrand() uint32 {
+ mp := getg().m
+ fr := mp.fastrand
+ mx := uint32(int32(fr)>>31) & 0xa8888eef
+ fr = fr<<1 ^ mx
+ mp.fastrand = fr
+ return fr
+}
+
+//go:nosplit
+func fastrandn(n uint32) uint32 {
+ // This is similar to fastrand() % n, but faster.
+ // See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+ return uint32(uint64(fastrand()) * uint64(n) >> 32)
+}
//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }
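
fastrandn replaces the division implied by fastrand() % n with one 64-bit multiply: for x uniform over the 32-bit range, uint32(uint64(x)*uint64(n) >> 32) is nearly uniform over [0, n). A standalone check of the reduction:

package main

import (
	"fmt"
	"math/rand"
)

// reduce maps a full-range 32-bit value into [0, n) without division.
// See Lemire, "A fast alternative to the modulo reduction".
func reduce(x, n uint32) uint32 {
	return uint32(uint64(x) * uint64(n) >> 32)
}

func main() {
	const n = 10
	var counts [n]int
	for i := 0; i < 1000000; i++ {
		counts[reduce(rand.Uint32(), n)]++
	}
	fmt.Println(counts) // roughly 100000 in each bucket
}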
@@ -156,7 +170,7 @@ type neverCallThisFunction struct{}
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
-// prematurely and if there are leftover stack barriers it may panic.
+// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)
// publicationBarrier performs a store/store barrier (a "publication"
@@ -176,9 +190,6 @@ func goexit(neverCallThisFunction)
// data dependency ordering.
func publicationBarrier()
-//go:noescape
-func setcallerpc(argp unsafe.Pointer, pc uintptr)
-
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// For both, the argp must be a pointer to the caller's first function argument.
@@ -213,12 +224,14 @@ func getcallerpc(argp unsafe.Pointer) uintptr
//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr
+func asmcgocall(fn, arg unsafe.Pointer) int32 {
+ throw("asmcgocall")
+ return 0
+}
+
// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)
-//go:linkname time_now time.now
-func time_now() (sec int64, nsec int32)
-
//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)
@@ -238,13 +251,6 @@ func prefetchnta(addr uintptr) {
prefetch(unsafe.Pointer(addr), 0, 0)
}
-// For gccgo, expose this for C callers.
-//go:linkname unixnanotime runtime.unixnanotime
-func unixnanotime() int64 {
- sec, nsec := time_now()
- return sec*1e9 + int64(nsec)
-}
-
// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
@@ -315,18 +321,6 @@ func entersyscallblock(int32)
// Here for gccgo until we port mgc.go.
func GC()
-// For gccgo to call from C code.
-//go:linkname acquireWorldsema runtime.acquireWorldsema
-func acquireWorldsema() {
- semacquire(&worldsema, 0)
-}
-
-// For gccgo to call from C code.
-//go:linkname releaseWorldsema runtime.releaseWorldsema
-func releaseWorldsema() {
- semrelease(&worldsema)
-}
-
// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
@@ -436,10 +430,6 @@ func setpagesize(s uintptr) {
}
}
-// Temporary for gccgo until we port more of proc.go.
-func sigprofNonGoPC(pc uintptr) {
-}
-
// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
@@ -458,3 +448,11 @@ type bitvector struct {
n int32 // # of bits
bytedata *uint8
}
+
+// bool2int returns 0 if x is false or 1 if x is true.
+func bool2int(x bool) int {
+ if x {
+ return 1
+ }
+ return 0
+}
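
round, kept in context above, uses the standard power-of-two alignment identity: adding a-1 and then clearing the low bits with &^ (a-1) rounds n up to a multiple of a. For example:

package main

import "fmt"

// round rounds n up to a multiple of a; a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	fmt.Println(round(13, 8))   // 16
	fmt.Println(round(16, 8))   // 16 (already aligned)
	fmt.Println(round(1, 4096)) // 4096
}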
diff --git a/libgo/go/runtime/stubs_linux.go b/libgo/go/runtime/stubs_linux.go
new file mode 100644
index 00000000000..d10f657197f
--- /dev/null
+++ b/libgo/go/runtime/stubs_linux.go
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package runtime
+
+func sbrk0() uintptr
diff --git a/libgo/go/runtime/stubs_nonlinux.go b/libgo/go/runtime/stubs_nonlinux.go
new file mode 100644
index 00000000000..e1ea05cf0b1
--- /dev/null
+++ b/libgo/go/runtime/stubs_nonlinux.go
@@ -0,0 +1,12 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package runtime
+
+// sbrk0 returns the current process brk, or 0 if not implemented.
+func sbrk0() uintptr {
+ return 0
+}
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index bad03471c46..3d15fc8039e 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -7,6 +7,7 @@ package runtime
// Frames may be used to get function/file/line information for a
// slice of PC values returned by Callers.
type Frames struct {
+ // callers is a slice of PCs that have not yet been expanded.
callers []uintptr
// The last PC we saw.
@@ -18,23 +19,34 @@ type Frames struct {
// Frame is the information returned by Frames for each call frame.
type Frame struct {
- // Program counter for this frame; multiple frames may have
- // the same PC value.
+ // PC is the program counter for the location in this frame.
+ // For a frame that calls another frame, this will be the
+ // program counter of a call instruction. Because of inlining,
+ // multiple frames may have the same PC value, but different
+ // symbolic information.
PC uintptr
- // Func for this frame; may be nil for non-Go code or fully
- // inlined functions.
+ // Func is the Func value of this call frame. This may be nil
+ // for non-Go code or fully inlined functions.
Func *Func
- // Function name, file name, and line number for this call frame.
- // May be the empty string or zero if not known.
+ // Function is the package path-qualified function name of
+ // this call frame. If non-empty, this string uniquely
+ // identifies a single function in the program.
+ // This may be the empty string if not known.
// If Func is not nil then Function == Func.Name().
Function string
- File string
- Line int
- // Entry point for the function; may be zero if not known.
- // If Func is not nil then Entry == Func.Entry().
+ // File and Line are the file name and line number of the
+ // location in this frame. For non-leaf frames, this will be
+ // the location of a call. These may be the empty string and
+ // zero, respectively, if not known.
+ File string
+ Line int
+
+ // Entry point program counter for the function; may be zero
+ // if not known. If Func is not nil then Entry ==
+ // Func.Entry().
Entry uintptr
}
@@ -94,7 +106,8 @@ func (ci *Frames) Next() (frame Frame, more bool) {
// NOTE: Func does not expose the actual unexported fields, because we return *Func
// values to users, and we want to keep them from being able to overwrite the data
// with (say) *f = Func{}.
-// All code operating on a *Func must call raw to get the *_func instead.
+// All code operating on a *Func must call raw() to get the *_func
+// or funcInfo() to get the funcInfo instead.
// A Func represents a Go function in the running binary.
type Func struct {
@@ -104,6 +117,9 @@ type Func struct {
// FuncForPC returns a *Func describing the function that contains the
// given program counter address, or else nil.
+//
+// If pc represents multiple functions because of inlining, it returns
+// the *Func describing the outermost function.
func FuncForPC(pc uintptr) *Func {
name, _, _ := funcfileline(pc, -1)
if name == "" {
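
The reworked Frame documentation pairs with runtime.CallersFrames, which expands the raw PCs from runtime.Callers into Frame values, including the inlining cases the new comments call out. Typical use:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 16)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself
	frames := runtime.CallersFrames(pcs[:n])
	for {
		f, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}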
diff --git a/libgo/go/runtime/symtab_test.go b/libgo/go/runtime/symtab_test.go
index 8c8281f78a1..807b50de990 100644
--- a/libgo/go/runtime/symtab_test.go
+++ b/libgo/go/runtime/symtab_test.go
@@ -31,10 +31,14 @@ func TestCaller(t *testing.T) {
}
}
+// These are marked noinline so that we can use FuncForPC
+// in testCallerBar.
+//go:noinline
func testCallerFoo(t *testing.T) {
testCallerBar(t)
}
+//go:noinline
func testCallerBar(t *testing.T) {
for i := 0; i < 2; i++ {
pc, file, line, ok := runtime.Caller(i)
@@ -94,7 +98,7 @@ var mapLit = map[int]int{ // 28
} // 33
var intLit = lineNumber() + // 34
lineNumber() + // 35
- lineNumber() // 36
+ lineNumber() // 36
func trythis() { // 37
recordLines(lineNumber(), // 38
lineNumber(), // 39
@@ -156,3 +160,14 @@ func TestLineNumber(t *testing.T) {
}
}
}
+
+func TestNilName(t *testing.T) {
+ defer func() {
+ if ex := recover(); ex != nil {
+ t.Fatalf("expected no nil panic, got=%v", ex)
+ }
+ }()
+ if got := (*runtime.Func)(nil).Name(); got != "" {
+ t.Errorf("Name() = %q, want %q", got, "")
+ }
+}
diff --git a/libgo/go/runtime/testdata/testprog/numcpu_freebsd.go b/libgo/go/runtime/testdata/testprog/numcpu_freebsd.go
new file mode 100644
index 00000000000..035c53470be
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/numcpu_freebsd.go
@@ -0,0 +1,126 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func init() {
+ register("FreeBSDNumCPU", FreeBSDNumCPU)
+ register("FreeBSDNumCPUHelper", FreeBSDNumCPUHelper)
+}
+
+func FreeBSDNumCPUHelper() {
+ fmt.Printf("%d\n", runtime.NumCPU())
+}
+
+func FreeBSDNumCPU() {
+ _, err := exec.LookPath("cpuset")
+ if err != nil {
+ // Can not test without cpuset command.
+ fmt.Println("OK")
+ return
+ }
+ _, err = exec.LookPath("sysctl")
+ if err != nil {
+ // Can not test without sysctl command.
+ fmt.Println("OK")
+ return
+ }
+ cmd := exec.Command("sysctl", "-n", "kern.smp.active")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ fmt.Printf("fail to launch '%s', error: %s, output: %s\n", strings.Join(cmd.Args, " "), err, output)
+ return
+ }
+ if bytes.Equal(output, []byte("1\n")) == false {
+ // SMP mode deactivated in kernel.
+ fmt.Println("OK")
+ return
+ }
+
+ list, err := getList()
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+ err = checkNCPU(list)
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+ if len(list) >= 2 {
+ err = checkNCPU(list[:len(list)-1])
+ if err != nil {
+ fmt.Printf("%s\n", err)
+ return
+ }
+ }
+ fmt.Println("OK")
+ return
+}
+
+func getList() ([]string, error) {
+ pid := syscall.Getpid()
+
+ // Launch cpuset to print a list of available CPUs: pid mask: 0, 1, 2, 3.
+ cmd := exec.Command("cpuset", "-g", "-p", strconv.Itoa(pid))
+ cmdline := strings.Join(cmd.Args, " ")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return nil, fmt.Errorf("fail to execute '%s': %s", cmdline, err)
+ }
+ pos := bytes.IndexRune(output, ':')
+ if pos == -1 {
+ return nil, fmt.Errorf("invalid output from '%s', ':' not found: %s", cmdline, output)
+ }
+
+ var list []string
+ for _, val := range bytes.Split(output[pos+1:], []byte(",")) {
+ index := string(bytes.TrimSpace(val))
+ if len(index) == 0 {
+ continue
+ }
+ list = append(list, index)
+ }
+ if len(list) == 0 {
+ return nil, fmt.Errorf("empty CPU list from '%s': %s", cmdline, output)
+ }
+ return list, nil
+}
+
+func checkNCPU(list []string) error {
+ listString := strings.Join(list, ",")
+ if len(listString) == 0 {
+ return fmt.Errorf("could not check against an empty CPU list")
+ }
+
+ // Launch FreeBSDNumCPUHelper() with specified CPUs list.
+ cmd := exec.Command("cpuset", "-l", listString, os.Args[0], "FreeBSDNumCPUHelper")
+ cmdline := strings.Join(cmd.Args, " ")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("fail to launch child '%s', error: %s, output: %s", cmdline, err, output)
+ }
+
+ // NumCPU from FreeBSDNumCPUHelper come with '\n'.
+ output = bytes.TrimSpace(output)
+ n, err := strconv.Atoi(string(output))
+ if err != nil {
+ return fmt.Errorf("fail to parse output from child '%s', error: %s, output: %s", cmdline, err, output)
+ }
+ if n != len(list) {
+ return fmt.Errorf("runtime.NumCPU() expected to %d, got %d when run with CPU list %s", len(list), n, listString)
+ }
+ return nil
+}
diff --git a/libgo/go/runtime/testdata/testprog/panicrace.go b/libgo/go/runtime/testdata/testprog/panicrace.go
new file mode 100644
index 00000000000..f0589940b56
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/panicrace.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+ "sync"
+)
+
+func init() {
+ register("PanicRace", PanicRace)
+}
+
+func PanicRace() {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer func() {
+ wg.Done()
+ runtime.Gosched()
+ }()
+ panic("crash")
+ }()
+ wg.Wait()
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/cgo.go b/libgo/go/runtime/testdata/testprogcgo/cgo.go
index 870d4efdead..209524a24db 100644
--- a/libgo/go/runtime/testdata/testprogcgo/cgo.go
+++ b/libgo/go/runtime/testdata/testprogcgo/cgo.go
@@ -45,10 +45,13 @@ func CgoSignalDeadlock() {
}()
var s *string
*s = ""
+ fmt.Printf("continued after expected panic\n")
}()
}
}()
time.Sleep(time.Millisecond)
+ start := time.Now()
+ var times []time.Duration
for i := 0; i < 64; i++ {
go func() {
runtime.LockOSThread()
@@ -62,8 +65,9 @@ func CgoSignalDeadlock() {
ping <- false
select {
case <-ping:
+ times = append(times, time.Since(start))
case <-time.After(time.Second):
- fmt.Printf("HANG\n")
+ fmt.Printf("HANG 1 %v\n", times)
return
}
}
@@ -71,7 +75,7 @@ func CgoSignalDeadlock() {
select {
case <-ping:
case <-time.After(time.Second):
- fmt.Printf("HANG\n")
+ fmt.Printf("HANG 2 %v\n", times)
return
}
fmt.Printf("OK\n")
diff --git a/libgo/go/runtime/testdata/testprogcgo/numgoroutine.go b/libgo/go/runtime/testdata/testprogcgo/numgoroutine.go
new file mode 100644
index 00000000000..12fda49a131
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/numgoroutine.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+/*
+#include <stddef.h>
+#include <pthread.h>
+
+extern void CallbackNumGoroutine();
+
+static void* thread2(void* arg __attribute__ ((unused))) {
+ CallbackNumGoroutine();
+ return NULL;
+}
+
+static void CheckNumGoroutine() {
+ pthread_t tid;
+ pthread_create(&tid, NULL, thread2, NULL);
+ pthread_join(tid, NULL);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+var baseGoroutines int
+
+func init() {
+ register("NumGoroutine", NumGoroutine)
+}
+
+func NumGoroutine() {
+ // Test that there are just the expected number of goroutines
+ // running. Specifically, test that the spare M's goroutine
+ // doesn't show up.
+ //
+ // On non-Windows platforms there's a signal handling thread
+ // started by os/signal.init in addition to the main
+ // goroutine.
+ if runtime.GOOS != "windows" {
+ baseGoroutines = 1
+ }
+ if _, ok := checkNumGoroutine("first", 1+baseGoroutines); !ok {
+ return
+ }
+
+ // Test that the goroutine for a callback from C appears.
+ if C.CheckNumGoroutine(); !callbackok {
+ return
+ }
+
+ // Make sure we're back to the initial goroutines.
+ if _, ok := checkNumGoroutine("third", 1+baseGoroutines); !ok {
+ return
+ }
+
+ fmt.Println("OK")
+}
+
+func checkNumGoroutine(label string, want int) (string, bool) {
+ n := runtime.NumGoroutine()
+ if n != want {
+ fmt.Printf("%s NumGoroutine: want %d; got %d\n", label, want, n)
+ return "", false
+ }
+
+ sbuf := make([]byte, 32<<10)
+ sbuf = sbuf[:runtime.Stack(sbuf, true)]
+ n = strings.Count(string(sbuf), "goroutine ")
+ if n != want {
+ fmt.Printf("%s Stack: want %d; got %d:\n%s\n", label, want, n, string(sbuf))
+ return "", false
+ }
+ return string(sbuf), true
+}
+
+var callbackok bool
+
+//export CallbackNumGoroutine
+func CallbackNumGoroutine() {
+ stk, ok := checkNumGoroutine("second", 2+baseGoroutines)
+ if !ok {
+ return
+ }
+ if !strings.Contains(stk, "CallbackNumGoroutine") {
+ fmt.Printf("missing CallbackNumGoroutine from stack:\n%s\n", stk)
+ return
+ }
+
+ callbackok = true
+}
diff --git a/libgo/go/runtime/testdata/testprognet/signalexec.go b/libgo/go/runtime/testdata/testprognet/signalexec.go
new file mode 100644
index 00000000000..4a988ef6c1d
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprognet/signalexec.go
@@ -0,0 +1,70 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+// This is in testprognet instead of testprog because testprog
+// must not import anything (like net, but also like os/signal)
+// that kicks off background goroutines during init.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+)
+
+func init() {
+ register("SignalDuringExec", SignalDuringExec)
+ register("Nop", Nop)
+}
+
+func SignalDuringExec() {
+ pgrp := syscall.Getpgrp()
+
+ const tries = 10
+
+ var wg sync.WaitGroup
+ c := make(chan os.Signal, tries)
+ signal.Notify(c, syscall.SIGWINCH)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for range c {
+ }
+ }()
+
+ for i := 0; i < tries; i++ {
+ time.Sleep(time.Microsecond)
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ cmd := exec.Command(os.Args[0], "Nop")
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ fmt.Printf("Start failed: %v", err)
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ syscall.Kill(-pgrp, syscall.SIGWINCH)
+ }()
+ }
+
+ signal.Stop(c)
+ close(c)
+ wg.Wait()
+
+ fmt.Println("OK")
+}
+
+func Nop() {
+ // This is just for SignalDuringExec.
+}
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go
index cc167a8bcfe..f204830a6f7 100644
--- a/libgo/go/runtime/time.go
+++ b/libgo/go/runtime/time.go
@@ -31,6 +31,7 @@ var timers struct {
created bool
sleeping bool
rescheduling bool
+ sleepUntil int64
waitnote note
t []*timer
}
@@ -50,7 +51,12 @@ func timeSleep(ns int64) {
return
}
- t := new(timer)
+ t := getg().timer
+ if t == nil {
+ t = new(timer)
+ getg().timer = t
+ }
+ *t = timer{}
t.when = nanotime() + ns
t.f = goroutineReady
t.arg = getg()
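
The hunk above makes repeated time.Sleep calls allocation-free: the timer is cached on the sleeping g, and *t = timer{} wipes stale state before each reuse. A minimal sketch of the same lazily-allocate-then-reuse pattern outside the runtime (worker and its field are illustrative, not runtime API):

    package main

    import "fmt"

    type timer struct {
        when   int64
        period int64
    }

    // worker allocates its timer on first use and then reuses it,
    // clearing the fields each time, as timeSleep now does with
    // getg().timer.
    type worker struct {
        t *timer
    }

    func (w *worker) setup(when int64) *timer {
        t := w.t
        if t == nil {
            t = new(timer) // first use: allocate once
            w.t = t
        }
        *t = timer{} // wipe state left over from the previous use
        t.when = when
        return t
    }

    func main() {
        w := new(worker)
        first := w.setup(1)
        second := w.setup(2)
        fmt.Println(first == second) // true: no second allocation
    }
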
@@ -207,6 +213,7 @@ func timerproc() {
}
// At least one timer pending. Sleep until then.
timers.sleeping = true
+ timers.sleepUntil = now + delta
noteclear(&timers.waitnote)
unlock(&timers.lock)
notetsleepg(&timers.waitnote, delta)
@@ -295,8 +302,8 @@ func siftdownTimer(i int) {
// Entry points for net, time to call nanotime.
-//go:linkname net_runtimeNano net.runtimeNano
-func net_runtimeNano() int64 {
+//go:linkname poll_runtimeNano internal_poll.runtimeNano
+func poll_runtimeNano() int64 {
return nanotime()
}
@@ -304,3 +311,5 @@ func net_runtimeNano() int64 {
func time_runtimeNano() int64 {
return nanotime()
}
+
+var startNano int64 = nanotime()
diff --git a/libgo/go/runtime/timeasm.go b/libgo/go/runtime/timeasm.go
new file mode 100644
index 00000000000..d5f5ea33a30
--- /dev/null
+++ b/libgo/go/runtime/timeasm.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Declarations for operating systems implementing time.now directly in assembly.
+// Those systems are also expected to have nanotime subtract startNano,
+// so that time.now and nanotime return the same monotonic clock readings.
+
+// +build ignore
+// +build darwin,amd64 darwin,386 windows
+
+package runtime
+
+import _ "unsafe"
+
+//go:linkname time_now time.now
+func time_now() (sec int64, nsec int32, mono int64)
diff --git a/libgo/go/runtime/timestub.go b/libgo/go/runtime/timestub.go
new file mode 100644
index 00000000000..033734ed92f
--- /dev/null
+++ b/libgo/go/runtime/timestub.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Declarations for operating systems implementing time.now
+// indirectly, in terms of walltime and nanotime assembly.
+
+// -build !darwin !amd64,!386
+// -build !windows
+
+package runtime
+
+import _ "unsafe" // for go:linkname
+
+func walltime() (sec int64, nsec int32)
+
+//go:linkname time_now time.now
+func time_now() (sec int64, nsec int32, mono int64) {
+ sec, nsec = walltime()
+ return sec, nsec, nanotime() - startNano
+}
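
The new timestub.go gives time.Now a monotonic component: time.now returns nanotime() - startNano alongside the wall-clock reading, so elapsed-time measurement is immune to wall-clock steps. That is observable from ordinary code, assuming only the standard library:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        start := time.Now() // carries wall and monotonic readings
        time.Sleep(10 * time.Millisecond)
        // Since subtracts the monotonic readings, so the result is
        // correct even if the wall clock steps in between.
        fmt.Println(time.Since(start) >= 10*time.Millisecond)
    }
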
diff --git a/libgo/go/runtime/trace.go b/libgo/go/runtime/trace.go
index 61cfa8e751c..af9313be37a 100644
--- a/libgo/go/runtime/trace.go
+++ b/libgo/go/runtime/trace.go
@@ -19,50 +19,52 @@ import (
// Event types in the trace, args are given in square brackets.
const (
- traceEvNone = 0 // unused
- traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
- traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
- traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
- traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
- traceEvProcStart = 5 // start of P [timestamp, thread id]
- traceEvProcStop = 6 // stop of P [timestamp]
- traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
- traceEvGCDone = 8 // GC done [timestamp]
- traceEvGCScanStart = 9 // GC mark termination start [timestamp]
- traceEvGCScanDone = 10 // GC mark termination done [timestamp]
- traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
- traceEvGCSweepDone = 12 // GC sweep done [timestamp]
- traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
- traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
- traceEvGoEnd = 15 // goroutine ends [timestamp]
- traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
- traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
- traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
- traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
- traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
- traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
- traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
- traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
- traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
- traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
- traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
- traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
- traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
- traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
- traceEvGoSysBlock = 30 // syscall blocks [timestamp]
- traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
- traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
- traceEvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc]
- traceEvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
- traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
- traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
- traceEvString = 37 // string dictionary entry [ID, length, string]
- traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
- traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
- traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
- traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
- traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
- traceEvCount = 43
+ traceEvNone = 0 // unused
+ traceEvBatch = 1 // start of per-P batch of events [pid, timestamp]
+ traceEvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)]
+ traceEvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
+ traceEvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
+ traceEvProcStart = 5 // start of P [timestamp, thread id]
+ traceEvProcStop = 6 // stop of P [timestamp]
+ traceEvGCStart = 7 // GC start [timestamp, seq, stack id]
+ traceEvGCDone = 8 // GC done [timestamp]
+ traceEvGCScanStart = 9 // GC mark termination start [timestamp]
+ traceEvGCScanDone = 10 // GC mark termination done [timestamp]
+ traceEvGCSweepStart = 11 // GC sweep start [timestamp, stack id]
+ traceEvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed]
+ traceEvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
+ traceEvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq]
+ traceEvGoEnd = 15 // goroutine ends [timestamp]
+ traceEvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack]
+ traceEvGoSched = 17 // goroutine calls Gosched [timestamp, stack]
+ traceEvGoPreempt = 18 // goroutine is preempted [timestamp, stack]
+ traceEvGoSleep = 19 // goroutine calls Sleep [timestamp, stack]
+ traceEvGoBlock = 20 // goroutine blocks [timestamp, stack]
+ traceEvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
+ traceEvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack]
+ traceEvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack]
+ traceEvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack]
+ traceEvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
+ traceEvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack]
+ traceEvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack]
+ traceEvGoSysCall = 28 // syscall enter [timestamp, stack]
+ traceEvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
+ traceEvGoSysBlock = 30 // syscall blocks [timestamp]
+ traceEvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
+ traceEvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
+ traceEvHeapAlloc = 33 // memstats.heap_live change [timestamp, heap_alloc]
+ traceEvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
+ traceEvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
+ traceEvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
+ traceEvString = 37 // string dictionary entry [ID, length, string]
+ traceEvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
+ traceEvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
+ traceEvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
+ traceEvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
+ traceEvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack]
+ traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
+ traceEvGCMarkAssistDone = 44 // GC mark assist done [timestamp]
+ traceEvCount = 45
)
const (
@@ -311,7 +313,7 @@ func StopTrace() {
// The world is started but we've set trace.shutdown, so new tracing can't start.
// Wait for the trace reader to flush pending buffers and stop.
- semacquire(&trace.shutdownSema, 0)
+ semacquire(&trace.shutdownSema)
if raceenabled {
raceacquire(unsafe.Pointer(&trace.shutdownSema))
}
@@ -380,7 +382,7 @@ func ReadTrace() []byte {
trace.headerWritten = true
trace.lockOwner = nil
unlock(&trace.lock)
- return []byte("go 1.8 trace\x00\x00\x00\x00")
+ return []byte("go 1.9 trace\x00\x00\x00\x00")
}
// Wait for new data.
if trace.fullHead == 0 && !trace.shutdown {
@@ -776,11 +778,12 @@ func (tab *traceStackTable) dump() {
for ; stk != nil; stk = stk.link.ptr() {
tmpbuf := tmp[:0]
tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
- tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
- for _, pc := range stk.stack() {
+ frames := stk.stack()
+ tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
+ for _, f := range frames {
var frame traceFrame
- frame, buf = traceFrameForPC(buf, pc)
- tmpbuf = traceAppend(tmpbuf, uint64(pc.pc))
+ frame, buf = traceFrameForPC(buf, f)
+ tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
@@ -810,16 +813,17 @@ type traceFrame struct {
line uint64
}
-func traceFrameForPC(buf *traceBuf, loc location) (traceFrame, *traceBuf) {
+func traceFrameForPC(buf *traceBuf, f location) (traceFrame, *traceBuf) {
var frame traceFrame
- fn := loc.function
+
+ fn := f.function
const maxLen = 1 << 10
if len(fn) > maxLen {
fn = fn[len(fn)-maxLen:]
}
frame.funcID, buf = traceString(buf, fn)
- file, line := loc.filename, loc.lineno
- frame.line = uint64(line)
+ frame.line = uint64(f.lineno)
+ file := f.filename
if len(file) > maxLen {
file = file[len(file)-maxLen:]
}
@@ -921,12 +925,52 @@ func traceGCScanDone() {
traceEvent(traceEvGCScanDone, -1)
}
+// traceGCSweepStart prepares to trace a sweep loop. This does not
+// emit any events until traceGCSweepSpan is called.
+//
+// traceGCSweepStart must be paired with traceGCSweepDone and there
+// must be no preemption points between these two calls.
func traceGCSweepStart() {
- traceEvent(traceEvGCSweepStart, 1)
+ // Delay the actual GCSweepStart event until the first span
+ // sweep. If we don't sweep anything, don't emit any events.
+ _p_ := getg().m.p.ptr()
+ if _p_.traceSweep {
+ throw("double traceGCSweepStart")
+ }
+ _p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
+}
+
+// traceGCSweepSpan traces the sweep of a single page.
+//
+// This may be called outside a traceGCSweepStart/traceGCSweepDone
+// pair; however, it will not emit any trace events in this case.
+func traceGCSweepSpan(bytesSwept uintptr) {
+ _p_ := getg().m.p.ptr()
+ if _p_.traceSweep {
+ if _p_.traceSwept == 0 {
+ traceEvent(traceEvGCSweepStart, 1)
+ }
+ _p_.traceSwept += bytesSwept
+ }
}
func traceGCSweepDone() {
- traceEvent(traceEvGCSweepDone, -1)
+ _p_ := getg().m.p.ptr()
+ if !_p_.traceSweep {
+ throw("missing traceGCSweepStart")
+ }
+ if _p_.traceSwept != 0 {
+ traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
+ }
+ _p_.traceSweep = false
+}
+
+func traceGCMarkAssistStart() {
+ traceEvent(traceEvGCMarkAssistStart, 1)
+}
+
+func traceGCMarkAssistDone() {
+ traceEvent(traceEvGCMarkAssistDone, -1)
}
func traceGoCreate(newg *g, pc uintptr) {
@@ -967,7 +1011,7 @@ func traceGoPreempt() {
traceEvent(traceEvGoPreempt, 1)
}
-func traceGoPark(traceEv byte, skip int, gp *g) {
+func traceGoPark(traceEv byte, skip int) {
if traceEv&traceFutileWakeup != 0 {
traceEvent(traceEvFutileWakeup, -1)
}
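
The reworked sweep tracing above is deliberately lazy: traceGCSweepStart only arms a per-P flag, the first traceGCSweepSpan emits the start event, and traceGCSweepDone flushes aggregate swept/reclaimed counts, so sweep loops that find no work leave no events at all. A self-contained sketch of that arm/emit-on-first-work/flush shape (sweepTracer is an illustrative type, not runtime API):

    package main

    import "fmt"

    type sweepTracer struct {
        active bool
        swept  uintptr
    }

    func (t *sweepTracer) start() {
        if t.active {
            panic("double start")
        }
        t.active, t.swept = true, 0
    }

    func (t *sweepTracer) span(bytes uintptr) {
        if t.active {
            if t.swept == 0 {
                fmt.Println("event: sweep start") // emitted lazily
            }
            t.swept += bytes
        }
    }

    func (t *sweepTracer) done() {
        if !t.active {
            panic("missing start")
        }
        if t.swept != 0 {
            fmt.Println("event: sweep done,", t.swept, "bytes")
        }
        t.active = false
    }

    func main() {
        var t sweepTracer
        t.start()
        t.done() // swept nothing: no events at all
        t.start()
        t.span(4096)
        t.span(8192)
        t.done() // start event plus "sweep done, 12288 bytes"
    }
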
diff --git a/libgo/go/runtime/trace/trace_stack_test.go b/libgo/go/runtime/trace/trace_stack_test.go
index c37b33de862..274cdf7800a 100644
--- a/libgo/go/runtime/trace/trace_stack_test.go
+++ b/libgo/go/runtime/trace/trace_stack_test.go
@@ -151,7 +151,7 @@ func TestTraceSymbolize(t *testing.T) {
{"testing.tRunner", 0},
}},
{trace.EvGoCreate, []frame{
- {"runtime/trace_test.TestTraceSymbolize", 39},
+ {"runtime/trace_test.TestTraceSymbolize", 37},
{"testing.tRunner", 0},
}},
{trace.EvGoStop, []frame{
@@ -231,6 +231,7 @@ func TestTraceSymbolize(t *testing.T) {
if runtime.GOOS != "windows" && runtime.GOOS != "plan9" {
want = append(want, []eventDesc{
{trace.EvGoBlockNet, []frame{
+ {"internal/poll.(*FD).Accept", 0},
{"net.(*netFD).accept", 0},
{"net.(*TCPListener).accept", 0},
{"net.(*TCPListener).Accept", 0},
@@ -239,6 +240,7 @@ func TestTraceSymbolize(t *testing.T) {
{trace.EvGoSysCall, []frame{
{"syscall.read", 0},
{"syscall.Read", 0},
+ {"internal/poll.(*FD).Read", 0},
{"os.(*File).read", 0},
{"os.(*File).Read", 0},
{"runtime/trace_test.TestTraceSymbolize.func11", 102},
@@ -274,9 +276,10 @@ func TestTraceSymbolize(t *testing.T) {
continue
}
for _, f := range ev.Stk {
- t.Logf(" %v:%v", f.Fn, f.Line)
+ t.Logf(" %v :: %s:%v", f.Fn, f.File, f.Line)
}
t.Logf("---")
}
+ t.Logf("======")
}
}
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index 715772edf92..f29ccd7b564 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -12,14 +12,6 @@ import (
_ "unsafe" // for go:linkname
)
-// For gccgo, use go:linkname to rename compiler-called functions to
-// themselves, so that the compiler will export them.
-// These are temporary for C runtime code to call.
-//go:linkname traceback runtime.traceback
-//go:linkname printtrace runtime.printtrace
-//go:linkname goroutineheader runtime.goroutineheader
-//go:linkname printcreatedby runtime.printcreatedby
-
func printcreatedby(gp *g) {
// Show what created goroutine, except main goroutine (goid 1).
pc := gp.gopc
@@ -71,6 +63,7 @@ func traceback(skip int32) {
var locbuf [100]location
c := c_callers(skip+1, &locbuf[0], int32(len(locbuf)), false)
printtrace(locbuf[:c], getg())
+ printcreatedby(getg())
}
// printtrace prints a traceback from locbuf.
@@ -223,7 +216,7 @@ func tracebackothers(me *g) {
print("\tgoroutine running on other thread; stack unavailable\n")
printcreatedby(gp)
} else if readgstatus(gp)&^_Gscan == _Gsyscall {
- print("\tgoroutine in C code; stack unavailable\n")
+ print("\tin C code; stack unavailable\n")
printcreatedby(gp)
} else {
gp.traceback = &tb
diff --git a/libgo/go/runtime/write_err_android.go b/libgo/go/runtime/write_err_android.go
index 748dec644c6..bf99b5f6c5b 100644
--- a/libgo/go/runtime/write_err_android.go
+++ b/libgo/go/runtime/write_err_android.go
@@ -144,7 +144,7 @@ func writeLogdHeader() int {
// hdr[3:7] sec unsigned uint32, little endian.
// hdr[7:11] nsec unsigned uint32, little endian.
hdr[0] = 0 // LOG_ID_MAIN
- sec, nsec := time_now()
+ sec, nsec := walltime()
packUint32(hdr[3:7], uint32(sec))
packUint32(hdr[7:11], uint32(nsec))
diff --git a/libgo/go/sort/example_test.go b/libgo/go/sort/example_test.go
index 980c0d03680..f8d8491bc4c 100644
--- a/libgo/go/sort/example_test.go
+++ b/libgo/go/sort/example_test.go
@@ -41,3 +41,38 @@ func ExampleSlice() {
// Output: By name: [{Alice 55} {Bob 75} {Gopher 7} {Vera 24}]
// By age: [{Gopher 7} {Vera 24} {Alice 55} {Bob 75}]
}
+
+func ExampleSliceStable() {
+
+ people := []struct {
+ Name string
+ Age int
+ }{
+ {"Alice", 25},
+ {"Elizabeth", 75},
+ {"Alice", 75},
+ {"Bob", 75},
+ {"Alice", 75},
+ {"Bob", 25},
+ {"Colin", 25},
+ {"Elizabeth", 25},
+ }
+
+ // Sort by name, preserving original order
+ sort.SliceStable(people, func(i, j int) bool { return people[i].Name < people[j].Name })
+ fmt.Println("By name:", people)
+
+ // Sort by age preserving name order
+ sort.SliceStable(people, func(i, j int) bool { return people[i].Age < people[j].Age })
+ fmt.Println("By age,name:", people)
+
+ // Output: By name: [{Alice 25} {Alice 75} {Alice 75} {Bob 75} {Bob 25} {Colin 25} {Elizabeth 75} {Elizabeth 25}]
+ // By age,name: [{Alice 25} {Bob 25} {Colin 25} {Elizabeth 25} {Alice 75} {Alice 75} {Bob 75} {Elizabeth 75}]
+}
+
+func ExampleStrings() {
+ s := []string{"Go", "Bravo", "Gopher", "Alpha", "Grin", "Delta"}
+ sort.Strings(s)
+ fmt.Println(s)
+ // Output: [Alpha Bravo Delta Go Gopher Grin]
+}
diff --git a/libgo/go/sort/genzfunc.go b/libgo/go/sort/genzfunc.go
index 6d2b471b62e..3bb7691f6a8 100644
--- a/libgo/go/sort/genzfunc.go
+++ b/libgo/go/sort/genzfunc.go
@@ -115,6 +115,10 @@ func rewriteCall(ce *ast.CallExpr) {
// e.g. skip SelectorExpr (data.Less(..) calls)
return
}
+ // skip casts
+ if ident.Name == "int" || ident.Name == "uint" {
+ return
+ }
if len(ce.Args) < 1 {
return
}
diff --git a/libgo/go/sort/search.go b/libgo/go/sort/search.go
index b9640a40aff..fcff0f9491b 100644
--- a/libgo/go/sort/search.go
+++ b/libgo/go/sort/search.go
@@ -24,7 +24,7 @@ package sort
//
// For instance, given a slice data sorted in ascending order,
// the call Search(len(data), func(i int) bool { return data[i] >= 23 })
-// returns the smallest index i such that data[i] >= 23. If the caller
+// returns the smallest index i such that data[i] >= 23. If the caller
// wants to find whether 23 is in the slice, it must test data[i] == 23
// separately.
//
@@ -61,7 +61,7 @@ func Search(n int, f func(int) bool) int {
// Invariant: f(i-1) == false, f(j) == true.
i, j := 0, n
for i < j {
- h := i + (j-i)/2 // avoid overflow when computing h
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if !f(h) {
i = h + 1 // preserves f(i-1) == false
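
Both midpoint forms avoid signed overflow; int(uint(i+j) >> 1) is just cheaper. It is safe because valid indices never exceed the maximum int, so i+j always fits in a uint even when it would wrap the signed range. A quick check of the equivalence:

    package main

    import "fmt"

    const maxInt = int(^uint(0) >> 1)

    // mid computes the midpoint of 0 <= i <= j without overflow:
    // the sum may wrap as an int, but reinterpreted as a uint it
    // is exact, and the shifted result always fits back in an int.
    func mid(i, j int) int {
        return int(uint(i+j) >> 1)
    }

    func main() {
        i, j := maxInt-1, maxInt
        fmt.Println(mid(i, j) == i+(j-i)/2) // true: agrees with the old form
        fmt.Println(mid(0, 10), mid(3, 4))  // 5 3
    }
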
diff --git a/libgo/go/sort/sort.go b/libgo/go/sort/sort.go
index 72d24efceab..081b7007989 100644
--- a/libgo/go/sort/sort.go
+++ b/libgo/go/sort/sort.go
@@ -96,7 +96,7 @@ func swapRange(data Interface, a, b, n int) {
}
func doPivot(data Interface, lo, hi int) (midlo, midhi int) {
- m := lo + (hi-lo)/2 // Written like this to avoid integer overflow.
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
if hi-lo > 40 {
// Tukey's ``Ninther,'' median of three medians of three.
s := (hi - lo) / 8
@@ -314,7 +314,8 @@ func (p IntSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Sort is a convenience method.
func (p IntSlice) Sort() { Sort(p) }
-// Float64Slice attaches the methods of Interface to []float64, sorting in increasing order.
+// Float64Slice attaches the methods of Interface to []float64, sorting in increasing order
+// (not-a-number values are treated as less than other values).
type Float64Slice []float64
func (p Float64Slice) Len() int { return len(p) }
@@ -344,7 +345,8 @@ func (p StringSlice) Sort() { Sort(p) }
// Ints sorts a slice of ints in increasing order.
func Ints(a []int) { Sort(IntSlice(a)) }
-// Float64s sorts a slice of float64s in increasing order.
+// Float64s sorts a slice of float64s in increasing order
+// (not-a-number values are treated as less than other values).
func Float64s(a []float64) { Sort(Float64Slice(a)) }
// Strings sorts a slice of strings in increasing order.
@@ -353,7 +355,8 @@ func Strings(a []string) { Sort(StringSlice(a)) }
// IntsAreSorted tests whether a slice of ints is sorted in increasing order.
func IntsAreSorted(a []int) bool { return IsSorted(IntSlice(a)) }
-// Float64sAreSorted tests whether a slice of float64s is sorted in increasing order.
+// Float64sAreSorted tests whether a slice of float64s is sorted in increasing order
+// (not-a-number values are treated as less than other values).
func Float64sAreSorted(a []float64) bool { return IsSorted(Float64Slice(a)) }
// StringsAreSorted tests whether a slice of strings is sorted in increasing order.
@@ -447,7 +450,7 @@ func symMerge(data Interface, a, m, b int) {
i := m
j := b
for i < j {
- h := i + (j-i)/2
+ h := int(uint(i+j) >> 1)
if data.Less(h, a) {
i = h + 1
} else {
@@ -471,7 +474,7 @@ func symMerge(data Interface, a, m, b int) {
i := a
j := m
for i < j {
- h := i + (j-i)/2
+ h := int(uint(i+j) >> 1)
if !data.Less(m, h) {
i = h + 1
} else {
@@ -485,7 +488,7 @@ func symMerge(data Interface, a, m, b int) {
return
}
- mid := a + (b-a)/2
+ mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
@@ -498,7 +501,7 @@ func symMerge(data Interface, a, m, b int) {
p := n - 1
for start < r {
- c := start + (r-start)/2
+ c := int(uint(start+r) >> 1)
if !data.Less(p-c, c) {
start = c + 1
} else {
diff --git a/libgo/go/sort/zfuncversion.go b/libgo/go/sort/zfuncversion.go
index 7abb18a24d5..99c95a22c1f 100644
--- a/libgo/go/sort/zfuncversion.go
+++ b/libgo/go/sort/zfuncversion.go
@@ -70,7 +70,7 @@ func swapRange_func(data lessSwap, a, b, n int) {
// Auto-generated variant of sort.go:doPivot
func doPivot_func(data lessSwap, lo, hi int) (midlo, midhi int) {
- m := lo + (hi-lo)/2
+ m := int(uint(lo+hi) >> 1)
if hi-lo > 40 {
s := (hi - lo) / 8
medianOfThree_func(data, lo, lo+s, lo+2*s)
@@ -189,7 +189,7 @@ func symMerge_func(data lessSwap, a, m, b int) {
i := m
j := b
for i < j {
- h := i + (j-i)/2
+ h := int(uint(i+j) >> 1)
if data.Less(h, a) {
i = h + 1
} else {
@@ -205,7 +205,7 @@ func symMerge_func(data lessSwap, a, m, b int) {
i := a
j := m
for i < j {
- h := i + (j-i)/2
+ h := int(uint(i+j) >> 1)
if !data.Less(m, h) {
i = h + 1
} else {
@@ -217,7 +217,7 @@ func symMerge_func(data lessSwap, a, m, b int) {
}
return
}
- mid := a + (b-a)/2
+ mid := int(uint(a+b) >> 1)
n := mid + m
var start, r int
if m > mid {
@@ -229,7 +229,7 @@ func symMerge_func(data lessSwap, a, m, b int) {
}
p := n - 1
for start < r {
- c := start + (r-start)/2
+ c := int(uint(start+r) >> 1)
if !data.Less(p-c, c) {
start = c + 1
} else {
diff --git a/libgo/go/strconv/atof_test.go b/libgo/go/strconv/atof_test.go
index 0a89c3e0bfa..3380b200884 100644
--- a/libgo/go/strconv/atof_test.go
+++ b/libgo/go/strconv/atof_test.go
@@ -10,6 +10,7 @@ import (
"reflect"
. "strconv"
"strings"
+ "sync"
"testing"
"time"
)
@@ -213,12 +214,17 @@ type atofSimpleTest struct {
}
var (
+ atofOnce sync.Once
atofRandomTests []atofSimpleTest
benchmarksRandomBits [1024]string
benchmarksRandomNormal [1024]string
)
-func init() {
+func initAtof() {
+ atofOnce.Do(initAtofOnce)
+}
+
+func initAtofOnce() {
// The atof routines return NumErrors wrapping
// the error and the string. Convert the table above.
for i := range atoftests {
@@ -261,6 +267,7 @@ func init() {
}
func testAtof(t *testing.T, opt bool) {
+ initAtof()
oldopt := SetOptimize(opt)
for i := 0; i < len(atoftests); i++ {
test := &atoftests[i]
@@ -306,6 +313,7 @@ func TestAtof(t *testing.T) { testAtof(t, true) }
func TestAtofSlow(t *testing.T) { testAtof(t, false) }
func TestAtofRandom(t *testing.T) {
+ initAtof()
for _, test := range atofRandomTests {
x, _ := ParseFloat(test.s, 64)
switch {
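
Moving the table construction from init into a sync.Once guard means only tests that actually parse floats pay for building the random inputs. The same lazy-initialization shape in isolation:

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        tableOnce sync.Once
        table     []string
    )

    // initTable is safe to call from any goroutine, any number of
    // times; buildTable runs exactly once, on first use.
    func initTable() {
        tableOnce.Do(buildTable)
    }

    func buildTable() {
        fmt.Println("building table")
        table = []string{"a", "b", "c"}
    }

    func main() {
        initTable()
        initTable() // no second "building table" line
        fmt.Println(len(table))
    }
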
diff --git a/libgo/go/strconv/decimal.go b/libgo/go/strconv/decimal.go
index 957acd98918..b58001888e8 100644
--- a/libgo/go/strconv/decimal.go
+++ b/libgo/go/strconv/decimal.go
@@ -15,8 +15,8 @@ type decimal struct {
d [800]byte // digits, big-endian representation
nd int // number of digits used
dp int // decimal point
- neg bool
- trunc bool // discarded nonzero digits beyond d[:nd]
+ neg bool // negative flag
+ trunc bool // discarded nonzero digits beyond d[:nd]
}
func (a *decimal) String() string {
diff --git a/libgo/go/strconv/itoa.go b/libgo/go/strconv/itoa.go
index f50d8779408..78527c8ae66 100644
--- a/libgo/go/strconv/itoa.go
+++ b/libgo/go/strconv/itoa.go
@@ -4,10 +4,15 @@
package strconv
+const fastSmalls = true // enable fast path for small integers
+
// FormatUint returns the string representation of i in the given base,
// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
// for digit values >= 10.
func FormatUint(i uint64, base int) string {
+ if fastSmalls && i < nSmalls && base == 10 {
+ return small(int(i))
+ }
_, s := formatBits(nil, i, base, false, false)
return s
}
@@ -16,6 +21,9 @@ func FormatUint(i uint64, base int) string {
// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z'
// for digit values >= 10.
func FormatInt(i int64, base int) string {
+ if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
+ return small(int(i))
+ }
_, s := formatBits(nil, uint64(i), base, i < 0, false)
return s
}
@@ -28,6 +36,9 @@ func Itoa(i int) string {
// AppendInt appends the string form of the integer i,
// as generated by FormatInt, to dst and returns the extended buffer.
func AppendInt(dst []byte, i int64, base int) []byte {
+ if fastSmalls && 0 <= i && i < nSmalls && base == 10 {
+ return append(dst, small(int(i))...)
+ }
dst, _ = formatBits(dst, uint64(i), base, i < 0, true)
return dst
}
@@ -35,13 +46,38 @@ func AppendInt(dst []byte, i int64, base int) []byte {
// AppendUint appends the string form of the unsigned integer i,
// as generated by FormatUint, to dst and returns the extended buffer.
func AppendUint(dst []byte, i uint64, base int) []byte {
+ if fastSmalls && i < nSmalls && base == 10 {
+ return append(dst, small(int(i))...)
+ }
dst, _ = formatBits(dst, i, base, false, true)
return dst
}
-const (
- digits = "0123456789abcdefghijklmnopqrstuvwxyz"
-)
+// small returns the string for an i with 0 <= i < nSmalls.
+func small(i int) string {
+ off := 0
+ if i < 10 {
+ off = 1
+ }
+ return smallsString[i*2+off : i*2+2]
+}
+
+const nSmalls = 100
+
+const smallsString = "00010203040506070809" +
+ "10111213141516171819" +
+ "20212223242526272829" +
+ "30313233343536373839" +
+ "40414243444546474849" +
+ "50515253545556575859" +
+ "60616263646566676869" +
+ "70717273747576777879" +
+ "80818283848586878889" +
+ "90919293949596979899"
+
+const host32bit = ^uint(0)>>32 == 0
+
+const digits = "0123456789abcdefghijklmnopqrstuvwxyz"
var shifts = [len(digits) + 1]uint{
1 << 1: 1,
@@ -71,61 +107,84 @@ func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s
}
// convert bits
+ // We use uint values where we can because those will
+ // fit into a single register even on a 32bit machine.
if base == 10 {
// common case: use constants for / because
// the compiler can optimize it into a multiply+shift
- if ^uintptr(0)>>32 == 0 {
- for u > uint64(^uintptr(0)) {
+ if host32bit {
+ // convert the lower digits using 32bit operations
+ for u >= 1e9 {
+ // Avoid using r = a%b in addition to q = a/b
+ // since 64bit division and modulo operations
+ // are calculated by runtime functions on 32bit machines.
q := u / 1e9
- us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr
- for j := 9; j > 0; j-- {
- i--
- qs := us / 10
- a[i] = byte(us - qs*10 + '0')
- us = qs
+ us := uint(u - q*1e9) // u % 1e9 fits into a uint
+ for j := 4; j > 0; j-- {
+ is := us % 100 * 2
+ us /= 100
+ i -= 2
+ a[i+1] = smallsString[is+1]
+ a[i+0] = smallsString[is+0]
}
+
+ // us < 10, since it contains the last digit
+ // from the initial 9-digit us.
+ i--
+ a[i] = smallsString[us*2+1]
+
u = q
}
+ // u < 1e9
}
- // u guaranteed to fit into a uintptr
- us := uintptr(u)
- for us >= 10 {
- i--
- q := us / 10
- a[i] = byte(us - q*10 + '0')
- us = q
+ // u guaranteed to fit into a uint
+ us := uint(u)
+ for us >= 100 {
+ is := us % 100 * 2
+ us /= 100
+ i -= 2
+ a[i+1] = smallsString[is+1]
+ a[i+0] = smallsString[is+0]
}
- // u < 10
+
+ // us < 100
+ is := us * 2
i--
- a[i] = byte(us + '0')
+ a[i] = smallsString[is+1]
+ if us >= 10 {
+ i--
+ a[i] = smallsString[is]
+ }
} else if s := shifts[base]; s > 0 {
// base is power of 2: use shifts and masks instead of / and %
b := uint64(base)
- m := uintptr(b) - 1 // == 1<<s - 1
+ m := uint(base) - 1 // == 1<<s - 1
for u >= b {
i--
- a[i] = digits[uintptr(u)&m]
+ a[i] = digits[uint(u)&m]
u >>= s
}
// u < base
i--
- a[i] = digits[uintptr(u)]
-
+ a[i] = digits[uint(u)]
} else {
// general case
b := uint64(base)
for u >= b {
i--
+ // Avoid using r = a%b in addition to q = a/b
+ // since 64bit division and modulo operations
+ // are calculated by runtime functions on 32bit machines.
q := u / b
- a[i] = digits[uintptr(u-q*b)]
+ a[i] = digits[uint(u-q*b)]
u = q
}
// u < base
i--
- a[i] = digits[uintptr(u)]
+ a[i] = digits[uint(u)]
}
// add sign, if any
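
The rewritten base-10 path peels off two decimal digits per division by indexing a 200-byte digit-pair table, halving the number of divide operations. A standalone sketch of the pair-table trick (pairs and format10 are illustrative names, not the strconv API):

    package main

    import "fmt"

    const pairs = "00010203040506070809" +
        "10111213141516171819" +
        "20212223242526272829" +
        "30313233343536373839" +
        "40414243444546474849" +
        "50515253545556575859" +
        "60616263646566676869" +
        "70717273747576777879" +
        "80818283848586878889" +
        "90919293949596979899"

    // format10 converts u to decimal, emitting two digits per
    // u/100 step instead of one per u/10 step.
    func format10(u uint) string {
        var a [20]byte
        i := len(a)
        for u >= 100 {
            is := u % 100 * 2
            u /= 100
            i -= 2
            a[i+1] = pairs[is+1]
            a[i+0] = pairs[is+0]
        }
        // u < 100: one or two digits remain.
        is := u * 2
        i--
        a[i] = pairs[is+1]
        if u >= 10 {
            i--
            a[i] = pairs[is]
        }
        return string(a[i:])
    }

    func main() {
        fmt.Println(format10(0), format10(42), format10(1234567890))
    }
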
diff --git a/libgo/go/strconv/itoa_test.go b/libgo/go/strconv/itoa_test.go
index 48dc03e8390..89c2de6941a 100644
--- a/libgo/go/strconv/itoa_test.go
+++ b/libgo/go/strconv/itoa_test.go
@@ -126,10 +126,46 @@ func TestUitoa(t *testing.T) {
}
}
+var varlenUints = []struct {
+ in uint64
+ out string
+}{
+ {1, "1"},
+ {12, "12"},
+ {123, "123"},
+ {1234, "1234"},
+ {12345, "12345"},
+ {123456, "123456"},
+ {1234567, "1234567"},
+ {12345678, "12345678"},
+ {123456789, "123456789"},
+ {1234567890, "1234567890"},
+ {12345678901, "12345678901"},
+ {123456789012, "123456789012"},
+ {1234567890123, "1234567890123"},
+ {12345678901234, "12345678901234"},
+ {123456789012345, "123456789012345"},
+ {1234567890123456, "1234567890123456"},
+ {12345678901234567, "12345678901234567"},
+ {123456789012345678, "123456789012345678"},
+ {1234567890123456789, "1234567890123456789"},
+ {12345678901234567890, "12345678901234567890"},
+}
+
+func TestFormatUintVarlen(t *testing.T) {
+ for _, test := range varlenUints {
+ s := FormatUint(test.in, 10)
+ if s != test.out {
+ t.Errorf("FormatUint(%v, 10) = %v want %v", test.in, s, test.out)
+ }
+ }
+}
+
func BenchmarkFormatInt(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, test := range itob64tests {
- FormatInt(test.in, test.base)
+ s := FormatInt(test.in, test.base)
+ BenchSink += len(s)
}
}
}
@@ -138,7 +174,8 @@ func BenchmarkAppendInt(b *testing.B) {
dst := make([]byte, 0, 30)
for i := 0; i < b.N; i++ {
for _, test := range itob64tests {
- AppendInt(dst, test.in, test.base)
+ dst = AppendInt(dst[:0], test.in, test.base)
+ BenchSink += len(dst)
}
}
}
@@ -146,7 +183,8 @@ func BenchmarkAppendInt(b *testing.B) {
func BenchmarkFormatUint(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, test := range uitob64tests {
- FormatUint(test.in, test.base)
+ s := FormatUint(test.in, test.base)
+ BenchSink += len(s)
}
}
}
@@ -155,7 +193,39 @@ func BenchmarkAppendUint(b *testing.B) {
dst := make([]byte, 0, 30)
for i := 0; i < b.N; i++ {
for _, test := range uitob64tests {
- AppendUint(dst, test.in, test.base)
+ dst = AppendUint(dst[:0], test.in, test.base)
+ BenchSink += len(dst)
}
}
}
+
+func BenchmarkFormatIntSmall(b *testing.B) {
+ const smallInt = 42
+ for i := 0; i < b.N; i++ {
+ s := FormatInt(smallInt, 10)
+ BenchSink += len(s)
+ }
+}
+
+func BenchmarkAppendIntSmall(b *testing.B) {
+ dst := make([]byte, 0, 30)
+ const smallInt = 42
+ for i := 0; i < b.N; i++ {
+ dst = AppendInt(dst[:0], smallInt, 10)
+ BenchSink += len(dst)
+ }
+}
+
+func BenchmarkAppendUintVarlen(b *testing.B) {
+ for _, test := range varlenUints {
+ b.Run(test.out, func(b *testing.B) {
+ dst := make([]byte, 0, 30)
+ for j := 0; j < b.N; j++ {
+ dst = AppendUint(dst[:0], test.in, 10)
+ BenchSink += len(dst)
+ }
+ })
+ }
+}
+
+var BenchSink int // make sure compiler cannot optimize away benchmarks
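
Accumulating each result's length into the package-level BenchSink keeps the compiler from proving the Format/Append calls dead and deleting the loop bodies. The same sink idiom in a generic benchmark, as a sketch:

    package mypkg_test

    import (
        "strconv"
        "testing"
    )

    // Sink is written on every iteration so the compiler cannot
    // eliminate the benchmarked call as dead code.
    var Sink int

    func BenchmarkFormat(b *testing.B) {
        for i := 0; i < b.N; i++ {
            Sink += len(strconv.FormatInt(int64(i), 10))
        }
    }
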
diff --git a/libgo/go/strconv/quote.go b/libgo/go/strconv/quote.go
index 76c5c2a1cbb..db57065cac1 100644
--- a/libgo/go/strconv/quote.go
+++ b/libgo/go/strconv/quote.go
@@ -32,7 +32,7 @@ func appendQuotedWith(buf []byte, s string, quote byte, ASCIIonly, graphicOnly b
buf = append(buf, lowerhex[s[0]&0xF])
continue
}
- buf = appendEscapedRune(buf, r, width, quote, ASCIIonly, graphicOnly)
+ buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
}
buf = append(buf, quote)
return buf
@@ -43,12 +43,12 @@ func appendQuotedRuneWith(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly
if !utf8.ValidRune(r) {
r = utf8.RuneError
}
- buf = appendEscapedRune(buf, r, utf8.RuneLen(r), quote, ASCIIonly, graphicOnly)
+ buf = appendEscapedRune(buf, r, quote, ASCIIonly, graphicOnly)
buf = append(buf, quote)
return buf
}
-func appendEscapedRune(buf []byte, r rune, width int, quote byte, ASCIIonly, graphicOnly bool) []byte {
+func appendEscapedRune(buf []byte, r rune, quote byte, ASCIIonly, graphicOnly bool) []byte {
var runeTmp [utf8.UTFMax]byte
if r == rune(quote) || r == '\\' { // always backslashed
buf = append(buf, '\\')
diff --git a/libgo/go/strings/example_test.go b/libgo/go/strings/example_test.go
index 3f9d63b5a4a..e9621522ef2 100644
--- a/libgo/go/strings/example_test.go
+++ b/libgo/go/strings/example_test.go
@@ -23,6 +23,16 @@ func ExampleFieldsFunc() {
// Output: Fields are: ["foo1" "bar2" "baz3"]
}
+func ExampleCompare() {
+ fmt.Println(strings.Compare("a", "b"))
+ fmt.Println(strings.Compare("a", "a"))
+ fmt.Println(strings.Compare("b", "a"))
+ // Output:
+ // -1
+ // 0
+ // 1
+}
+
func ExampleContains() {
fmt.Println(strings.Contains("seafood", "foo"))
fmt.Println(strings.Contains("seafood", "bar"))
@@ -47,6 +57,16 @@ func ExampleContainsAny() {
// false
}
+func ExampleContainsRune() {
+ // Finds whether a string contains a particular Unicode code point.
+ // The code point for the lowercase letter "a", for example, is 97.
+ fmt.Println(strings.ContainsRune("aardvark", 97))
+ fmt.Println(strings.ContainsRune("timeout", 97))
+ // Output:
+ // true
+ // false
+}
+
func ExampleCount() {
fmt.Println(strings.Count("cheese", "e"))
fmt.Println(strings.Count("five", "")) // before & after each rune
@@ -109,6 +129,15 @@ func ExampleIndexAny() {
// -1
}
+func ExampleIndexByte() {
+ fmt.Println(strings.IndexByte("golang", 'g'))
+ fmt.Println(strings.IndexByte("gophers", 'h'))
+ fmt.Println(strings.IndexByte("golang", 'x'))
+ // Output:
+ // 0
+ // 3
+ // -1
+}
func ExampleIndexRune() {
fmt.Println(strings.IndexRune("chicken", 'k'))
fmt.Println(strings.IndexRune("chicken", 'd'))
@@ -127,6 +156,16 @@ func ExampleLastIndex() {
// -1
}
+func ExampleLastIndexAny() {
+ fmt.Println(strings.LastIndexAny("go gopher", "go"))
+ fmt.Println(strings.LastIndexAny("go gopher", "rodent"))
+ fmt.Println(strings.LastIndexAny("go gopher", "fail"))
+ // Output:
+ // 4
+ // 8
+ // -1
+}
+
func ExampleJoin() {
s := []string{"foo", "bar", "baz"}
fmt.Println(strings.Join(s, ", "))
@@ -195,6 +234,14 @@ func ExampleTrim() {
// Output: ["Achtung! Achtung"]
}
+func ExampleTrimFunc() {
+ f := func(c rune) bool {
+ return !unicode.IsLetter(c) && !unicode.IsNumber(c)
+ }
+ fmt.Printf("[%q]", strings.TrimFunc(" Achtung1! Achtung2,...", f))
+ // Output: ["Achtung1! Achtung2"]
+}
+
func ExampleMap() {
rot13 := func(r rune) rune {
switch {
diff --git a/libgo/go/strings/replace_test.go b/libgo/go/strings/replace_test.go
index 77e48b988bc..34b5badfadf 100644
--- a/libgo/go/strings/replace_test.go
+++ b/libgo/go/strings/replace_test.go
@@ -540,3 +540,44 @@ func BenchmarkByteByteMap(b *testing.B) {
Map(fn, str)
}
}
+
+var mapdata = []struct{ name, data string }{
+ {"ASCII", "a b c d e f g h i j k l m n o p q r s t u v w x y z"},
+ {"Greek", "α β γ δ ε ζ η θ ι κ λ μ ν ξ ο π ρ ς σ τ υ φ χ ψ ω"},
+}
+
+func BenchmarkMap(b *testing.B) {
+ mapidentity := func(r rune) rune {
+ return r
+ }
+
+ b.Run("identity", func(b *testing.B) {
+ for _, md := range mapdata {
+ b.Run(md.name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Map(mapidentity, md.data)
+ }
+ })
+ }
+ })
+
+ mapchange := func(r rune) rune {
+ if 'a' <= r && r <= 'z' {
+ return r + 'A' - 'a'
+ }
+ if 'α' <= r && r <= 'ω' {
+ return r + 'Α' - 'α'
+ }
+ return r
+ }
+
+ b.Run("change", func(b *testing.B) {
+ for _, md := range mapdata {
+ b.Run(md.name, func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Map(mapchange, md.data)
+ }
+ })
+ }
+ })
+}
diff --git a/libgo/go/strings/strings.go b/libgo/go/strings/strings.go
index 60a281a6ac5..0c836c09d46 100644
--- a/libgo/go/strings/strings.go
+++ b/libgo/go/strings/strings.go
@@ -72,22 +72,20 @@ func hashStrRev(sep string) (uint32, uint32) {
return hash, pow
}
-// Count counts the number of non-overlapping instances of sep in s.
-// If sep is an empty string, Count returns 1 + the number of Unicode code points in s.
-func Count(s, sep string) int {
- n := 0
- // special cases
- if len(sep) == 0 {
+// countGeneric implements Count.
+func countGeneric(s, substr string) int {
+ // special case
+ if len(substr) == 0 {
return utf8.RuneCountInString(s) + 1
}
- offset := 0
+ n := 0
for {
- i := Index(s[offset:], sep)
+ i := Index(s, substr)
if i == -1 {
return n
}
n++
- offset += i + len(sep)
+ s = s[i+len(substr):]
}
}
@@ -106,16 +104,16 @@ func ContainsRune(s string, r rune) bool {
return IndexRune(s, r) >= 0
}
-// LastIndex returns the index of the last instance of sep in s, or -1 if sep is not present in s.
-func LastIndex(s, sep string) int {
- n := len(sep)
+// LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s.
+func LastIndex(s, substr string) int {
+ n := len(substr)
switch {
case n == 0:
return len(s)
case n == 1:
- return LastIndexByte(s, sep[0])
+ return LastIndexByte(s, substr[0])
case n == len(s):
- if sep == s {
+ if substr == s {
return 0
}
return -1
@@ -123,20 +121,20 @@ func LastIndex(s, sep string) int {
return -1
}
// Rabin-Karp search from the end of the string
- hashsep, pow := hashStrRev(sep)
+ hashss, pow := hashStrRev(substr)
last := len(s) - n
var h uint32
for i := len(s) - 1; i >= last; i-- {
h = h*primeRK + uint32(s[i])
}
- if h == hashsep && s[last:] == sep {
+ if h == hashss && s[last:] == substr {
return last
}
for i := last - 1; i >= 0; i-- {
h *= primeRK
h += uint32(s[i])
h -= pow * uint32(s[i+n])
- if h == hashsep && s[i:i+n] == sep {
+ if h == hashss && s[i:i+n] == substr {
return i
}
}
@@ -240,61 +238,187 @@ func genSplit(s, sep string, sepSave, n int) []string {
if n < 0 {
n = Count(s, sep) + 1
}
- c := sep[0]
- start := 0
+
a := make([]string, n)
- na := 0
- for i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {
- if s[i] == c && (len(sep) == 1 || s[i:i+len(sep)] == sep) {
- a[na] = s[start : i+sepSave]
- na++
- start = i + len(sep)
- i += len(sep) - 1
- }
+ n--
+ i := 0
+ for i < n {
+ m := Index(s, sep)
+ if m < 0 {
+ break
+ }
+ a[i] = s[:m+sepSave]
+ s = s[m+len(sep):]
+ i++
}
- a[na] = s[start:]
- return a[0 : na+1]
+ a[i] = s
+ return a[:i+1]
}
// SplitN slices s into substrings separated by sep and returns a slice of
// the substrings between those separators.
-// If sep is empty, SplitN splits after each UTF-8 sequence.
+//
// The count determines the number of substrings to return:
// n > 0: at most n substrings; the last substring will be the unsplit remainder.
// n == 0: the result is nil (zero substrings)
// n < 0: all substrings
+//
+// Edge cases for s and sep (for example, empty strings) are handled
+// as described in the documentation for Split.
func SplitN(s, sep string, n int) []string { return genSplit(s, sep, 0, n) }
// SplitAfterN slices s into substrings after each instance of sep and
// returns a slice of those substrings.
-// If sep is empty, SplitAfterN splits after each UTF-8 sequence.
+//
// The count determines the number of substrings to return:
// n > 0: at most n substrings; the last substring will be the unsplit remainder.
// n == 0: the result is nil (zero substrings)
// n < 0: all substrings
+//
+// Edge cases for s and sep (for example, empty strings) are handled
+// as described in the documentation for SplitAfter.
func SplitAfterN(s, sep string, n int) []string {
return genSplit(s, sep, len(sep), n)
}
// Split slices s into all substrings separated by sep and returns a slice of
// the substrings between those separators.
-// If sep is empty, Split splits after each UTF-8 sequence.
+//
+// If s does not contain sep and sep is not empty, Split returns a
+// slice of length 1 whose only element is s.
+//
+// If sep is empty, Split splits after each UTF-8 sequence. If both s
+// and sep are empty, Split returns an empty slice.
+//
// It is equivalent to SplitN with a count of -1.
func Split(s, sep string) []string { return genSplit(s, sep, 0, -1) }
// SplitAfter slices s into all substrings after each instance of sep and
// returns a slice of those substrings.
-// If sep is empty, SplitAfter splits after each UTF-8 sequence.
+//
+// If s does not contain sep and sep is not empty, SplitAfter returns
+// a slice of length 1 whose only element is s.
+//
+// If sep is empty, SplitAfter splits after each UTF-8 sequence. If
+// both s and sep are empty, SplitAfter returns an empty slice.
+//
// It is equivalent to SplitAfterN with a count of -1.
func SplitAfter(s, sep string) []string {
return genSplit(s, sep, len(sep), -1)
}
+var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
+
// Fields splits the string s around each instance of one or more consecutive white space
// characters, as defined by unicode.IsSpace, returning an array of substrings of s or an
// empty list if s contains only white space.
func Fields(s string) []string {
- return FieldsFunc(s, unicode.IsSpace)
+ // First count the fields.
+ // This is an exact count if s is ASCII, otherwise it is an approximation.
+ n := 0
+ wasSpace := 1
+ // setBits is used to track which bits are set in the bytes of s.
+ setBits := uint8(0)
+ for i := 0; i < len(s); i++ {
+ r := s[i]
+ setBits |= r
+ isSpace := int(asciiSpace[r])
+ n += wasSpace & ^isSpace
+ wasSpace = isSpace
+ }
+
+ if setBits < utf8.RuneSelf { // ASCII fast path
+ a := make([]string, n)
+ na := 0
+ fieldStart := 0
+ i := 0
+ // Skip spaces in the front of the input.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ for i < len(s) {
+ if asciiSpace[s[i]] == 0 {
+ i++
+ continue
+ }
+ a[na] = s[fieldStart:i]
+ na++
+ i++
+ // Skip spaces in between fields.
+ for i < len(s) && asciiSpace[s[i]] != 0 {
+ i++
+ }
+ fieldStart = i
+ }
+ if fieldStart < len(s) { // Last field might end at EOF.
+ a[na] = s[fieldStart:]
+ }
+ return a
+ }
+
+ // Some runes in the input string are not ASCII.
+ // Same general approach as in the ASCII path but
+ // uses DecodeRuneInString and unicode.IsSpace if
+ // a non-ASCII rune needs to be decoded and checked
+ // if it corresponds to a space.
+ a := make([]string, 0, n)
+ fieldStart := 0
+ i := 0
+ // Skip spaces in the front of the input.
+ for i < len(s) {
+ if c := s[i]; c < utf8.RuneSelf {
+ if asciiSpace[c] == 0 {
+ break
+ }
+ i++
+ } else {
+ r, w := utf8.DecodeRuneInString(s[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += w
+ }
+ }
+ fieldStart = i
+ for i < len(s) {
+ if c := s[i]; c < utf8.RuneSelf {
+ if asciiSpace[c] == 0 {
+ i++
+ continue
+ }
+ a = append(a, s[fieldStart:i])
+ i++
+ } else {
+ r, w := utf8.DecodeRuneInString(s[i:])
+ if !unicode.IsSpace(r) {
+ i += w
+ continue
+ }
+ a = append(a, s[fieldStart:i])
+ i += w
+ }
+ // Skip spaces in between fields.
+ for i < len(s) {
+ if c := s[i]; c < utf8.RuneSelf {
+ if asciiSpace[c] == 0 {
+ break
+ }
+ i++
+ } else {
+ r, w := utf8.DecodeRuneInString(s[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += w
+ }
+ }
+ fieldStart = i
+ }
+ if fieldStart < len(s) { // Last field might end at EOF.
+ a = append(a, s[fieldStart:])
+ }
+ return a
}
// FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
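
Fields now ORs every input byte into setBits; if the accumulated value stays below utf8.RuneSelf (0x80) the string is pure ASCII, so the table-driven fast path applies and the precomputed field count n is exact. The detection trick on its own:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    // isASCII reports whether s contains only ASCII bytes, using
    // the same OR-accumulator as the Fields fast path: any byte
    // >= 0x80 sets the high bit of the accumulator.
    func isASCII(s string) bool {
        var setBits uint8
        for i := 0; i < len(s); i++ {
            setBits |= s[i]
        }
        return setBits < utf8.RuneSelf
    }

    func main() {
        fmt.Println(isASCII("plain ascii text")) // true
        fmt.Println(isASCII("naïve"))            // false
    }
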
@@ -383,40 +507,71 @@ func Map(mapping func(rune) rune, s string) string {
// In the worst case, the string can grow when mapped, making
// things unpleasant. But it's so rare we barge in assuming it's
// fine. It could also shrink but that falls out naturally.
- maxbytes := len(s) // length of b
- nbytes := 0 // number of bytes encoded in b
+
// The output buffer b is initialized on demand, the first
// time a character differs.
var b []byte
+ // nbytes is the number of bytes encoded in b.
+ var nbytes int
for i, c := range s {
r := mapping(c)
- if b == nil {
- if r == c {
- continue
- }
- b = make([]byte, maxbytes)
- nbytes = copy(b, s[:i])
+ if r == c {
+ continue
}
+
+ b = make([]byte, len(s)+utf8.UTFMax)
+ nbytes = copy(b, s[:i])
if r >= 0 {
- wid := 1
- if r >= utf8.RuneSelf {
- wid = utf8.RuneLen(r)
+ if r <= utf8.RuneSelf {
+ b[nbytes] = byte(r)
+ nbytes++
+ } else {
+ nbytes += utf8.EncodeRune(b[nbytes:], r)
}
- if nbytes+wid > maxbytes {
- // Grow the buffer.
- maxbytes = maxbytes*2 + utf8.UTFMax
- nb := make([]byte, maxbytes)
- copy(nb, b[0:nbytes])
- b = nb
- }
- nbytes += utf8.EncodeRune(b[nbytes:maxbytes], r)
}
+
+ if c == utf8.RuneError {
+ // RuneError is the result of either decoding
+ // an invalid sequence or '\uFFFD'. Determine
+ // the correct number of bytes we need to advance.
+ _, w := utf8.DecodeRuneInString(s[i:])
+ i += w
+ } else {
+ i += utf8.RuneLen(c)
+ }
+
+ s = s[i:]
+ break
}
+
if b == nil {
return s
}
- return string(b[0:nbytes])
+
+ for _, c := range s {
+ r := mapping(c)
+
+ // common case
+ if (0 <= r && r <= utf8.RuneSelf) && nbytes < len(b) {
+ b[nbytes] = byte(r)
+ nbytes++
+ continue
+ }
+
+ // b is not big enough or r is not an ASCII rune.
+ if r >= 0 {
+ if nbytes+utf8.UTFMax >= len(b) {
+ // Grow the buffer.
+ nb := make([]byte, 2*len(b))
+ copy(nb, b[:nbytes])
+ b = nb
+ }
+ nbytes += utf8.EncodeRune(b[nbytes:], r)
+ }
+ }
+
+ return string(b[:nbytes])
}
// Repeat returns a new string consisting of count copies of the string s.
@@ -561,17 +716,10 @@ func LastIndexFunc(s string, f func(rune) bool) int {
// truth==false, the sense of the predicate function is
// inverted.
func indexFunc(s string, f func(rune) bool, truth bool) int {
- start := 0
- for start < len(s) {
- wid := 1
- r := rune(s[start])
- if r >= utf8.RuneSelf {
- r, wid = utf8.DecodeRuneInString(s[start:])
- }
+ for i, r := range s {
if f(r) == truth {
- return start
+ return i
}
- start += wid
}
return -1
}
diff --git a/libgo/go/strings/strings_amd64.go b/libgo/go/strings/strings_amd64.go
index e55afd53d01..9648912fd5b 100644
--- a/libgo/go/strings/strings_amd64.go
+++ b/libgo/go/strings/strings_amd64.go
@@ -6,44 +6,46 @@
package strings
+import "internal/cpu"
+
//go:noescape
// indexShortStr returns the index of the first instance of c in s, or -1 if c is not present in s.
// indexShortStr requires 2 <= len(c) <= shortStringLen
-func indexShortStr(s, c string) int // ../runtime/asm_$GOARCH.s
-func supportAVX2() bool // ../runtime/asm_$GOARCH.s
+func indexShortStr(s, c string) int // ../runtime/asm_amd64.s
+func countByte(s string, c byte) int // ../runtime/asm_amd64.s
var shortStringLen int
func init() {
- if supportAVX2() {
+ if cpu.X86.HasAVX2 {
shortStringLen = 63
} else {
shortStringLen = 31
}
}
-// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
-func Index(s, sep string) int {
- n := len(sep)
+// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
+func Index(s, substr string) int {
+ n := len(substr)
switch {
case n == 0:
return 0
case n == 1:
- return IndexByte(s, sep[0])
+ return IndexByte(s, substr[0])
case n == len(s):
- if sep == s {
+ if substr == s {
return 0
}
return -1
case n > len(s):
return -1
case n <= shortStringLen:
- // Use brute force when s and sep both are small
+ // Use brute force when s and substr both are small
if len(s) <= 64 {
- return indexShortStr(s, sep)
+ return indexShortStr(s, substr)
}
- c := sep[0]
+ c := substr[0]
i := 0
t := s[:len(s)-n+1]
fails := 0
@@ -57,7 +59,7 @@ func Index(s, sep string) int {
}
i += o
}
- if s[i:i+n] == sep {
+ if s[i:i+n] == substr {
return i
}
fails++
@@ -66,7 +68,7 @@ func Index(s, sep string) int {
// Too many means more than 1 error per 8 characters.
// Allow some errors in the beginning.
if fails > (i+16)/8 {
- r := indexShortStr(s[i:], sep)
+ r := indexShortStr(s[i:], substr)
if r >= 0 {
return r + i
}
@@ -76,12 +78,12 @@ func Index(s, sep string) int {
return -1
}
// Rabin-Karp search
- hashsep, pow := hashStr(sep)
+ hashss, pow := hashStr(substr)
var h uint32
for i := 0; i < n; i++ {
h = h*primeRK + uint32(s[i])
}
- if h == hashsep && s[:n] == sep {
+ if h == hashss && s[:n] == substr {
return 0
}
for i := n; i < len(s); {
@@ -89,9 +91,18 @@ func Index(s, sep string) int {
h += uint32(s[i])
h -= pow * uint32(s[i-n])
i++
- if h == hashsep && s[i-n:i] == sep {
+ if h == hashss && s[i-n:i] == substr {
return i - n
}
}
return -1
}
+
+// Count counts the number of non-overlapping instances of substr in s.
+// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
+func Count(s, substr string) int {
+ if len(substr) == 1 && cpu.X86.HasPOPCNT {
+ return countByte(s, byte(substr[0]))
+ }
+ return countGeneric(s, substr)
+}
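
With this change a one-byte separator routes to the assembly countByte whenever the CPU supports POPCNT; every other shape uses countGeneric, and the observable results are identical either way:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        s := strings.Repeat("a=b=", 1000)
        // Single-byte separator: eligible for the countByte fast path.
        fmt.Println(strings.Count(s, "=")) // 2000
        // Multi-byte separator: always the generic path.
        fmt.Println(strings.Count(s, "a=b")) // 1000
        // Empty separator: 1 + the number of runes.
        fmt.Println(strings.Count("héllo", "")) // 6
    }
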
diff --git a/libgo/go/strings/strings_generic.go b/libgo/go/strings/strings_generic.go
index a3ad515444b..9844201db30 100644
--- a/libgo/go/strings/strings_generic.go
+++ b/libgo/go/strings/strings_generic.go
@@ -9,16 +9,16 @@ package strings
// TODO: implements short string optimization on non amd64 platforms
// and get rid of strings_amd64.go
-// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
-func Index(s, sep string) int {
- n := len(sep)
+// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
+func Index(s, substr string) int {
+ n := len(substr)
switch {
case n == 0:
return 0
case n == 1:
- return IndexByte(s, sep[0])
+ return IndexByte(s, substr[0])
case n == len(s):
- if sep == s {
+ if substr == s {
return 0
}
return -1
@@ -26,12 +26,12 @@ func Index(s, sep string) int {
return -1
}
// Rabin-Karp search
- hashsep, pow := hashStr(sep)
+ hashss, pow := hashStr(substr)
var h uint32
for i := 0; i < n; i++ {
h = h*primeRK + uint32(s[i])
}
- if h == hashsep && s[:n] == sep {
+ if h == hashss && s[:n] == substr {
return 0
}
for i := n; i < len(s); {
@@ -39,9 +39,15 @@ func Index(s, sep string) int {
h += uint32(s[i])
h -= pow * uint32(s[i-n])
i++
- if h == hashsep && s[i-n:i] == sep {
+ if h == hashss && s[i-n:i] == substr {
return i - n
}
}
return -1
}
+
+// Count counts the number of non-overlapping instances of substr in s.
+// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
+func Count(s, substr string) int {
+ return countGeneric(s, substr)
+}
diff --git a/libgo/go/strings/strings_s390x.go b/libgo/go/strings/strings_s390x.go
index b47702fd51a..b05fb2b025a 100644
--- a/libgo/go/strings/strings_s390x.go
+++ b/libgo/go/strings/strings_s390x.go
@@ -26,27 +26,27 @@ func init() {
}
}
-// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s.
-func Index(s, sep string) int {
- n := len(sep)
+// Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
+func Index(s, substr string) int {
+ n := len(substr)
switch {
case n == 0:
return 0
case n == 1:
- return IndexByte(s, sep[0])
+ return IndexByte(s, substr[0])
case n == len(s):
- if sep == s {
+ if substr == s {
return 0
}
return -1
case n > len(s):
return -1
case n <= shortStringLen:
- // Use brute force when s and sep both are small
+ // Use brute force when s and substr both are small
if len(s) <= 64 {
- return indexShortStr(s, sep)
+ return indexShortStr(s, substr)
}
- c := sep[0]
+ c := substr[0]
i := 0
t := s[:len(s)-n+1]
fails := 0
@@ -60,7 +60,7 @@ func Index(s, sep string) int {
}
i += o
}
- if s[i:i+n] == sep {
+ if s[i:i+n] == substr {
return i
}
fails++
@@ -69,7 +69,7 @@ func Index(s, sep string) int {
// Too many means more than 1 error per 8 characters.
// Allow some errors in the beginning.
if fails > (i+16)/8 {
- r := indexShortStr(s[i:], sep)
+ r := indexShortStr(s[i:], substr)
if r >= 0 {
return r + i
}
@@ -79,12 +79,12 @@ func Index(s, sep string) int {
return -1
}
// Rabin-Karp search
- hashsep, pow := hashStr(sep)
+ hashss, pow := hashStr(substr)
var h uint32
for i := 0; i < n; i++ {
h = h*primeRK + uint32(s[i])
}
- if h == hashsep && s[:n] == sep {
+ if h == hashss && s[:n] == substr {
return 0
}
for i := n; i < len(s); {
@@ -92,9 +92,15 @@ func Index(s, sep string) int {
h += uint32(s[i])
h -= pow * uint32(s[i-n])
i++
- if h == hashsep && s[i-n:i] == sep {
+ if h == hashss && s[i-n:i] == substr {
return i - n
}
}
return -1
}
+
+// Count counts the number of non-overlapping instances of substr in s.
+// If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
+func Count(s, substr string) int {
+ return countGeneric(s, substr)
+}
diff --git a/libgo/go/strings/strings_test.go b/libgo/go/strings/strings_test.go
index 449fb502d64..0fddaf0e4a6 100644
--- a/libgo/go/strings/strings_test.go
+++ b/libgo/go/strings/strings_test.go
@@ -456,6 +456,7 @@ var fieldstests = []FieldsTest{
{"", []string{}},
{" ", []string{}},
{" \t ", []string{}},
+ {"\u2000", []string{}},
{" abc ", []string{"abc"}},
{"1 2 3 4", []string{"1", "2", "3", "4"}},
{"1 2 3 4", []string{"1", "2", "3", "4"}},
@@ -463,6 +464,9 @@ var fieldstests = []FieldsTest{
{"1\u20002\u20013\u20024", []string{"1", "2", "3", "4"}},
{"\u2000\u2001\u2002", []string{}},
{"\n™\t™\n", []string{"™", "™"}},
+ {"\n\u20001™2\u2000 \u2001 ™", []string{"1™2", "™"}},
+ {"\n1\uFFFD \uFFFD2\u20003\uFFFD4", []string{"1\uFFFD", "\uFFFD2", "3\uFFFD4"}},
+ {"1\xFF\u2000\xFF2\xFF \xFF", []string{"1\xFF", "\xFF2\xFF", "\xFF"}},
{faces, []string{faces}},
}
@@ -629,6 +633,19 @@ func TestMap(t *testing.T) {
(*reflect.StringHeader)(unsafe.Pointer(&m)).Data {
t.Error("unexpected copy during identity map")
}
+
+ // 7. Handle invalid UTF-8 sequence
+ replaceNotLatin := func(r rune) rune {
+ if unicode.Is(unicode.Latin, r) {
+ return r
+ }
+ return '?'
+ }
+ m = Map(replaceNotLatin, "Hello\255World")
+ expect = "Hello?World"
+ if m != expect {
+ t.Errorf("replace invalid sequence: expected %q got %q", expect, m)
+ }
}
func TestToUpper(t *testing.T) { runStringTests(t, ToUpper, "ToUpper", upperTests) }
@@ -1444,6 +1461,24 @@ func BenchmarkCountTortureOverlapping(b *testing.B) {
}
}
+func BenchmarkCountByte(b *testing.B) {
+ indexSizes := []int{10, 32, 4 << 10, 4 << 20, 64 << 20}
+ benchStr := Repeat(benchmarkString,
+ (indexSizes[len(indexSizes)-1]+len(benchmarkString)-1)/len(benchmarkString))
+ benchFunc := func(b *testing.B, benchStr string) {
+ b.SetBytes(int64(len(benchStr)))
+ for i := 0; i < b.N; i++ {
+ Count(benchStr, "=")
+ }
+ }
+ for _, size := range indexSizes {
+ b.Run(fmt.Sprintf("%d", size), func(b *testing.B) {
+ benchFunc(b, benchStr[:size])
+ })
+ }
+
+}
+
var makeFieldsInput = func() string {
x := make([]byte, 1<<20)
// Input is ~10% space, ~10% 2-byte UTF-8, rest ASCII non-space.
@@ -1464,40 +1499,88 @@ var makeFieldsInput = func() string {
return string(x)
}
-var fieldsInput = makeFieldsInput()
+var makeFieldsInputASCII = func() string {
+ x := make([]byte, 1<<20)
+ // Input is ~10% space, rest ASCII non-space.
+ for i := range x {
+ if rand.Intn(10) == 0 {
+ x[i] = ' '
+ } else {
+ x[i] = 'x'
+ }
+ }
+ return string(x)
+}
+
+var stringdata = []struct{ name, data string }{
+ {"ASCII", makeFieldsInputASCII()},
+ {"Mixed", makeFieldsInput()},
+}
func BenchmarkFields(b *testing.B) {
- b.SetBytes(int64(len(fieldsInput)))
- for i := 0; i < b.N; i++ {
- Fields(fieldsInput)
+ for _, sd := range stringdata {
+ b.Run(sd.name, func(b *testing.B) {
+ for j := 1 << 4; j <= 1<<20; j <<= 4 {
+ b.Run(fmt.Sprintf("%d", j), func(b *testing.B) {
+ b.ReportAllocs()
+ b.SetBytes(int64(j))
+ data := sd.data[:j]
+ for i := 0; i < b.N; i++ {
+ Fields(data)
+ }
+ })
+ }
+ })
}
}
func BenchmarkFieldsFunc(b *testing.B) {
- b.SetBytes(int64(len(fieldsInput)))
- for i := 0; i < b.N; i++ {
- FieldsFunc(fieldsInput, unicode.IsSpace)
+ for _, sd := range stringdata {
+ b.Run(sd.name, func(b *testing.B) {
+ for j := 1 << 4; j <= 1<<20; j <<= 4 {
+ b.Run(fmt.Sprintf("%d", j), func(b *testing.B) {
+ b.ReportAllocs()
+ b.SetBytes(int64(j))
+ data := sd.data[:j]
+ for i := 0; i < b.N; i++ {
+ FieldsFunc(data, unicode.IsSpace)
+ }
+ })
+ }
+ })
}
}
-func BenchmarkSplit1(b *testing.B) {
+func BenchmarkSplitEmptySeparator(b *testing.B) {
for i := 0; i < b.N; i++ {
Split(benchInputHard, "")
}
}
-func BenchmarkSplit2(b *testing.B) {
+func BenchmarkSplitSingleByteSeparator(b *testing.B) {
for i := 0; i < b.N; i++ {
Split(benchInputHard, "/")
}
}
-func BenchmarkSplit3(b *testing.B) {
+func BenchmarkSplitMultiByteSeparator(b *testing.B) {
for i := 0; i < b.N; i++ {
Split(benchInputHard, "hello")
}
}
+func BenchmarkSplitNSingleByteSeparator(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ SplitN(benchInputHard, "/", 10)
+ }
+}
+
+func BenchmarkSplitNMultiByteSeparator(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ SplitN(benchInputHard, "hello", 10)
+ }
+}
+
func BenchmarkRepeat(b *testing.B) {
for i := 0; i < b.N; i++ {
Repeat("-", 80)
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 6d0831c3f9d..17baccb4683 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -953,16 +953,20 @@ func hammerSwapUint64(addr *uint64, count int) {
}
}
+const arch32 = unsafe.Sizeof(uintptr(0)) == 4
+
func hammerSwapUintptr64(uaddr *uint64, count int) {
// only safe when uintptr is 64-bit.
// not called on 32-bit systems.
- addr := (*uintptr)(unsafe.Pointer(uaddr))
- seed := int(uintptr(unsafe.Pointer(&count)))
- for i := 0; i < count; i++ {
- new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
- old := SwapUintptr(addr, new)
- if old>>32 != old<<32>>32 {
- panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ if !arch32 {
+ addr := (*uintptr)(unsafe.Pointer(uaddr))
+ seed := int(uintptr(unsafe.Pointer(&count)))
+ for i := 0; i < count; i++ {
+ new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+ old := SwapUintptr(addr, new)
+ if old>>32 != old<<32>>32 {
+ panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+ }
}
}
}
@@ -1116,8 +1120,6 @@ func hammerStoreLoadUint64(t *testing.T, paddr unsafe.Pointer) {
func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
addr := (*uintptr)(paddr)
- var test64 uint64 = 1 << 50
- arch32 := uintptr(test64) == 0
v := LoadUintptr(addr)
new := v
if arch32 {
@@ -1144,8 +1146,6 @@ func hammerStoreLoadUintptr(t *testing.T, paddr unsafe.Pointer) {
func hammerStoreLoadPointer(t *testing.T, paddr unsafe.Pointer) {
addr := (*unsafe.Pointer)(paddr)
- var test64 uint64 = 1 << 50
- arch32 := uintptr(test64) == 0
v := uintptr(LoadPointer(addr))
new := v
if arch32 {
@@ -1398,7 +1398,7 @@ func TestUnaligned64(t *testing.T) {
switch runtime.GOARCH {
default:
- if unsafe.Sizeof(int(0)) != 4 {
+ if !arch32 {
t.Skip("test only runs on 32-bit systems")
}
case "amd64p32":
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
index 302ff43070a..7c007d7a150 100644
--- a/libgo/go/sync/atomic/doc.go
+++ b/libgo/go/sync/atomic/doc.go
@@ -48,8 +48,8 @@ import (
// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
//
// On both ARM and x86-32, it is the caller's responsibility to arrange for 64-bit
-// alignment of 64-bit words accessed atomically. The first word in a global
-// variable or in an allocated struct or slice can be relied upon to be
+// alignment of 64-bit words accessed atomically. The first word in a
+// variable or in an allocated struct, array, or slice can be relied upon to be
// 64-bit aligned.
// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
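For illustration (not part of the patch): the practical consequence of the alignment guarantee above is to place atomically accessed 64-bit words first in a struct on 32-bit platforms. A sketch:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type counter struct {
    	ops  uint64 // first word of the struct, hence 64-bit aligned even on 32-bit targets
    	name string // placing ops after this field could break that alignment
    }

    func main() {
    	var c counter
    	atomic.AddUint64(&c.ops, 1)
    	fmt.Println(atomic.LoadUint64(&c.ops))
    }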
diff --git a/libgo/go/sync/atomic/value.go b/libgo/go/sync/atomic/value.go
index 30abf726344..1fc1f681f20 100644
--- a/libgo/go/sync/atomic/value.go
+++ b/libgo/go/sync/atomic/value.go
@@ -9,7 +9,6 @@ import (
)
// A Value provides an atomic load and store of a consistently typed value.
-// Values can be created as part of other data structures.
// The zero value for a Value returns nil from Load.
// Once Store has been called, a Value must not be copied.
//
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
index c070d9d84ef..14e2f6b24d4 100644
--- a/libgo/go/sync/cond.go
+++ b/libgo/go/sync/cond.go
@@ -17,7 +17,6 @@ import (
// which must be held when changing the condition and
// when calling the Wait method.
//
-// A Cond can be created as part of other structures.
// A Cond must not be copied after first use.
type Cond struct {
noCopy noCopy
diff --git a/libgo/go/sync/export_test.go b/libgo/go/sync/export_test.go
index 6ed38dad89d..669076efad3 100644
--- a/libgo/go/sync/export_test.go
+++ b/libgo/go/sync/export_test.go
@@ -7,3 +7,5 @@ package sync
// Export for testing.
var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease
+var Runtime_procPin = runtime_procPin
+var Runtime_procUnpin = runtime_procUnpin
diff --git a/libgo/go/sync/map.go b/libgo/go/sync/map.go
new file mode 100644
index 00000000000..083f4a563f8
--- /dev/null
+++ b/libgo/go/sync/map.go
@@ -0,0 +1,375 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
+// It is safe for multiple goroutines to call a Map's methods concurrently.
+//
+// It is optimized for use in concurrent loops with keys that are
+// stable over time, and either few steady-state stores, or stores
+// localized to one goroutine per key.
+//
+// For use cases that do not share these attributes, it will likely have
+// comparable or worse performance and worse type safety than an ordinary
+// map paired with a read-write mutex.
+//
+// The zero Map is valid and empty.
+//
+// A Map must not be copied after first use.
+type Map struct {
+ mu Mutex
+
+ // read contains the portion of the map's contents that are safe for
+ // concurrent access (with or without mu held).
+ //
+ // The read field itself is always safe to load, but must only be stored with
+ // mu held.
+ //
+ // Entries stored in read may be updated concurrently without mu, but updating
+ // a previously-expunged entry requires that the entry be copied to the dirty
+ // map and unexpunged with mu held.
+ read atomic.Value // readOnly
+
+ // dirty contains the portion of the map's contents that require mu to be
+ // held. To ensure that the dirty map can be promoted to the read map quickly,
+ // it also includes all of the non-expunged entries in the read map.
+ //
+ // Expunged entries are not stored in the dirty map. An expunged entry in the
+ // clean map must be unexpunged and added to the dirty map before a new value
+ // can be stored to it.
+ //
+ // If the dirty map is nil, the next write to the map will initialize it by
+ // making a shallow copy of the clean map, omitting stale entries.
+ dirty map[interface{}]*entry
+
+ // misses counts the number of loads since the read map was last updated that
+ // needed to lock mu to determine whether the key was present.
+ //
+ // Once enough misses have occurred to cover the cost of copying the dirty
+ // map, the dirty map will be promoted to the read map (in the unamended
+ // state) and the next store to the map will make a new dirty copy.
+ misses int
+}
+
+// readOnly is an immutable struct stored atomically in the Map.read field.
+type readOnly struct {
+ m map[interface{}]*entry
+ amended bool // true if the dirty map contains some key not in m.
+}
+
+// expunged is an arbitrary pointer that marks entries which have been deleted
+// from the dirty map.
+var expunged = unsafe.Pointer(new(interface{}))
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry struct {
+ // p points to the interface{} value stored for the entry.
+ //
+ // If p == nil, the entry has been deleted and m.dirty == nil.
+ //
+ // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+ // is missing from m.dirty.
+ //
+ // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+ // != nil, in m.dirty[key].
+ //
+ // An entry can be deleted by atomic replacement with nil: when m.dirty is
+ // next created, it will atomically replace nil with expunged and leave
+ // m.dirty[key] unset.
+ //
+ // An entry's associated value can be updated by atomic replacement, provided
+ // p != expunged. If p == expunged, an entry's associated value can be updated
+ // only after first setting m.dirty[key] = e so that lookups using the dirty
+ // map find the entry.
+ p unsafe.Pointer // *interface{}
+}
+
+func newEntry(i interface{}) *entry {
+ return &entry{p: unsafe.Pointer(&i)}
+}
+
+// Load returns the value stored in the map for a key, or nil if no
+// value is present.
+// The ok result indicates whether value was found in the map.
+func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
+ read, _ := m.read.Load().(readOnly)
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ // Avoid reporting a spurious miss if m.dirty got promoted while we were
+ // blocked on m.mu. (If further loads of the same key will not miss, it's
+ // not worth copying the dirty map for this key.)
+ read, _ = m.read.Load().(readOnly)
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ e, ok = m.dirty[key]
+ // Regardless of whether the entry was present, record a miss: this key
+ // will take the slow path until the dirty map is promoted to the read
+ // map.
+ m.missLocked()
+ }
+ m.mu.Unlock()
+ }
+ if !ok {
+ return nil, false
+ }
+ return e.load()
+}
+
+func (e *entry) load() (value interface{}, ok bool) {
+ p := atomic.LoadPointer(&e.p)
+ if p == nil || p == expunged {
+ return nil, false
+ }
+ return *(*interface{})(p), true
+}
+
+// Store sets the value for a key.
+func (m *Map) Store(key, value interface{}) {
+ read, _ := m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok && e.tryStore(&value) {
+ return
+ }
+
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ // The entry was previously expunged, which implies that there is a
+ // non-nil dirty map and this entry is not in it.
+ m.dirty[key] = e
+ }
+ e.storeLocked(&value)
+ } else if e, ok := m.dirty[key]; ok {
+ e.storeLocked(&value)
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ }
+ m.mu.Unlock()
+}
+
+// tryStore stores a value if the entry has not been expunged.
+//
+// If the entry is expunged, tryStore returns false and leaves the entry
+// unchanged.
+func (e *entry) tryStore(i *interface{}) bool {
+ p := atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return false
+ }
+ for {
+ if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
+ return true
+ }
+ p = atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return false
+ }
+ }
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry) unexpungeLocked() (wasExpunged bool) {
+ return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
+}
+
+// storeLocked unconditionally stores a value to the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry) storeLocked(i *interface{}) {
+ atomic.StorePointer(&e.p, unsafe.Pointer(i))
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+ // Avoid locking if it's a clean hit.
+ read, _ := m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ actual, loaded, ok := e.tryLoadOrStore(value)
+ if ok {
+ return actual, loaded
+ }
+ }
+
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if e, ok := read.m[key]; ok {
+ if e.unexpungeLocked() {
+ m.dirty[key] = e
+ }
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ } else if e, ok := m.dirty[key]; ok {
+ actual, loaded, _ = e.tryLoadOrStore(value)
+ m.missLocked()
+ } else {
+ if !read.amended {
+ // We're adding the first new key to the dirty map.
+ // Make sure it is allocated and mark the read-only map as incomplete.
+ m.dirtyLocked()
+ m.read.Store(readOnly{m: read.m, amended: true})
+ }
+ m.dirty[key] = newEntry(value)
+ actual, loaded = value, false
+ }
+ m.mu.Unlock()
+
+ return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
+ p := atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *(*interface{})(p), true, true
+ }
+
+ // Copy the interface after the first load to make this method more amenable
+ // to escape analysis: if we hit the "load" path or the entry is expunged, we
+ // shouldn't bother heap-allocating.
+ ic := i
+ for {
+ if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
+ return i, false, true
+ }
+ p = atomic.LoadPointer(&e.p)
+ if p == expunged {
+ return nil, false, false
+ }
+ if p != nil {
+ return *(*interface{})(p), true, true
+ }
+ }
+}
+
+// Delete deletes the value for a key.
+func (m *Map) Delete(key interface{}) {
+ read, _ := m.read.Load().(readOnly)
+ e, ok := read.m[key]
+ if !ok && read.amended {
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ e, ok = read.m[key]
+ if !ok && read.amended {
+ delete(m.dirty, key)
+ }
+ m.mu.Unlock()
+ }
+ if ok {
+ e.delete()
+ }
+}
+
+func (e *entry) delete() (hadValue bool) {
+ for {
+ p := atomic.LoadPointer(&e.p)
+ if p == nil || p == expunged {
+ return false
+ }
+ if atomic.CompareAndSwapPointer(&e.p, p, nil) {
+ return true
+ }
+ }
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the Map's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently, Range may reflect any mapping for that key
+// from any point during the Range call.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *Map) Range(f func(key, value interface{}) bool) {
+ // We need to be able to iterate over all of the keys that were already
+ // present at the start of the call to Range.
+ // If read.amended is false, then read.m satisfies that property without
+ // requiring us to hold m.mu for a long time.
+ read, _ := m.read.Load().(readOnly)
+ if read.amended {
+ // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+ // (assuming the caller does not break out early), so a call to Range
+ // amortizes an entire copy of the map: we can promote the dirty copy
+ // immediately!
+ m.mu.Lock()
+ read, _ = m.read.Load().(readOnly)
+ if read.amended {
+ read = readOnly{m: m.dirty}
+ m.read.Store(read)
+ m.dirty = nil
+ m.misses = 0
+ }
+ m.mu.Unlock()
+ }
+
+ for k, e := range read.m {
+ v, ok := e.load()
+ if !ok {
+ continue
+ }
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+func (m *Map) missLocked() {
+ m.misses++
+ if m.misses < len(m.dirty) {
+ return
+ }
+ m.read.Store(readOnly{m: m.dirty})
+ m.dirty = nil
+ m.misses = 0
+}
+
+func (m *Map) dirtyLocked() {
+ if m.dirty != nil {
+ return
+ }
+
+ read, _ := m.read.Load().(readOnly)
+ m.dirty = make(map[interface{}]*entry, len(read.m))
+ for k, e := range read.m {
+ if !e.tryExpungeLocked() {
+ m.dirty[k] = e
+ }
+ }
+}
+
+func (e *entry) tryExpungeLocked() (isExpunged bool) {
+ p := atomic.LoadPointer(&e.p)
+ for p == nil {
+ if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
+ return true
+ }
+ p = atomic.LoadPointer(&e.p)
+ }
+ return p == expunged
+}
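For illustration (not part of the patch): a minimal usage sketch of the new sync.Map API defined above.

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var m sync.Map // the zero Map is valid and empty

    	m.Store("a", 1)
    	if v, ok := m.Load("a"); ok {
    		fmt.Println("a =", v.(int)) // values come back as interface{}
    	}

    	actual, loaded := m.LoadOrStore("a", 2)
    	fmt.Println(actual, loaded) // 1 true: the existing value wins

    	m.Range(func(k, v interface{}) bool {
    		fmt.Println(k, v)
    		return true // false would stop the iteration
    	})

    	m.Delete("a")
    }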
diff --git a/libgo/go/sync/map_bench_test.go b/libgo/go/sync/map_bench_test.go
new file mode 100644
index 00000000000..e6a8badddba
--- /dev/null
+++ b/libgo/go/sync/map_bench_test.go
@@ -0,0 +1,215 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+type bench struct {
+ setup func(*testing.B, mapInterface)
+ perG func(b *testing.B, pb *testing.PB, i int, m mapInterface)
+}
+
+func benchMap(b *testing.B, bench bench) {
+ for _, m := range [...]mapInterface{&DeepCopyMap{}, &RWMutexMap{}, &sync.Map{}} {
+ b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
+ m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
+ if bench.setup != nil {
+ bench.setup(b, m)
+ }
+
+ b.ResetTimer()
+
+ var i int64
+ b.RunParallel(func(pb *testing.PB) {
+ id := int(atomic.AddInt64(&i, 1) - 1)
+ bench.perG(b, pb, id*b.N, m)
+ })
+ })
+ }
+}
+
+func BenchmarkLoadMostlyHits(b *testing.B) {
+ const hits, misses = 1023, 1
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i % (hits + misses))
+ }
+ },
+ })
+}
+
+func BenchmarkLoadMostlyMisses(b *testing.B) {
+ const hits, misses = 1, 1023
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i % (hits + misses))
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreBalanced(b *testing.B) {
+ const hits, misses = 128, 128
+
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ for i := 0; i < hits; i++ {
+ m.LoadOrStore(i, i)
+ }
+ // Prime the map to get it into a steady state.
+ for i := 0; i < hits*2; i++ {
+ m.Load(i % hits)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ j := i % (hits + misses)
+ if j < hits {
+ if _, ok := m.LoadOrStore(j, i); !ok {
+ b.Fatalf("unexpected miss for %v", j)
+ }
+ } else {
+ if v, loaded := m.LoadOrStore(i, i); loaded {
+ b.Fatalf("failed to store %v: existing value %v", i, v)
+ }
+ }
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreUnique(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(b *testing.B, m mapInterface) {
+ if _, ok := m.(*DeepCopyMap); ok {
+ b.Skip("DeepCopyMap has quadratic running time.")
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadOrStore(i, i)
+ }
+ },
+ })
+}
+
+func BenchmarkLoadOrStoreCollision(b *testing.B) {
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ m.LoadOrStore(0, 0)
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.LoadOrStore(0, 0)
+ }
+ },
+ })
+}
+
+func BenchmarkRange(b *testing.B) {
+ const mapSize = 1 << 10
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < mapSize; i++ {
+ m.Store(i, i)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Range(func(_, _ interface{}) bool { return true })
+ }
+ },
+ })
+}
+
+// BenchmarkAdversarialAlloc tests performance when we store a new value
+// immediately whenever the map is promoted to clean and otherwise load a
+// unique, missing key.
+//
+// This forces the Load calls to always acquire the map's mutex.
+func BenchmarkAdversarialAlloc(b *testing.B) {
+ benchMap(b, bench{
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ var stores, loadsSinceStore int64
+ for ; pb.Next(); i++ {
+ m.Load(i)
+ if loadsSinceStore++; loadsSinceStore > stores {
+ m.LoadOrStore(i, stores)
+ loadsSinceStore = 0
+ stores++
+ }
+ }
+ },
+ })
+}
+
+// BenchmarkAdversarialDelete tests performance when we periodically delete
+// one key and add a different one in a large map.
+//
+// This forces the Load calls to always acquire the map's mutex and periodically
+// makes a full copy of the map despite changing only one entry.
+func BenchmarkAdversarialDelete(b *testing.B) {
+ const mapSize = 1 << 10
+
+ benchMap(b, bench{
+ setup: func(_ *testing.B, m mapInterface) {
+ for i := 0; i < mapSize; i++ {
+ m.Store(i, i)
+ }
+ },
+
+ perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+ for ; pb.Next(); i++ {
+ m.Load(i)
+
+ if i%mapSize == 0 {
+ m.Range(func(k, _ interface{}) bool {
+ m.Delete(k)
+ return false
+ })
+ m.Store(i, i)
+ }
+ }
+ },
+ })
+}
diff --git a/libgo/go/sync/map_reference_test.go b/libgo/go/sync/map_reference_test.go
new file mode 100644
index 00000000000..9f27b07c329
--- /dev/null
+++ b/libgo/go/sync/map_reference_test.go
@@ -0,0 +1,151 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// This file contains reference map implementations for unit-tests.
+
+// mapInterface is the interface Map implements.
+type mapInterface interface {
+ Load(interface{}) (interface{}, bool)
+ Store(key, value interface{})
+ LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
+ Delete(interface{})
+ Range(func(key, value interface{}) (shouldContinue bool))
+}
+
+// RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
+type RWMutexMap struct {
+ mu sync.RWMutex
+ dirty map[interface{}]interface{}
+}
+
+func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) {
+ m.mu.RLock()
+ value, ok = m.dirty[key]
+ m.mu.RUnlock()
+ return
+}
+
+func (m *RWMutexMap) Store(key, value interface{}) {
+ m.mu.Lock()
+ if m.dirty == nil {
+ m.dirty = make(map[interface{}]interface{})
+ }
+ m.dirty[key] = value
+ m.mu.Unlock()
+}
+
+func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+ m.mu.Lock()
+ actual, loaded = m.dirty[key]
+ if !loaded {
+ actual = value
+ if m.dirty == nil {
+ m.dirty = make(map[interface{}]interface{})
+ }
+ m.dirty[key] = value
+ }
+ m.mu.Unlock()
+ return actual, loaded
+}
+
+func (m *RWMutexMap) Delete(key interface{}) {
+ m.mu.Lock()
+ delete(m.dirty, key)
+ m.mu.Unlock()
+}
+
+func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
+ m.mu.RLock()
+ keys := make([]interface{}, 0, len(m.dirty))
+ for k := range m.dirty {
+ keys = append(keys, k)
+ }
+ m.mu.RUnlock()
+
+ for _, k := range keys {
+ v, ok := m.Load(k)
+ if !ok {
+ continue
+ }
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+// DeepCopyMap is an implementation of mapInterface using a Mutex and
+// atomic.Value. It makes deep copies of the map on every write to avoid
+// acquiring the Mutex in Load.
+type DeepCopyMap struct {
+ mu sync.Mutex
+ clean atomic.Value
+}
+
+func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) {
+ clean, _ := m.clean.Load().(map[interface{}]interface{})
+ value, ok = clean[key]
+ return value, ok
+}
+
+func (m *DeepCopyMap) Store(key, value interface{}) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ dirty[key] = value
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+}
+
+func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+ clean, _ := m.clean.Load().(map[interface{}]interface{})
+ actual, loaded = clean[key]
+ if loaded {
+ return actual, loaded
+ }
+
+ m.mu.Lock()
+ // Reload clean in case it changed while we were waiting on m.mu.
+ clean, _ = m.clean.Load().(map[interface{}]interface{})
+ actual, loaded = clean[key]
+ if !loaded {
+ dirty := m.dirty()
+ dirty[key] = value
+ actual = value
+ m.clean.Store(dirty)
+ }
+ m.mu.Unlock()
+ return actual, loaded
+}
+
+func (m *DeepCopyMap) Delete(key interface{}) {
+ m.mu.Lock()
+ dirty := m.dirty()
+ delete(dirty, key)
+ m.clean.Store(dirty)
+ m.mu.Unlock()
+}
+
+func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
+ clean, _ := m.clean.Load().(map[interface{}]interface{})
+ for k, v := range clean {
+ if !f(k, v) {
+ break
+ }
+ }
+}
+
+func (m *DeepCopyMap) dirty() map[interface{}]interface{} {
+ clean, _ := m.clean.Load().(map[interface{}]interface{})
+ dirty := make(map[interface{}]interface{}, len(clean)+1)
+ for k, v := range clean {
+ dirty[k] = v
+ }
+ return dirty
+}
diff --git a/libgo/go/sync/map_test.go b/libgo/go/sync/map_test.go
new file mode 100644
index 00000000000..b60a1c7bede
--- /dev/null
+++ b/libgo/go/sync/map_test.go
@@ -0,0 +1,170 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "math/rand"
+ "reflect"
+ "runtime"
+ "sync"
+ "testing"
+ "testing/quick"
+)
+
+type mapOp string
+
+const (
+ opLoad = mapOp("Load")
+ opStore = mapOp("Store")
+ opLoadOrStore = mapOp("LoadOrStore")
+ opDelete = mapOp("Delete")
+)
+
+var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opDelete}
+
+// mapCall is a quick.Generator for calls on mapInterface.
+type mapCall struct {
+ op mapOp
+ k, v interface{}
+}
+
+func (c mapCall) apply(m mapInterface) (interface{}, bool) {
+ switch c.op {
+ case opLoad:
+ return m.Load(c.k)
+ case opStore:
+ m.Store(c.k, c.v)
+ return nil, false
+ case opLoadOrStore:
+ return m.LoadOrStore(c.k, c.v)
+ case opDelete:
+ m.Delete(c.k)
+ return nil, false
+ default:
+ panic("invalid mapOp")
+ }
+}
+
+type mapResult struct {
+ value interface{}
+ ok bool
+}
+
+func randValue(r *rand.Rand) interface{} {
+ b := make([]byte, r.Intn(4))
+ for i := range b {
+ b[i] = 'a' + byte(rand.Intn(26))
+ }
+ return string(b)
+}
+
+func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
+ c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)}
+ switch c.op {
+ case opStore, opLoadOrStore:
+ c.v = randValue(r)
+ }
+ return reflect.ValueOf(c)
+}
+
+func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
+ for _, c := range calls {
+ v, ok := c.apply(m)
+ results = append(results, mapResult{v, ok})
+ }
+
+ final = make(map[interface{}]interface{})
+ m.Range(func(k, v interface{}) bool {
+ final[k] = v
+ return true
+ })
+
+ return results, final
+}
+
+func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+ return applyCalls(new(sync.Map), calls)
+}
+
+func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+ return applyCalls(new(RWMutexMap), calls)
+}
+
+func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+ return applyCalls(new(DeepCopyMap), calls)
+}
+
+func TestMapMatchesRWMutex(t *testing.T) {
+ if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestMapMatchesDeepCopy(t *testing.T) {
+ if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestConcurrentRange(t *testing.T) {
+ const mapSize = 1 << 10
+
+ m := new(sync.Map)
+ for n := int64(1); n <= mapSize; n++ {
+ m.Store(n, int64(n))
+ }
+
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ defer func() {
+ close(done)
+ wg.Wait()
+ }()
+ for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
+ r := rand.New(rand.NewSource(g))
+ wg.Add(1)
+ go func(g int64) {
+ defer wg.Done()
+ for i := int64(0); ; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ }
+ for n := int64(1); n < mapSize; n++ {
+ if r.Int63n(mapSize) == 0 {
+ m.Store(n, n*i*g)
+ } else {
+ m.Load(n)
+ }
+ }
+ }
+ }(g)
+ }
+
+ iters := 1 << 10
+ if testing.Short() {
+ iters = 16
+ }
+ for n := iters; n > 0; n-- {
+ seen := make(map[int64]bool, mapSize)
+
+ m.Range(func(ki, vi interface{}) bool {
+ k, v := ki.(int64), vi.(int64)
+ if v%k != 0 {
+ t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
+ }
+ if seen[k] {
+ t.Fatalf("Range visited key %v twice", k)
+ }
+ seen[k] = true
+ return true
+ })
+
+ if len(seen) != mapSize {
+ t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize)
+ }
+ }
+}
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 8c9366f4fe1..1232c629b18 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -19,8 +19,7 @@ import (
func throw(string) // provided by runtime
// A Mutex is a mutual exclusion lock.
-// Mutexes can be created as part of other structures;
-// the zero value for a Mutex is an unlocked mutex.
+// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
type Mutex struct {
@@ -37,7 +36,34 @@ type Locker interface {
const (
mutexLocked = 1 << iota // mutex is locked
mutexWoken
+ mutexStarving
mutexWaiterShift = iota
+
+ // Mutex fairness.
+ //
+ // Mutex can be in 2 modes of operations: normal and starvation.
+ // In normal mode waiters are queued in FIFO order, but a woken up waiter
+ // does not own the mutex and competes with new arriving goroutines over
+ // the ownership. New arriving goroutines have an advantage -- they are
+ // already running on CPU and there can be lots of them, so a woken up
+ // waiter has good chances of losing. In such case it is queued at front
+ // of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
+ // it switches mutex to the starvation mode.
+ //
+ // In starvation mode ownership of the mutex is directly handed off from
+ // the unlocking goroutine to the waiter at the front of the queue.
+ // New arriving goroutines don't try to acquire the mutex even if it appears
+ // to be unlocked, and don't try to spin. Instead they queue themselves at
+ // the tail of the wait queue.
+ //
+ // If a waiter receives ownership of the mutex and sees that either
+ // (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
+ // it switches mutex back to normal operation mode.
+ //
+ // Normal mode has considerably better performance as a goroutine can acquire
+ // a mutex several times in a row even if there are blocked waiters.
+ // Starvation mode is important to prevent pathological cases of tail latency.
+ starvationThresholdNs = 1e6
)
// Lock locks m.
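For illustration (not part of the patch): the iota block above yields the literal values used in this sketch, which decodes a state word by hand.

    package main

    import "fmt"

    const (
    	mutexLocked      = 1 // 1 << 0
    	mutexWoken       = 2 // 1 << 1
    	mutexStarving    = 4 // 1 << 2, new in this patch
    	mutexWaiterShift = 3 // the waiter count lives in the remaining high bits
    )

    func main() {
    	state := int32(2<<mutexWaiterShift | mutexStarving | mutexLocked)
    	fmt.Println("locked:  ", state&mutexLocked != 0)   // true
    	fmt.Println("woken:   ", state&mutexWoken != 0)    // false
    	fmt.Println("starving:", state&mutexStarving != 0) // true
    	fmt.Println("waiters: ", state>>mutexWaiterShift)  // 2
    }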
@@ -52,41 +78,86 @@ func (m *Mutex) Lock() {
return
}
+ var waitStartTime int64
+ starving := false
awoke := false
iter := 0
+ old := m.state
for {
- old := m.state
- new := old | mutexLocked
- if old&mutexLocked != 0 {
- if runtime_canSpin(iter) {
- // Active spinning makes sense.
- // Try to set mutexWoken flag to inform Unlock
- // to not wake other blocked goroutines.
- if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
- atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
- awoke = true
- }
- runtime_doSpin()
- iter++
- continue
+ // Don't spin in starvation mode, ownership is handed off to waiters
+ // so we won't be able to acquire the mutex anyway.
+ if old&(mutexLocked|mutexStarving) == mutexLocked && runtime_canSpin(iter) {
+ // Active spinning makes sense.
+ // Try to set mutexWoken flag to inform Unlock
+ // to not wake other blocked goroutines.
+ if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
+ atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
+ awoke = true
}
- new = old + 1<<mutexWaiterShift
+ runtime_doSpin()
+ iter++
+ old = m.state
+ continue
}
+ new := old
+ // Don't try to acquire starving mutex, new arriving goroutines must queue.
+ if old&mutexStarving == 0 {
+ new |= mutexLocked
+ }
+ if old&(mutexLocked|mutexStarving) != 0 {
+ new += 1 << mutexWaiterShift
+ }
+ // The current goroutine switches mutex to starvation mode.
+ // But if the mutex is currently unlocked, don't do the switch.
+ // Unlock expects that starving mutex has waiters, which will not
+ // be true in this case.
+ if starving && old&mutexLocked != 0 {
+ new |= mutexStarving
+ }
if awoke {
// The goroutine has been woken from sleep,
// so we need to reset the flag in either case.
if new&mutexWoken == 0 {
panic("sync: inconsistent mutex state")
}
new &^= mutexWoken
}
if atomic.CompareAndSwapInt32(&m.state, old, new) {
- if old&mutexLocked == 0 {
+ if old&(mutexLocked|mutexStarving) == 0 {
+ break // locked the mutex with CAS
+ }
+ // If we were already waiting before, queue at the front of the queue.
+ queueLifo := waitStartTime != 0
+ if waitStartTime == 0 {
+ waitStartTime = runtime_nanotime()
+ }
+ runtime_SemacquireMutex(&m.sema, queueLifo)
+ starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
+ old = m.state
+ if old&mutexStarving != 0 {
+ // If this goroutine was woken and mutex is in starvation mode,
+ // ownership was handed off to us but mutex is in somewhat
+ // inconsistent state: mutexLocked is not set and we are still
+ // accounted as waiter. Fix that.
+ if old&(mutexLocked|mutexWoken) != 0 || old>>mutexWaiterShift == 0 {
+ panic("sync: inconsistent mutex state")
+ }
+ delta := int32(mutexLocked - 1<<mutexWaiterShift)
+ if !starving || old>>mutexWaiterShift == 1 {
+ // Exit starvation mode.
+ // Critical to do it here and consider wait time.
+ // Starvation mode is so inefficient, that two goroutines
+ // can go lock-step infinitely once they switch mutex
+ // to starvation mode.
+ delta -= mutexStarving
+ }
+ atomic.AddInt32(&m.state, delta)
break
}
- runtime_SemacquireMutex(&m.sema)
awoke = true
iter = 0
+ } else {
+ old = m.state
}
}
@@ -110,22 +181,33 @@ func (m *Mutex) Unlock() {
// Fast path: drop lock bit.
new := atomic.AddInt32(&m.state, -mutexLocked)
if (new+mutexLocked)&mutexLocked == 0 {
- throw("sync: unlock of unlocked mutex")
+ panic("sync: unlock of unlocked mutex")
}
-
- old := new
- for {
- // If there are no waiters or a goroutine has already
- // been woken or grabbed the lock, no need to wake anyone.
- if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken) != 0 {
- return
- }
- // Grab the right to wake someone.
- new = (old - 1<<mutexWaiterShift) | mutexWoken
- if atomic.CompareAndSwapInt32(&m.state, old, new) {
- runtime_Semrelease(&m.sema)
- return
- }
- old = m.state
- }
+ if new&mutexStarving == 0 {
+ old := new
+ for {
+ // If there are no waiters or a goroutine has already
+ // been woken or grabbed the lock, no need to wake anyone.
+ // In starvation mode ownership is directly handed off from unlocking
+ // goroutine to the next waiter. We are not part of this chain,
+ // since we did not observe mutexStarving when we unlocked the mutex above.
+ // So get off the way.
+ if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken|mutexStarving) != 0 {
+ return
+ }
+ // Grab the right to wake someone.
+ new = (old - 1<<mutexWaiterShift) | mutexWoken
+ if atomic.CompareAndSwapInt32(&m.state, old, new) {
+ runtime_Semrelease(&m.sema, false)
+ return
+ }
+ old = m.state
+ }
+ } else {
+ // Starving mode: handoff mutex ownership to the next waiter.
+ // Note: mutexLocked is not set, the waiter will set it after wakeup.
+ // But mutex is still considered locked if mutexStarving is set,
+ // so new coming goroutines won't acquire it.
+ runtime_Semrelease(&m.sema, true)
+ }
}
diff --git a/libgo/go/syscall/exec_bsd.go b/libgo/go/syscall/exec_bsd.go
+ if ngroups > 0 {
+ gids := make([]Gid_t, ngroups)
for i, v := range cred.Groups {
- groups[i] = Gid_t(v)
+ gids[i] = Gid_t(v)
}
- err2 := setgroups(ngroups, &groups[0])
+ groups = &gids[0]
+ }
+ if !cred.NoSetGroups {
+ err2 := setgroups(ngroups, groups)
if err2 == nil {
err1 = 0
} else {
err1 = err2.(Errno)
}
- }
- if err1 != 0 {
- goto childerror
+ if err1 != 0 {
+ goto childerror
+ }
}
err2 := Setgid(int(cred.Gid))
if err2 != nil {
@@ -255,17 +255,3 @@ childerror:
raw_exit(253)
}
}
-
-// Try to open a pipe with O_CLOEXEC set on both file descriptors.
-func forkExecPipe(p []int) error {
- err := Pipe(p)
- if err != nil {
- return err
- }
- _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC)
- if err != nil {
- return err
- }
- _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
- return err
-}
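For illustration (not part of the patch): a hypothetical sketch of the Credential.NoSetGroups knob this hunk wires up; the command and IDs are placeholders.

    package main

    import (
    	"fmt"
    	"log"
    	"os/exec"
    	"syscall"
    )

    func main() {
    	cmd := exec.Command("/usr/bin/id")
    	cmd.SysProcAttr = &syscall.SysProcAttr{
    		Credential: &syscall.Credential{
    			Uid:         65534, // nobody
    			Gid:         65534,
    			NoSetGroups: true, // skip setgroups(2); keep inherited supplementary groups
    		},
    	}
    	out, err := cmd.CombinedOutput()
    	if err != nil {
    		log.Fatalf("%v: %s", err, out)
    	}
    	fmt.Printf("%s", out)
    }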
diff --git a/libgo/go/syscall/exec_freebsd.go b/libgo/go/syscall/exec_freebsd.go
new file mode 100644
index 00000000000..4ed32c0614f
--- /dev/null
+++ b/libgo/go/syscall/exec_freebsd.go
@@ -0,0 +1,25 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+func forkExecPipe(p []int) error {
+ err := Pipe2(p, O_CLOEXEC)
+ if err == nil {
+ return nil
+ }
+
+ // FreeBSD 9 fallback.
+ // TODO: remove this for Go 1.10 per Issue 19072
+ err = Pipe(p)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
+ return err
+}
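Illustration only, not part of the patch: why pipe2 is preferred. O_CLOEXEC is applied atomically with descriptor creation, while the fcntl fallback leaves a window in which another thread's fork could leak the descriptors into a child. A sketch for platforms that expose syscall.Pipe2:

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	var p [2]int
    	// Both ends are created with close-on-exec already set.
    	if err := syscall.Pipe2(p[:], syscall.O_CLOEXEC); err != nil {
    		// Old kernels without pipe2: fall back to Pipe + fcntl,
    		// accepting the brief race window described above.
    		fmt.Println("pipe2 unavailable:", err)
    		return
    	}
    	fmt.Println("fds:", p[0], p[1])
    }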
diff --git a/libgo/go/syscall/exec_linux.go b/libgo/go/syscall/exec_linux.go
index 8d6467a8720..6e2f83e9416 100644
--- a/libgo/go/syscall/exec_linux.go
+++ b/libgo/go/syscall/exec_linux.go
@@ -13,6 +13,12 @@ import (
//sysnb raw_prctl(option int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err Errno)
//prctl(option _C_int, arg2 _C_long, arg3 _C_long, arg4 _C_long, arg5 _C_long) _C_int
+//sysnb rawUnshare(flags int) (err Errno)
+//unshare(flags _C_int) _C_int
+
+//sysnb rawMount(source *byte, target *byte, fstype *byte, flags uintptr, data *byte) (err Errno)
+//mount(source *byte, target *byte, fstype *byte, flags _C_long, data *byte) _C_int
+
// SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux.
// See user_namespaces(7).
type SysProcIDMap struct {
@@ -42,11 +48,18 @@ type SysProcAttr struct {
// This parameter is no-op if GidMappings == nil. Otherwise for unprivileged
// users this should be set to false for mappings work.
GidMappingsEnableSetgroups bool
+ AmbientCaps []uintptr // Ambient capabilities (Linux only)
}
+var (
+ none = [...]byte{'n', 'o', 'n', 'e', 0}
+ slash = [...]byte{'/', 0}
+)
+
// Implemented in runtime package.
func runtime_BeforeFork()
func runtime_AfterFork()
+func runtime_AfterForkInChild()
// Implemented in clone_linux.c
func rawClone(flags _C_ulong, child_stack *byte, ptid *Pid_t, ctid *Pid_t, regs unsafe.Pointer) _C_long
@@ -62,16 +75,62 @@ func rawClone(flags _C_ulong, child_stack *byte, ptid *Pid_t, ctid *Pid_t, regs
// functions that do not grow the stack.
//go:norace
func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
+ // Set up and fork. This returns immediately in the parent or
+ // if there's an error.
+ r1, err1, p, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe)
+ if locked {
+ runtime_AfterFork()
+ }
+ if err1 != 0 {
+ return 0, err1
+ }
+
+ // parent; return PID
+ pid = int(r1)
+
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ Close(p[0])
+ err := writeUidGidMappings(pid, sys)
+ var err2 Errno
+ if err != nil {
+ err2 = err.(Errno)
+ }
+ RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
+ Close(p[1])
+ }
+
+ return pid, 0
+}
+
+// forkAndExecInChild1 implements the body of forkAndExecInChild up to
+// the parent's post-fork path. This is a separate function so we can
+// separate the child's and parent's stack frames if we're using
+// vfork.
+//
+// This is go:noinline because the point is to keep the stack frames
+// of this and forkAndExecInChild separate.
+//
+//go:noinline
+//go:norace
+func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (r1 uintptr, err1 Errno, p [2]int, locked bool) {
+ // Defined in linux/prctl.h starting with Linux 4.3.
+ const (
+ PR_CAP_AMBIENT = 0x2f
+ PR_CAP_AMBIENT_RAISE = 0x2
+ )
+
+ // vfork requires that the child not touch any of the parent's
+ // active stack frames. Hence, the child does all post-fork
+ // processing in this stack frame and never returns, while the
+ // parent returns immediately from this frame and does all
+ // post-fork processing in the outer frame.
// Declare all variables at top in case any
// declarations require heap allocation (e.g., err1).
var (
- r1 uintptr
- r2 _C_long
- err1 Errno
err2 Errno
nextfd int
i int
- p [2]int
+ r2 int
)
// Record parent PID so child can test if it has died.
@@ -94,39 +153,42 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
// synchronizing writing of User ID/Group ID mappings.
if sys.UidMappings != nil || sys.GidMappings != nil {
if err := forkExecPipe(p[:]); err != nil {
- return 0, err.(Errno)
+ err1 = err.(Errno)
+ return
}
}
// About to call fork.
// No more allocation or calls of non-assembly functions.
runtime_BeforeFork()
- r2 = rawClone(_C_ulong(uintptr(SIGCHLD)|sys.Cloneflags), nil, nil, nil, unsafe.Pointer(nil))
+ locked = true
+ r2 = int(rawClone(_C_ulong(uintptr(SIGCHLD)|sys.Cloneflags), nil, nil, nil, unsafe.Pointer(nil)))
if r2 < 0 {
- runtime_AfterFork()
- return 0, GetErrno()
+ err1 = GetErrno()
}
-
if r2 != 0 {
- // parent; return PID
- runtime_AfterFork()
- pid = int(r2)
-
- if sys.UidMappings != nil || sys.GidMappings != nil {
- Close(p[0])
- err := writeUidGidMappings(pid, sys)
- if err != nil {
- err2 = err.(Errno)
- }
- RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
- Close(p[1])
- }
-
- return pid, 0
+ // If we're in the parent, we must return immediately
+ // so we're not in the same stack frame as the child.
+ // This can at most use the return PC, which the child
+ // will not modify, and the results of
+ // rawVforkSyscall, which must have been written after
+ // the child was replaced.
+ r1 = uintptr(r2)
+ return
}
// Fork succeeded, now in child.
+ runtime_AfterForkInChild()
+
+ // Enable the "keep capabilities" flag to set ambient capabilities later.
+ if len(sys.AmbientCaps) > 0 {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_KEEPCAPS, 1, 0, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
// Wait for User ID/Group ID mappings to be written.
if sys.UidMappings != nil || sys.GidMappings != nil {
if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(p[1]), 0, 0); err1 != 0 {
@@ -184,17 +246,30 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
}
}
- // Chroot
- if chroot != nil {
- err1 = raw_chroot(chroot)
+ // Unshare
+ if sys.Unshareflags != 0 {
+ err1 = rawUnshare(int(sys.Unshareflags))
if err1 != 0 {
goto childerror
}
+ // The unshare system call in Linux doesn't unshare mount points
+ // mounted with --shared. Systemd mounts / with --shared. For a
+ // long discussion of the pros and cons of this see debian bug 739593.
+ // The Go model of unsharing is more like Plan 9, where you ask
+ // to unshare and the namespaces are unconditionally unshared.
+ // To make this model work we must further mark / as MS_PRIVATE.
+ // This is what the standard unshare command does.
+ if sys.Unshareflags&CLONE_NEWNS == CLONE_NEWNS {
+ err1 = rawMount(&none[0], &slash[0], nil, MS_REC|MS_PRIVATE, nil)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
}
- // Unshare
- if sys.Unshareflags != 0 {
- _, _, err1 = RawSyscall(SYS_UNSHARE, sys.Unshareflags, 0, 0)
+ // Chroot
+ if chroot != nil {
+ err1 = raw_chroot(chroot)
if err1 != 0 {
goto childerror
}
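For illustration (not part of the patch): the MS_PRIVATE remount above is what makes this pattern effective on systemd hosts where / is mounted shared. A hypothetical sketch; the target directory must exist and the caller must be root.

    package main

    import (
    	"log"
    	"os/exec"
    	"syscall"
    )

    func main() {
    	// The child runs in its own mount namespace; with the remount to
    	// MS_PRIVATE, this mount stays invisible to the rest of the system.
    	cmd := exec.Command("mount", "-t", "proc", "none", "/tmp/proc")
    	cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
    	if out, err := cmd.CombinedOutput(); err != nil {
    		log.Fatalf("%v: %s", err, out)
    	}
    }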
@@ -207,10 +282,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
if ngroups > 0 {
groups = unsafe.Pointer(&cred.Groups[0])
}
- // Don't call setgroups in case of user namespace, gid mappings
- // and disabled setgroups, because otherwise unprivileged user namespace
- // will fail with any non-empty SysProcAttr.Credential.
- if !(sys.GidMappings != nil && !sys.GidMappingsEnableSetgroups && ngroups == 0) {
+ if !(sys.GidMappings != nil && !sys.GidMappingsEnableSetgroups && ngroups == 0) && !cred.NoSetGroups {
err1 = raw_setgroups(ngroups, groups)
if err1 != 0 {
goto childerror
@@ -226,6 +298,13 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
}
}
+ for _, c := range sys.AmbientCaps {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_CAP_AMBIENT, uintptr(PR_CAP_AMBIENT_RAISE), c, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
// Chdir
if dir != nil {
err1 = raw_chdir(dir)
@@ -321,7 +400,7 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr
// Set the controlling TTY to Ctty
if sys.Setctty {
- _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, 0)
+ _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, sys.Ctty)
if err1 != 0 {
goto childerror
}
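For illustration (not part of the patch): a hypothetical sketch of the new AmbientCaps field. The capability number is defined locally since the syscall package does not export it; Linux 4.3+ is required, and the capability must already be in the permitted and inheritable sets.

    package main

    import (
    	"log"
    	"os/exec"
    	"syscall"
    )

    const capNetBindService = 10 // CAP_NET_BIND_SERVICE from linux/capability.h

    func main() {
    	cmd := exec.Command("/usr/local/bin/webserver") // hypothetical binary
    	cmd.SysProcAttr = &syscall.SysProcAttr{
    		// Raised in the child via prctl(PR_CAP_AMBIENT_RAISE) as implemented above.
    		AmbientCaps: []uintptr{capNetBindService},
    	}
    	if err := cmd.Run(); err != nil {
    		log.Fatal(err)
    	}
    }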
diff --git a/libgo/go/syscall/exec_linux_test.go b/libgo/go/syscall/exec_linux_test.go
index 7a4b5717600..114deec5bbe 100644
--- a/libgo/go/syscall/exec_linux_test.go
+++ b/libgo/go/syscall/exec_linux_test.go
@@ -7,12 +7,20 @@
package syscall_test
import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io"
"io/ioutil"
"os"
"os/exec"
+ "os/user"
+ "path/filepath"
+ "strconv"
"strings"
"syscall"
"testing"
+ "unsafe"
)
// Check if we are in a chroot by checking if the inode of / is
@@ -49,6 +57,14 @@ func checkUserNS(t *testing.T) {
t.Skip("kernel prohibits user namespace in unprivileged process")
}
}
+ // On Centos 7 make sure they set the kernel parameter user_namespace=1
+ // See issue 16283 and 20796.
+ if _, err := os.Stat("/sys/module/user_namespace/parameters/enable"); err == nil {
+ buf, _ := ioutil.ReadFile("/sys/module/user_namespace/parameters/enable")
+ if !strings.HasPrefix(string(buf), "Y") {
+ t.Skip("kernel doesn't support user namespaces")
+ }
+ }
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
@@ -174,6 +190,12 @@ func TestUnshare(t *testing.T) {
}
out, err := cmd.CombinedOutput()
if err != nil {
+ if strings.Contains(err.Error(), "operation not permitted") {
+ // Issue 17206: despite all the checks above,
+ // this still reportedly fails for some users.
+ // (older kernels?). Just skip.
+ t.Skip("skipping due to permission error")
+ }
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
@@ -205,9 +227,10 @@ func TestGroupCleanup(t *testing.T) {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
- expected := "uid=0(root) gid=0(root) groups=0(root)"
+ expected := "uid=0(root) gid=0(root)"
// Just check prefix because some distros reportedly output a
// context parameter; see https://golang.org/issue/16224.
+ // Alpine does not output groups; see https://golang.org/issue/19938.
if !strings.HasPrefix(strOut, expected) {
t.Errorf("id command output: %q, expected prefix: %q", strOut, expected)
}
@@ -245,6 +268,7 @@ func TestGroupCleanupUserNamespace(t *testing.T) {
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
+ "uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938
}
for _, e := range expected {
if strOut == e {
@@ -253,3 +277,282 @@ func TestGroupCleanupUserNamespace(t *testing.T) {
}
t.Errorf("id command output: %q, expected one of %q", strOut, expected)
}
+
+// TestUnshareMountNameSpaceHelper isn't a real test. It's used as a helper process
+// for TestUnshareMountNameSpace.
+func TestUnshareMountNameSpaceHelper(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+ if err := syscall.Mount("none", flag.Args()[0], "proc", 0, ""); err != nil {
+ fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %v", os.Args, err)
+ os.Exit(2)
+ }
+}
+
+// Test for Issue 38471: unshare fails because systemd has forced / to be shared
+func TestUnshareMountNameSpace(t *testing.T) {
+ // Make sure we are running as root so we have permissions to use unshare
+ // and create a network namespace.
+ if os.Getuid() != 0 {
+ t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
+ }
+
+ // When running under the Go continuous build, skip tests for
+ // now when under Kubernetes. (where things are root but not quite)
+ // Both of these are our own environment variables.
+ // See Issue 12815.
+ if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
+ t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
+ }
+
+ d, err := ioutil.TempDir("", "unshare")
+ if err != nil {
+ t.Fatalf("tempdir: %v", err)
+ }
+
+ cmd := exec.Command(os.Args[0], "-test.run=TestUnshareMountNameSpaceHelper", d)
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
+
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ if strings.Contains(err.Error(), ": permission denied") {
+ t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
+ }
+ t.Fatalf("unshare failed: %s, %v", o, err)
+ }
+
+ // How do we tell if the namespace was really unshared? It turns out
+ // to be simple: just try to remove the directory. If it's still mounted
+ // on, the rm will fail with EBUSY. Then we have some cleanup to do:
+ // we must unmount it, then try to remove it again.
+
+ if err := os.Remove(d); err != nil {
+ t.Errorf("rmdir failed on %v: %v", d, err)
+ if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
+ t.Errorf("Can't unmount %v: %v", d, err)
+ }
+ if err := os.Remove(d); err != nil {
+ t.Errorf("rmdir after unmount failed on %v: %v", d, err)
+ }
+ }
+}
+
+// Test for Issue 20103: unshare fails when chroot is used
+func TestUnshareMountNameSpaceChroot(t *testing.T) {
+ // Make sure we are running as root so we have permissions to use unshare
+ // and create a network namespace.
+ if os.Getuid() != 0 {
+ t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
+ }
+
+ // When running under the Go continuous build, skip tests for
+ // now when under Kubernetes. (where things are root but not quite)
+ // Both of these are our own environment variables.
+ // See Issue 12815.
+ if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
+ t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
+ }
+
+ d, err := ioutil.TempDir("", "unshare")
+ if err != nil {
+ t.Fatalf("tempdir: %v", err)
+ }
+
+ // Since we are doing a chroot, we need the binary there,
+ // and it must be statically linked.
+ x := filepath.Join(d, "syscall.test")
+ cmd := exec.Command(testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall")
+ cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
+ if o, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err)
+ }
+
+ cmd = exec.Command("/syscall.test", "-test.run=TestUnshareMountNameSpaceHelper", "/")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS}
+
+ o, err := cmd.CombinedOutput()
+ if err != nil {
+ if strings.Contains(err.Error(), ": permission denied") {
+ t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
+ }
+ t.Fatalf("unshare failed: %s, %v", o, err)
+ }
+
+ // How do we tell if the namespace was really unshared? It turns out
+ // to be simple: just try to remove the executable. If it's still mounted
+ // on, the rm will fail. Then we have some cleanup to do:
+ // we must force unmount it, then try to remove it again.
+
+ if err := os.Remove(x); err != nil {
+ t.Errorf("rm failed on %v: %v", x, err)
+ if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
+ t.Fatalf("Can't unmount %v: %v", d, err)
+ }
+ if err := os.Remove(x); err != nil {
+ t.Fatalf("rm failed on %v: %v", x, err)
+ }
+ }
+
+ if err := os.Remove(d); err != nil {
+ t.Errorf("rmdir failed on %v: %v", d, err)
+ }
+}
+
+type capHeader struct {
+ version uint32
+ pid int
+}
+
+type capData struct {
+ effective uint32
+ permitted uint32
+ inheritable uint32
+}
+
+const CAP_SYS_TIME = 25
+
+type caps struct {
+ hdr capHeader
+ data [2]capData
+}
+
+func getCaps() (caps, error) {
+ var c caps
+
+ // Get capability version
+ if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 {
+ return c, fmt.Errorf("SYS_CAPGET: %v", errno)
+ }
+
+ // Get current capabilities
+ if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 {
+ return c, fmt.Errorf("SYS_CAPGET: %v", errno)
+ }
+
+ return c, nil
+}
+
+func mustSupportAmbientCaps(t *testing.T) {
+ var uname syscall.Utsname
+ if err := syscall.Uname(&uname); err != nil {
+ t.Fatalf("Uname: %v", err)
+ }
+ var buf [65]byte
+ for i, b := range uname.Release {
+ buf[i] = byte(b)
+ }
+ ver := string(buf[:])
+ if i := strings.Index(ver, "\x00"); i != -1 {
+ ver = ver[:i]
+ }
+ if strings.HasPrefix(ver, "2.") ||
+ strings.HasPrefix(ver, "3.") ||
+ strings.HasPrefix(ver, "4.1.") ||
+ strings.HasPrefix(ver, "4.2.") {
+ t.Skipf("kernel version %q predates required 4.3; skipping test", ver)
+ }
+}
+
+// TestAmbientCapsHelper isn't a real test. It's used as a helper process for
+// TestAmbientCaps.
+func TestAmbientCapsHelper(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ caps, err := getCaps()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ if caps.data[0].effective&(1<<uint(CAP_SYS_TIME)) == 0 {
+ fmt.Fprintln(os.Stderr, "CAP_SYS_TIME unexpectedly not in the effective capability mask")
+ os.Exit(2)
+ }
+}
diff --git a/libgo/go/testing/benchmark.go b/libgo/go/testing/benchmark.go
@@ -398,6 +411,7 @@ func runBenchmarks(matchString func(pat, str string) (bool, error), benchmarks [
w: os.Stdout,
chatty: *chatty,
},
+ importPath: importPath,
benchFunc: func(b *B) {
for _, Benchmark := range bs {
b.Run(Benchmark.Name, Benchmark.F)
@@ -462,7 +476,7 @@ func (ctx *benchContext) processBench(b *B) {
// least once will not be measured itself and will be called once with N=1.
//
// Run may be called simultaneously from multiple goroutines, but all such
-// calls must happen before the outer benchmark function for b returns.
+// calls must return before the outer benchmark function for b returns.
func (b *B) Run(name string, f func(b *B)) bool {
// Since b has subbenchmarks, we will no longer run it as a benchmark itself.
// Release the lock and acquire it on exit to ensure locks stay paired.
@@ -470,9 +484,9 @@ func (b *B) Run(name string, f func(b *B)) bool {
benchmarkLock.Unlock()
defer benchmarkLock.Lock()
- benchName, ok := b.name, true
+ benchName, ok, partial := b.name, true, false
if b.context != nil {
- benchName, ok = b.context.match.fullName(&b.common, name)
+ benchName, ok, partial = b.context.match.fullName(&b.common, name)
}
if !ok {
return true
@@ -486,9 +500,15 @@ func (b *B) Run(name string, f func(b *B)) bool {
w: b.w,
chatty: b.chatty,
},
- benchFunc: f,
- benchTime: b.benchTime,
- context: b.context,
+ importPath: b.importPath,
+ benchFunc: f,
+ benchTime: b.benchTime,
+ context: b.context,
+ }
+ if partial {
+ // Partial name match, like -bench=X/Y matching BenchmarkX.
+ // Only process sub-benchmarks, if any.
+ atomic.StoreInt32(&sub.hasSub, 1)
}
if sub.run1() {
sub.run()
@@ -634,10 +654,10 @@ func Benchmark(f func(b *B)) BenchmarkResult {
benchFunc: f,
benchTime: *benchTime,
}
- if !b.run1() {
- return BenchmarkResult{}
+ if b.run1() {
+ b.run()
}
- return b.run()
+ return b.result
}
type discard struct{}
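For illustration (not part of the patch): with the change above, testing.Benchmark returns b.result, so a benchmark function that only spawns sub-benchmarks via b.Run no longer loses its measurements. A minimal sketch of direct use:

    package main

    import (
    	"fmt"
    	"testing"
    )

    func main() {
    	res := testing.Benchmark(func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			_ = fmt.Sprintf("%d", i)
    		}
    	})
    	fmt.Println(res) // e.g. "20000000	        95.0 ns/op"; timings vary
    }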
diff --git a/libgo/go/testing/helper_test.go b/libgo/go/testing/helper_test.go
new file mode 100644
index 00000000000..f5cb27c317b
--- /dev/null
+++ b/libgo/go/testing/helper_test.go
@@ -0,0 +1,70 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+)
+
+func TestTBHelper(t *T) {
+ var buf bytes.Buffer
+ ctx := newTestContext(1, newMatcher(regexp.MatchString, "", ""))
+ t1 := &T{
+ common: common{
+ signal: make(chan bool),
+ w: &buf,
+ },
+ context: ctx,
+ }
+ t1.Run("Test", testHelper)
+
+ want := `--- FAIL: Test (?s)
+helperfuncs_test.go:12: 0
+helperfuncs_test.go:33: 1
+helperfuncs_test.go:21: 2
+helperfuncs_test.go:35: 3
+helperfuncs_test.go:42: 4
+helperfuncs_test.go:47: 5
+--- FAIL: Test/sub (?s)
+helperfuncs_test.go:50: 6
+helperfuncs_test.go:21: 7
+helperfuncs_test.go:53: 8
+`
+ lines := strings.Split(buf.String(), "\n")
+ durationRE := regexp.MustCompile(`\(.*\)$`)
+ for i, line := range lines {
+ line = strings.TrimSpace(line)
+ line = durationRE.ReplaceAllString(line, "(?s)")
+ lines[i] = line
+ }
+ got := strings.Join(lines, "\n")
+ if got != want {
+ t.Errorf("got output:\n\n%s\nwant:\n\n%s", got, want)
+ }
+}
+
+func TestTBHelperParallel(t *T) {
+ var buf bytes.Buffer
+ ctx := newTestContext(1, newMatcher(regexp.MatchString, "", ""))
+ t1 := &T{
+ common: common{
+ signal: make(chan bool),
+ w: &buf,
+ },
+ context: ctx,
+ }
+ t1.Run("Test", parallelTestHelper)
+
+ lines := strings.Split(strings.TrimSpace(buf.String()), "\n")
+ if len(lines) != 6 {
+ t.Fatalf("parallelTestHelper gave %d lines of output; want 6", len(lines))
+ }
+ want := "helperfuncs_test.go:21: parallel"
+ if got := strings.TrimSpace(lines[1]); got != want {
+ t.Errorf("got output line %q; want %q", got, want)
+ }
+}
diff --git a/libgo/go/testing/helperfuncs_test.go b/libgo/go/testing/helperfuncs_test.go
new file mode 100644
index 00000000000..7cb2e2cc56d
--- /dev/null
+++ b/libgo/go/testing/helperfuncs_test.go
@@ -0,0 +1,67 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testing
+
+import "sync"
+
+// The line numbering of this file is important for TestTBHelper.
+
+func notHelper(t *T, msg string) {
+ t.Error(msg)
+}
+
+func helper(t *T, msg string) {
+ t.Helper()
+ t.Error(msg)
+}
+
+func notHelperCallingHelper(t *T, msg string) {
+ helper(t, msg)
+}
+
+func helperCallingHelper(t *T, msg string) {
+ t.Helper()
+ helper(t, msg)
+}
+
+func testHelper(t *T) {
+ // Check combinations of directly and indirectly
+ // calling helper functions.
+ notHelper(t, "0")
+ helper(t, "1")
+ notHelperCallingHelper(t, "2")
+ helperCallingHelper(t, "3")
+
+ // Check a function literal closing over t that uses Helper.
+ fn := func(msg string) {
+ t.Helper()
+ t.Error(msg)
+ }
+ fn("4")
+
+ // Check that calling Helper from inside this test entry function
+ // doesn't have an effect.
+ t.Helper()
+ t.Error("5")
+
+ t.Run("sub", func(t *T) {
+ helper(t, "6")
+ notHelperCallingHelper(t, "7")
+ t.Helper()
+ t.Error("8")
+ })
+}
+
+func parallelTestHelper(t *T) {
+ var wg sync.WaitGroup
+ for i := 0; i < 5; i++ {
+ wg.Add(1)
+ go func() {
+ notHelperCallingHelper(t, "parallel")
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/libgo/go/testing/internal/testdeps/deps.go b/libgo/go/testing/internal/testdeps/deps.go
index b08300b5d60..042f69614ee 100644
--- a/libgo/go/testing/internal/testdeps/deps.go
+++ b/libgo/go/testing/internal/testdeps/deps.go
@@ -49,3 +49,10 @@ func (TestDeps) WriteHeapProfile(w io.Writer) error {
func (TestDeps) WriteProfileTo(name string, w io.Writer, debug int) error {
return pprof.Lookup(name).WriteTo(w, debug)
}
+
+// ImportPath is the import path of the testing binary, set by the generated main function.
+var ImportPath string
+
+func (TestDeps) ImportPath() string {
+ return ImportPath
+}
diff --git a/libgo/go/testing/match.go b/libgo/go/testing/match.go
index 77510357602..89e30d01a75 100644
--- a/libgo/go/testing/match.go
+++ b/libgo/go/testing/match.go
@@ -47,7 +47,7 @@ func newMatcher(matchString func(pat, str string) (bool, error), patterns, name
}
}
-func (m *matcher) fullName(c *common, subname string) (name string, ok bool) {
+func (m *matcher) fullName(c *common, subname string) (name string, ok, partial bool) {
name = subname
m.mu.Lock()
@@ -62,15 +62,16 @@ func (m *matcher) fullName(c *common, subname string) (name string, ok bool) {
// We check the full array of paths each time to allow for the case that
// a pattern contains a '/'.
- for i, s := range strings.Split(name, "/") {
+ elem := strings.Split(name, "/")
+ for i, s := range elem {
if i >= len(m.filter) {
break
}
if ok, _ := m.matchFunc(m.filter[i], s); !ok {
- return name, false
+ return name, false, false
}
}
- return name, true
+ return name, true, len(elem) < len(m.filter)
}
func splitRegexp(s string) []string {
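
An illustrative sketch of what the new partial flag enables (the benchmark names here are made up, not from the patch): under `go test -bench=BenchmarkEncode/json`, the name "BenchmarkEncode" is only a partial match for the pattern, so the matcher reports partial=true, the parent is marked as having sub-benchmarks up front, and only the matching sub-benchmark is processed.

    package example_test

    import "testing"

    func BenchmarkEncode(b *testing.B) {
    	// With -bench=BenchmarkEncode/json only this sub-benchmark runs;
    	// the parent body is not itself treated as a leaf benchmark.
    	b.Run("json", func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			// ... encode to JSON ...
    		}
    	})
    	b.Run("xml", func(b *testing.B) {
    		for i := 0; i < b.N; i++ {
    			// ... encode to XML ...
    		}
    	})
    }
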
diff --git a/libgo/go/testing/match_test.go b/libgo/go/testing/match_test.go
index 8c1c5f4452c..8c09dc660fb 100644
--- a/libgo/go/testing/match_test.go
+++ b/libgo/go/testing/match_test.go
@@ -88,43 +88,44 @@ func TestMatcher(t *T) {
pattern string
parent, sub string
ok bool
+ partial bool
}{
// Behavior without subtests.
- {"", "", "TestFoo", true},
- {"TestFoo", "", "TestFoo", true},
- {"TestFoo/", "", "TestFoo", true},
- {"TestFoo/bar/baz", "", "TestFoo", true},
- {"TestFoo", "", "TestBar", false},
- {"TestFoo/", "", "TestBar", false},
- {"TestFoo/bar/baz", "", "TestBar/bar/baz", false},
+ {"", "", "TestFoo", true, false},
+ {"TestFoo", "", "TestFoo", true, false},
+ {"TestFoo/", "", "TestFoo", true, true},
+ {"TestFoo/bar/baz", "", "TestFoo", true, true},
+ {"TestFoo", "", "TestBar", false, false},
+ {"TestFoo/", "", "TestBar", false, false},
+ {"TestFoo/bar/baz", "", "TestBar/bar/baz", false, false},
// with subtests
- {"", "TestFoo", "x", true},
- {"TestFoo", "TestFoo", "x", true},
- {"TestFoo/", "TestFoo", "x", true},
- {"TestFoo/bar/baz", "TestFoo", "bar", true},
+ {"", "TestFoo", "x", true, false},
+ {"TestFoo", "TestFoo", "x", true, false},
+ {"TestFoo/", "TestFoo", "x", true, false},
+ {"TestFoo/bar/baz", "TestFoo", "bar", true, true},
// Subtest with a '/' in its name still allows for copy and pasted names
// to match.
- {"TestFoo/bar/baz", "TestFoo", "bar/baz", true},
- {"TestFoo/bar/baz", "TestFoo/bar", "baz", true},
- {"TestFoo/bar/baz", "TestFoo", "x", false},
- {"TestFoo", "TestBar", "x", false},
- {"TestFoo/", "TestBar", "x", false},
- {"TestFoo/bar/baz", "TestBar", "x/bar/baz", false},
+ {"TestFoo/bar/baz", "TestFoo", "bar/baz", true, false},
+ {"TestFoo/bar/baz", "TestFoo/bar", "baz", true, false},
+ {"TestFoo/bar/baz", "TestFoo", "x", false, false},
+ {"TestFoo", "TestBar", "x", false, false},
+ {"TestFoo/", "TestBar", "x", false, false},
+ {"TestFoo/bar/baz", "TestBar", "x/bar/baz", false, false},
// subtests only
- {"", "TestFoo", "x", true},
- {"/", "TestFoo", "x", true},
- {"./", "TestFoo", "x", true},
- {"./.", "TestFoo", "x", true},
- {"/bar/baz", "TestFoo", "bar", true},
- {"/bar/baz", "TestFoo", "bar/baz", true},
- {"//baz", "TestFoo", "bar/baz", true},
- {"//", "TestFoo", "bar/baz", true},
- {"/bar/baz", "TestFoo/bar", "baz", true},
- {"//foo", "TestFoo", "bar/baz", false},
- {"/bar/baz", "TestFoo", "x", false},
- {"/bar/baz", "TestBar", "x/bar/baz", false},
+ {"", "TestFoo", "x", true, false},
+ {"/", "TestFoo", "x", true, false},
+ {"./", "TestFoo", "x", true, false},
+ {"./.", "TestFoo", "x", true, false},
+ {"/bar/baz", "TestFoo", "bar", true, true},
+ {"/bar/baz", "TestFoo", "bar/baz", true, false},
+ {"//baz", "TestFoo", "bar/baz", true, false},
+ {"//", "TestFoo", "bar/baz", true, false},
+ {"/bar/baz", "TestFoo/bar", "baz", true, false},
+ {"//foo", "TestFoo", "bar/baz", false, false},
+ {"/bar/baz", "TestFoo", "x", false, false},
+ {"/bar/baz", "TestBar", "x/bar/baz", false, false},
}
for _, tc := range testCases {
@@ -134,9 +135,9 @@ func TestMatcher(t *T) {
if tc.parent != "" {
parent.level = 1
}
- if n, ok := m.fullName(parent, tc.sub); ok != tc.ok {
- t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v; want ok %v",
- tc.pattern, tc.parent, tc.sub, n, ok, tc.ok)
+ if n, ok, partial := m.fullName(parent, tc.sub); ok != tc.ok || partial != tc.partial {
+ t.Errorf("for pattern %q, fullName(parent=%q, sub=%q) = %q, ok %v partial %v; want ok %v partial %v",
+ tc.pattern, tc.parent, tc.sub, n, ok, partial, tc.ok, tc.partial)
}
}
}
@@ -178,7 +179,7 @@ func TestNaming(t *T) {
}
for i, tc := range testCases {
- if got, _ := m.fullName(parent, tc.name); got != tc.want {
+ if got, _, _ := m.fullName(parent, tc.name); got != tc.want {
t.Errorf("%d:%s: got %q; want %q", i, tc.name, got, tc.want)
}
}
diff --git a/libgo/go/testing/quick/quick.go b/libgo/go/testing/quick/quick.go
index 95860fda0fb..0457fc7571b 100644
--- a/libgo/go/testing/quick/quick.go
+++ b/libgo/go/testing/quick/quick.go
@@ -14,6 +14,7 @@ import (
"math/rand"
"reflect"
"strings"
+ "time"
)
var defaultMaxCount *int = flag.Int("quickchecks", 100, "The default number of iterations for each check")
@@ -43,8 +44,10 @@ func randFloat64(rand *rand.Rand) float64 {
return f
}
-// randInt64 returns a random integer taking half the range of an int64.
-func randInt64(rand *rand.Rand) int64 { return rand.Int63() - 1<<62 }
+// randInt64 returns a random int64.
+func randInt64(rand *rand.Rand) int64 {
+ return int64(rand.Uint64())
+}
// complexSize is the maximum length of arbitrary values that contain other
// values.
@@ -172,19 +175,20 @@ func sizedValue(t reflect.Type, rand *rand.Rand, size int) (value reflect.Value,
// A Config structure contains options for running a test.
type Config struct {
- // MaxCount sets the maximum number of iterations. If zero,
- // MaxCountScale is used.
+ // MaxCount sets the maximum number of iterations.
+ // If zero, MaxCountScale is used.
MaxCount int
- // MaxCountScale is a non-negative scale factor applied to the default
- // maximum. If zero, the default is unchanged.
+ // MaxCountScale is a non-negative scale factor applied to the
+ // default maximum.
+ // If zero, the default is unchanged.
MaxCountScale float64
- // If non-nil, rand is a source of random numbers. Otherwise a default
- // pseudo-random source will be used.
+ // Rand specifies a source of random numbers.
+ // If nil, a default pseudo-random source will be used.
Rand *rand.Rand
- // If non-nil, the Values function generates a slice of arbitrary
- // reflect.Values that are congruent with the arguments to the function
- // being tested. Otherwise, the top-level Value function is used
- // to generate them.
+ // Values specifies a function to generate a slice of
+ // arbitrary reflect.Values that are congruent with the
+ // arguments to the function being tested.
+ // If nil, the top-level Value function is used to generate them.
Values func([]reflect.Value, *rand.Rand)
}
@@ -193,7 +197,7 @@ var defaultConfig Config
// getRand returns the *rand.Rand to use for a given Config.
func (c *Config) getRand() *rand.Rand {
if c.Rand == nil {
- return rand.New(rand.NewSource(0))
+ return rand.New(rand.NewSource(time.Now().UnixNano()))
}
return c.Rand
}
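
Since getRand now seeds from the current time, repeated runs explore different inputs. A sketch (the property here is trivial and hypothetical) of pinning the source via Config.Rand when reproducibility matters:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"testing/quick"
    )

    func main() {
    	// A fixed source makes failures reproducible even though the
    	// package default is now time-seeded.
    	cfg := &quick.Config{Rand: rand.New(rand.NewSource(42))}
    	prop := func(x int64) bool { return x-x == 0 } // trivially true property
    	if err := quick.Check(prop, cfg); err != nil {
    		fmt.Println(err)
    	}
    }
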
diff --git a/libgo/go/testing/quick/quick_test.go b/libgo/go/testing/quick/quick_test.go
index fe443592f87..4246cd1d3ba 100644
--- a/libgo/go/testing/quick/quick_test.go
+++ b/libgo/go/testing/quick/quick_test.go
@@ -307,3 +307,21 @@ func TestNonZeroSliceAndMap(t *testing.T) {
t.Fatal(err)
}
}
+
+func TestInt64(t *testing.T) {
+ var lo, hi int64
+ f := func(x int64) bool {
+ if x < lo {
+ lo = x
+ }
+ if x > hi {
+ hi = x
+ }
+ return true
+ }
+ cfg := &Config{MaxCount: 100000}
+ Check(f, cfg)
+ if uint64(lo)>>62 == 0 || uint64(hi)>>62 == 0 {
+ t.Errorf("int64 returned range %#016x,%#016x; does not look like full range", lo, hi)
+ }
+}
diff --git a/libgo/go/testing/sub_test.go b/libgo/go/testing/sub_test.go
index bb7b3e09255..acf5dea8785 100644
--- a/libgo/go/testing/sub_test.go
+++ b/libgo/go/testing/sub_test.go
@@ -8,11 +8,18 @@ import (
"bytes"
"fmt"
"regexp"
+ "runtime"
"strings"
+ "sync"
"sync/atomic"
"time"
)
+func init() {
+ // Make benchmark tests run 10x faster.
+ *benchTime = 100 * time.Millisecond
+}
+
func TestTestContext(t *T) {
const (
add1 = 0
@@ -455,8 +462,14 @@ func TestBRun(t *T) {
_ = append([]byte(nil), buf[:]...)
}
}
- b.Run("", func(b *B) { alloc(b) })
- b.Run("", func(b *B) { alloc(b) })
+ b.Run("", func(b *B) {
+ alloc(b)
+ b.ReportAllocs()
+ })
+ b.Run("", func(b *B) {
+ alloc(b)
+ b.ReportAllocs()
+ })
// runtime.MemStats sometimes reports more allocations than the
// benchmark is responsible for. Luckily the point of this test is
// to ensure that the results are not underreported, so we can
@@ -517,6 +530,26 @@ func TestBenchmarkOutput(t *T) {
Benchmark(func(b *B) {})
}
+func TestBenchmarkStartsFrom1(t *T) {
+ var first = true
+ Benchmark(func(b *B) {
+ if first && b.N != 1 {
+ panic(fmt.Sprintf("Benchmark() first N=%v; want 1", b.N))
+ }
+ first = false
+ })
+}
+
+func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) {
+ var first = true
+ Benchmark(func(b *B) {
+ if first && (b.startAllocs == 0 || b.startBytes == 0) {
+ panic("ReadMemStats not called before first run")
+ }
+ first = false
+ })
+}
+
func TestParallelSub(t *T) {
c := make(chan int)
block := make(chan int)
@@ -532,3 +565,59 @@ func TestParallelSub(t *T) {
<-c
}
}
+
+type funcWriter func([]byte) (int, error)
+
+func (fw funcWriter) Write(b []byte) (int, error) { return fw(b) }
+
+func TestRacyOutput(t *T) {
+ var runs int32 // The number of running Writes
+ var races int32 // Incremented for each race detected
+ raceDetector := func(b []byte) (int, error) {
+ // Check if some other goroutine is concurrently calling Write.
+ if atomic.LoadInt32(&runs) > 0 {
+ atomic.AddInt32(&races, 1) // Race detected!
+ }
+ atomic.AddInt32(&runs, 1)
+ defer atomic.AddInt32(&runs, -1)
+ runtime.Gosched() // Increase probability of a race
+ return len(b), nil
+ }
+
+ var wg sync.WaitGroup
+ root := &T{
+ common: common{w: funcWriter(raceDetector), chatty: true},
+ context: newTestContext(1, newMatcher(regexp.MatchString, "", "")),
+ }
+ root.Run("", func(t *T) {
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ t.Run(fmt.Sprint(i), func(t *T) {
+ t.Logf("testing run %d", i)
+ })
+ }(i)
+ }
+ })
+ wg.Wait()
+
+ if races > 0 {
+ t.Errorf("detected %d racy Writes", races)
+ }
+}
+
+func TestBenchmark(t *T) {
+ res := Benchmark(func(b *B) {
+ for i := 0; i < 5; i++ {
+ b.Run("", func(b *B) {
+ for i := 0; i < b.N; i++ {
+ time.Sleep(time.Millisecond)
+ }
+ })
+ }
+ })
+ if res.NsPerOp() < 4000000 {
+ t.Errorf("want >5ms; got %v", time.Duration(res.NsPerOp()))
+ }
+}
diff --git a/libgo/go/testing/testing.go b/libgo/go/testing/testing.go
index b002aa0bf10..a62974211fb 100644
--- a/libgo/go/testing/testing.go
+++ b/libgo/go/testing/testing.go
@@ -83,16 +83,30 @@
// ignores leading and trailing space.) These are examples of an example:
//
// func ExampleHello() {
-// fmt.Println("hello")
-// // Output: hello
+// fmt.Println("hello")
+// // Output: hello
// }
//
// func ExampleSalutations() {
-// fmt.Println("hello, and")
-// fmt.Println("goodbye")
-// // Output:
-// // hello, and
-// // goodbye
+// fmt.Println("hello, and")
+// fmt.Println("goodbye")
+// // Output:
+// // hello, and
+// // goodbye
+// }
+//
+// The comment prefix "Unordered output:" is like "Output:", but matches any
+// line order:
+//
+// func ExamplePerm() {
+// for _, value := range Perm(4) {
+// fmt.Println(value)
+// }
+// // Unordered output: 4
+// // 2
+// // 1
+// // 3
+// // 0
// }
//
// Example functions without output comments are compiled but not executed.
@@ -238,6 +252,7 @@ var (
chatty = flag.Bool("test.v", false, "verbose: print additional output")
count = flag.Uint("test.count", 1, "run tests and benchmarks `n` times")
coverProfile = flag.String("test.coverprofile", "", "write a coverage profile to `file`")
+ matchList = flag.String("test.list", "", "list tests, examples, and benchmarks matching `regexp` then exit")
match = flag.String("test.run", "", "run only tests and examples matching `regexp`")
memProfile = flag.String("test.memprofile", "", "write a memory profile to `file`")
memProfileRate = flag.Int("test.memprofilerate", 0, "set memory profiling `rate` (see runtime.MemProfileRate)")
@@ -247,7 +262,7 @@ var (
mutexProfile = flag.String("test.mutexprofile", "", "write a mutex contention profile to the named file after execution")
mutexProfileFraction = flag.Int("test.mutexprofilefraction", 1, "if >= 0, calls runtime.SetMutexProfileFraction()")
traceFile = flag.String("test.trace", "", "write an execution trace to `file`")
- timeout = flag.Duration("test.timeout", 0, "fail test binary execution after duration `d` (0 means unlimited)")
+ timeout = flag.Duration("test.timeout", 0, "panic test binary after duration `d` (0 means unlimited)")
cpuListStr = flag.String("test.cpu", "", "comma-separated `list` of cpu counts to run each test with")
parallel = flag.Int("test.parallel", runtime.GOMAXPROCS(0), "run at most `n` tests in parallel")
@@ -259,17 +274,20 @@ var (
// common holds the elements common between T and B and
// captures common methods such as Errorf.
type common struct {
- mu sync.RWMutex // guards output, failed, and done.
- output []byte // Output generated by test or benchmark.
- w io.Writer // For flushToParent.
- chatty bool // A copy of the chatty flag.
- ran bool // Test or benchmark (or one of its subtests) was executed.
- failed bool // Test or benchmark has failed.
- skipped bool // Test of benchmark has been skipped.
- finished bool // Test function has completed.
- done bool // Test is finished and all subtests have completed.
- hasSub int32 // written atomically
- raceErrors int // number of races detected during test
+ mu sync.RWMutex // guards this group of fields
+ output []byte // Output generated by test or benchmark.
+ w io.Writer // For flushToParent.
+ ran bool // Test or benchmark (or one of its subtests) was executed.
+ failed bool // Test or benchmark has failed.
+ skipped bool // Test or benchmark has been skipped.
+ done bool // Test is finished and all subtests have completed.
+ helpers map[string]struct{} // functions to be skipped when writing file/line info
+
+ chatty bool // A copy of the chatty flag.
+ finished bool // Test function has completed.
+ hasSub int32 // written atomically
+ raceErrors int // number of races detected during test
+ runner string // function name of tRunner running the test
parent *common
level int // Nesting depth of test or benchmark.
@@ -298,10 +316,48 @@ func Verbose() bool {
return *chatty
}
+// frameSkip searches, starting after skip frames, for the first caller frame
+// in a function not marked as a helper and returns the frames to skip
+// to reach that site. The search stops if it finds a tRunner function that
+// was the entry point into the test.
+// This function must be called with c.mu held.
+func (c *common) frameSkip(skip int) int {
+ if c.helpers == nil {
+ return skip
+ }
+ var pc [50]uintptr
+ // Skip two extra frames to account for this function
+ // and runtime.Callers itself.
+ n := runtime.Callers(skip+2, pc[:])
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ more := true
+ for i := 0; more; i++ {
+ frame, more = frames.Next()
+ if frame.Function == c.runner {
+ // We've gone up all the way to the tRunner calling
+ // the test function (so the user must have
+ // called tb.Helper from inside that test function).
+ // Only skip up to the test function itself.
+ return skip + i - 1
+ }
+ if _, ok := c.helpers[frame.Function]; !ok {
+ // Found a frame that wasn't inside a helper function.
+ return skip + i
+ }
+ }
+ return skip
+}
+
// decorate prefixes the string with the file and line of the call site
// and inserts the final newline if needed and indentation tabs for formatting.
-func decorate(s string) string {
- _, file, line, ok := runtime.Caller(3) // decorate + log + public function.
+// This function must be called with c.mu held.
+func (c *common) decorate(s string) string {
+ skip := c.frameSkip(3) // decorate + log + public function.
+ _, file, line, ok := runtime.Caller(skip)
if ok {
// Truncate file name at last file name separator.
if index := strings.LastIndex(file, "/"); index >= 0 {
@@ -391,6 +447,7 @@ type TB interface {
SkipNow()
Skipf(format string, args ...interface{})
Skipped() bool
+ Helper()
// A private method to prevent users implementing the
// interface and so future additions to it will not
@@ -450,8 +507,9 @@ func (c *common) Fail() {
// Failed reports whether the function has failed.
func (c *common) Failed() bool {
c.mu.RLock()
- defer c.mu.RUnlock()
- return c.failed
+ failed := c.failed
+ c.mu.RUnlock()
+ return failed || c.raceErrors+race.Errors() > 0
}
// FailNow marks the function as having failed and stops its execution.
@@ -490,7 +548,7 @@ func (c *common) FailNow() {
func (c *common) log(s string) {
c.mu.Lock()
defer c.mu.Unlock()
- c.output = append(c.output, decorate(s)...)
+ c.output = append(c.output, c.decorate(s)...)
}
// Log formats its arguments using default formatting, analogous to Println,
@@ -568,8 +626,38 @@ func (c *common) Skipped() bool {
return c.skipped
}
+// Helper marks the calling function as a test helper function.
+// When printing file and line information, that function will be skipped.
+// Helper may be called simultaneously from multiple goroutines.
+// Helper has no effect if it is called directly from a TestXxx/BenchmarkXxx
+// function or a subtest/sub-benchmark function.
+func (c *common) Helper() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.helpers == nil {
+ c.helpers = make(map[string]struct{})
+ }
+ c.helpers[callerName(1)] = struct{}{}
+}
+
+// callerName gives the function name (qualified with a package path)
+// for the caller after skip frames (where 0 means the current function).
+func callerName(skip int) string {
+ // Make room for the skip PC.
+ var pc [2]uintptr
+ n := runtime.Callers(skip+2, pc[:]) // skip + runtime.Callers + callerName
+ if n == 0 {
+ panic("testing: zero callers found")
+ }
+ frames := runtime.CallersFrames(pc[:n])
+ frame, _ := frames.Next()
+ return frame.Function
+}
+
// Parallel signals that this test is to be run in parallel with (and only with)
-// other parallel tests.
+// other parallel tests. When a test is run multiple times due to use of
+// -test.count or -test.cpu, multiple instances of a single test never run in
+// parallel with each other.
func (t *T) Parallel() {
if t.isParallel {
panic("testing: t.Parallel called multiple times")
@@ -600,13 +688,14 @@ type InternalTest struct {
}
func tRunner(t *T, fn func(t *T)) {
+ t.runner = callerName(0)
+
// When this goroutine is done, either because fn(t)
// returned normally or because a test failure triggered
// a call to runtime.Goexit, record the duration and send
// a signal saying that the test is done.
defer func() {
- t.raceErrors += race.Errors()
- if t.raceErrors > 0 {
+ if t.raceErrors+race.Errors() > 0 {
t.Errorf("race detected during execution of test")
}
@@ -658,14 +747,15 @@ func tRunner(t *T, fn func(t *T)) {
t.finished = true
}
-// Run runs f as a subtest of t called name. It reports whether f succeeded.
-// Run will block until all its parallel subtests have completed.
+// Run runs f as a subtest of t called name. It reports whether f succeeded. Run
+// runs f in a separate goroutine and will block until all its parallel subtests
+// have completed.
//
-// Run may be called simultaneously from multiple goroutines, but all such
-// calls must happen before the outer test function for t returns.
+// Run may be called simultaneously from multiple goroutines, but all such calls
+// must return before the outer test function for t returns.
func (t *T) Run(name string, f func(t *T)) bool {
atomic.StoreInt32(&t.hasSub, 1)
- testName, ok := t.context.match.fullName(&t.common, name)
+ testName, ok, _ := t.context.match.fullName(&t.common, name)
if !ok {
return true
}
@@ -687,7 +777,9 @@ func (t *T) Run(name string, f func(t *T)) bool {
root := t.parent
for ; root.parent != nil; root = root.parent {
}
+ root.mu.Lock()
fmt.Fprintf(root.w, "=== RUN %s\n", t.name)
+ root.mu.Unlock()
}
// Instead of reducing the running count of this test before calling the
// tRunner and increasing it afterwards, we rely on tRunner keeping the
@@ -764,6 +856,7 @@ func (f matchStringOnly) StartCPUProfile(w io.Writer) error { return e
func (f matchStringOnly) StopCPUProfile() {}
func (f matchStringOnly) WriteHeapProfile(w io.Writer) error { return errMain }
func (f matchStringOnly) WriteProfileTo(string, io.Writer, int) error { return errMain }
+func (f matchStringOnly) ImportPath() string { return "" }
// Main is an internal function, part of the implementation of the "go test" command.
// It was exported because it is cross-package and predates "internal" packages.
@@ -793,6 +886,7 @@ type testDeps interface {
StopCPUProfile()
WriteHeapProfile(io.Writer) error
WriteProfileTo(string, io.Writer, int) error
+ ImportPath() string
}
// MainStart is meant for use by tests generated by 'go test'.
@@ -814,6 +908,11 @@ func (m *M) Run() int {
flag.Parse()
}
+ if len(*matchList) != 0 {
+ listTests(m.deps.MatchString, m.tests, m.benchmarks, m.examples)
+ return 0
+ }
+
parseCpuList()
m.before()
@@ -825,7 +924,7 @@ func (m *M) Run() int {
if !testRan && !exampleRan && *matchBenchmarks == "" {
fmt.Fprintln(os.Stderr, "testing: warning: no tests to run")
}
- if !testOk || !exampleOk || !runBenchmarks(m.deps.MatchString, m.benchmarks) || race.Errors() > 0 {
+ if !testOk || !exampleOk || !runBenchmarks(m.deps.ImportPath(), m.deps.MatchString, m.benchmarks) || race.Errors() > 0 {
fmt.Println("FAIL")
m.after()
return 1
@@ -853,6 +952,29 @@ func (t *T) report() {
}
}
+func listTests(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {
+ if _, err := matchString(*matchList, "non-empty"); err != nil {
+ fmt.Fprintf(os.Stderr, "testing: invalid regexp in -test.list (%q): %s\n", *matchList, err)
+ os.Exit(1)
+ }
+
+ for _, test := range tests {
+ if ok, _ := matchString(*matchList, test.Name); ok {
+ fmt.Println(test.Name)
+ }
+ }
+ for _, bench := range benchmarks {
+ if ok, _ := matchString(*matchList, bench.Name); ok {
+ fmt.Println(bench.Name)
+ }
+ }
+ for _, example := range examples {
+ if ok, _ := matchString(*matchList, example.Name); ok {
+ fmt.Println(example.Name)
+ }
+ }
+}
+
// An internal function but exported because it is cross-package; part of the implementation
// of the "go test" command.
func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {
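
Two of the additions above in use. First, a sketch of t.Helper (test and helper names are made up): a failure inside the helper is attributed to the caller's line.

    package example_test

    import "testing"

    // assertEqual marks itself as a helper, so the file:line printed for a
    // failure points at the call in TestSum, not at t.Errorf below.
    func assertEqual(t *testing.T, got, want int) {
    	t.Helper()
    	if got != want {
    		t.Errorf("got %d, want %d", got, want)
    	}
    }

    func TestSum(t *testing.T) {
    	assertEqual(t, 1+2, 3)
    }

Second, the new -test.list flag is driven from the command line, e.g. `go test -list '.*'`, which prints the names of matching tests, benchmarks, and examples and exits without running them.
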
diff --git a/libgo/go/text/scanner/example_test.go b/libgo/go/text/scanner/example_test.go
index f48c31daa0a..9e2d5b7c733 100644
--- a/libgo/go/text/scanner/example_test.go
+++ b/libgo/go/text/scanner/example_test.go
@@ -14,28 +14,25 @@ import (
func Example() {
const src = `
- // This is scanned code.
- if a > 10 {
- someParsable = text
- }`
+// This is scanned code.
+if a > 10 {
+ someParsable = text
+}`
var s scanner.Scanner
- s.Filename = "example"
s.Init(strings.NewReader(src))
- var tok rune
- for tok != scanner.EOF {
- tok = s.Scan()
- fmt.Println("At position", s.Pos(), ":", s.TokenText())
+ s.Filename = "example"
+ for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+ fmt.Printf("%s: %s\n", s.Position, s.TokenText())
}
// Output:
- // At position example:3:4 : if
- // At position example:3:6 : a
- // At position example:3:8 : >
- // At position example:3:11 : 10
- // At position example:3:13 : {
- // At position example:4:15 : someParsable
- // At position example:4:17 : =
- // At position example:4:22 : text
- // At position example:5:3 : }
- // At position example:5:3 :
+ // example:3:1: if
+ // example:3:4: a
+ // example:3:6: >
+ // example:3:8: 10
+ // example:3:11: {
+ // example:4:2: someParsable
+ // example:4:15: =
+ // example:4:17: text
+ // example:5:1: }
}
diff --git a/libgo/go/text/scanner/scanner.go b/libgo/go/text/scanner/scanner.go
index e085f8a7d95..6fb0422fe5e 100644
--- a/libgo/go/text/scanner/scanner.go
+++ b/libgo/go/text/scanner/scanner.go
@@ -166,7 +166,8 @@ type Scanner struct {
// The Filename field is always left untouched by the Scanner.
// If an error is reported (via Error) and Position is invalid,
// the scanner is not inside a token. Call Pos to obtain an error
- // position in that case.
+ // position in that case, or to obtain the position immediately
+ // after the most recently scanned token.
Position
}
@@ -637,6 +638,8 @@ redo:
// Pos returns the position of the character immediately after
// the character or token returned by the last call to Next or Scan.
+// Use the Scanner's Position field for the start position of the most
+// recently scanned token.
func (s *Scanner) Pos() (pos Position) {
pos.Filename = s.Filename
pos.Offset = s.srcBufOffset + s.srcPos - s.lastCharLen
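
A small sketch of the distinction the new doc text draws (illustrative, not part of the patch): after Scan, the Position field holds the start of the token just read, while Pos reports the position immediately after it.

    package main

    import (
    	"fmt"
    	"strings"
    	"text/scanner"
    )

    func main() {
    	var s scanner.Scanner
    	s.Init(strings.NewReader("foo bar"))
    	s.Scan() // scans "foo"
    	// Start of the token vs. just past it: 1:1 vs. 1:4 here.
    	fmt.Println(s.Position, s.Pos())
    }
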
diff --git a/libgo/go/text/template/doc.go b/libgo/go/text/template/doc.go
index fe59e3f74e8..d174ebd9cfe 100644
--- a/libgo/go/text/template/doc.go
+++ b/libgo/go/text/template/doc.go
@@ -20,7 +20,8 @@ The input text for a template is UTF-8-encoded text in any format.
"{{" and "}}"; all text outside actions is copied to the output unchanged.
Except for raw strings, actions may not span newlines, although comments can.
-Once parsed, a template may be executed safely in parallel.
+Once parsed, a template may be executed safely in parallel, although if parallel
+executions share a Writer the output may be interleaved.
Here is a trivial example that prints "17 items are made of wool".
@@ -80,14 +81,14 @@ data, defined in detail in the corresponding sections that follow.
{{if pipeline}} T1 {{end}}
If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
+ otherwise, T1 is executed. The empty values are false, 0, any
nil pointer or interface value, and any array, slice, map, or
string of length zero.
Dot is unaffected.
{{if pipeline}} T1 {{else}} T0 {{end}}
If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
+ otherwise, T1 is executed. Dot is unaffected.
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
To simplify the appearance of if-else chains, the else action
@@ -241,19 +242,19 @@ where $variable is the name of the variable. An action that declares a
variable produces no output.
If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
+successive elements of the iteration. Also, a "range" may declare two
variables, separated by a comma:
range $index, $element := pipeline
in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
+array/slice index or map key and element, respectively. Note that if there is
only one variable, it is assigned the element; this is opposite to the
convention in Go range clauses.
A variable's scope extends to the "end" action of the control structure ("if",
"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
+there is no such control structure. A template invocation does not inherit
variables from the point of its invocation.
When execution begins, $ is set to the data argument passed to Execute, that is,
@@ -314,7 +315,8 @@ Predefined global functions are named as follows.
or the returned error value is non-nil, execution stops.
html
Returns the escaped HTML equivalent of the textual
- representation of its arguments.
+ representation of its arguments. This function is unavailable
+ in html/template, with a few exceptions.
index
Returns the result of indexing its first argument by the
following arguments. Thus "index x 1 2 3" is, in Go syntax,
@@ -340,6 +342,8 @@ Predefined global functions are named as follows.
urlquery
Returns the escaped value of the textual representation of
its arguments in a form suitable for embedding in a URL query.
+ This function is unavailable in html/template, with a few
+ exceptions.
The boolean functions take any zero value to be false and a non-zero
value to be true.
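
For contrast with the note above, a text/template sketch (illustrative only) where the predefined "html" function may appear anywhere in a pipeline; html/template would reject the same template.

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	// text/template permits "html" mid-pipeline; html/template reserves
    	// it (and "urlquery") for its own contextual escaping.
    	t := template.Must(template.New("t").Parse("{{. | html}}\n"))
    	t.Execute(os.Stdout, "<b>&</b>") // &lt;b&gt;&amp;&lt;/b&gt;
    }
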
diff --git a/libgo/go/text/template/exec.go b/libgo/go/text/template/exec.go
index 89d3e379b4b..29eb68fba75 100644
--- a/libgo/go/text/template/exec.go
+++ b/libgo/go/text/template/exec.go
@@ -155,7 +155,8 @@ func errRecover(errp *error) {
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
-// A template may be executed safely in parallel.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
var tmpl *Template
if t.common != nil {
@@ -172,7 +173,8 @@ func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{})
// If an error occurs executing the template or writing its output,
// execution stops, but partial results may already have been written to
// the output writer.
-// A template may be executed safely in parallel.
+// A template may be executed safely in parallel, although if parallel
+// executions share a Writer the output may be interleaved.
//
// If data is a reflect.Value, the template applies to the concrete
// value that the reflect.Value holds, as in fmt.Print.
@@ -553,7 +555,7 @@ func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node,
// Unless it's an interface, need to get to a value of type *T to guarantee
// we see all methods of T and *T.
ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
+ if ptr.Kind() != reflect.Interface && ptr.Kind() != reflect.Ptr && ptr.CanAddr() {
ptr = ptr.Addr()
}
if method := ptr.MethodByName(fieldName); method.IsValid() {
@@ -630,7 +632,7 @@ func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, a
if numIn < numFixed {
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
}
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
+ } else if numIn != typ.NumIn() {
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
}
if !goodFunc(typ) {
diff --git a/libgo/go/text/template/exec_test.go b/libgo/go/text/template/exec_test.go
index 5892b27391b..9f7e637c190 100644
--- a/libgo/go/text/template/exec_test.go
+++ b/libgo/go/text/template/exec_test.go
@@ -147,6 +147,8 @@ var tVal = &T{
Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X
}
+var tSliceOfNil = []*T{nil}
+
// A non-empty interface.
type I interface {
Method0() string
@@ -337,6 +339,7 @@ var execTests = []execTest{
"true", tVal, true},
{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
+ {"method on nil value from slice", "-{{range .}}{{.Method1 1234}}{{end}}-", "-1234-", tSliceOfNil, true},
// Function call builtin.
{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
diff --git a/libgo/go/text/template/funcs.go b/libgo/go/text/template/funcs.go
index 3047b272e57..91074310374 100644
--- a/libgo/go/text/template/funcs.go
+++ b/libgo/go/text/template/funcs.go
@@ -489,6 +489,7 @@ var (
htmlAmp = []byte("&")
htmlLt = []byte("<")
htmlGt = []byte(">")
+ htmlNull = []byte("\uFFFD")
)
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
@@ -497,6 +498,8 @@ func HTMLEscape(w io.Writer, b []byte) {
for i, c := range b {
var html []byte
switch c {
+ case '\000':
+ html = htmlNull
case '"':
html = htmlQuot
case '\'':
@@ -520,7 +523,7 @@ func HTMLEscape(w io.Writer, b []byte) {
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
func HTMLEscapeString(s string) string {
// Avoid allocation if we can.
- if !strings.ContainsAny(s, `'"&<>`) {
+ if !strings.ContainsAny(s, "'\"&<>\000") {
return s
}
var b bytes.Buffer
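
A one-liner showing the effect of this change (illustrative): NUL bytes no longer pass through HTMLEscapeString unmodified.

    package main

    import (
    	"fmt"
    	"text/template"
    )

    func main() {
    	// The NUL byte is replaced with U+FFFD, the Unicode replacement
    	// character, rather than being copied to the output.
    	fmt.Printf("%q\n", template.HTMLEscapeString("a\x00b"))
    }
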
diff --git a/libgo/go/text/template/parse/lex_test.go b/libgo/go/text/template/parse/lex_test.go
index d655d788b3b..2c73bb623ae 100644
--- a/libgo/go/text/template/parse/lex_test.go
+++ b/libgo/go/text/template/parse/lex_test.go
@@ -498,7 +498,7 @@ func TestShutdown(t *testing.T) {
// We need to duplicate template.Parse here to hold on to the lexer.
const text = "erroneous{{define}}{{else}}1234"
lexer := lex("foo", text, "{{", "}}")
- _, err := New("root").parseLexer(lexer, text)
+ _, err := New("root").parseLexer(lexer)
if err == nil {
t.Fatalf("expected error")
}
@@ -511,7 +511,7 @@ func TestShutdown(t *testing.T) {
// parseLexer is a local version of parse that lets us pass in the lexer instead of building it.
// We expect an error, so the tree set and funcs list are explicitly nil.
-func (t *Tree) parseLexer(lex *lexer, text string) (tree *Tree, err error) {
+func (t *Tree) parseLexer(lex *lexer) (tree *Tree, err error) {
defer t.recover(&err)
t.ParseName = t.Name
t.startParse(nil, lex, map[string]*Tree{})
diff --git a/libgo/go/text/template/parse/parse.go b/libgo/go/text/template/parse/parse.go
index 6060c6d74b7..a91a544ce01 100644
--- a/libgo/go/text/template/parse/parse.go
+++ b/libgo/go/text/template/parse/parse.go
@@ -202,7 +202,6 @@ func (t *Tree) recover(errp *error) {
}
*errp = e.(error)
}
- return
}
// startParse initializes the parser, using the lexer.
diff --git a/libgo/go/text/template/template.go b/libgo/go/text/template/template.go
index 3b4f34b4db0..2246f676e65 100644
--- a/libgo/go/text/template/template.go
+++ b/libgo/go/text/template/template.go
@@ -159,6 +159,7 @@ func (t *Template) Delims(left, right string) *Template {
}
// Funcs adds the elements of the argument map to the template's function map.
+// It must be called before the template is parsed.
// It panics if a value in the map is not a function with appropriate return
// type or if the name cannot be used syntactically as a function in a template.
// It is legal to overwrite elements of the map. The return value is the template,
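
A sketch of the ordering the new sentence requires (the function name "shout" is made up): Funcs must run before Parse so the parser can resolve the identifier.

    package main

    import (
    	"os"
    	"strings"
    	"text/template"
    )

    func main() {
    	t := template.Must(template.New("t").
    		Funcs(template.FuncMap{"shout": strings.ToUpper}).
    		Parse(`{{shout "hi"}}` + "\n"))
    	t.Execute(os.Stdout, nil) // prints HI
    }
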
diff --git a/libgo/go/time/example_test.go b/libgo/go/time/example_test.go
index 7dc2bb5e7ee..aeb63caa552 100644
--- a/libgo/go/time/example_test.go
+++ b/libgo/go/time/example_test.go
@@ -256,6 +256,9 @@ func ExampleTime_Truncate() {
for _, d := range trunc {
fmt.Printf("t.Truncate(%5s) = %s\n", d, t.Truncate(d).Format("15:04:05.999999999"))
}
+ // To round to the last midnight in the local timezone, create a new Date.
+ midnight := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.Local)
+ _ = midnight
// Output:
// t.Truncate( 1ns) = 12:15:30.918273645
diff --git a/libgo/go/time/export_test.go b/libgo/go/time/export_test.go
index 6cd535f6b19..4c08ab13afc 100644
--- a/libgo/go/time/export_test.go
+++ b/libgo/go/time/export_test.go
@@ -18,7 +18,20 @@ func ForceUSPacificForTesting() {
localOnce.Do(initTestingZone)
}
+func ZoneinfoForTesting() *string {
+ return zoneinfo
+}
+
+func ResetZoneinfoForTesting() {
+ zoneinfo = nil
+ zoneinfoOnce = sync.Once{}
+}
+
var (
ForceZipFileForTesting = forceZipFileForTesting
ParseTimeZone = parseTimeZone
+ SetMono = (*Time).setMono
+ GetMono = (*Time).mono
+ ErrLocation = errLocation
+ ReadFile = readFile
)
diff --git a/libgo/go/time/format.go b/libgo/go/time/format.go
index b903e1485c6..8c16e873f64 100644
--- a/libgo/go/time/format.go
+++ b/libgo/go/time/format.go
@@ -61,6 +61,8 @@ import "errors"
// RFC822, RFC822Z, RFC1123, and RFC1123Z are useful for formatting;
// when used with time.Parse they do not accept all the time formats
// permitted by the RFCs.
+// The RFC3339Nano format removes trailing zeros from the seconds field
+// and thus may not sort correctly once formatted.
const (
ANSIC = "Mon Jan _2 15:04:05 2006"
UnixDate = "Mon Jan _2 15:04:05 MST 2006"
@@ -424,8 +426,41 @@ func formatNano(b []byte, nanosec uint, n int, trim bool) []byte {
// String returns the time formatted using the format string
// "2006-01-02 15:04:05.999999999 -0700 MST"
+//
+// If the time has a monotonic clock reading, the returned string
+// includes a final field "m=±&lt;value&gt;", where value is the monotonic
+// clock reading formatted as a decimal number of seconds.
+//
+// The returned string is meant for debugging; for a stable serialized
+// representation, use t.MarshalText, t.MarshalBinary, or t.Format
+// with an explicit format string.
func (t Time) String() string {
- return t.Format("2006-01-02 15:04:05.999999999 -0700 MST")
+ s := t.Format("2006-01-02 15:04:05.999999999 -0700 MST")
+
+ // Format monotonic clock reading as m=±ddd.nnnnnnnnn.
+ if t.wall&hasMonotonic != 0 {
+ m2 := uint64(t.ext)
+ sign := byte('+')
+ if t.ext < 0 {
+ sign = '-'
+ m2 = -m2
+ }
+ m1, m2 := m2/1e9, m2%1e9
+ m0, m1 := m1/1e9, m1%1e9
+ var buf []byte
+ buf = append(buf, " m="...)
+ buf = append(buf, sign)
+ wid := 0
+ if m0 != 0 {
+ buf = appendInt(buf, int(m0), 0)
+ wid = 9
+ }
+ buf = appendInt(buf, int(m1), wid)
+ buf = append(buf, '.')
+ buf = appendInt(buf, int(m2), 9)
+ s += string(buf)
+ }
+ return s
}
// Format returns a textual representation of the time value formatted
@@ -725,11 +760,6 @@ func skip(value, prefix string) (string, error) {
// location and zone in the returned time. Otherwise it records the time as
// being in a fabricated location with time fixed at the given zone offset.
//
-// No checking is done that the day of the month is within the month's
-// valid dates; any one- or two-digit value is accepted. For example
-// February 31 and even February 99 are valid dates, specifying dates
-// in March and May. This behavior is consistent with time.Date.
-//
// When parsing a time with a zone abbreviation like MST, if the zone abbreviation
// has a defined offset in the current location, then that offset is used.
// The zone abbreviation "UTC" is recognized as UTC regardless of location.
@@ -1022,11 +1052,11 @@ func parse(layout, value string, defaultLocation, local *Location) (Time, error)
if zoneOffset != -1 {
t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
- t.sec -= int64(zoneOffset)
+ t.addSec(-int64(zoneOffset))
// Look for local zone with the given offset.
// If that zone was in effect at the given time, use it.
- name, offset, _, _, _ := local.lookup(t.sec + internalToUnix)
+ name, offset, _, _, _ := local.lookup(t.unixSec())
if offset == zoneOffset && (zoneName == "" || name == zoneName) {
t.setLoc(local)
return t, nil
@@ -1041,9 +1071,9 @@ func parse(layout, value string, defaultLocation, local *Location) (Time, error)
t := Date(year, Month(month), day, hour, min, sec, nsec, UTC)
// Look for local zone with the given offset.
// If that zone was in effect at the given time, use it.
- offset, _, ok := local.lookupName(zoneName, t.sec+internalToUnix)
+ offset, _, ok := local.lookupName(zoneName, t.unixSec())
if ok {
- t.sec -= int64(offset)
+ t.addSec(-int64(offset))
t.setLoc(local)
return t, nil
}
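
How the new String output looks in practice (a sketch; the exact m= value varies from run to run): Round(0) strips the monotonic reading and with it the m= field.

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	t := time.Now()
    	fmt.Println(t)          // ... -0700 MST m=+0.000012345
    	fmt.Println(t.Round(0)) // same instant, no m= field
    }
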
diff --git a/libgo/go/time/format_test.go b/libgo/go/time/format_test.go
index 0e4a4174309..abaeb506096 100644
--- a/libgo/go/time/format_test.go
+++ b/libgo/go/time/format_test.go
@@ -380,8 +380,8 @@ func checkTime(time Time, test *ParseTest, t *testing.T) {
func TestFormatAndParse(t *testing.T) {
const fmt = "Mon MST " + RFC3339 // all fields
f := func(sec int64) bool {
- t1 := Unix(sec, 0)
- if t1.Year() < 1000 || t1.Year() > 9999 {
+ t1 := Unix(sec/2, 0)
+ if t1.Year() < 1000 || t1.Year() > 9999 || t1.Unix() != sec {
// not required to work
return true
}
diff --git a/libgo/go/time/genzabbrs.go b/libgo/go/time/genzabbrs.go
index 6281f73ce4a..824a67f15af 100644
--- a/libgo/go/time/genzabbrs.go
+++ b/libgo/go/time/genzabbrs.go
@@ -142,8 +142,8 @@ const prog = `
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// generated by genzabbrs.go from
-// {{.URL}}
+// Code generated by genzabbrs.go; DO NOT EDIT.
+// Based on information from {{.URL}}
package time
diff --git a/libgo/go/time/mono_test.go b/libgo/go/time/mono_test.go
new file mode 100644
index 00000000000..8778ab78a03
--- /dev/null
+++ b/libgo/go/time/mono_test.go
@@ -0,0 +1,261 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package time_test
+
+import (
+ "strings"
+ "testing"
+ . "time"
+)
+
+func TestHasMonotonicClock(t *testing.T) {
+ yes := func(expr string, tt Time) {
+ if GetMono(&tt) == 0 {
+ t.Errorf("%s: missing monotonic clock reading", expr)
+ }
+ }
+ no := func(expr string, tt Time) {
+ if GetMono(&tt) != 0 {
+ t.Errorf("%s: unexpected monotonic clock reading", expr)
+ }
+ }
+
+ yes("<-After(1)", <-After(1))
+ ticker := NewTicker(1)
+ yes("<-Tick(1)", <-ticker.C)
+ ticker.Stop()
+ no("Date(2009, 11, 23, 0, 0, 0, 0, UTC)", Date(2009, 11, 23, 0, 0, 0, 0, UTC))
+ tp, _ := Parse(UnixDate, "Sat Mar 7 11:06:39 PST 2015")
+ no(`Parse(UnixDate, "Sat Mar 7 11:06:39 PST 2015")`, tp)
+ no("Unix(1486057371, 0)", Unix(1486057371, 0))
+
+ yes("Now()", Now())
+
+ tu := Unix(1486057371, 0)
+ tm := tu
+ SetMono(&tm, 123456)
+ no("tu", tu)
+ yes("tm", tm)
+
+ no("tu.Add(1)", tu.Add(1))
+ no("tu.In(UTC)", tu.In(UTC))
+ no("tu.AddDate(1, 1, 1)", tu.AddDate(1, 1, 1))
+ no("tu.AddDate(0, 0, 0)", tu.AddDate(0, 0, 0))
+ no("tu.Local()", tu.Local())
+ no("tu.UTC()", tu.UTC())
+ no("tu.Round(2)", tu.Round(2))
+ no("tu.Truncate(2)", tu.Truncate(2))
+
+ yes("tm.Add(1)", tm.Add(1))
+ no("tm.AddDate(1, 1, 1)", tm.AddDate(1, 1, 1))
+ no("tm.AddDate(0, 0, 0)", tm.AddDate(0, 0, 0))
+ no("tm.In(UTC)", tm.In(UTC))
+ no("tm.Local()", tm.Local())
+ no("tm.UTC()", tm.UTC())
+ no("tm.Round(2)", tm.Round(2))
+ no("tm.Truncate(2)", tm.Truncate(2))
+}
+
+func TestMonotonicAdd(t *testing.T) {
+ tm := Unix(1486057371, 123456)
+ SetMono(&tm, 123456789012345)
+
+ t2 := tm.Add(1e8)
+ if t2.Nanosecond() != 100123456 {
+ t.Errorf("t2.Nanosecond() = %d, want 100123456", t2.Nanosecond())
+ }
+ if GetMono(&t2) != 123456889012345 {
+ t.Errorf("t2.mono = %d, want 123456889012345", GetMono(&t2))
+ }
+
+ t3 := tm.Add(-9e18) // wall now out of range
+ if t3.Nanosecond() != 123456 {
+ t.Errorf("t3.Nanosecond() = %d, want 123456", t3.Nanosecond())
+ }
+ if GetMono(&t3) != 0 {
+ t.Errorf("t3.mono = %d, want 0 (wall time out of range for monotonic reading)", GetMono(&t3))
+ }
+
+ t4 := tm.Add(+9e18) // wall now out of range
+ if t4.Nanosecond() != 123456 {
+ t.Errorf("t4.Nanosecond() = %d, want 123456", t4.Nanosecond())
+ }
+ if GetMono(&t4) != 0 {
+ t.Errorf("t4.mono = %d, want 0 (wall time out of range for monotonic reading)", GetMono(&t4))
+ }
+
+ tn := Now()
+ tn1 := tn.Add(1 * Hour)
+ Sleep(100 * Millisecond)
+ d := Until(tn1)
+ if d < 59*Minute {
+ t.Errorf("Until(Now().Add(1*Hour)) = %v, wanted at least 59m", d)
+ }
+ now := Now()
+ if now.After(tn1) {
+ t.Errorf("Now().After(Now().Add(1*Hour)) = true, want false")
+ }
+ if !tn1.After(now) {
+ t.Errorf("Now().Add(1*Hour).After(now) = false, want true")
+ }
+ if tn1.Before(now) {
+ t.Errorf("Now().Add(1*Hour).Before(Now()) = true, want false")
+ }
+ if !now.Before(tn1) {
+ t.Errorf("Now().Before(Now().Add(1*Hour)) = false, want true")
+ }
+}
+
+func TestMonotonicSub(t *testing.T) {
+ t1 := Unix(1483228799, 995e6)
+ SetMono(&t1, 123456789012345)
+
+ t2 := Unix(1483228799, 5e6)
+ SetMono(&t2, 123456789012345+10e6)
+
+ t3 := Unix(1483228799, 995e6)
+ SetMono(&t3, 123456789012345+1e9)
+
+ t1w := t1.AddDate(0, 0, 0)
+ if GetMono(&t1w) != 0 {
+ t.Fatalf("AddDate didn't strip monotonic clock reading")
+ }
+ t2w := t2.AddDate(0, 0, 0)
+ if GetMono(&t2w) != 0 {
+ t.Fatalf("AddDate didn't strip monotonic clock reading")
+ }
+ t3w := t3.AddDate(0, 0, 0)
+ if GetMono(&t3w) != 0 {
+ t.Fatalf("AddDate didn't strip monotonic clock reading")
+ }
+
+ sub := func(txs, tys string, tx, txw, ty, tyw Time, d, dw Duration) {
+ check := func(expr string, d, want Duration) {
+ if d != want {
+ t.Errorf("%s = %v, want %v", expr, d, want)
+ }
+ }
+ check(txs+".Sub("+tys+")", tx.Sub(ty), d)
+ check(txs+"w.Sub("+tys+")", txw.Sub(ty), dw)
+ check(txs+".Sub("+tys+"w)", tx.Sub(tyw), dw)
+ check(txs+"w.Sub("+tys+"w)", txw.Sub(tyw), dw)
+ }
+ sub("t1", "t1", t1, t1w, t1, t1w, 0, 0)
+ sub("t1", "t2", t1, t1w, t2, t2w, -10*Millisecond, 990*Millisecond)
+ sub("t1", "t3", t1, t1w, t3, t3w, -1000*Millisecond, 0)
+
+ sub("t2", "t1", t2, t2w, t1, t1w, 10*Millisecond, -990*Millisecond)
+ sub("t2", "t2", t2, t2w, t2, t2w, 0, 0)
+ sub("t2", "t3", t2, t2w, t3, t3w, -990*Millisecond, -990*Millisecond)
+
+ sub("t3", "t1", t3, t3w, t1, t1w, 1000*Millisecond, 0)
+ sub("t3", "t2", t3, t3w, t2, t2w, 990*Millisecond, 990*Millisecond)
+ sub("t3", "t3", t3, t3w, t3, t3w, 0, 0)
+
+ cmp := func(txs, tys string, tx, txw, ty, tyw Time, c, cw int) {
+ check := func(expr string, b, want bool) {
+ if b != want {
+ t.Errorf("%s = %v, want %v", expr, b, want)
+ }
+ }
+ check(txs+".After("+tys+")", tx.After(ty), c > 0)
+ check(txs+"w.After("+tys+")", txw.After(ty), cw > 0)
+ check(txs+".After("+tys+"w)", tx.After(tyw), cw > 0)
+ check(txs+"w.After("+tys+"w)", txw.After(tyw), cw > 0)
+
+ check(txs+".Before("+tys+")", tx.Before(ty), c < 0)
+ check(txs+"w.Before("+tys+")", txw.Before(ty), cw < 0)
+ check(txs+".Before("+tys+"w)", tx.Before(tyw), cw < 0)
+ check(txs+"w.Before("+tys+"w)", txw.Before(tyw), cw < 0)
+
+ check(txs+".Equal("+tys+")", tx.Equal(ty), c == 0)
+ check(txs+"w.Equal("+tys+")", txw.Equal(ty), cw == 0)
+ check(txs+".Equal("+tys+"w)", tx.Equal(tyw), cw == 0)
+ check(txs+"w.Equal("+tys+"w)", txw.Equal(tyw), cw == 0)
+ }
+
+ cmp("t1", "t1", t1, t1w, t1, t1w, 0, 0)
+ cmp("t1", "t2", t1, t1w, t2, t2w, -1, +1)
+ cmp("t1", "t3", t1, t1w, t3, t3w, -1, 0)
+
+ cmp("t2", "t1", t2, t2w, t1, t1w, +1, -1)
+ cmp("t2", "t2", t2, t2w, t2, t2w, 0, 0)
+ cmp("t2", "t3", t2, t2w, t3, t3w, -1, -1)
+
+ cmp("t3", "t1", t3, t3w, t1, t1w, +1, 0)
+ cmp("t3", "t2", t3, t3w, t2, t2w, +1, +1)
+ cmp("t3", "t3", t3, t3w, t3, t3w, 0, 0)
+}
+
+func TestMonotonicOverflow(t *testing.T) {
+ t1 := Now().Add(-30 * Second)
+ d := Until(t1)
+ if d < -35*Second || -30*Second < d {
+ t.Errorf("Until(Now().Add(-30s)) = %v, want roughly -30s (-35s to -30s)", d)
+ }
+
+ t1 = Now().Add(30 * Second)
+ d = Until(t1)
+ if d < 25*Second || 30*Second < d {
+ t.Errorf("Until(Now().Add(-30s)) = %v, want roughly 30s (25s to 30s)", d)
+ }
+
+ t0 := Now()
+ t1 = t0.Add(Duration(1<<63 - 1))
+ if GetMono(&t1) != 0 {
+ t.Errorf("Now().Add(maxDuration) has monotonic clock reading (%v => %v %d %d)", t0.String(), t1.String(), t0.Unix(), t1.Unix())
+ }
+ t2 := t1.Add(-Duration(1<<63 - 1))
+ d = Since(t2)
+ if d < -10*Second || 10*Second < d {
+ t.Errorf("Since(Now().Add(max).Add(-max)) = %v, want [-10s, 10s]", d)
+ }
+
+ t0 = Now()
+ t1 = t0.Add(1 * Hour)
+ Sleep(100 * Millisecond)
+ t2 = Now().Add(-5 * Second)
+ if !t1.After(t2) {
+ t.Errorf("Now().Add(1*Hour).After(Now().Add(-5*Second)) = false, want true\nt1=%v\nt2=%v", t1, t2)
+ }
+ if t2.After(t1) {
+ t.Errorf("Now().Add(-5*Second).After(Now().Add(1*Hour)) = true, want false\nt1=%v\nt2=%v", t1, t2)
+ }
+ if t1.Before(t2) {
+ t.Errorf("Now().Add(1*Hour).Before(Now().Add(-5*Second)) = true, want false\nt1=%v\nt2=%v", t1, t2)
+ }
+ if !t2.Before(t1) {
+ t.Errorf("Now().Add(-5*Second).Before(Now().Add(1*Hour)) = false, want true\nt1=%v\nt2=%v", t1, t2)
+ }
+}
+
+var monotonicStringTests = []struct {
+ mono int64
+ want string
+}{
+ {0, "m=+0.000000000"},
+ {123456789, "m=+0.123456789"},
+ {-123456789, "m=-0.123456789"},
+ {123456789000, "m=+123.456789000"},
+ {-123456789000, "m=-123.456789000"},
+ {9e18, "m=+9000000000.000000000"},
+ {-9e18, "m=-9000000000.000000000"},
+ {-1 << 63, "m=-9223372036.854775808"},
+}
+
+func TestMonotonicString(t *testing.T) {
+ t1 := Now()
+ t.Logf("Now() = %v", t1)
+
+ for _, tt := range monotonicStringTests {
+ t1 := Now()
+ SetMono(&t1, tt.mono)
+ s := t1.String()
+ got := s[strings.LastIndex(s, " ")+1:]
+ if got != tt.want {
+ t.Errorf("with mono=%d: got %q; want %q", tt.mono, got, tt.want)
+ }
+ }
+}
diff --git a/libgo/go/time/sleep_test.go b/libgo/go/time/sleep_test.go
index c286bd00273..546b28a28ac 100644
--- a/libgo/go/time/sleep_test.go
+++ b/libgo/go/time/sleep_test.go
@@ -227,7 +227,7 @@ func TestAfterQueuing(t *testing.T) {
err := errors.New("!=nil")
for i := 0; i < attempts && err != nil; i++ {
delta := Duration(20+i*50) * Millisecond
- if err = testAfterQueuing(t, delta); err != nil {
+ if err = testAfterQueuing(delta); err != nil {
t.Logf("attempt %v failed: %v", i, err)
}
}
@@ -248,7 +248,7 @@ func await(slot int, result chan<- afterResult, ac <-chan Time) {
result <- afterResult{slot, <-ac}
}
-func testAfterQueuing(t *testing.T, delta Duration) error {
+func testAfterQueuing(delta Duration) error {
// make the result channel buffered because we don't want
// to depend on channel queueing semantics that might
// possibly change in the future.
diff --git a/libgo/go/time/sys_plan9.go b/libgo/go/time/sys_plan9.go
index 11365a791f7..9086a6e835f 100644
--- a/libgo/go/time/sys_plan9.go
+++ b/libgo/go/time/sys_plan9.go
@@ -19,6 +19,7 @@ func interrupt() {
// readFile reads and returns the content of the named file.
// It is a trivial implementation of ioutil.ReadFile, reimplemented
// here to avoid depending on io/ioutil or os.
+// It returns an error if the file named by name exceeds maxFileSize bytes.
func readFile(name string) ([]byte, error) {
f, err := syscall.Open(name, syscall.O_RDONLY)
if err != nil {
@@ -38,6 +39,9 @@ func readFile(name string) ([]byte, error) {
if n == 0 || err != nil {
break
}
+ if len(ret) > maxFileSize {
+ return nil, fileSizeError(name)
+ }
}
return ret, err
}
diff --git a/libgo/go/time/sys_unix.go b/libgo/go/time/sys_unix.go
index 4c68bbdc6d1..57152751800 100644
--- a/libgo/go/time/sys_unix.go
+++ b/libgo/go/time/sys_unix.go
@@ -19,6 +19,7 @@ func interrupt() {
// readFile reads and returns the content of the named file.
// It is a trivial implementation of ioutil.ReadFile, reimplemented
// here to avoid depending on io/ioutil or os.
+// It returns an error if the file named by name exceeds maxFileSize bytes.
func readFile(name string) ([]byte, error) {
f, err := syscall.Open(name, syscall.O_RDONLY, 0)
if err != nil {
@@ -38,6 +39,9 @@ func readFile(name string) ([]byte, error) {
if n == 0 || err != nil {
break
}
+ if len(ret) > maxFileSize {
+ return nil, fileSizeError(name)
+ }
}
return ret, err
}
diff --git a/libgo/go/time/sys_windows.go b/libgo/go/time/sys_windows.go
index a4a068f7849..9e381653937 100644
--- a/libgo/go/time/sys_windows.go
+++ b/libgo/go/time/sys_windows.go
@@ -16,6 +16,7 @@ func interrupt() {
// readFile reads and returns the content of the named file.
// It is a trivial implementation of ioutil.ReadFile, reimplemented
// here to avoid depending on io/ioutil or os.
+// It returns an error if the file named by name exceeds maxFileSize bytes.
func readFile(name string) ([]byte, error) {
f, err := syscall.Open(name, syscall.O_RDONLY, 0)
if err != nil {
@@ -35,6 +36,9 @@ func readFile(name string) ([]byte, error) {
if n == 0 || err != nil {
break
}
+ if len(ret) > maxFileSize {
+ return nil, fileSizeError(name)
+ }
}
return ret, err
}
diff --git a/libgo/go/time/time.go b/libgo/go/time/time.go
index 10b32461e1c..8a29eef2631 100644
--- a/libgo/go/time/time.go
+++ b/libgo/go/time/time.go
@@ -6,6 +6,69 @@
//
// The calendrical calculations always assume a Gregorian calendar, with
// no leap seconds.
+//
+// Monotonic Clocks
+//
+// Operating systems provide both a “wall clock,” which is subject to
+// changes for clock synchronization, and a “monotonic clock,” which is
+// not. The general rule is that the wall clock is for telling time and
+// the monotonic clock is for measuring time. Rather than split the API,
+// in this package the Time returned by time.Now contains both a wall
+// clock reading and a monotonic clock reading; later time-telling
+// operations use the wall clock reading, but later time-measuring
+// operations, specifically comparisons and subtractions, use the
+// monotonic clock reading.
+//
+// For example, this code always computes a positive elapsed time of
+// approximately 20 milliseconds, even if the wall clock is changed during
+// the operation being timed:
+//
+// start := time.Now()
+// ... operation that takes 20 milliseconds ...
+// t := time.Now()
+// elapsed := t.Sub(start)
+//
+// Other idioms, such as time.Since(start), time.Until(deadline), and
+// time.Now().Before(deadline), are similarly robust against wall clock
+// resets.
+//
+// The rest of this section gives the precise details of how operations
+// use monotonic clocks, but understanding those details is not required
+// to use this package.
+//
+// The Time returned by time.Now contains a monotonic clock reading.
+// If Time t has a monotonic clock reading, t.Add adds the same duration to
+// both the wall clock and monotonic clock readings to compute the result.
+// Because t.AddDate(y, m, d), t.Round(d), and t.Truncate(d) are wall time
+// computations, they always strip any monotonic clock reading from their results.
+// Because t.In, t.Local, and t.UTC are used for their effect on the interpretation
+// of the wall time, they also strip any monotonic clock reading from their results.
+// The canonical way to strip a monotonic clock reading is to use t = t.Round(0).
+//
+// If Times t and u both contain monotonic clock readings, the operations
+// t.After(u), t.Before(u), t.Equal(u), and t.Sub(u) are carried out
+// using the monotonic clock readings alone, ignoring the wall clock
+// readings. If either t or u contains no monotonic clock reading, these
+// operations fall back to using the wall clock readings.
+//
+// Because the monotonic clock reading has no meaning outside
+// the current process, the serialized forms generated by t.GobEncode,
+// t.MarshalBinary, t.MarshalJSON, and t.MarshalText omit the monotonic
+// clock reading, and t.Format provides no format for it. Similarly, the
+// constructors time.Date, time.Parse, time.ParseInLocation, and time.Unix,
+// as well as the unmarshalers t.GobDecode, t.UnmarshalBinary,
+// t.UnmarshalJSON, and t.UnmarshalText always create times with
+// no monotonic clock reading.
+//
+// Note that the Go == operator compares not just the time instant but
+// also the Location and the monotonic clock reading. See the
+// documentation for the Time type for a discussion of equality
+// testing for Time values.
+//
+// For debugging, the result of t.String does include the monotonic
+// clock reading if present. If t != u because of different monotonic clock readings,
+// that difference will be visible when printing t.String() and u.String().
+//
package time
import "errors"
@@ -14,8 +77,11 @@ import "errors"
//
// Programs using times should typically store and pass them as values,
// not pointers. That is, time variables and struct fields should be of
-// type time.Time, not *time.Time. A Time value can be used by
-// multiple goroutines simultaneously.
+// type time.Time, not *time.Time.
+//
+// A Time value can be used by multiple goroutines simultaneously except
+// that the methods GobDecode, UnmarshalBinary, UnmarshalJSON and
+// UnmarshalText are not concurrency-safe.
//
// Time instants can be compared using the Before, After, and Equal methods.
// The Sub method subtracts two instants, producing a Duration.
@@ -33,19 +99,34 @@ import "errors"
// computations described in earlier paragraphs.
//
// Note that the Go == operator compares not just the time instant but also the
-// Location. Therefore, Time values should not be used as map or database keys
-// without first guaranteeing that the identical Location has been set for all
-// values, which can be achieved through use of the UTC or Local method.
+// Location and the monotonic clock reading. Therefore, Time values should not
+// be used as map or database keys without first guaranteeing that the
+// identical Location has been set for all values, which can be achieved
+// through use of the UTC or Local method, and that the monotonic clock reading
+// has been stripped by setting t = t.Round(0). In general, prefer t.Equal(u)
+// to t == u, since t.Equal uses the most accurate comparison available and
+// correctly handles the case when only one of its arguments has a monotonic
+// clock reading.
+//
+// In addition to the required “wall clock” reading, a Time may contain an optional
+// reading of the current process's monotonic clock, to provide additional precision
+// for comparison or subtraction.
+// See the “Monotonic Clocks” section in the package documentation for details.
//
type Time struct {
- // sec gives the number of seconds elapsed since
- // January 1, year 1 00:00:00 UTC.
- sec int64
-
- // nsec specifies a non-negative nanosecond
- // offset within the second named by Seconds.
- // It must be in the range [0, 999999999].
- nsec int32
+ // wall and ext encode the wall time seconds, wall time nanoseconds,
+ // and optional monotonic clock reading in nanoseconds.
+ //
+ // From high to low bit position, wall encodes a 1-bit flag (hasMonotonic),
+ // a 33-bit seconds field, and a 30-bit wall time nanoseconds field.
+ // The nanoseconds field is in the range [0, 999999999].
+ // If the hasMonotonic bit is 0, then the 33-bit field must be zero
+ // and the full signed 64-bit wall seconds since Jan 1 year 1 is stored in ext.
+ // If the hasMonotonic bit is 1, then the 33-bit field holds a 33-bit
+ // unsigned wall seconds since Jan 1 year 1885, and ext holds a
+ // signed 64-bit monotonic clock reading, nanoseconds since process start.
+ wall uint64
+ ext int64
// loc specifies the Location that should be used to
// determine the minute, hour, month, day, and year
@@ -55,29 +136,124 @@ type Time struct {
loc *Location
}
+const (
+ hasMonotonic = 1 << 63
+ maxWall = wallToInternal + (1<<33 - 1) // year 2157
+ minWall = wallToInternal // year 1885
+ nsecMask = 1<<30 - 1
+ nsecShift = 30
+)
+
+// These helpers for manipulating the wall and monotonic clock readings
+// take pointer receivers, even when they don't modify the time,
+// to make them cheaper to call.
+
+// nsec returns the time's nanoseconds.
+func (t *Time) nsec() int32 {
+ return int32(t.wall & nsecMask)
+}
+
+// sec returns the time's seconds since Jan 1 year 1.
+func (t *Time) sec() int64 {
+ if t.wall&hasMonotonic != 0 {
+ return wallToInternal + int64(t.wall<<1>>(nsecShift+1))
+ }
+ return int64(t.ext)
+}
+
+// unixSec returns the time's seconds since Jan 1 1970 (Unix time).
+func (t *Time) unixSec() int64 { return t.sec() + internalToUnix }
+
+// addSec adds d seconds to the time.
+func (t *Time) addSec(d int64) {
+ if t.wall&hasMonotonic != 0 {
+ sec := int64(t.wall << 1 >> (nsecShift + 1))
+ dsec := sec + d
+		if 0 <= dsec && dsec <= 1<<33-1 {
+			t.wall = t.wall&nsecMask | uint64(dsec)<<nsecShift | hasMonotonic
+			return
+		}
+		// Wall second now out of range for packed field.
+		// Move to ext.
+		t.stripMono()
+	}
+
+	// TODO: Check for overflow.
+	t.ext += d
+}
+
+// setLoc sets the location associated with the time.
+func (t *Time) setLoc(loc *Location) {
+	if loc == &utcLoc {
+		loc = nil
+	}
+	t.stripMono()
+	t.loc = loc
+}
+
+// stripMono strips the monotonic clock reading in t.
+func (t *Time) stripMono() {
+	if t.wall&hasMonotonic != 0 {
+		t.ext = t.sec()
+		t.wall &= nsecMask
+	}
+}
+
+// setMono sets the monotonic clock reading in t.
+// If t cannot hold a monotonic clock reading,
+// because its wall time is too large,
+// setMono is a no-op.
+func (t *Time) setMono(m int64) {
+	if t.wall&hasMonotonic == 0 {
+		sec := int64(t.ext)
+		if sec < minWall || maxWall < sec {
+			return
+		}
+		t.wall |= hasMonotonic | uint64(sec-minWall)<<nsecShift
+	}
+	t.ext = m
+}
+
+// mono returns t's monotonic clock reading.
+// It returns 0 for a missing reading.
+// This function is used only for testing,
+// so it's OK that technically 0 is a valid
+// monotonic clock reading as well.
+func (t *Time) mono() int64 {
+	if t.wall&hasMonotonic == 0 {
+		return 0
+	}
+	return t.ext
+}
// After reports whether the time instant t is after u.
func (t Time) After(u Time) bool {
-	return t.sec > u.sec || t.sec == u.sec && t.nsec > u.nsec
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext > u.ext
+ }
+ ts := t.sec()
+ us := u.sec()
+ return ts > us || ts == us && t.nsec() > u.nsec()
}
// Before reports whether the time instant t is before u.
func (t Time) Before(u Time) bool {
- return t.sec < u.sec || t.sec == u.sec && t.nsec < u.nsec
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext < u.ext
+ }
+ return t.sec() < u.sec() || t.sec() == u.sec() && t.nsec() < u.nsec()
}
// Equal reports whether t and u represent the same time instant.
// Two times can be equal even if they are in different locations.
// For example, 6:00 +0200 CEST and 4:00 UTC are Equal.
-// Do not use == with Time values.
+// See the documentation on the Time type for the pitfalls of using == with
+// Time values; most code should use Equal instead.
func (t Time) Equal(u Time) bool {
- return t.sec == u.sec && t.nsec == u.nsec
+ if t.wall&u.wall&hasMonotonic != 0 {
+ return t.ext == u.ext
+ }
+ return t.sec() == u.sec() && t.nsec() == u.nsec()
}
// A Month specifies a month of the year (January = 1, ...).
@@ -162,7 +338,7 @@ func (d Weekday) String() string { return days[d] }
// The zero Time value does not force a specific epoch for the time
// representation. For example, to use the Unix epoch internally, we
// could define that to distinguish a zero value from Jan 1 1970, that
-// time would be represented by sec=-1, nsec=1e9. However, it does
+// time would be represented by sec=-1, nsec=1e9. However, it does
// suggest a representation, namely using 1-1-1 00:00:00 UTC as the
// epoch, and that's what we do.
//
@@ -194,7 +370,7 @@ func (d Weekday) String() string { return days[d] }
// everywhere.
//
// The calendar runs on an exact 400 year cycle: a 400-year calendar
-// printed for 1970-2469 will apply as well to 2370-2769. Even the days
+// printed for 1970-2369 will apply as well to 2370-2769. Even the days
// of the week match up. It simplifies the computations to choose the
// cycle boundaries so that the exceptional years are always delayed as
// long as possible. That means choosing a year equal to 1 mod 400, so
@@ -208,7 +384,7 @@ func (d Weekday) String() string { return days[d] }
//
// These three considerations—choose an epoch as early as possible, that
// uses a year equal to 1 mod 400, and that is no more than 2⁶³ seconds
-// earlier than 1970—bring us to the year -292277022399. We refer to
+// earlier than 1970—bring us to the year -292277022399. We refer to
// this year as the absolute zero year, and to times measured as a uint64
// seconds since this year as absolute times.
//
@@ -219,9 +395,9 @@ func (d Weekday) String() string { return days[d] }
// times.
//
// It is tempting to just use the year 1 as the absolute epoch, defining
-// that the routines are only valid for years >= 1. However, the
+// that the routines are only valid for years >= 1. However, the
// routines would then be invalid when displaying the epoch in time zones
-// west of UTC, since it is year 0. It doesn't seem tenable to say that
+// west of UTC, since it is year 0. It doesn't seem tenable to say that
// printing the zero time correctly isn't supported in half the time
// zones. By comparison, it's reasonable to mishandle some times in
// the year -292277022399.
@@ -245,12 +421,15 @@ const (
unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * secondsPerDay
internalToUnix int64 = -unixToInternal
+
+ wallToInternal int64 = (1884*365 + 1884/4 - 1884/100 + 1884/400) * secondsPerDay
+ internalToWall int64 = -wallToInternal
)
// IsZero reports whether t represents the zero time instant,
// January 1, year 1, 00:00:00 UTC.
func (t Time) IsZero() bool {
- return t.sec == 0 && t.nsec == 0
+ return t.sec() == 0 && t.nsec() == 0
}
// abs returns the time t as an absolute time, adjusted by the zone offset.
@@ -261,7 +440,7 @@ func (t Time) abs() uint64 {
if l == nil || l == &localLoc {
l = l.get()
}
- sec := t.sec + internalToUnix
+ sec := t.unixSec()
if l != &utcLoc {
if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
sec += int64(l.cacheZone.offset)
@@ -281,7 +460,7 @@ func (t Time) locabs() (name string, offset int, abs uint64) {
l = l.get()
}
// Avoid function call if we hit the local time cache.
- sec := t.sec + internalToUnix
+ sec := t.unixSec()
if l != &utcLoc {
if l.cacheZone != nil && l.cacheStart <= sec && sec < l.cacheEnd {
name = l.cacheZone.name
@@ -425,7 +604,7 @@ func (t Time) Second() int {
// Nanosecond returns the nanosecond offset within the second specified by t,
// in the range [0, 999999999].
func (t Time) Nanosecond() int {
- return int(t.nsec)
+ return int(t.nsec())
}
// YearDay returns the day of the year specified by t, in the range [1,365] for non-leap years,
@@ -543,8 +722,8 @@ func (d Duration) String() string {
}
// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the
-// tail of buf, omitting trailing zeros. it omits the decimal
-// point too when the fraction is 0. It returns the index where the
+// tail of buf, omitting trailing zeros. It omits the decimal
+// point too when the fraction is 0. It returns the index where the
// output bytes begin and the value v/10**prec.
func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) {
// Omit trailing zeros up to and including decimal point.
@@ -616,18 +795,73 @@ func (d Duration) Hours() float64 {
return float64(hour) + float64(nsec)/(60*60*1e9)
}
+// Truncate returns the result of rounding d toward zero to a multiple of m.
+// If m <= 0, Truncate returns d unchanged.
+func (d Duration) Truncate(m Duration) Duration {
+ if m <= 0 {
+ return d
+ }
+ return d - d%m
+}
+
+// lessThanHalf reports whether x+x < y but avoids overflow,
+// assuming x and y are both positive (Duration is signed).
+func lessThanHalf(x, y Duration) bool {
+ return uint64(x)+uint64(x) < uint64(y)
+}
+
+// Round returns the result of rounding d to the nearest multiple of m.
+// The rounding behavior for halfway values is to round away from zero.
+// If the result exceeds the maximum (or minimum)
+// value that can be stored in a Duration,
+// Round returns the maximum (or minimum) duration.
+// If m <= 0, Round returns d unchanged.
+func (d Duration) Round(m Duration) Duration {
+ if m <= 0 {
+ return d
+ }
+ r := d % m
+ if d < 0 {
+ r = -r
+ if lessThanHalf(r, m) {
+ return d + r
+ }
+ if d1 := d - m + r; d1 < d {
+ return d1
+ }
+ return minDuration // overflow
+ }
+ if lessThanHalf(r, m) {
+ return d - r
+ }
+ if d1 := d + m - r; d1 > d {
+ return d1
+ }
+ return maxDuration // overflow
+}
+
// Add returns the time t+d.
func (t Time) Add(d Duration) Time {
- t.sec += int64(d / 1e9)
- nsec := t.nsec + int32(d%1e9)
+ dsec := int64(d / 1e9)
+ nsec := t.nsec() + int32(d%1e9)
if nsec >= 1e9 {
- t.sec++
+ dsec++
nsec -= 1e9
} else if nsec < 0 {
- t.sec--
+ dsec--
nsec += 1e9
}
- t.nsec = nsec
+ t.wall = t.wall&^nsecMask | uint64(nsec) // update nsec
+ t.addSec(dsec)
+ if t.wall&hasMonotonic != 0 {
+ te := t.ext + int64(d)
+ if d < 0 && te > int64(t.ext) || d > 0 && te < int64(t.ext) {
+ // Monotonic clock reading now out of range; degrade to wall-only.
+ t.stripMono()
+ } else {
+ t.ext = te
+ }
+ }
return t
}
@@ -636,7 +870,19 @@ func (t Time) Add(d Duration) Time {
// will be returned.
// To compute t-d for a duration d, use t.Add(-d).
func (t Time) Sub(u Time) Duration {
- d := Duration(t.sec-u.sec)*Second + Duration(t.nsec-u.nsec)
+ if t.wall&u.wall&hasMonotonic != 0 {
+ te := int64(t.ext)
+ ue := int64(u.ext)
+ d := Duration(te - ue)
+ if d < 0 && te > ue {
+ return maxDuration // t - u is positive out of range
+ }
+ if d > 0 && te < ue {
+ return minDuration // t - u is negative out of range
+ }
+ return d
+ }
+ d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
// Check for overflow or underflow.
switch {
case u.Add(d).Equal(t):
@@ -671,7 +917,7 @@ func Until(t Time) Duration {
func (t Time) AddDate(years int, months int, days int) Time {
year, month, day := t.Date()
hour, min, sec := t.Clock()
- return Date(year+years, month+Month(months), day+days, hour, min, sec, int(t.nsec), t.Location())
+ return Date(year+years, month+Month(months), day+days, hour, min, sec, int(t.nsec()), t.Location())
}
const (
@@ -718,7 +964,7 @@ func absDate(abs uint64, full bool) (year int, month Month, day int, yday int) {
// Cut off years within a 4-year cycle.
// The last year is a leap year, so on the last day of that year,
- // day / 365 will be 4 instead of 3. Cut it back down to 3
+ // day / 365 will be 4 instead of 3. Cut it back down to 3
// by subtracting n>>2.
n = d / 365
n -= n >> 2
@@ -791,12 +1037,20 @@ func daysIn(m Month, year int) int {
}
// Provided by package runtime.
-func now() (sec int64, nsec int32)
+func now() (sec int64, nsec int32, mono int64)
// Now returns the current local time.
func Now() Time {
- sec, nsec := now()
- return Time{sec + unixToInternal, nsec, Local}
+ sec, nsec, mono := now()
+ sec += unixToInternal - minWall
+ if uint64(sec)>>33 != 0 {
+ return Time{uint64(nsec), sec + minWall, Local}
+ }
+	return Time{hasMonotonic | uint64(sec)<<nsecShift | uint64(nsec), mono, Local}
+}
+
+func unixTime(sec int64, nsec int32) Time {
+	return Time{uint64(nsec), sec + unixToInternal, Local}
+}
+	sec := t.sec()
+	nsec := t.nsec()
enc := []byte{
	timeBinaryVersion, // byte 0 : version
-	byte(t.sec >> 56), // bytes 1-8: seconds
- byte(t.sec >> 48),
- byte(t.sec >> 40),
- byte(t.sec >> 32),
- byte(t.sec >> 24),
- byte(t.sec >> 16),
- byte(t.sec >> 8),
- byte(t.sec),
- byte(t.nsec >> 24), // bytes 9-12: nanoseconds
- byte(t.nsec >> 16),
- byte(t.nsec >> 8),
- byte(t.nsec),
+ byte(sec >> 56), // bytes 1-8: seconds
+ byte(sec >> 48),
+ byte(sec >> 40),
+ byte(sec >> 32),
+ byte(sec >> 24),
+ byte(sec >> 16),
+ byte(sec >> 8),
+ byte(sec),
+ byte(nsec >> 24), // bytes 9-12: nanoseconds
+ byte(nsec >> 16),
+ byte(nsec >> 8),
+ byte(nsec),
byte(offsetMin >> 8), // bytes 13-14: zone offset in minutes
byte(offsetMin),
}
@@ -910,18 +1166,22 @@ func (t *Time) UnmarshalBinary(data []byte) error {
}
buf = buf[1:]
- t.sec = int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
+ sec := int64(buf[7]) | int64(buf[6])<<8 | int64(buf[5])<<16 | int64(buf[4])<<24 |
int64(buf[3])<<32 | int64(buf[2])<<40 | int64(buf[1])<<48 | int64(buf[0])<<56
buf = buf[8:]
- t.nsec = int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
+ nsec := int32(buf[3]) | int32(buf[2])<<8 | int32(buf[1])<<16 | int32(buf[0])<<24
buf = buf[4:]
offset := int(int16(buf[1])|int16(buf[0])<<8) * 60
+ *t = Time{}
+ t.wall = uint64(nsec)
+ t.ext = sec
+
if offset == -1*60 {
t.setLoc(&utcLoc)
- } else if _, localoff, _, _, _ := Local.lookup(t.sec + internalToUnix); offset == localoff {
+ } else if _, localoff, _, _, _ := Local.lookup(t.unixSec()); offset == localoff {
t.setLoc(Local)
} else {
t.setLoc(FixedZone("", offset))
@@ -1008,7 +1268,7 @@ func Unix(sec int64, nsec int64) Time {
sec--
}
}
- return Time{sec + unixToInternal, int32(nsec), Local}
+ return unixTime(sec, int32(nsec))
}
func isLeap(year int) bool {
@@ -1117,7 +1377,7 @@ func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) T
unix -= int64(offset)
}
- t := Time{unix + unixToInternal, int32(nsec), nil}
+ t := unixTime(unix, int32(nsec))
t.setLoc(loc)
return t
}
@@ -1130,6 +1390,7 @@ func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) T
// time. Thus, Truncate(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Truncate(d Duration) Time {
+ t.stripMono()
if d <= 0 {
return t
}
@@ -1146,11 +1407,12 @@ func (t Time) Truncate(d Duration) Time {
// time. Thus, Round(Hour) may return a time with a non-zero
// minute, depending on the time's Location.
func (t Time) Round(d Duration) Time {
+ t.stripMono()
if d <= 0 {
return t
}
_, r := div(t, d)
- if r+r < d {
+ if lessThanHalf(r, d) {
return t.Add(-r)
}
return t.Add(d - r)
@@ -1161,15 +1423,16 @@ func (t Time) Round(d Duration) Time {
// but it's still here in case we change our minds.
func div(t Time, d Duration) (qmod2 int, r Duration) {
neg := false
- nsec := t.nsec
- if t.sec < 0 {
+ nsec := t.nsec()
+ sec := t.sec()
+ if sec < 0 {
// Operate on absolute value.
neg = true
- t.sec = -t.sec
+ sec = -sec
nsec = -nsec
if nsec < 0 {
nsec += 1e9
- t.sec-- // t.sec >= 1 before the -- so safe
+ sec-- // sec >= 1 before the -- so safe
}
}
@@ -1182,8 +1445,8 @@ func div(t Time, d Duration) (qmod2 int, r Duration) {
// Special case: d is a multiple of 1 second.
case d%Second == 0:
d1 := int64(d / Second)
- qmod2 = int(t.sec/d1) & 1
- r = Duration(t.sec%d1)*Second + Duration(nsec)
+ qmod2 = int(sec/d1) & 1
+ r = Duration(sec%d1)*Second + Duration(nsec)
// General case.
// This could be faster if more cleverness were applied,
@@ -1191,7 +1454,7 @@ func div(t Time, d Duration) (qmod2 int, r Duration) {
// No one will care about these cases.
default:
// Compute nanoseconds as 128-bit number.
- sec := uint64(t.sec)
+ sec := uint64(sec)
tmp := (sec >> 32) * 1e9
u1 := tmp >> 32
u0 := tmp << 32
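
The package comment added above promises that time-measuring operations survive wall-clock resets and that t.Round(0) strips the monotonic reading. A minimal sketch of the observable behavior, using only the documented API:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		start := time.Now() // carries wall and monotonic clock readings
		time.Sleep(20 * time.Millisecond)
		fmt.Println(time.Since(start)) // ~20ms even if the wall clock is reset meanwhile

		t := time.Now()
		u := t.Round(0)         // strips the monotonic reading
		fmt.Println(t == u)     // false: the wall fields differ by the hasMonotonic bit
		fmt.Println(t.Equal(u)) // true: same instant, compared by wall clock
	}
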
diff --git a/libgo/go/time/time_test.go b/libgo/go/time/time_test.go
index 2922560f097..dba8e0dadcd 100644
--- a/libgo/go/time/time_test.go
+++ b/libgo/go/time/time_test.go
@@ -11,7 +11,9 @@ import (
"fmt"
"math/big"
"math/rand"
+ "os"
"runtime"
+ "strings"
"testing"
"testing/quick"
. "time"
@@ -231,6 +233,7 @@ var truncateRoundTests = []struct {
{Date(-1, January, 1, 12, 15, 31, 5e8, UTC), 3},
{Date(2012, January, 1, 12, 15, 30, 5e8, UTC), Second},
{Date(2012, January, 1, 12, 15, 31, 5e8, UTC), Second},
+ {Unix(-19012425939, 649146258), 7435029458905025217}, // 5.8*d rounds to 6*d, but .8*d+.8*d < 0 < d
}
func TestTruncateRound(t *testing.T) {
@@ -1056,6 +1059,66 @@ func TestDurationHours(t *testing.T) {
}
}
+var durationTruncateTests = []struct {
+ d Duration
+ m Duration
+ want Duration
+}{
+ {0, Second, 0},
+ {Minute, -7 * Second, Minute},
+ {Minute, 0, Minute},
+ {Minute, 1, Minute},
+ {Minute + 10*Second, 10 * Second, Minute + 10*Second},
+ {2*Minute + 10*Second, Minute, 2 * Minute},
+ {10*Minute + 10*Second, 3 * Minute, 9 * Minute},
+ {Minute + 10*Second, Minute + 10*Second + 1, 0},
+ {Minute + 10*Second, Hour, 0},
+ {-Minute, Second, -Minute},
+ {-10 * Minute, 3 * Minute, -9 * Minute},
+ {-10 * Minute, Hour, 0},
+}
+
+func TestDurationTruncate(t *testing.T) {
+ for _, tt := range durationTruncateTests {
+ if got := tt.d.Truncate(tt.m); got != tt.want {
+ t.Errorf("Duration(%s).Truncate(%s) = %s; want: %s", tt.d, tt.m, got, tt.want)
+ }
+ }
+}
+
+var durationRoundTests = []struct {
+ d Duration
+ m Duration
+ want Duration
+}{
+ {0, Second, 0},
+ {Minute, -11 * Second, Minute},
+ {Minute, 0, Minute},
+ {Minute, 1, Minute},
+ {2 * Minute, Minute, 2 * Minute},
+ {2*Minute + 10*Second, Minute, 2 * Minute},
+ {2*Minute + 30*Second, Minute, 3 * Minute},
+ {2*Minute + 50*Second, Minute, 3 * Minute},
+ {-Minute, 1, -Minute},
+ {-2 * Minute, Minute, -2 * Minute},
+ {-2*Minute - 10*Second, Minute, -2 * Minute},
+ {-2*Minute - 30*Second, Minute, -3 * Minute},
+ {-2*Minute - 50*Second, Minute, -3 * Minute},
+ {8e18, 3e18, 9e18},
+ {9e18, 5e18, 1<<63 - 1},
+ {-8e18, 3e18, -9e18},
+ {-9e18, 5e18, -1 << 63},
+ {3<<61 - 1, 3 << 61, 3 << 61},
+}
+
+func TestDurationRound(t *testing.T) {
+ for _, tt := range durationRoundTests {
+ if got := tt.d.Round(tt.m); got != tt.want {
+ t.Errorf("Duration(%s).Round(%s) = %s; want: %s", tt.d, tt.m, got, tt.want)
+ }
+ }
+}
+
var defaultLocTests = []struct {
name string
f func(t1, t2 Time) bool
@@ -1254,3 +1317,14 @@ func TestZeroMonthString(t *testing.T) {
t.Errorf("zero month = %q; want %q", got, want)
}
}
+
+func TestReadFileLimit(t *testing.T) {
+ const zero = "/dev/zero"
+ if _, err := os.Stat(zero); err != nil {
+ t.Skip("skipping test without a /dev/zero")
+ }
+ _, err := ReadFile(zero)
+ if err == nil || !strings.Contains(err.Error(), "is too large") {
+ t.Errorf("readFile(%q) error = %v; want error containing 'is too large'", zero, err)
+ }
+}
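
The durationTruncateTests and durationRoundTests above pin down the new Duration methods: Truncate rounds toward zero, Round rounds halfway values away from zero and saturates rather than overflowing. A short illustration of the same cases:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		d := 2*time.Minute + 30*time.Second
		fmt.Println(d.Truncate(time.Minute)) // 2m0s: toward zero
		fmt.Println(d.Round(time.Minute))    // 3m0s: half away from zero

		// Saturation at the extremes, as in the {9e18, 5e18, 1<<63 - 1} table entry.
		fmt.Println(time.Duration(9e18).Round(5e18) == time.Duration(1<<63-1)) // true
	}
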
diff --git a/libgo/go/time/zoneinfo.go b/libgo/go/time/zoneinfo.go
index fb0aa392404..f4d4df95d36 100644
--- a/libgo/go/time/zoneinfo.go
+++ b/libgo/go/time/zoneinfo.go
@@ -5,6 +5,7 @@
package time
import (
+ "errors"
"sync"
"syscall"
)
@@ -81,7 +82,7 @@ func (l *Location) get() *Location {
}
// String returns a descriptive name for the time zone information,
-// corresponding to the argument to LoadLocation.
+// corresponding to the name argument to LoadLocation or FixedZone.
func (l *Location) String() string {
return l.get().name
}
@@ -256,7 +257,10 @@ func (l *Location) lookupName(name string, unix int64) (offset int, isDST bool,
// NOTE(rsc): Eventually we will need to accept the POSIX TZ environment
// syntax too, but I don't feel like implementing it today.
-var zoneinfo, _ = syscall.Getenv("ZONEINFO")
+var errLocation = errors.New("time: invalid location name")
+
+var zoneinfo *string
+var zoneinfoOnce sync.Once
// LoadLocation returns the Location with the given name.
//
@@ -279,11 +283,33 @@ func LoadLocation(name string) (*Location, error) {
if name == "Local" {
return Local, nil
}
- if zoneinfo != "" {
- if z, err := loadZoneFile(zoneinfo, name); err == nil {
+ if containsDotDot(name) || name[0] == '/' || name[0] == '\\' {
+ // No valid IANA Time Zone name contains a single dot,
+ // much less dot dot. Likewise, none begin with a slash.
+ return nil, errLocation
+ }
+ zoneinfoOnce.Do(func() {
+ env, _ := syscall.Getenv("ZONEINFO")
+ zoneinfo = &env
+ })
+ if zoneinfo != nil && *zoneinfo != "" {
+ if z, err := loadZoneFile(*zoneinfo, name); err == nil {
z.name = name
return z, nil
}
}
return loadLocation(name)
}
+
+// containsDotDot reports whether s contains "..".
+func containsDotDot(s string) bool {
+ if len(s) < 2 {
+ return false
+ }
+ for i := 0; i < len(s)-1; i++ {
+ if s[i] == '.' && s[i+1] == '.' {
+ return true
+ }
+ }
+ return false
+}
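
With the validation above, LoadLocation rejects rooted and dot-dot names before consulting any database, and the ZONEINFO environment variable is now read only once, on first use. A sketch of what callers observe (the Europe/Paris lookup assumes a system tz database is installed):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Traversal attempts and rooted paths fail fast.
		for _, name := range []string{"../../etc/passwd", "/etc/localtime"} {
			if _, err := time.LoadLocation(name); err != nil {
				fmt.Println(name, "->", err) // time: invalid location name
			}
		}

		// Ordinary IANA names still resolve via ZONEINFO or the system database.
		loc, err := time.LoadLocation("Europe/Paris")
		fmt.Println(loc, err)
	}
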
diff --git a/libgo/go/time/zoneinfo_abbrs_windows.go b/libgo/go/time/zoneinfo_abbrs_windows.go
index 9425db844ca..db0bbfd74f4 100644
--- a/libgo/go/time/zoneinfo_abbrs_windows.go
+++ b/libgo/go/time/zoneinfo_abbrs_windows.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// generated by genzabbrs.go from
-// http://unicode.org/cldr/data/common/supplemental/windowsZones.xml
+// Code generated by genzabbrs.go; DO NOT EDIT.
+// Based on information from http://unicode.org/cldr/data/common/supplemental/windowsZones.xml
package time
@@ -22,124 +22,128 @@ var abbrs = map[string]abbr{
"Namibia Standard Time": {"WAT", "WAST"}, // Africa/Windhoek
"Aleutian Standard Time": {"HST", "HDT"}, // America/Adak
"Alaskan Standard Time": {"AKST", "AKDT"}, // America/Anchorage
- "Tocantins Standard Time": {"BRT", "BRT"}, // America/Araguaina
- "Paraguay Standard Time": {"PYT", "PYST"}, // America/Asuncion
- "Bahia Standard Time": {"BRT", "BRT"}, // America/Bahia
- "SA Pacific Standard Time": {"COT", "COT"}, // America/Bogota
- "Argentina Standard Time": {"ART", "ART"}, // America/Buenos_Aires
+ "Tocantins Standard Time": {"-03", "-03"}, // America/Araguaina
+ "Paraguay Standard Time": {"-04", "-03"}, // America/Asuncion
+ "Bahia Standard Time": {"-03", "-03"}, // America/Bahia
+ "SA Pacific Standard Time": {"-05", "-05"}, // America/Bogota
+ "Argentina Standard Time": {"-03", "-03"}, // America/Buenos_Aires
"Eastern Standard Time (Mexico)": {"EST", "EST"}, // America/Cancun
- "Venezuela Standard Time": {"VET", "VET"}, // America/Caracas
- "SA Eastern Standard Time": {"GFT", "GFT"}, // America/Cayenne
+ "Venezuela Standard Time": {"-04", "-04"}, // America/Caracas
+ "SA Eastern Standard Time": {"-03", "-03"}, // America/Cayenne
"Central Standard Time": {"CST", "CDT"}, // America/Chicago
"Mountain Standard Time (Mexico)": {"MST", "MDT"}, // America/Chihuahua
- "Central Brazilian Standard Time": {"AMT", "AMST"}, // America/Cuiaba
+ "Central Brazilian Standard Time": {"-04", "-03"}, // America/Cuiaba
"Mountain Standard Time": {"MST", "MDT"}, // America/Denver
- "Greenland Standard Time": {"WGT", "WGST"}, // America/Godthab
+ "Greenland Standard Time": {"-03", "-02"}, // America/Godthab
"Turks And Caicos Standard Time": {"AST", "AST"}, // America/Grand_Turk
"Central America Standard Time": {"CST", "CST"}, // America/Guatemala
"Atlantic Standard Time": {"AST", "ADT"}, // America/Halifax
"Cuba Standard Time": {"CST", "CDT"}, // America/Havana
"US Eastern Standard Time": {"EST", "EDT"}, // America/Indianapolis
- "SA Western Standard Time": {"BOT", "BOT"}, // America/La_Paz
+ "SA Western Standard Time": {"-04", "-04"}, // America/La_Paz
"Pacific Standard Time": {"PST", "PDT"}, // America/Los_Angeles
"Central Standard Time (Mexico)": {"CST", "CDT"}, // America/Mexico_City
- "Saint Pierre Standard Time": {"PMST", "PMDT"}, // America/Miquelon
- "Montevideo Standard Time": {"UYT", "UYT"}, // America/Montevideo
+ "Saint Pierre Standard Time": {"-03", "-02"}, // America/Miquelon
+ "Montevideo Standard Time": {"-03", "-03"}, // America/Montevideo
"Eastern Standard Time": {"EST", "EDT"}, // America/New_York
"US Mountain Standard Time": {"MST", "MST"}, // America/Phoenix
- "Haiti Standard Time": {"EST", "EST"}, // America/Port-au-Prince
+ "Haiti Standard Time": {"EST", "EDT"}, // America/Port-au-Prince
+ "Magallanes Standard Time": {"-03", "-03"}, // America/Punta_Arenas
"Canada Central Standard Time": {"CST", "CST"}, // America/Regina
- "Pacific SA Standard Time": {"CLT", "CLST"}, // America/Santiago
- "E. South America Standard Time": {"BRT", "BRST"}, // America/Sao_Paulo
+ "Pacific SA Standard Time": {"-04", "-03"}, // America/Santiago
+ "E. South America Standard Time": {"-03", "-02"}, // America/Sao_Paulo
"Newfoundland Standard Time": {"NST", "NDT"}, // America/St_Johns
"Pacific Standard Time (Mexico)": {"PST", "PDT"}, // America/Tijuana
"Central Asia Standard Time": {"+06", "+06"}, // Asia/Almaty
"Jordan Standard Time": {"EET", "EEST"}, // Asia/Amman
- "Arabic Standard Time": {"AST", "AST"}, // Asia/Baghdad
- "Azerbaijan Standard Time": {"AZT", "AZT"}, // Asia/Baku
- "SE Asia Standard Time": {"ICT", "ICT"}, // Asia/Bangkok
- "Altai Standard Time": {"+06", "+07"}, // Asia/Barnaul
+ "Arabic Standard Time": {"+03", "+03"}, // Asia/Baghdad
+ "Azerbaijan Standard Time": {"+04", "+04"}, // Asia/Baku
+ "SE Asia Standard Time": {"+07", "+07"}, // Asia/Bangkok
+ "Altai Standard Time": {"+07", "+07"}, // Asia/Barnaul
"Middle East Standard Time": {"EET", "EEST"}, // Asia/Beirut
"India Standard Time": {"IST", "IST"}, // Asia/Calcutta
- "Transbaikal Standard Time": {"IRKT", "YAKT"}, // Asia/Chita
- "Sri Lanka Standard Time": {"IST", "IST"}, // Asia/Colombo
+ "Transbaikal Standard Time": {"+09", "+09"}, // Asia/Chita
+ "Sri Lanka Standard Time": {"+0530", "+0530"}, // Asia/Colombo
"Syria Standard Time": {"EET", "EEST"}, // Asia/Damascus
- "Bangladesh Standard Time": {"BDT", "BDT"}, // Asia/Dhaka
- "Arabian Standard Time": {"GST", "GST"}, // Asia/Dubai
+ "Bangladesh Standard Time": {"+06", "+06"}, // Asia/Dhaka
+ "Arabian Standard Time": {"+04", "+04"}, // Asia/Dubai
"West Bank Standard Time": {"EET", "EEST"}, // Asia/Hebron
- "W. Mongolia Standard Time": {"HOVT", "HOVST"}, // Asia/Hovd
- "North Asia East Standard Time": {"IRKT", "IRKT"}, // Asia/Irkutsk
+ "W. Mongolia Standard Time": {"+07", "+07"}, // Asia/Hovd
+ "North Asia East Standard Time": {"+08", "+08"}, // Asia/Irkutsk
"Israel Standard Time": {"IST", "IDT"}, // Asia/Jerusalem
- "Afghanistan Standard Time": {"AFT", "AFT"}, // Asia/Kabul
- "Russia Time Zone 11": {"PETT", "PETT"}, // Asia/Kamchatka
+ "Afghanistan Standard Time": {"+0430", "+0430"}, // Asia/Kabul
+ "Russia Time Zone 11": {"+12", "+12"}, // Asia/Kamchatka
"Pakistan Standard Time": {"PKT", "PKT"}, // Asia/Karachi
- "Nepal Standard Time": {"NPT", "NPT"}, // Asia/Katmandu
- "North Asia Standard Time": {"KRAT", "KRAT"}, // Asia/Krasnoyarsk
- "Magadan Standard Time": {"MAGT", "MAGT"}, // Asia/Magadan
- "N. Central Asia Standard Time": {"+06", "+07"}, // Asia/Novosibirsk
+ "Nepal Standard Time": {"+0545", "+0545"}, // Asia/Katmandu
+ "North Asia Standard Time": {"+07", "+07"}, // Asia/Krasnoyarsk
+ "Magadan Standard Time": {"+11", "+11"}, // Asia/Magadan
+ "N. Central Asia Standard Time": {"+07", "+07"}, // Asia/Novosibirsk
+ "Omsk Standard Time": {"+06", "+06"}, // Asia/Omsk
"North Korea Standard Time": {"KST", "KST"}, // Asia/Pyongyang
- "Myanmar Standard Time": {"MMT", "MMT"}, // Asia/Rangoon
- "Arab Standard Time": {"AST", "AST"}, // Asia/Riyadh
- "Sakhalin Standard Time": {"SAKT", "SAKT"}, // Asia/Sakhalin
+ "Myanmar Standard Time": {"+0630", "+0630"}, // Asia/Rangoon
+ "Arab Standard Time": {"+03", "+03"}, // Asia/Riyadh
+ "Sakhalin Standard Time": {"+11", "+11"}, // Asia/Sakhalin
"Korea Standard Time": {"KST", "KST"}, // Asia/Seoul
"China Standard Time": {"CST", "CST"}, // Asia/Shanghai
- "Singapore Standard Time": {"SGT", "SGT"}, // Asia/Singapore
- "Russia Time Zone 10": {"SRET", "SRET"}, // Asia/Srednekolymsk
+ "Singapore Standard Time": {"+08", "+08"}, // Asia/Singapore
+ "Russia Time Zone 10": {"+11", "+11"}, // Asia/Srednekolymsk
"Taipei Standard Time": {"CST", "CST"}, // Asia/Taipei
- "West Asia Standard Time": {"UZT", "UZT"}, // Asia/Tashkent
- "Georgian Standard Time": {"GET", "GET"}, // Asia/Tbilisi
- "Iran Standard Time": {"IRST", "IRDT"}, // Asia/Tehran
+ "West Asia Standard Time": {"+05", "+05"}, // Asia/Tashkent
+ "Georgian Standard Time": {"+04", "+04"}, // Asia/Tbilisi
+ "Iran Standard Time": {"+0330", "+0430"}, // Asia/Tehran
"Tokyo Standard Time": {"JST", "JST"}, // Asia/Tokyo
- "Tomsk Standard Time": {"+06", "+07"}, // Asia/Tomsk
- "Ulaanbaatar Standard Time": {"ULAT", "ULAST"}, // Asia/Ulaanbaatar
- "Vladivostok Standard Time": {"VLAT", "VLAT"}, // Asia/Vladivostok
- "Yakutsk Standard Time": {"YAKT", "YAKT"}, // Asia/Yakutsk
- "Ekaterinburg Standard Time": {"YEKT", "YEKT"}, // Asia/Yekaterinburg
- "Caucasus Standard Time": {"AMT", "AMT"}, // Asia/Yerevan
- "Azores Standard Time": {"AZOT", "AZOST"}, // Atlantic/Azores
- "Cape Verde Standard Time": {"CVT", "CVT"}, // Atlantic/Cape_Verde
+ "Tomsk Standard Time": {"+07", "+07"}, // Asia/Tomsk
+ "Ulaanbaatar Standard Time": {"+08", "+08"}, // Asia/Ulaanbaatar
+ "Vladivostok Standard Time": {"+10", "+10"}, // Asia/Vladivostok
+ "Yakutsk Standard Time": {"+09", "+09"}, // Asia/Yakutsk
+ "Ekaterinburg Standard Time": {"+05", "+05"}, // Asia/Yekaterinburg
+ "Caucasus Standard Time": {"+04", "+04"}, // Asia/Yerevan
+ "Azores Standard Time": {"-01", "+00"}, // Atlantic/Azores
+ "Cape Verde Standard Time": {"-01", "-01"}, // Atlantic/Cape_Verde
"Greenwich Standard Time": {"GMT", "GMT"}, // Atlantic/Reykjavik
"Cen. Australia Standard Time": {"ACST", "ACDT"}, // Australia/Adelaide
"E. Australia Standard Time": {"AEST", "AEST"}, // Australia/Brisbane
"AUS Central Standard Time": {"ACST", "ACST"}, // Australia/Darwin
- "Aus Central W. Standard Time": {"ACWST", "ACWST"}, // Australia/Eucla
+ "Aus Central W. Standard Time": {"+0845", "+0845"}, // Australia/Eucla
"Tasmania Standard Time": {"AEST", "AEDT"}, // Australia/Hobart
- "Lord Howe Standard Time": {"LHST", "LHDT"}, // Australia/Lord_Howe
+ "Lord Howe Standard Time": {"+1030", "+11"}, // Australia/Lord_Howe
"W. Australia Standard Time": {"AWST", "AWST"}, // Australia/Perth
"AUS Eastern Standard Time": {"AEST", "AEDT"}, // Australia/Sydney
- "UTC": {"GMT", "GMT"}, // Etc/GMT
- "UTC-11": {"GMT+11", "GMT+11"}, // Etc/GMT+11
- "Dateline Standard Time": {"GMT+12", "GMT+12"}, // Etc/GMT+12
- "UTC-02": {"GMT+2", "GMT+2"}, // Etc/GMT+2
- "UTC-08": {"GMT+8", "GMT+8"}, // Etc/GMT+8
- "UTC-09": {"GMT+9", "GMT+9"}, // Etc/GMT+9
- "UTC+12": {"GMT-12", "GMT-12"}, // Etc/GMT-12
- "Astrakhan Standard Time": {"+03", "+04"}, // Europe/Astrakhan
- "W. Europe Standard Time": {"CET", "CEST"}, // Europe/Berlin
- "GTB Standard Time": {"EET", "EEST"}, // Europe/Bucharest
- "Central Europe Standard Time": {"CET", "CEST"}, // Europe/Budapest
- "E. Europe Standard Time": {"EET", "EEST"}, // Europe/Chisinau
- "Turkey Standard Time": {"EET", "EEST"}, // Europe/Istanbul
- "Kaliningrad Standard Time": {"EET", "EET"}, // Europe/Kaliningrad
- "FLE Standard Time": {"EET", "EEST"}, // Europe/Kiev
- "GMT Standard Time": {"GMT", "BST"}, // Europe/London
- "Belarus Standard Time": {"MSK", "MSK"}, // Europe/Minsk
- "Russian Standard Time": {"MSK", "MSK"}, // Europe/Moscow
- "Romance Standard Time": {"CET", "CEST"}, // Europe/Paris
- "Russia Time Zone 3": {"SAMT", "SAMT"}, // Europe/Samara
- "Central European Standard Time": {"CET", "CEST"}, // Europe/Warsaw
- "Mauritius Standard Time": {"MUT", "MUT"}, // Indian/Mauritius
- "Samoa Standard Time": {"WSST", "WSDT"}, // Pacific/Apia
- "New Zealand Standard Time": {"NZST", "NZDT"}, // Pacific/Auckland
- "Bougainville Standard Time": {"BST", "BST"}, // Pacific/Bougainville
- "Chatham Islands Standard Time": {"CHAST", "CHADT"}, // Pacific/Chatham
- "Easter Island Standard Time": {"EAST", "EASST"}, // Pacific/Easter
- "Fiji Standard Time": {"FJT", "FJST"}, // Pacific/Fiji
- "Central Pacific Standard Time": {"SBT", "SBT"}, // Pacific/Guadalcanal
- "Hawaiian Standard Time": {"HST", "HST"}, // Pacific/Honolulu
- "Line Islands Standard Time": {"LINT", "LINT"}, // Pacific/Kiritimati
- "Marquesas Standard Time": {"MART", "MART"}, // Pacific/Marquesas
- "Norfolk Standard Time": {"NFT", "NFT"}, // Pacific/Norfolk
- "West Pacific Standard Time": {"PGT", "PGT"}, // Pacific/Port_Moresby
- "Tonga Standard Time": {"TOT", "TOT"}, // Pacific/Tongatapu
+ "UTC": {"GMT", "GMT"}, // Etc/GMT
+ "UTC-11": {"-11", "-11"}, // Etc/GMT+11
+ "Dateline Standard Time": {"-12", "-12"}, // Etc/GMT+12
+ "UTC-02": {"-02", "-02"}, // Etc/GMT+2
+ "UTC-08": {"-08", "-08"}, // Etc/GMT+8
+ "UTC-09": {"-09", "-09"}, // Etc/GMT+9
+ "UTC+12": {"+12", "+12"}, // Etc/GMT-12
+ "UTC+13": {"+13", "+13"}, // Etc/GMT-13
+ "Astrakhan Standard Time": {"+04", "+04"}, // Europe/Astrakhan
+ "W. Europe Standard Time": {"CET", "CEST"}, // Europe/Berlin
+ "GTB Standard Time": {"EET", "EEST"}, // Europe/Bucharest
+ "Central Europe Standard Time": {"CET", "CEST"}, // Europe/Budapest
+ "E. Europe Standard Time": {"EET", "EEST"}, // Europe/Chisinau
+ "Turkey Standard Time": {"+03", "+03"}, // Europe/Istanbul
+ "Kaliningrad Standard Time": {"EET", "EET"}, // Europe/Kaliningrad
+ "FLE Standard Time": {"EET", "EEST"}, // Europe/Kiev
+ "GMT Standard Time": {"GMT", "BST"}, // Europe/London
+ "Belarus Standard Time": {"+03", "+03"}, // Europe/Minsk
+ "Russian Standard Time": {"MSK", "MSK"}, // Europe/Moscow
+ "Romance Standard Time": {"CET", "CEST"}, // Europe/Paris
+ "Russia Time Zone 3": {"+04", "+04"}, // Europe/Samara
+ "Saratov Standard Time": {"+03", "+04"}, // Europe/Saratov
+ "Central European Standard Time": {"CET", "CEST"}, // Europe/Warsaw
+ "Mauritius Standard Time": {"+04", "+04"}, // Indian/Mauritius
+ "Samoa Standard Time": {"+13", "+14"}, // Pacific/Apia
+ "New Zealand Standard Time": {"NZST", "NZDT"}, // Pacific/Auckland
+ "Bougainville Standard Time": {"+11", "+11"}, // Pacific/Bougainville
+ "Chatham Islands Standard Time": {"+1245", "+1345"}, // Pacific/Chatham
+ "Easter Island Standard Time": {"-06", "-05"}, // Pacific/Easter
+ "Fiji Standard Time": {"+12", "+13"}, // Pacific/Fiji
+ "Central Pacific Standard Time": {"+11", "+11"}, // Pacific/Guadalcanal
+ "Hawaiian Standard Time": {"HST", "HST"}, // Pacific/Honolulu
+ "Line Islands Standard Time": {"+14", "+14"}, // Pacific/Kiritimati
+ "Marquesas Standard Time": {"-0930", "-0930"}, // Pacific/Marquesas
+ "Norfolk Standard Time": {"+11", "+11"}, // Pacific/Norfolk
+ "West Pacific Standard Time": {"+10", "+10"}, // Pacific/Port_Moresby
+ "Tonga Standard Time": {"+13", "+14"}, // Pacific/Tongatapu
}
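
The regenerated table tracks recent tzdata releases, which replaced many invented abbreviations with plain numeric offsets of the form ±HH. With a current zone database installed, the new names show up directly in formatted output; America/Bogota here is just one example taken from the table:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		loc, err := time.LoadLocation("America/Bogota")
		if err != nil {
			panic(err)
		}
		t := time.Date(2018, time.January, 1, 0, 0, 0, 0, loc)
		fmt.Println(t.Format("MST -0700")) // -05 -0500 with current tzdata
	}
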
diff --git a/libgo/go/time/zoneinfo_plan9.go b/libgo/go/time/zoneinfo_plan9.go
index 0694f0a9904..26637a151f0 100644
--- a/libgo/go/time/zoneinfo_plan9.go
+++ b/libgo/go/time/zoneinfo_plan9.go
@@ -95,7 +95,7 @@ func loadZoneDataPlan9(s string) (l *Location, err error) {
// Fill in the cache with information about right now,
// since that will be the most common lookup.
- sec, _ := now()
+ sec, _, _ := now()
for i := range tx {
if tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {
l.cacheStart = tx[i].when
diff --git a/libgo/go/time/zoneinfo_read.go b/libgo/go/time/zoneinfo_read.go
index 19cd40d8477..b0cd9da9230 100644
--- a/libgo/go/time/zoneinfo_read.go
+++ b/libgo/go/time/zoneinfo_read.go
@@ -11,6 +11,17 @@ package time
import "errors"
+// maxFileSize is the max permitted size of files read by readFile.
+// As reference, the zoneinfo.zip distributed by Go is ~350 KB,
+// so 10MB is overkill.
+const maxFileSize = 10 << 20
+
+type fileSizeError string
+
+func (f fileSizeError) Error() string {
+ return "time: file " + string(f) + " is too large"
+}
+
// Copies of io.Seek* constants to avoid importing "io":
const (
seekStart = 0
@@ -188,7 +199,7 @@ func loadZoneData(bytes []byte) (l *Location, err error) {
// Fill in the cache with information about right now,
// since that will be the most common lookup.
- sec, _ := now()
+ sec, _, _ := now()
for i := range tx {
if tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {
l.cacheStart = tx[i].when
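
maxFileSize and fileSizeError above guard the zoneinfo loader against pathological inputs such as /dev/zero (exercised by TestReadFileLimit earlier). A minimal sketch of a bounded read with the same shape; boundedReadFile is our name for illustration, the real helper is the unexported readFile in this package:

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	const maxFileSize = 10 << 20 // same limit as zoneinfo_read.go

	type fileSizeError string

	func (f fileSizeError) Error() string {
		return "time: file " + string(f) + " is too large"
	}

	// boundedReadFile reads name in full but gives up once the data
	// exceeds maxFileSize, instead of growing without bound.
	func boundedReadFile(name string) ([]byte, error) {
		f, err := os.Open(name)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		var (
			buf  [4096]byte
			data []byte
		)
		for {
			n, err := f.Read(buf[:])
			data = append(data, buf[:n]...)
			if len(data) > maxFileSize {
				return nil, fileSizeError(name)
			}
			if err == io.EOF {
				return data, nil
			}
			if err != nil {
				return nil, err
			}
		}
	}

	func main() {
		data, err := boundedReadFile("/usr/share/zoneinfo/UTC")
		fmt.Println(len(data), err)
	}
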
diff --git a/libgo/go/time/zoneinfo_test.go b/libgo/go/time/zoneinfo_test.go
index 5b6a4dc4e4e..8a4caa0c445 100644
--- a/libgo/go/time/zoneinfo_test.go
+++ b/libgo/go/time/zoneinfo_test.go
@@ -5,10 +5,56 @@
package time_test
import (
+ "fmt"
+ "os"
"testing"
"time"
)
+func init() {
+ if time.ZoneinfoForTesting() != nil {
+ panic(fmt.Errorf("zoneinfo initialized before first LoadLocation"))
+ }
+}
+
+func TestEnvVarUsage(t *testing.T) {
+ time.ResetZoneinfoForTesting()
+
+ const testZoneinfo = "foo.zip"
+ const env = "ZONEINFO"
+
+ defer os.Setenv(env, os.Getenv(env))
+ os.Setenv(env, testZoneinfo)
+
+ // Result isn't important, we're testing the side effect of this command
+ time.LoadLocation("Asia/Jerusalem")
+ defer time.ResetZoneinfoForTesting()
+
+ if zoneinfo := time.ZoneinfoForTesting(); testZoneinfo != *zoneinfo {
+		t.Errorf("zoneinfo does not match env variable: got %q want %q", *zoneinfo, testZoneinfo)
+ }
+}
+
+func TestLoadLocationValidatesNames(t *testing.T) {
+ time.ResetZoneinfoForTesting()
+ const env = "ZONEINFO"
+ defer os.Setenv(env, os.Getenv(env))
+ os.Setenv(env, "")
+
+ bad := []string{
+ "/usr/foo/Foo",
+ "\\UNC\foo",
+ "..",
+ "a..",
+ }
+ for _, v := range bad {
+ _, err := time.LoadLocation(v)
+ if err != time.ErrLocation {
+ t.Errorf("LoadLocation(%q) error = %v; want ErrLocation", v, err)
+ }
+ }
+}
+
func TestVersion3(t *testing.T) {
t.Skip("gccgo does not use the zip file")
time.ForceZipFileForTesting(true)
@@ -44,8 +90,8 @@ func TestFirstZone(t *testing.T) {
{
"Pacific/Fakaofo",
1325242799,
- "Thu, 29 Dec 2011 23:59:59 -1100 (TKT)",
- "Sat, 31 Dec 2011 00:00:00 +1300 (TKT)",
+ "Thu, 29 Dec 2011 23:59:59 -1100 (-11)",
+ "Sat, 31 Dec 2011 00:00:00 +1300 (+13)",
},
}
diff --git a/libgo/go/time/zoneinfo_windows.go b/libgo/go/time/zoneinfo_windows.go
index a6e227b5b0d..c201f4b55ee 100644
--- a/libgo/go/time/zoneinfo_windows.go
+++ b/libgo/go/time/zoneinfo_windows.go
@@ -132,7 +132,7 @@ func pseudoUnix(year int, d *syscall.Systemtime) int64 {
day -= 7
}
}
- return t.sec + int64(day-1)*secondsPerDay + internalToUnix
+ return t.sec() + int64(day-1)*secondsPerDay + internalToUnix
}
func initLocalFromTZI(i *syscall.Timezoneinformation) {
diff --git a/libgo/go/unicode/letter.go b/libgo/go/unicode/letter.go
index b43cc66e7d3..90b0b414da2 100644
--- a/libgo/go/unicode/letter.go
+++ b/libgo/go/unicode/letter.go
@@ -46,7 +46,7 @@ type Range32 struct {
// CaseRange represents a range of Unicode code points for simple (one
// code point to one code point) case conversion.
-// The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas
+// The range runs from Lo to Hi inclusive, with a fixed stride of 1. Deltas
// are the number to add to the code point to reach the code point for a
// different case for that character. They may be negative. If zero, it
// means the character is in the corresponding case. There is a special
@@ -308,7 +308,7 @@ func (special SpecialCase) ToLower(r rune) rune {
}
// caseOrbit is defined in tables.go as []foldPair. Right now all the
-// entries fit in uint16, so use uint16. If that changes, compilation
+// entries fit in uint16, so use uint16. If that changes, compilation
// will fail (the constants in the composite literal will not fit in uint16)
// and the types here can change to uint32.
type foldPair struct {
diff --git a/libgo/go/unicode/tables.go b/libgo/go/unicode/tables.go
index 15fecd954f1..90323367d55 100644
--- a/libgo/go/unicode/tables.go
+++ b/libgo/go/unicode/tables.go
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Generated by running
+// Code generated by maketables; DO NOT EDIT.
+// To regenerate, run:
// maketables --tables=all --data=http://www.unicode.org/Public/9.0.0/ucd/UnicodeData.txt --casefolding=http://www.unicode.org/Public/9.0.0/ucd/CaseFolding.txt
-// DO NOT EDIT
package unicode
@@ -7311,34 +7311,12 @@ var caseOrbit = []foldPair{
// simple case folding to code points inside the category.
// If there is no entry for a category name, there are no such points.
var FoldCategory = map[string]*RangeTable{
- "Common": foldCommon,
- "Greek": foldGreek,
- "Inherited": foldInherited,
- "L": foldL,
- "Ll": foldLl,
- "Lt": foldLt,
- "Lu": foldLu,
- "M": foldM,
- "Mn": foldMn,
-}
-
-var foldCommon = &RangeTable{
- R16: []Range16{
- {0x039c, 0x03bc, 32},
- },
-}
-
-var foldGreek = &RangeTable{
- R16: []Range16{
- {0x00b5, 0x0345, 656},
- },
-}
-
-var foldInherited = &RangeTable{
- R16: []Range16{
- {0x0399, 0x03b9, 32},
- {0x1fbe, 0x1fbe, 1},
- },
+ "L": foldL,
+ "Ll": foldLl,
+ "Lt": foldLt,
+ "Lu": foldLu,
+ "M": foldM,
+ "Mn": foldMn,
}
var foldL = &RangeTable{
@@ -7609,7 +7587,30 @@ var foldMn = &RangeTable{
// code points outside the script that are equivalent under
// simple case folding to code points inside the script.
// If there is no entry for a script name, there are no such points.
-var FoldScript = map[string]*RangeTable{}
+var FoldScript = map[string]*RangeTable{
+ "Common": foldCommon,
+ "Greek": foldGreek,
+ "Inherited": foldInherited,
+}
+
+var foldCommon = &RangeTable{
+ R16: []Range16{
+ {0x039c, 0x03bc, 32},
+ },
+}
+
+var foldGreek = &RangeTable{
+ R16: []Range16{
+ {0x00b5, 0x0345, 656},
+ },
+}
+
+var foldInherited = &RangeTable{
+ R16: []Range16{
+ {0x0399, 0x03b9, 32},
+ {0x1fbe, 0x1fbe, 1},
+ },
+}
// Range entries: 3576 16-bit, 1454 32-bit, 5030 total.
// Range bytes: 21456 16-bit, 17448 32-bit, 38904 total.
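
The move of Common, Greek, and Inherited from FoldCategory to FoldScript is visible through the exported maps: those three are scripts, not general categories. MICRO SIGN (U+00B5) lives in the Common script but case-folds into Greek, so it appears in FoldScript["Greek"]. A quick check, assuming the Unicode 9.0.0 tables generated above:

	package main

	import (
		"fmt"
		"unicode"
	)

	func main() {
		greekFold := unicode.FoldScript["Greek"]
		fmt.Println(unicode.Is(greekFold, 'µ')) // true: µ (U+00B5) folds into Greek

		// SimpleFold steps through the same case-folding orbit:
		// U+00B5 (µ) -> U+039C (Μ) -> U+03BC (μ) -> back to U+00B5.
		fmt.Printf("%U\n", unicode.SimpleFold('µ')) // U+039C
	}
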
diff --git a/libgo/godeps.sh b/libgo/godeps.sh
index 0da5d07bdf9..87cd09e3d9d 100644
--- a/libgo/godeps.sh
+++ b/libgo/godeps.sh
@@ -25,7 +25,8 @@ shift
files=$*
deps=`for f in $files; do cat $f; done |
sed -n -e '/^import.*"/p; /^import[ ]*(/,/^)/p' |
- grep '"' |
+ sed -e 's/^import //' |
+ grep '^[ ]*"' |
grep -v '"unsafe"' |
sed -e 's/^.*"\([^"]*\)".*$/\1/' -e 's/$/.gox/' |
sort -u`
diff --git a/libgo/match.sh b/libgo/match.sh
index f87a51d8f1f..fac75eabec6 100755
--- a/libgo/match.sh
+++ b/libgo/match.sh
@@ -151,18 +151,18 @@ for f in $gofiles; do
fi
match=false
;;
- $goos | $goarch | $cgotag | $cmdlinetag | "gccgo")
+ $goos | $goarch | $cgotag | $cmdlinetag | "gccgo" | go1.[0-9])
match=true
;;
- "!"$goos | "!"$goarch | "!"$cgotag | "!"$cmdlinetag | "!gccgo")
+ "!"$goos | "!"$goarch | "!"$cgotag | "!"$cmdlinetag | "!gccgo" | "!"go1.[0-9])
;;
*,*)
cmatch=true
for ctag in `echo $tag | sed -e 's/,/ /g'`; do
case $ctag in
- $goos | $goarch | $cgotag | $cmdlinetag | "gccgo")
+ $goos | $goarch | $cgotag | $cmdlinetag | "gccgo" | go1.[0-9])
;;
- "!"$goos | "!"$goarch | "!"$cgotag | "!"$cmdlinetag | "!gccgo")
+ "!"$goos | "!"$goarch | "!"$cgotag | "!"$cmdlinetag | "!gccgo" | "!"go1.[0-9])
cmatch=false
;;
"!"*)
diff --git a/libgo/merge.sh b/libgo/merge.sh
index bc24504e2a0..66686654476 100755
--- a/libgo/merge.sh
+++ b/libgo/merge.sh
@@ -128,7 +128,7 @@ echo ${rev} > VERSION
(cd ${NEWDIR}/src && find . -name '*.go' -print) | while read f; do
skip=false
case "$f" in
- ./cmd/cgo/* | ./cmd/go/* | ./cmd/gofmt/* | ./cmd/internal/browser/*)
+ ./cmd/cgo/* | ./cmd/go/* | ./cmd/gofmt/* | ./cmd/internal/browser/* | ./cmd/internal/objabi/*)
;;
./cmd/*)
skip=true
diff --git a/libgo/misc/cgo/errors/issue18452.go b/libgo/misc/cgo/errors/issue18452.go
new file mode 100644
index 00000000000..36ef7f54e12
--- /dev/null
+++ b/libgo/misc/cgo/errors/issue18452.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 18452: show pos info in undefined name errors
+
+package p
+
+import (
+ "C"
+ "fmt"
+)
+
+func a() {
+ fmt.Println("Hello, world!")
+ C.function_that_does_not_exist() // line 16
+ C.pi // line 17
+}
diff --git a/libgo/misc/cgo/errors/issue18889.go b/libgo/misc/cgo/errors/issue18889.go
new file mode 100644
index 00000000000..bba6b8f9bb1
--- /dev/null
+++ b/libgo/misc/cgo/errors/issue18889.go
@@ -0,0 +1,7 @@
+package main
+
+import "C"
+
+func main() {
+ _ = C.malloc // ERROR HERE
+}
diff --git a/libgo/misc/cgo/errors/ptr.go b/libgo/misc/cgo/errors/ptr.go
index 4dafbdf3c01..3e117666bff 100644
--- a/libgo/misc/cgo/errors/ptr.go
+++ b/libgo/misc/cgo/errors/ptr.go
@@ -343,6 +343,14 @@ var ptrTests = []ptrTest{
body: `var b C.char; p := &b; C.f((*C.u)(unsafe.Pointer(&p)))`,
fail: false,
},
+ {
+ // Issue #21306.
+ name: "preempt-during-call",
+ c: `void f() {}`,
+ imports: []string{"runtime", "sync"},
+ body: `var wg sync.WaitGroup; wg.Add(100); for i := 0; i < 100; i++ { go func(i int) { for j := 0; j < 100; j++ { C.f(); runtime.GOMAXPROCS(i) }; wg.Done() }(i) }; wg.Wait()`,
+ fail: false,
+ },
}
func main() {
diff --git a/libgo/misc/cgo/errors/test.bash b/libgo/misc/cgo/errors/test.bash
index 05261e9d767..ed0b0946925 100644
--- a/libgo/misc/cgo/errors/test.bash
+++ b/libgo/misc/cgo/errors/test.bash
@@ -17,7 +17,7 @@ check() {
expect() {
file=$1
shift
- if go build $file >errs 2>&1; then
+ if go build -gcflags=-C $file >errs 2>&1; then
echo 1>&2 misc/cgo/errors/test.bash: BUG: expected cgo to fail on $file but it succeeded
exit 1
fi
@@ -47,6 +47,8 @@ expect issue13635.go C.uchar C.schar C.ushort C.uint C.ulong C.longlong C.ulongl
check issue13830.go
check issue16116.go
check issue16591.go
+check issue18889.go
+expect issue18452.go issue18452.go:16 issue18452.go:17
if ! go build issue14669.go; then
exit 1
diff --git a/libgo/misc/cgo/fortran/test.bash b/libgo/misc/cgo/fortran/test.bash
index 3d1bc9de8e9..1e0d59ea1c6 100644
--- a/libgo/misc/cgo/fortran/test.bash
+++ b/libgo/misc/cgo/fortran/test.bash
@@ -12,7 +12,7 @@ FC=$1
goos=$(go env GOOS)
libext="so"
-if [ "$goos" == "darwin" ]; then
+if [ "$goos" = "darwin" ]; then
libext="dylib"
fi
diff --git a/libgo/misc/cgo/test/cgo_test.go b/libgo/misc/cgo/test/cgo_test.go
index a6de999752b..f7cf6f613c4 100644
--- a/libgo/misc/cgo/test/cgo_test.go
+++ b/libgo/misc/cgo/test/cgo_test.go
@@ -76,5 +76,9 @@ func TestThreadLock(t *testing.T) { testThreadLockFunc(t) }
func TestCheckConst(t *testing.T) { testCheckConst(t) }
func Test17537(t *testing.T) { test17537(t) }
func Test18126(t *testing.T) { test18126(t) }
+func Test20369(t *testing.T) { test20369(t) }
+func Test18720(t *testing.T) { test18720(t) }
+func Test20266(t *testing.T) { test20266(t) }
+func Test20129(t *testing.T) { test20129(t) }
func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) }
diff --git a/libgo/misc/cgo/test/issue18720.go b/libgo/misc/cgo/test/issue18720.go
new file mode 100644
index 00000000000..a93304498e0
--- /dev/null
+++ b/libgo/misc/cgo/test/issue18720.go
@@ -0,0 +1,28 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+#define HELLO "hello"
+#define WORLD "world"
+#define HELLO_WORLD HELLO "\000" WORLD
+
+struct foo { char c; };
+#define SIZE_OF(x) sizeof(x)
+#define SIZE_OF_FOO SIZE_OF(struct foo)
+*/
+import "C"
+import "testing"
+
+func test18720(t *testing.T) {
+ if C.HELLO_WORLD != "hello\000world" {
+ t.Fatalf(`expected "hello\000world", but got %q`, C.HELLO_WORLD)
+ }
+
+ // Issue 20125.
+ if got, want := C.SIZE_OF_FOO, 1; got != want {
+ t.Errorf("C.SIZE_OF_FOO == %v, expected %v", got, want)
+ }
+}
diff --git a/libgo/misc/cgo/test/issue20129.go b/libgo/misc/cgo/test/issue20129.go
new file mode 100644
index 00000000000..e69e0e16efa
--- /dev/null
+++ b/libgo/misc/cgo/test/issue20129.go
@@ -0,0 +1,33 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+int issue20129 = 0;
+typedef void issue20129Void;
+issue20129Void issue20129Foo() {
+ issue20129 = 1;
+}
+typedef issue20129Void issue20129Void2;
+issue20129Void2 issue20129Bar() {
+ issue20129 = 2;
+}
+*/
+import "C"
+import "testing"
+
+func test20129(t *testing.T) {
+ if C.issue20129 != 0 {
+ t.Fatal("test is broken")
+ }
+ C.issue20129Foo()
+ if C.issue20129 != 1 {
+ t.Errorf("got %v but expected %v", C.issue20129, 1)
+ }
+ C.issue20129Bar()
+ if C.issue20129 != 2 {
+ t.Errorf("got %v but expected %v", C.issue20129, 2)
+ }
+}
diff --git a/libgo/misc/cgo/test/issue20266.go b/libgo/misc/cgo/test/issue20266.go
new file mode 100644
index 00000000000..9f95086cc7b
--- /dev/null
+++ b/libgo/misc/cgo/test/issue20266.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 20266: use -I with a relative path.
+
+package cgotest
+
+/*
+#cgo CFLAGS: -I issue20266 -Iissue20266 -Ddef20266
+#include "issue20266.h"
+*/
+import "C"
+
+import "testing"
+
+func test20266(t *testing.T) {
+ if got, want := C.issue20266, 20266; got != want {
+ t.Errorf("got %d, want %d", got, want)
+ }
+}
diff --git a/libgo/misc/cgo/test/issue20266/issue20266.h b/libgo/misc/cgo/test/issue20266/issue20266.h
new file mode 100644
index 00000000000..8d3258ec6b8
--- /dev/null
+++ b/libgo/misc/cgo/test/issue20266/issue20266.h
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define issue20266 20266
+
+#ifndef def20266
+#error "expected def20266 to be defined"
+#endif
diff --git a/libgo/misc/cgo/test/issue20369.go b/libgo/misc/cgo/test/issue20369.go
new file mode 100644
index 00000000000..37b4b78dfe1
--- /dev/null
+++ b/libgo/misc/cgo/test/issue20369.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cgotest
+
+/*
+#define UINT64_MAX 18446744073709551615ULL
+*/
+import "C"
+import (
+ "math"
+ "testing"
+)
+
+func test20369(t *testing.T) {
+ if C.UINT64_MAX != math.MaxUint64 {
+ t.Fatalf("got %v, want %v", uint64(C.UINT64_MAX), uint64(math.MaxUint64))
+ }
+}
diff --git a/libgo/misc/cgo/test/issue6612.go b/libgo/misc/cgo/test/issue6612.go
index c337f911d90..15a12fab38f 100644
--- a/libgo/misc/cgo/test/issue6612.go
+++ b/libgo/misc/cgo/test/issue6612.go
@@ -74,18 +74,15 @@ func testNaming(t *testing.T) {
}
}
- // This would be nice, but it has never worked.
- /*
- if c := C.myfloat_def; c != 1.5 {
- t.Errorf("C.myint_def = %v, want 1.5", c)
- }
- {
- const c = C.myfloat_def
- if c != 1.5 {
+ if c := C.myfloat_def; c != 1.5 {
+ t.Errorf("C.myint_def = %v, want 1.5", c)
+ }
+ {
+ const c = C.myfloat_def
+ if c != 1.5 {
t.Errorf("C.myint as const = %v, want 1.5", c)
- }
}
- */
+ }
if s := C.mystring_def; s != "hello" {
t.Errorf("C.mystring_def = %q, want %q", s, "hello")
diff --git a/libgo/misc/cgo/testcarchive/carchive_test.go b/libgo/misc/cgo/testcarchive/carchive_test.go
index a2ad9c56418..4865b806a3e 100644
--- a/libgo/misc/cgo/testcarchive/carchive_test.go
+++ b/libgo/misc/cgo/testcarchive/carchive_test.go
@@ -120,8 +120,10 @@ func init() {
func goEnv(key string) string {
out, err := exec.Command("go", "env", key).Output()
if err != nil {
- fmt.Fprintf(os.Stderr, "go env %s failed:\n%s", key, err)
- fmt.Fprintf(os.Stderr, "%s", err.(*exec.ExitError).Stderr)
+ fmt.Fprintf(os.Stderr, "go env %s failed:\n%s\n", key, err)
+ if ee, ok := err.(*exec.ExitError); ok {
+ fmt.Fprintf(os.Stderr, "%s", ee.Stderr)
+ }
os.Exit(2)
}
return strings.TrimSpace(string(out))
@@ -238,15 +240,7 @@ func TestEarlySignalHandler(t *testing.T) {
}
func TestSignalForwarding(t *testing.T) {
- switch GOOS {
- case "darwin":
- switch GOARCH {
- case "arm", "arm64":
- t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
- }
- case "windows":
- t.Skip("skipping signal test on Windows")
- }
+ checkSignalForwardingTest(t)
defer func() {
os.Remove("libgo2.a")
@@ -274,32 +268,19 @@ func TestSignalForwarding(t *testing.T) {
cmd = exec.Command(bin[0], append(bin[1:], "1")...)
out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ expectSignal(t, err, syscall.SIGSEGV)
- if err == nil {
- t.Logf("%s", out)
- t.Error("test program succeeded unexpectedly")
- } else if ee, ok := err.(*exec.ExitError); !ok {
- t.Logf("%s", out)
- t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
- } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
- t.Logf("%s", out)
- t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
- } else if !ws.Signaled() || ws.Signal() != syscall.SIGSEGV {
- t.Logf("%s", out)
- t.Errorf("got %v; expected SIGSEGV", ee)
- }
+ // Test SIGPIPE forwarding
+ cmd = exec.Command(bin[0], append(bin[1:], "3")...)
+
+ out, err = cmd.CombinedOutput()
+ t.Logf("%s", out)
+ expectSignal(t, err, syscall.SIGPIPE)
}
func TestSignalForwardingExternal(t *testing.T) {
- switch GOOS {
- case "darwin":
- switch GOARCH {
- case "arm", "arm64":
- t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
- }
- case "windows":
- t.Skip("skipping signal test on Windows")
- }
+ checkSignalForwardingTest(t)
defer func() {
os.Remove("libgo2.a")
@@ -370,14 +351,7 @@ func TestSignalForwardingExternal(t *testing.T) {
continue
}
- if ee, ok := err.(*exec.ExitError); !ok {
- t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
- } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
- t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
- } else if !ws.Signaled() || ws.Signal() != syscall.SIGSEGV {
- t.Errorf("got %v; expected SIGSEGV", ee)
- } else {
- // We got the error we expected.
+ if expectSignal(t, err, syscall.SIGSEGV) {
return
}
}
@@ -385,6 +359,38 @@ func TestSignalForwardingExternal(t *testing.T) {
t.Errorf("program succeeded unexpectedly %d times", tries)
}
+// checkSignalForwardingTest calls t.Skip if the SignalForwarding test
+// doesn't work on this platform.
+func checkSignalForwardingTest(t *testing.T) {
+ switch GOOS {
+ case "darwin":
+ switch GOARCH {
+ case "arm", "arm64":
+ t.Skipf("skipping on %s/%s; see https://golang.org/issue/13701", GOOS, GOARCH)
+ }
+ case "windows":
+ t.Skip("skipping signal test on Windows")
+ }
+}
+
+// expectSignal checks that err, the exit status of a test program,
+// shows a failure due to a specific signal. Returns whether we found
+// the expected signal.
+func expectSignal(t *testing.T, err error, sig syscall.Signal) bool {
+ if err == nil {
+ t.Error("test program succeeded unexpectedly")
+ } else if ee, ok := err.(*exec.ExitError); !ok {
+ t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
+ } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
+ t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
+ } else if !ws.Signaled() || ws.Signal() != sig {
+ t.Errorf("got %v; expected signal %v", ee, sig)
+ } else {
+ return true
+ }
+ return false
+}
+
func TestOsSignal(t *testing.T) {
switch GOOS {
case "windows":
@@ -585,3 +591,85 @@ func hasDynTag(t *testing.T, f *elf.File, tag elf.DynTag) bool {
}
return false
}
+
+func TestSIGPROF(t *testing.T) {
+ switch GOOS {
+ case "windows", "plan9":
+ t.Skipf("skipping SIGPROF test on %s", GOOS)
+ }
+
+ t.Parallel()
+
+ defer func() {
+ os.Remove("testp6" + exeSuffix)
+ os.Remove("libgo6.a")
+ os.Remove("libgo6.h")
+ }()
+
+ cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo6.a", "libgo6")
+ cmd.Env = gopathEnv
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("%s", out)
+ t.Fatal(err)
+ }
+
+ ccArgs := append(cc, "-o", "testp6"+exeSuffix, "main6.c", "libgo6.a")
+ if runtime.Compiler == "gccgo" {
+ ccArgs = append(ccArgs, "-lgo")
+ }
+ if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
+ t.Logf("%s", out)
+ t.Fatal(err)
+ }
+
+ argv := cmdToRun("./testp6")
+ cmd = exec.Command(argv[0], argv[1:]...)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("%s", out)
+ t.Fatal(err)
+ }
+}
+
+// TestCompileWithoutShared tests that if we compile code without the
+// -shared option, we can put it into an archive. When we use the go
+// tool with -buildmode=c-archive, it passes -shared to the compiler,
+// so we override that. The go tool doesn't work this way, but Bazel
+// will likely do it in the future. And it ought to work. This test
+// was added because at one time it did not work on PPC GNU/Linux.
+func TestCompileWithoutShared(t *testing.T) {
+ // For simplicity, reuse the signal forwarding test.
+ checkSignalForwardingTest(t)
+
+ defer func() {
+ os.Remove("libgo2.a")
+ os.Remove("libgo2.h")
+ }()
+
+ cmd := exec.Command("go", "build", "-buildmode=c-archive", "-gcflags=-shared=false", "-o", "libgo2.a", "libgo2")
+ cmd.Env = gopathEnv
+ t.Log(cmd.Args)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exe := "./testnoshared" + exeSuffix
+ ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a")
+ if runtime.Compiler == "gccgo" {
+ ccArgs = append(ccArgs, "-lgo")
+ }
+ t.Log(ccArgs)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%s", out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(exe)
+
+ binArgs := append(cmdToRun(exe), "3")
+ t.Log(binArgs)
+ out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput()
+ t.Logf("%s", out)
+ expectSignal(t, err, syscall.SIGPIPE)
+}
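
The expectSignal helper consolidates a standard Unix idiom for checking how a child process died: unwrap the error to *exec.ExitError, take its Sys() value as a syscall.WaitStatus, and test Signaled()/Signal(). A minimal standalone sketch of the same check (the diedFrom name and the sh command are illustrative, not part of the patch):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// diedFrom reports whether err records a child killed by signal sig.
func diedFrom(err error, sig syscall.Signal) bool {
	ee, ok := err.(*exec.ExitError)
	if !ok {
		return false
	}
	ws, ok := ee.Sys().(syscall.WaitStatus)
	if !ok {
		return false
	}
	return ws.Signaled() && ws.Signal() == sig
}

func main() {
	// The shell kills itself, so Run returns a signal-bearing *exec.ExitError.
	err := exec.Command("sh", "-c", "kill -PIPE $$").Run()
	fmt.Println(diedFrom(err, syscall.SIGPIPE)) // true on Unix systems
}
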
diff --git a/libgo/misc/cgo/testcarchive/main2.c b/libgo/misc/cgo/testcarchive/main2.c
index 774e014a162..769cd497e6c 100644
--- a/libgo/misc/cgo/testcarchive/main2.c
+++ b/libgo/misc/cgo/testcarchive/main2.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include "libgo2.h"
@@ -26,6 +27,7 @@ static void die(const char* msg) {
}
static volatile sig_atomic_t sigioSeen;
+static volatile sig_atomic_t sigpipeSeen;
// Use up some stack space.
static void recur(int i, char *p) {
@@ -37,6 +39,10 @@ static void recur(int i, char *p) {
}
}
+static void pipeHandler(int signo, siginfo_t* info, void* ctxt) {
+ sigpipeSeen = 1;
+}
+
// Signal handler that uses up more stack space than a goroutine will have.
static void ioHandler(int signo, siginfo_t* info, void* ctxt) {
char a[1024];
@@ -106,6 +112,10 @@ static void init() {
die("sigaction");
}
+ sa.sa_sigaction = pipeHandler;
+ if (sigaction(SIGPIPE, &sa, NULL) < 0) {
+ die("sigaction");
+ }
}
int main(int argc, char** argv) {
@@ -167,7 +177,30 @@ int main(int argc, char** argv) {
nanosleep(&ts, NULL);
i++;
if (i > 5000) {
- fprintf(stderr, "looping too long waiting for signal\n");
+ fprintf(stderr, "looping too long waiting for SIGIO\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ if (verbose) {
+ printf("provoking SIGPIPE\n");
+ }
+
+ GoRaiseSIGPIPE();
+
+ if (verbose) {
+ printf("waiting for sigpipeSeen\n");
+ }
+
+ // Wait until the signal has been delivered.
+ i = 0;
+ while (!sigpipeSeen) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000;
+ nanosleep(&ts, NULL);
+ i++;
+ if (i > 5000) {
+ fprintf(stderr, "looping too long waiting for SIGPIPE\n");
exit(EXIT_FAILURE);
}
}
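
main2.c communicates between the SIGPIPE handler and the wait loop through sigpipeSeen, declared volatile sig_atomic_t — the only object type C guarantees is safe to write from an asynchronous signal handler and read elsewhere. A minimal sketch of that pattern on its own:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t seen;

/* Handlers should only set a flag; most libc calls are not
   async-signal-safe. */
static void handler(int signo) {
	seen = 1;
}

int main(void) {
	signal(SIGUSR1, handler);
	raise(SIGUSR1);
	while (!seen)
		;	/* volatile forces a re-read of seen each iteration */
	puts("signal seen");
	return 0;
}
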
diff --git a/libgo/misc/cgo/testcarchive/main3.c b/libgo/misc/cgo/testcarchive/main3.c
index 0a6c0d3f74e..60a16cf5fc4 100644
--- a/libgo/misc/cgo/testcarchive/main3.c
+++ b/libgo/misc/cgo/testcarchive/main3.c
@@ -11,6 +11,7 @@
#include
#include
#include
+#include
#include "libgo3.h"
@@ -25,6 +26,31 @@ static void ioHandler(int signo, siginfo_t* info, void* ctxt) {
sigioSeen = 1;
}
+// Set up the SIGPIPE signal handler in a high priority constructor, so
+// that it is installed before the Go code starts.
+
+static void pipeHandler(int signo, siginfo_t* info, void* ctxt) {
+ const char *s = "unexpected SIGPIPE\n";
+ write(2, s, strlen(s));
+ exit(EXIT_FAILURE);
+}
+
+static void init(void) __attribute__ ((constructor (200)));
+
+static void init() {
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof sa);
+ sa.sa_sigaction = pipeHandler;
+ if (sigemptyset(&sa.sa_mask) < 0) {
+ die("sigemptyset");
+ }
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPIPE, &sa, NULL) < 0) {
+ die("sigaction");
+ }
+}
+
int main(int argc, char** argv) {
int verbose;
struct sigaction sa;
@@ -34,6 +60,14 @@ int main(int argc, char** argv) {
verbose = argc > 2;
setvbuf(stdout, NULL, _IONBF, 0);
+ if (verbose) {
+ printf("raising SIGPIPE\n");
+ }
+
+ // Test that the Go runtime handles SIGPIPE, even if we installed
+ // a non-default SIGPIPE handler before the runtime initializes.
+ ProvokeSIGPIPE();
+
if (verbose) {
printf("calling sigaction\n");
}
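
The constructor (200) attribute is what lets main3.c install its SIGPIPE handler before the Go runtime initializes: GCC and Clang run constructors in ascending priority order, with values up to 100 reserved for the implementation. A small sketch of the ordering, independent of the patch:

#include <stdio.h>

static void first(void) __attribute__ ((constructor (200)));
static void second(void) __attribute__ ((constructor (300)));

static void first() { puts("priority 200 runs first"); }
static void second() { puts("priority 300 runs second"); }

int main(void) {
	puts("main runs last");
	return 0;
}
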
diff --git a/libgo/misc/cgo/testcarchive/main5.c b/libgo/misc/cgo/testcarchive/main5.c
index 9fadf0801e1..2437bf07c58 100644
--- a/libgo/misc/cgo/testcarchive/main5.c
+++ b/libgo/misc/cgo/testcarchive/main5.c
@@ -68,6 +68,24 @@ int main(int argc, char** argv) {
break;
}
+ case 3: {
+ if (verbose) {
+ printf("attempting SIGPIPE\n");
+ }
+
+ int fd[2];
+ if (pipe(fd) != 0) {
+ printf("pipe(2) failed\n");
+ return 0;
+ }
+ // Close the reading end.
+ close(fd[0]);
+ // Expect that write(2) fails (EPIPE)
+ if (write(fd[1], "some data", 9) != -1) {
+ printf("write(2) unexpectedly succeeded\n");
+ return 0;
+ }
+ }
default:
printf("Unknown test: %d\n", test);
return 0;
diff --git a/libgo/misc/cgo/testcarchive/main6.c b/libgo/misc/cgo/testcarchive/main6.c
new file mode 100644
index 00000000000..2745eb9dc5e
--- /dev/null
+++ b/libgo/misc/cgo/testcarchive/main6.c
@@ -0,0 +1,34 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that using the Go profiler in a C program does not crash.
+
+#include <stddef.h>
+#include <sys/time.h>
+
+#include "libgo6.h"
+
+int main(int argc, char **argv) {
+ struct timeval tvstart, tvnow;
+ int diff;
+
+ gettimeofday(&tvstart, NULL);
+
+ go_start_profile();
+
+ // Busy wait so we have something to profile.
+ // If we just sleep, the profiling signal will never fire.
+ while (1) {
+ gettimeofday(&tvnow, NULL);
+ diff = (tvnow.tv_sec - tvstart.tv_sec) * 1000 * 1000 + (tvnow.tv_usec - tvstart.tv_usec);
+
+ // The profile frequency is 100Hz (one SIGPROF every 10ms),
+ // so we should definitely get a signal within 50 milliseconds.
+ if (diff > 50 * 1000)
+ break;
+ }
+
+ go_stop_profile();
+ return 0;
+}
diff --git a/libgo/misc/cgo/testcarchive/src/libgo2/libgo2.go b/libgo/misc/cgo/testcarchive/src/libgo2/libgo2.go
index fbed493b93f..19c8e1a6dcb 100644
--- a/libgo/misc/cgo/testcarchive/src/libgo2/libgo2.go
+++ b/libgo/misc/cgo/testcarchive/src/libgo2/libgo2.go
@@ -4,6 +4,30 @@
package main
+/*
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+// Raise SIGPIPE.
+static void CRaiseSIGPIPE() {
+ int fds[2];
+
+ if (pipe(fds) == -1) {
+ perror("pipe");
+ exit(EXIT_FAILURE);
+ }
+ // Close the reader end
+ close(fds[0]);
+ // Write to the writer end to provoke a SIGPIPE
+ if (write(fds[1], "some data", 9) != -1) {
+ fprintf(stderr, "write to a closed pipe succeeded\n");
+ exit(EXIT_FAILURE);
+ }
+ close(fds[1]);
+}
+*/
import "C"
import (
@@ -46,5 +70,11 @@ func TestSEGV() {
func Noop() {
}
+// Raise SIGPIPE.
+//export GoRaiseSIGPIPE
+func GoRaiseSIGPIPE() {
+ C.CRaiseSIGPIPE()
+}
+
func main() {
}
diff --git a/libgo/misc/cgo/testcarchive/src/libgo3/libgo3.go b/libgo/misc/cgo/testcarchive/src/libgo3/libgo3.go
index 94e5d21c14a..e276a3c347a 100644
--- a/libgo/misc/cgo/testcarchive/src/libgo3/libgo3.go
+++ b/libgo/misc/cgo/testcarchive/src/libgo3/libgo3.go
@@ -40,5 +40,17 @@ func SawSIGIO() C.int {
}
}
+// ProvokeSIGPIPE provokes a kernel-initiated SIGPIPE.
+//export ProvokeSIGPIPE
+func ProvokeSIGPIPE() {
+ r, w, err := os.Pipe()
+ if err != nil {
+ panic(err)
+ }
+ r.Close()
+ defer w.Close()
+ w.Write([]byte("some data"))
+}
+
func main() {
}
diff --git a/libgo/misc/cgo/testcarchive/src/libgo6/sigprof.go b/libgo/misc/cgo/testcarchive/src/libgo6/sigprof.go
new file mode 100644
index 00000000000..4cb05dc6178
--- /dev/null
+++ b/libgo/misc/cgo/testcarchive/src/libgo6/sigprof.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "io/ioutil"
+ "runtime/pprof"
+)
+
+import "C"
+
+//export go_start_profile
+func go_start_profile() {
+ pprof.StartCPUProfile(ioutil.Discard)
+}
+
+//export go_stop_profile
+func go_stop_profile() {
+ pprof.StopCPUProfile()
+}
+
+func main() {
+}
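
sigprof.go deliberately discards both the profile output and StartCPUProfile's error, which is fine for a crash test. Ordinary code should check the error, since StartCPUProfile fails if a CPU profile is already active. A hedged sketch of the conventional usage:

package main

import (
	"io/ioutil"
	"log"
	"runtime/pprof"
)

func main() {
	// StartCPUProfile returns an error if profiling is already enabled.
	if err := pprof.StartCPUProfile(ioutil.Discard); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()
	// ... workload to profile ...
}
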
diff --git a/libgo/misc/cgo/testcshared/main0.c b/libgo/misc/cgo/testcshared/main0.c
index 1274b8950eb..39ef7e30513 100644
--- a/libgo/misc/cgo/testcshared/main0.c
+++ b/libgo/misc/cgo/testcshared/main0.c
@@ -12,6 +12,7 @@
// int8_t DidInitRun();
// int8_t DidMainRun();
// int32_t FromPkg();
+// uint32_t Divu(uint32_t, uint32_t);
int main(void) {
int8_t ran_init = DidInitRun();
if (!ran_init) {
@@ -30,6 +31,11 @@ int main(void) {
fprintf(stderr, "ERROR: FromPkg=%d, want %d\n", from_pkg, 1024);
return 1;
}
+ uint32_t divu = Divu(2264, 31);
+ if (divu != 73) {
+ fprintf(stderr, "ERROR: Divu(2264, 31)=%d, want %d\n", divu, 73);
+ return 1;
+ }
// test.bash looks for "PASS" to ensure this program has reached the end.
printf("PASS\n");
return 0;
diff --git a/libgo/misc/cgo/testcshared/src/p/p.go b/libgo/misc/cgo/testcshared/src/p/p.go
index 82b445c1210..0f02cf3ce6c 100644
--- a/libgo/misc/cgo/testcshared/src/p/p.go
+++ b/libgo/misc/cgo/testcshared/src/p/p.go
@@ -8,3 +8,6 @@ import "C"
//export FromPkg
func FromPkg() int32 { return 1024 }
+
+//export Divu
+func Divu(a, b uint32) uint32 { return a / b }
diff --git a/libgo/misc/cgo/testcshared/test.bash b/libgo/misc/cgo/testcshared/test.bash
index 0315fb07f57..315a0d40367 100644
--- a/libgo/misc/cgo/testcshared/test.bash
+++ b/libgo/misc/cgo/testcshared/test.bash
@@ -27,7 +27,7 @@ fi
# Directory where cgo headers and outputs will be installed.
# The installation directory format varies depending on the platform.
installdir=pkg/${goos}_${goarch}_testcshared_shared
-if [ "${goos}" == "darwin" ]; then
+if [ "${goos}" = "darwin" ]; then
installdir=pkg/${goos}_${goarch}_testcshared
fi
@@ -40,13 +40,13 @@ function cleanup() {
rm -f testp testp2 testp3 testp4 testp5
rm -rf pkg "${goroot}/${installdir}"
- if [ "$goos" == "android" ]; then
+ if [ "$goos" = "android" ]; then
adb shell rm -rf "$androidpath"
fi
}
trap cleanup EXIT
-if [ "$goos" == "android" ]; then
+if [ "$goos" = "android" ]; then
adb shell mkdir -p "$androidpath"
fi
@@ -69,7 +69,7 @@ function run() {
function binpush() {
bin=${1}
- if [ "$goos" == "android" ]; then
+ if [ "$goos" = "android" ]; then
adb push "$bin" "${androidpath}/${bin}" 2>/dev/null
fi
}
@@ -79,7 +79,7 @@ rm -rf pkg
suffix="-installsuffix testcshared"
libext="so"
-if [ "$goos" == "darwin" ]; then
+if [ "$goos" = "darwin" ]; then
libext="dylib"
fi
@@ -89,7 +89,7 @@ GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go
binpush libgo.$libext
-if [ "$goos" == "linux" ] || [ "$goos" == "android" ] ; then
+if [ "$goos" = "linux" ] || [ "$goos" = "android" ] ; then
if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then
echo "libgo.$libext has TEXTREL set"
exit 1
@@ -97,8 +97,8 @@ if [ "$goos" == "linux" ] || [ "$goos" == "android" ] ; then
fi
GOGCCFLAGS=$(go env GOGCCFLAGS)
-if [ "$goos" == "android" ]; then
- GOGCCFLAGS="${GOGCCFLAGS} -pie"
+if [ "$goos" = "android" ]; then
+ GOGCCFLAGS="${GOGCCFLAGS} -pie -fuse-ld=gold"
fi
status=0
@@ -127,7 +127,7 @@ fi
GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext libgo2
binpush libgo2.$libext
linkflags="-Wl,--no-as-needed"
-if [ "$goos" == "darwin" ]; then
+if [ "$goos" = "darwin" ]; then
linkflags=""
fi
$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext
@@ -139,7 +139,7 @@ if [ "$output" != "PASS" ]; then
fi
# test3: tests main.main is exported on android.
-if [ "$goos" == "android" ]; then
+if [ "$goos" = "android" ]; then
$(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl
binpush testp3
output=$(run ./testp ./libgo.so)
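
The == to = conversions in this script are portability fixes: POSIX test(1) defines only = for string equality, while == is a bash/ksh extension that breaks under dash or a strict /bin/sh. A quick illustration:

#!/bin/sh
goos=linux
if [ "$goos" = "linux" ]; then	# portable under any POSIX shell
	echo "match"
fi
# [ "$goos" == "linux" ] is accepted by bash but fails under dash
# with "unexpected operator".
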
diff --git a/libgo/misc/cgo/testplugin/src/issue19534/main.go b/libgo/misc/cgo/testplugin/src/issue19534/main.go
new file mode 100644
index 00000000000..de263b6f0f2
--- /dev/null
+++ b/libgo/misc/cgo/testplugin/src/issue19534/main.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "plugin"
+
+func main() {
+ p, err := plugin.Open("plugin.so")
+ if err != nil {
+ panic(err)
+ }
+
+ sym, err := p.Lookup("Foo")
+ if err != nil {
+ panic(err)
+ }
+ f := sym.(func() int)
+ if f() != 42 {
+ panic("expected f() == 42")
+ }
+}
diff --git a/libgo/misc/cgo/testplugin/src/issue19534/plugin.go b/libgo/misc/cgo/testplugin/src/issue19534/plugin.go
new file mode 100644
index 00000000000..582d33305c9
--- /dev/null
+++ b/libgo/misc/cgo/testplugin/src/issue19534/plugin.go
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func Foo() int {
+ return 42
+}
diff --git a/libgo/misc/cgo/testplugin/test.bash b/libgo/misc/cgo/testplugin/test.bash
index ab7430acc37..69df5bd2bfa 100644
--- a/libgo/misc/cgo/testplugin/test.bash
+++ b/libgo/misc/cgo/testplugin/test.bash
@@ -16,7 +16,7 @@ goarch=$(go env GOARCH)
function cleanup() {
rm -f plugin*.so unnamed*.so iface*.so
- rm -rf host pkg sub iface issue18676
+ rm -rf host pkg sub iface issue18676 issue19534
}
trap cleanup EXIT
@@ -44,3 +44,9 @@ LD_LIBRARY_PATH=$(pwd) ./iface
GOPATH=$(pwd) go build -buildmode=plugin -o plugin.so src/issue18676/plugin.go
GOPATH=$(pwd) go build -o issue18676 src/issue18676/main.go
timeout 10s ./issue18676
+
+# Test for issue 19534 - that we can load a plugin built in a path with non-alpha
+# characters
+GOPATH=$(pwd) go build -buildmode=plugin -ldflags='-pluginpath=issue.19534' -o plugin.so src/issue19534/plugin.go
+GOPATH=$(pwd) go build -o issue19534 src/issue19534/main.go
+./issue19534
diff --git a/libgo/misc/cgo/testplugin/unnamed1.go b/libgo/misc/cgo/testplugin/unnamed1.go
index 102edaf3e29..5c1df086d76 100644
--- a/libgo/misc/cgo/testplugin/unnamed1.go
+++ b/libgo/misc/cgo/testplugin/unnamed1.go
@@ -9,4 +9,15 @@ import "C"
func FuncInt() int { return 1 }
+// Add a recursive type to check that type equality across plugins doesn't
+// crash. See https://golang.org/issues/19258
+func FuncRecursive() X { return X{} }
+
+type Y struct {
+ X *X
+}
+type X struct {
+ Y Y
+}
+
func main() {}
diff --git a/libgo/misc/cgo/testplugin/unnamed2.go b/libgo/misc/cgo/testplugin/unnamed2.go
index 55070d5e9f7..7ef66109c5c 100644
--- a/libgo/misc/cgo/testplugin/unnamed2.go
+++ b/libgo/misc/cgo/testplugin/unnamed2.go
@@ -9,4 +9,13 @@ import "C"
func FuncInt() int { return 2 }
+func FuncRecursive() X { return X{} }
+
+type Y struct {
+ X *X
+}
+type X struct {
+ Y Y
+}
+
func main() {}
diff --git a/libgo/misc/cgo/testsanitizers/test.bash b/libgo/misc/cgo/testsanitizers/test.bash
index 4da85020d89..9f80af6c507 100644
--- a/libgo/misc/cgo/testsanitizers/test.bash
+++ b/libgo/misc/cgo/testsanitizers/test.bash
@@ -72,12 +72,12 @@ testmsanshared() {
goos=$(go env GOOS)
suffix="-installsuffix testsanitizers"
libext="so"
- if [ "$goos" == "darwin" ]; then
+ if [ "$goos" = "darwin" ]; then
libext="dylib"
fi
go build -msan -buildmode=c-shared $suffix -o ${TMPDIR}/libmsanshared.$libext msan_shared.go
- echo 'int main() { return 0; }' > ${TMPDIR}/testmsanshared.c
+ echo 'int main() { return 0; }' > ${TMPDIR}/testmsanshared.c
$CC $(go env GOGCCFLAGS) -fsanitize=memory -o ${TMPDIR}/testmsanshared ${TMPDIR}/testmsanshared.c ${TMPDIR}/libmsanshared.$libext
if ! LD_LIBRARY_PATH=. ${TMPDIR}/testmsanshared; then
@@ -131,21 +131,43 @@ if test "$msan" = "yes"; then
testmsanshared
fi
+testtsanshared() {
+ goos=$(go env GOOS)
+ suffix="-installsuffix tsan"
+ libext="so"
+ if [ "$goos" = "darwin" ]; then
+ libext="dylib"
+ fi
+ go build -buildmode=c-shared $suffix -o ${TMPDIR}/libtsanshared.$libext tsan_shared.go
+
+ echo 'int main() { return 0; }' > ${TMPDIR}/testtsanshared.c
+ $CC $(go env GOGCCFLAGS) -fsanitize=thread -o ${TMPDIR}/testtsanshared ${TMPDIR}/testtsanshared.c ${TMPDIR}/libtsanshared.$libext
+
+ if ! LD_LIBRARY_PATH=. ${TMPDIR}/testtsanshared; then
+ echo "FAIL: tsan_shared"
+ status=1
+ fi
+ rm -f ${TMPDIR}/{testtsanshared,testtsanshared.c,libtsanshared.$libext}
+}
+
if test "$tsan" = "yes"; then
echo 'int main() { return 0; }' > ${TMPDIR}/testsanitizers$$.c
ok=yes
if ! $CC -fsanitize=thread ${TMPDIR}/testsanitizers$$.c -o ${TMPDIR}/testsanitizers$$ &> ${TMPDIR}/testsanitizers$$.err; then
ok=no
fi
- if grep "unrecognized" ${TMPDIR}/testsanitizers$$.err >& /dev/null; then
+ if grep "unrecognized" ${TMPDIR}/testsanitizers$$.err >& /dev/null; then
echo "skipping tsan tests: -fsanitize=thread not supported"
tsan=no
- elif test "$ok" != "yes"; then
- cat ${TMPDIR}/testsanitizers$$.err
- echo "skipping tsan tests: -fsanitizer=thread build failed"
- tsan=no
- fi
- rm -f ${TMPDIR}/testsanitizers$$*
+ elif test "$ok" != "yes"; then
+ cat ${TMPDIR}/testsanitizers$$.err
+ echo "skipping tsan tests: -fsanitizer=thread build failed"
+ tsan=no
+ elif ! ${TMPDIR}/testsanitizers$$ 2>&1; then
+ echo "skipping tsan tests: running tsan program failed"
+ tsan=no
+ fi
+ rm -f ${TMPDIR}/testsanitizers$$*
fi
# Run a TSAN test.
@@ -177,8 +199,10 @@ if test "$tsan" = "yes"; then
# These tests are only reliable using clang or GCC version 7 or later.
# Otherwise runtime/cgo/libcgo.h can't tell whether TSAN is in use.
ok=false
+ clang=false
if ${CC} --version | grep clang >/dev/null 2>&1; then
ok=true
+ clang=true
else
ver=$($CC -dumpversion)
major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/')
@@ -190,14 +214,19 @@ if test "$tsan" = "yes"; then
fi
if test "$ok" = "true"; then
- # This test requires rebuilding os/user with -fsanitize=thread.
+ # These tests require rebuilding os/user with -fsanitize=thread.
testtsan tsan5.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
-
- # This test requires rebuilding runtime/cgo with -fsanitize=thread.
testtsan tsan6.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
-
- # This test requires rebuilding runtime/cgo with -fsanitize=thread.
testtsan tsan7.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
+
+ # The remaining tests reportedly hang when built with GCC; issue #21196.
+ if test "$clang" = "true"; then
+ testtsan tsan10.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
+ testtsan tsan11.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
+ testtsan tsan12.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan"
+ fi
+
+ testtsanshared
fi
fi
diff --git a/libgo/misc/cgo/testsanitizers/tsan10.go b/libgo/misc/cgo/testsanitizers/tsan10.go
new file mode 100644
index 00000000000..a40f2455537
--- /dev/null
+++ b/libgo/misc/cgo/testsanitizers/tsan10.go
@@ -0,0 +1,31 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This program hung when run under the C/C++ ThreadSanitizer.
+// TSAN defers asynchronous signals until the signaled thread calls into libc.
+// Since the Go runtime makes direct futex syscalls, Go runtime threads could
+// run for an arbitrarily long time without triggering the libc interceptors.
+// See https://golang.org/issue/18717.
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+/*
+#cgo CFLAGS: -g -fsanitize=thread
+#cgo LDFLAGS: -g -fsanitize=thread
+*/
+import "C"
+
+func main() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGUSR1)
+ defer signal.Stop(c)
+ syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)
+ <-c
+}
diff --git a/libgo/misc/cgo/testsanitizers/tsan11.go b/libgo/misc/cgo/testsanitizers/tsan11.go
new file mode 100644
index 00000000000..70ac9c8ae2c
--- /dev/null
+++ b/libgo/misc/cgo/testsanitizers/tsan11.go
@@ -0,0 +1,55 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This program hung when run under the C/C++ ThreadSanitizer. TSAN defers
+// asynchronous signals until the signaled thread calls into libc. The runtime's
+// sysmon goroutine idles itself using direct usleep syscalls, so it could
+// run for an arbitrarily long time without triggering the libc interceptors.
+// See https://golang.org/issue/18717.
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+/*
+#cgo CFLAGS: -g -fsanitize=thread
+#cgo LDFLAGS: -g -fsanitize=thread
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void raise_usr2(int signo) {
+ raise(SIGUSR2);
+}
+
+static void register_handler(int signo) {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_ONSTACK;
+ sa.sa_handler = raise_usr2;
+
+ if (sigaction(SIGUSR1, &sa, NULL) != 0) {
+ perror("failed to register SIGUSR1 handler");
+ exit(EXIT_FAILURE);
+ }
+}
+*/
+import "C"
+
+func main() {
+ ch := make(chan os.Signal)
+ signal.Notify(ch, syscall.SIGUSR2)
+
+ C.register_handler(C.int(syscall.SIGUSR1))
+ syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)
+
+ <-ch
+}
diff --git a/libgo/misc/cgo/testsanitizers/tsan12.go b/libgo/misc/cgo/testsanitizers/tsan12.go
new file mode 100644
index 00000000000..3e767eee1f8
--- /dev/null
+++ b/libgo/misc/cgo/testsanitizers/tsan12.go
@@ -0,0 +1,35 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This program hung when run under the C/C++ ThreadSanitizer. TSAN installs a
+// libc interceptor that writes signal handlers to a global variable within the
+// TSAN runtime instead of making a sigaction system call. A bug in
+// syscall.runtime_AfterForkInChild corrupted TSAN's signal forwarding table
+// during calls to (*os/exec.Cmd).Run, causing the parent process to fail to
+// invoke signal handlers.
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/signal"
+ "syscall"
+)
+
+import "C"
+
+func main() {
+ ch := make(chan os.Signal)
+ signal.Notify(ch, syscall.SIGUSR1)
+
+ if err := exec.Command("true").Run(); err != nil {
+ fmt.Fprintf(os.Stderr, "Unexpected error from `true`: %v", err)
+ os.Exit(1)
+ }
+
+ syscall.Kill(syscall.Getpid(), syscall.SIGUSR1)
+ <-ch
+}
diff --git a/libgo/misc/cgo/testsanitizers/tsan_shared.go b/libgo/misc/cgo/testsanitizers/tsan_shared.go
new file mode 100644
index 00000000000..55ff67ecbaf
--- /dev/null
+++ b/libgo/misc/cgo/testsanitizers/tsan_shared.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// This program failed with SIGSEGV when run under the C/C++ ThreadSanitizer.
+// The Go runtime had re-registered the C handler with the wrong flags due to a
+// typo, resulting in null pointers being passed for the info and context
+// parameters to the handler.
+
+/*
+#cgo CFLAGS: -fsanitize=thread
+#cgo LDFLAGS: -fsanitize=thread
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ucontext.h>
+
+void check_params(int signo, siginfo_t *info, void *context) {
+ ucontext_t* uc = (ucontext_t*)(context);
+
+ if (info->si_signo != signo) {
+ fprintf(stderr, "info->si_signo does not match signo.\n");
+ abort();
+ }
+
+ if (uc->uc_stack.ss_size == 0) {
+ fprintf(stderr, "uc_stack has size 0.\n");
+ abort();
+ }
+}
+
+
+// Set up the signal handler in a high priority constructor, so
+// that it is installed before the Go code starts.
+
+static void register_handler(void) __attribute__ ((constructor (200)));
+
+static void register_handler() {
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ sa.sa_sigaction = check_params;
+
+ if (sigaction(SIGUSR1, &sa, NULL) != 0) {
+ perror("failed to register SIGUSR1 handler");
+ exit(EXIT_FAILURE);
+ }
+}
+*/
+import "C"
+
+import "syscall"
+
+func init() {
+ C.raise(C.int(syscall.SIGUSR1))
+}
+
+func main() {}
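
The typo tsan_shared.go guards against came down to the SA_SIGINFO flag: only when it is set does the kernel invoke the three-argument sa_sigaction entry point with valid siginfo_t and ucontext_t pointers; re-registering without it leaves those arguments undefined. A standalone sketch of a correct SA_SIGINFO registration (a demo, so it prints from the handler even though printf is not async-signal-safe):

#include <signal.h>
#include <stdio.h>
#include <string.h>

static void handler(int signo, siginfo_t *info, void *context) {
	/* info and context are valid only because SA_SIGINFO was set. */
	printf("signo=%d si_signo=%d context=%p\n", signo, info->si_signo, context);
}

int main(void) {
	struct sigaction sa;
	memset(&sa, 0, sizeof sa);
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO;	/* selects sa_sigaction over sa_handler */
	sa.sa_sigaction = handler;
	if (sigaction(SIGUSR1, &sa, NULL) != 0) {
		perror("sigaction");
		return 1;
	}
	raise(SIGUSR1);
	return 0;
}
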
diff --git a/libgo/misc/cgo/testshared/shared_test.go b/libgo/misc/cgo/testshared/shared_test.go
index f0766e511ec..9e682a2fb59 100644
--- a/libgo/misc/cgo/testshared/shared_test.go
+++ b/libgo/misc/cgo/testshared/shared_test.go
@@ -10,7 +10,6 @@ import (
"debug/elf"
"encoding/binary"
"errors"
- "flag"
"fmt"
"go/build"
"io"
@@ -166,7 +165,6 @@ func TestMain(m *testing.M) {
// That won't work if GOBIN is set.
os.Unsetenv("GOBIN")
- flag.Parse()
exitCode, err := testMain(m)
if err != nil {
log.Fatal(err)
@@ -402,6 +400,12 @@ func TestTrivialExecutablePIE(t *testing.T) {
AssertHasRPath(t, "./trivial.pie", gorootInstallDir)
}
+// Build a division test program and check it runs.
+func TestDivisionExecutable(t *testing.T) {
+ goCmd(t, "install", "-linkshared", "division")
+ run(t, "division executable", "./bin/division")
+}
+
// Build an executable that uses cgo linked against the shared runtime and check it
// runs.
func TestCgoExecutable(t *testing.T) {
@@ -759,6 +763,13 @@ func appendFile(path, content string) {
}
}
+func writeFile(path, content string) {
+ err := ioutil.WriteFile(path, []byte(content), 0644)
+ if err != nil {
+ log.Fatalf("ioutil.WriteFile failed: %v", err)
+ }
+}
+
func TestABIChecking(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase")
goCmd(t, "install", "-linkshared", "exe")
@@ -797,9 +808,10 @@ func TestABIChecking(t *testing.T) {
run(t, "rebuilt exe", "./bin/exe")
// If we make a change which does not break ABI (such as adding an unexported
- // function) and rebuild libdepBase.so, exe still works.
+ // function) and rebuild libdepBase.so, exe still works, even if the new
+ // function is in a file by itself.
resetFileStamps()
- appendFile("src/depBase/dep.go", "func noABIBreak() {}\n")
+ writeFile("src/depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n")
goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase")
run(t, "after non-ABI breaking change", "./bin/exe")
}
diff --git a/libgo/misc/cgo/testshared/src/division/division.go b/libgo/misc/cgo/testshared/src/division/division.go
new file mode 100644
index 00000000000..bb5fc984602
--- /dev/null
+++ b/libgo/misc/cgo/testshared/src/division/division.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+//go:noinline
+func div(x, y uint32) uint32 {
+ return x / y
+}
+
+func main() {
+ a := div(97, 11)
+ if a != 8 {
+ panic("FAIL")
+ }
+}
diff --git a/libgo/runtime/go-now.c b/libgo/runtime/go-now.c
index d24e6ee76af..13e8f517722 100644
--- a/libgo/runtime/go-now.c
+++ b/libgo/runtime/go-now.c
@@ -8,13 +8,22 @@
#include "runtime.h"
-// Return current time. This is the implementation of time.now().
+// Return current time. This is the implementation of time.walltime().
-struct time_now_ret
+struct walltime_ret
+{
+ int64_t sec;
+ int32_t nsec;
+};
+
+struct walltime_ret now() __asm__ (GOSYM_PREFIX "runtime.walltime")
+ __attribute__ ((no_split_stack));
+
+struct walltime_ret
now()
{
struct timespec ts;
- struct time_now_ret ret;
+ struct walltime_ret ret;
clock_gettime (CLOCK_REALTIME, &ts);
ret.sec = ts.tv_sec;
diff --git a/libgo/runtime/proc.c b/libgo/runtime/proc.c
index 3e3437e6a47..e591824b140 100644
--- a/libgo/runtime/proc.c
+++ b/libgo/runtime/proc.c
@@ -375,6 +375,8 @@ bool runtime_isarchive;
extern void kickoff(void)
__asm__(GOSYM_PREFIX "runtime.kickoff");
+extern void minit(void)
+ __asm__(GOSYM_PREFIX "runtime.minit");
extern void mstart1(void)
__asm__(GOSYM_PREFIX "runtime.mstart1");
extern void stopm(void)
@@ -476,6 +478,10 @@ runtime_mstart(void *arg)
gp->entry = nil;
gp->param = nil;
+ // We have to call minit before we call getcontext,
+ // because getcontext will copy the signal mask.
+ minit();
+
initcontext();
// Record top of stack for use by mcall.
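
The comment added to runtime_mstart is the heart of this hunk: getcontext snapshots the calling thread's signal mask into uc_sigmask, so any mask minit sets up afterwards would be silently reverted whenever that context is resumed. A small sketch showing the snapshot behavior (Linux; the ucontext functions are obsolescent in POSIX but still provided by glibc):

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

int main(void) {
	ucontext_t uc;
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* change the mask first... */

	getcontext(&uc);	/* ...so the snapshot records it */

	/* Prints 1: the blocked SIGUSR1 was captured in uc_sigmask. */
	printf("%d\n", sigismember(&uc.uc_sigmask, SIGUSR1));
	return 0;
}
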
diff --git a/libgo/runtime/runtime.h b/libgo/runtime/runtime.h
index 3324038a57d..dd5a958888f 100644
--- a/libgo/runtime/runtime.h
+++ b/libgo/runtime/runtime.h
@@ -193,16 +193,6 @@ enum {
};
void runtime_hashinit(void);
-void runtime_traceback(int32)
- __asm__ (GOSYM_PREFIX "runtime.traceback");
-void runtime_tracebackothers(G*)
- __asm__ (GOSYM_PREFIX "runtime.tracebackothers");
-enum
-{
- // The maximum number of frames we print for a traceback
- TracebackMaxFrames = 100,
-};
-
/*
* external data
*/
@@ -217,7 +207,6 @@ extern M* runtime_getallm(void)
extern Sched* runtime_sched;
extern uint32 runtime_panicking(void)
__asm__ (GOSYM_PREFIX "runtime.getPanicking");
-extern struct debugVars runtime_debug;
extern bool runtime_isstarted;
extern bool runtime_isarchive;
@@ -253,10 +242,6 @@ void runtime_schedinit(void)
__asm__ (GOSYM_PREFIX "runtime.schedinit");
void runtime_initsig(bool)
__asm__ (GOSYM_PREFIX "runtime.initsig");
-void runtime_goroutineheader(G*)
- __asm__ (GOSYM_PREFIX "runtime.goroutineheader");
-void runtime_printtrace(Slice, G*)
- __asm__ (GOSYM_PREFIX "runtime.printtrace");
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
@@ -327,8 +312,6 @@ G* __go_go(void (*pfn)(void*), void*);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
int64 runtime_nanotime(void) // monotonic time
__asm__(GOSYM_PREFIX "runtime.nanotime");
-int64 runtime_unixnanotime(void) // real time, can skip
- __asm__ (GOSYM_PREFIX "runtime.unixnanotime");
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void)
__asm__ (GOSYM_PREFIX "runtime.startpanic");
@@ -343,22 +326,11 @@ void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
G* runtime_netpoll(bool)
__asm__ (GOSYM_PREFIX "runtime.netpoll");
-void runtime_crash(void)
- __asm__ (GOSYM_PREFIX "runtime.crash");
void runtime_parsedebugvars(void)
__asm__(GOSYM_PREFIX "runtime.parsedebugvars");
void _rt0_go(void);
G* runtime_timejump(void);
-void runtime_callStopTheWorldWithSema(void)
- __asm__(GOSYM_PREFIX "runtime.callStopTheWorldWithSema");
-void runtime_callStartTheWorldWithSema(void)
- __asm__(GOSYM_PREFIX "runtime.callStartTheWorldWithSema");
-void runtime_acquireWorldsema(void)
- __asm__(GOSYM_PREFIX "runtime.acquireWorldsema");
-void runtime_releaseWorldsema(void)
- __asm__(GOSYM_PREFIX "runtime.releaseWorldsema");
-
/*
* mutual exclusion locks. in the uncontended case,
* as fast as spin locks (just a few user-level instructions),
@@ -403,17 +375,6 @@ bool runtime_notetsleep(Note*, int64) // false - timeout
bool runtime_notetsleepg(Note*, int64) // false - timeout
__asm__ (GOSYM_PREFIX "runtime.notetsleepg");
-/*
- * Lock-free stack.
- * Initialize uint64 head to 0, compare with 0 to test for emptiness.
- * The stack does not keep pointers to nodes,
- * so they can be garbage collected if there are no other pointers to nodes.
- */
-void runtime_lfstackpush(uint64 *head, LFNode *node)
- __asm__ (GOSYM_PREFIX "runtime.lfstackpush");
-void* runtime_lfstackpop(uint64 *head)
- __asm__ (GOSYM_PREFIX "runtime.lfstackpop");
-
/*
* low level C-called
*/
@@ -454,9 +415,6 @@ void runtime_procyield(uint32)
void runtime_osyield(void)
__asm__(GOSYM_PREFIX "runtime.osyield");
-void runtime_printcreatedby(G*)
- __asm__(GOSYM_PREFIX "runtime.printcreatedby");
-
uintptr runtime_memlimit(void);
#define ISNAN(f) __builtin_isnan(f)
@@ -491,15 +449,6 @@ void runtime_badsignal(int);
Defer* runtime_newdefer(void);
void runtime_freedefer(Defer*);
-struct time_now_ret
-{
- int64_t sec;
- int32_t nsec;
-};
-
-struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
- __attribute__ ((no_split_stack));
-
extern void _cgo_wait_runtime_init_done (void);
extern void _cgo_notify_runtime_init_done (void)
__asm__ (GOSYM_PREFIX "runtime._cgo_notify_runtime_init_done");
diff --git a/libgo/runtime/runtime_c.c b/libgo/runtime/runtime_c.c
index 6da35210440..9a6672d602e 100644
--- a/libgo/runtime/runtime_c.c
+++ b/libgo/runtime/runtime_c.c
@@ -10,6 +10,10 @@
#include
#endif
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#include "config.h"
#include "runtime.h"
@@ -81,13 +85,6 @@ runtime_signalstack(byte *p, uintptr n)
*(int *)0xf1 = 0xf1;
}
-struct debugVars runtime_debug;
-
-void
-runtime_setdebug(struct debugVars* d) {
- runtime_debug = *d;
-}
-
int32 go_open(char *, int32, int32)
__asm__ (GOSYM_PREFIX "runtime.open");
@@ -184,3 +181,18 @@ publicationBarrier()
{
__atomic_thread_fence(__ATOMIC_RELEASE);
}
+
+#ifdef __linux__
+
+/* Currently sbrk0 is only called on GNU/Linux. */
+
+uintptr sbrk0(void)
+ __asm__ (GOSYM_PREFIX "runtime.sbrk0");
+
+uintptr
+sbrk0()
+{
+ return syscall(SYS_brk, (uintptr)(0));
+}
+
+#endif /* __linux__ */
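
sbrk0 relies on a Linux detail: brk with an argument of 0 is never a valid request, so the kernel leaves the break alone and the raw system call returns the current program break. A sketch of the same query in a standalone program (assumes the glibc syscall(2) wrapper):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
	/* brk(0) changes nothing; the raw syscall returns the current break. */
	unsigned long brk0 = syscall(SYS_brk, 0);
	printf("program break: %#lx\n", brk0);
	return 0;
}
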
diff --git a/libgo/testsuite/gotest b/libgo/testsuite/gotest
index ad3a485d14f..83f78d4d9b9 100755
--- a/libgo/testsuite/gotest
+++ b/libgo/testsuite/gotest
@@ -348,18 +348,18 @@ x)
fi
match=false
;;
- $goos | $goarch | cgo)
+ $goos | $goarch | cgo | go1.[0-9])
match=true
;;
- "!"$goos | "!"$goarch | "!cgo")
+ "!"$goos | "!"$goarch | "!cgo" | "!"go1.[0-9])
;;
*,*)
cmatch=true
for ctag in `echo $tag | sed -e 's/,/ /g'`; do
case $ctag in
- $goos | $goarch | cgo)
+ $goos | $goarch | cgo | go1.[0-9])
;;
- "!"$goos | "!"$goarch | "!cgo")
+ "!"$goos | "!"$goarch | "!cgo" | "!"go1.[0-9])
cmatch=false
;;
"!"*)