-0950e905939f88c1421f8667ac4dc9e14528471c
+ceb1e4f5614b4772eed44f9cf57780e52f44753e
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
-60f14fddfee107dedd76c0be6b422a3d8ccc841a
+cc8838d645b2b7026c1f3aaceb011775c5ca3a08
The first line of this file holds the git revision number of the
last merge done from the master library sources.
# Force them to be built.
noinst_DATA = \
golang.org/x/net/nettest.gox \
+ internal/cfg.gox \
internal/testenv.gox \
internal/trace.gox \
net/internal/socktest.gox \
# Some packages are only needed for tests, so unlike the other
# internal packages nothing will explicitly depend on them.
# Force them to be built.
-noinst_DATA = golang.org/x/net/nettest.gox internal/testenv.gox \
- internal/trace.gox net/internal/socktest.gox \
- os/signal/internal/pty.gox runtime/pprof/internal/profile.gox \
- zdefaultcc.go
+noinst_DATA = golang.org/x/net/nettest.gox internal/cfg.gox \
+ internal/testenv.gox internal/trace.gox \
+ net/internal/socktest.gox os/signal/internal/pty.gox \
+ runtime/pprof/internal/profile.gox zdefaultcc.go
@LIBGO_IS_RTEMS_FALSE@rtems_task_variable_add_file =
@LIBGO_IS_RTEMS_TRUE@rtems_task_variable_add_file = runtime/rtems-task-variable-add.c
runtime_files = \
index/suffixarray
internal/cpu
internal/fmtsort
-internal/oserror
internal/poll
internal/reflectlite
internal/singleflight
params := name.FuncType.Params
args := call.Call.Args
- // Avoid a crash if the number of arguments is
- // less than the number of parameters.
+ // Avoid a crash if the number of arguments doesn't match
+ // the number of parameters.
// This will be caught when the generated file is compiled.
- if len(args) < len(params) {
+ if len(args) != len(params) {
return "", false
}
if strings.HasPrefix(t.Name, "_Ctype_") {
return true
}
+ case *ast.ParenExpr:
+ return p.isType(t.X)
case *ast.StarExpr:
return p.isType(t.X)
case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType,
}
}
+// elfImportedSymbols is like elf.File.ImportedSymbols, but it
+// includes weak symbols.
+//
+// A bug in some versions of LLD (at least LLD 8) causes it to emit
+// several pthreads symbols as weak, but we need to import those. See
+// issue #31912 or https://bugs.llvm.org/show_bug.cgi?id=42442.
+//
+// When doing external linking, we hand everything off to the external
+// linker, which will create its own dynamic symbol tables. For
+// internal linking, this may turn weak imports into strong imports,
+// which could cause dynamic linking to fail if a symbol really isn't
+// defined. However, the standard library depends on everything it
+// imports, and this is the primary use of dynamic symbol tables with
+// internal linking.
+func elfImportedSymbols(f *elf.File) []elf.ImportedSymbol {
+ syms, _ := f.DynamicSymbols()
+ var imports []elf.ImportedSymbol
+ for _, s := range syms {
+ if (elf.ST_BIND(s.Info) == elf.STB_GLOBAL || elf.ST_BIND(s.Info) == elf.STB_WEAK) && s.Section == elf.SHN_UNDEF {
+ imports = append(imports, elf.ImportedSymbol{
+ Name: s.Name,
+ Library: s.Library,
+ Version: s.Version,
+ })
+ }
+ }
+ return imports
+}
+
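
As an aside for readers unfamiliar with the debug/elf API used above, the following standalone sketch (illustrative only, not part of this change; the program name and usage are hypothetical) applies the same filter as elfImportedSymbols to an arbitrary binary and prints its undefined GLOBAL and WEAK dynamic symbols:

	package main

	import (
		"debug/elf"
		"fmt"
		"log"
		"os"
	)

	func main() {
		if len(os.Args) != 2 {
			log.Fatal("usage: listimports <elf-binary>")
		}
		f, err := elf.Open(os.Args[1])
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		syms, err := f.DynamicSymbols()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range syms {
			// Keep both strong (GLOBAL) and weak undefined symbols,
			// matching the filter in elfImportedSymbols.
			bind := elf.ST_BIND(s.Info)
			if (bind == elf.STB_GLOBAL || bind == elf.STB_WEAK) && s.Section == elf.SHN_UNDEF {
				fmt.Printf("%s %s %s (%v)\n", s.Name, s.Library, s.Version, bind)
			}
		}
	}
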
func dynimport(obj string) {
stdout := os.Stdout
if *dynout != "" {
}
}
}
- sym, _ := f.ImportedSymbols()
+ sym := elfImportedSymbols(f)
for _, s := range sym {
targ := s.Name
if s.Version != "" {
// If the arguments to build are a list of .go files from a single directory,
// build treats them as a list of source files specifying a single package.
//
+// When compiling packages, build ignores files that end in '_test.go'.
+//
// When compiling a single main package, build writes
// the resulting executable to an output file named after
// the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
// build compiles the packages but discards the resulting object,
// serving only as a check that the packages can be built.
//
-// When compiling packages, build ignores files that end in '_test.go'.
-//
// The -o flag forces build to write the resulting executable or object
// to the named output file or directory, instead of the default behavior described
// in the last two paragraphs. If the named output is a directory that exists,
// The first step is to resolve which dependencies to add.
//
// For each named package or package pattern, get must decide which version of
-// the corresponding module to use. By default, get chooses the latest tagged
+// the corresponding module to use. By default, get looks up the latest tagged
// release version, such as v0.4.5 or v1.2.3. If there are no tagged release
-// versions, get chooses the latest tagged pre-release version, such as
-// v0.0.1-pre1. If there are no tagged versions at all, get chooses the latest
-// known commit.
+// versions, get looks up the latest tagged pre-release version, such as
+// v0.0.1-pre1. If there are no tagged versions at all, get looks up the latest
+// known commit. If the module is not already required at a later version
+// (for example, a pre-release newer than the latest release), get will use
+// the version it looked up. Otherwise, get will use the currently
+// required version.
//
// This default version selection can be overridden by adding an @version
// suffix to the package argument, as in 'go get golang.org/x/text@v0.3.0'.
+// The version may be a prefix: @v1 denotes the latest available version starting
+// with v1. See 'go help modules' under the heading 'Module queries' for the
+// full query syntax.
+//
// For modules stored in source control repositories, the version suffix can
// also be a commit hash, branch identifier, or other syntax known to the
-// source control system, as in 'go get golang.org/x/text@master'.
+// source control system, as in 'go get golang.org/x/text@master'. Note that
+// branches with names that overlap with other module query syntax cannot be
+// selected explicitly. For example, the suffix @v2 means the latest version
+// starting with v2, not the branch named v2.
//
// If a module under consideration is already a dependency of the current
// development module, then get will update the required version.
// depending on it as needed.
//
// The version suffix @latest explicitly requests the latest minor release of the
-// given path. The suffix @patch requests the latest patch release: if the path
-// is already in the build list, the selected version will have the same minor
-// version. If the path is not already in the build list, @patch is equivalent
-// to @latest. Neither @latest nor @patch will cause 'go get' to downgrade a module
-// in the build list if it is required at a newer pre-release version that is
-// newer than the latest released version.
+// module named by the given path. The suffix @upgrade is like @latest but
+// will not downgrade a module if it is already required at a revision or
+// pre-release version newer than the latest released version. The suffix
+// @patch requests the latest patch release: the latest released version
+// with the same major and minor version numbers as the currently required
+// version. Like @upgrade, @patch will not downgrade a module already required
+// at a newer version. If the path is not already required, @upgrade and @patch
+// are equivalent to @latest.
//
// Although get defaults to using the latest version of the module containing
// a named package, it does not use the latest version of that module's
// Dir string // absolute path to cached source root directory
// Sum string // checksum for path, version (as in go.sum)
// GoModSum string // checksum for go.mod (as in go.sum)
+// Latest bool // would @latest resolve to this version?
// }
//
// See 'go help modules' for more about module queries.
// GOCACHE
// The directory where the go command will store cached
// information for reuse in future builds.
+// GODEBUG
+// Enable various debugging facilities. See 'go doc runtime'
+// for details.
// GOENV
// The location of the Go environment configuration file.
// Cannot be set using 'go env -w'.
// The string "latest" matches the latest available tagged version,
// or else the underlying source repository's latest untagged revision.
//
-// A revision identifier for the underlying source repository,
-// such as a commit hash prefix, revision tag, or branch name,
-// selects that specific code revision. If the revision is
-// also tagged with a semantic version, the query evaluates to
-// that semantic version. Otherwise the query evaluates to a
-// pseudo-version for the commit.
+// The string "upgrade" is like "latest", but if the module is
+// currently required at a later version than the version "latest"
+// would select (for example, a newer pre-release version), "upgrade"
+// will select the later version instead.
+//
+// The string "patch" matches the latest available tagged version
+// of a module with the same major and minor version numbers as the
+// currently required version. If no version is currently required,
+// "patch" is equivalent to "latest".
+//
+// A revision identifier for the underlying source repository, such as
+// a commit hash prefix, revision tag, or branch name, selects that
+// specific code revision. If the revision is also tagged with a
+// semantic version, the query evaluates to that semantic version.
+// Otherwise the query evaluates to a pseudo-version for the commit.
+// Note that branches and tags with names that are matched by other
+// query syntax cannot be selected this way. For example, the query
+// "v2" means the latest version starting with "v2", not the branch
+// named "v2".
//
// All queries prefer release versions to pre-release versions.
// For example, "<v1.2.3" will prefer to return "v1.2.2"
// GOSUMDB="sum.golang.org+<publickey>"
// GOSUMDB="sum.golang.org+<publickey> https://sum.golang.org"
//
-// The go command knows the public key of sum.golang.org; use of any other
-// database requires giving the public key explicitly. The URL defaults to
-// "https://" followed by the database name.
+// The go command knows the public key of sum.golang.org, and also that the name
+// sum.golang.google.cn (available inside mainland China) connects to the
+// sum.golang.org checksum database; use of any other database requires giving
+// the public key explicitly.
+// The URL defaults to "https://" followed by the database name.
//
// GOSUMDB defaults to "sum.golang.org", the Go checksum database run by Google.
// See https://sum.golang.org/privacy for the service's privacy policy.
tg.run("test", "testdata/standalone_test.go")
}
+func TestGoTestTestMainSeesTestingFlags(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.run("test", "testdata/standalone_testmain_flag_test.go")
+}
+
// Issue 22388
func TestGoTestMainWithWrongSignature(t *testing.T) {
tg := testgo(t)
i++
}
tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
- if err != nil || size < 0 {
+ if err != nil || tm < 0 {
return missing()
}
// in verify mode we are double-checking that the cache entries
// are entirely reproducible. As just noted, this may be unrealistic
// in some cases but the check is also useful for shaking out real bugs.
- entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()))
+ entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
if verify && allowVerify {
old, err := c.get(id)
if err == nil && (old.OutputID != out || old.Size != size) {
}
}
file := c.fileName(id, "a")
- if err := ioutil.WriteFile(file, entry, 0666); err != nil {
+
+ // Copy file to cache directory.
+ mode := os.O_WRONLY | os.O_CREATE
+ f, err := os.OpenFile(file, mode, 0666)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(entry)
+ if err == nil {
+ // Truncate the file only *after* writing it.
+ // (This should be a no-op, but truncate just in case of previous corruption.)
+ //
+ // This differs from ioutil.WriteFile, which truncates to 0 *before* writing
+ // via os.O_TRUNC. Truncating only after writing ensures that a second write
+ // of the same content to the same file is idempotent, and does not — even
+ // temporarily! — undo the effect of the first write.
+ err = f.Truncate(int64(len(entry)))
+ }
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ if err != nil {
// TODO(bcmills): This Remove potentially races with another go command writing to file.
// Can we eliminate it?
os.Remove(file)
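
To make the ordering above easier to see in isolation, here is a hypothetical standalone helper (not part of this change) that applies the same write-then-truncate sequence; because the file is never truncated before the new bytes land, rewriting identical content is idempotent and never exposes a shortened file to concurrent readers:

	package main

	import (
		"log"
		"os"
	)

	// writeIdempotent writes content to name, creating the file if needed,
	// and truncates only after the write. The truncate is normally a no-op;
	// it only matters if the file previously held longer (corrupt) content.
	// Hypothetical sketch mirroring the cache-entry write above.
	func writeIdempotent(name, content string) error {
		f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0666)
		if err != nil {
			return err
		}
		_, err = f.WriteString(content)
		if err == nil {
			err = f.Truncate(int64(len(content)))
		}
		if closeErr := f.Close(); err == nil {
			err = closeErr
		}
		return err
	}

	func main() {
		if err := writeIdempotent("cache-entry.tmp", "hello\n"); err != nil {
			log.Fatal(err)
		}
	}
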
"bytes"
"fmt"
"go/build"
+ "internal/cfg"
"io/ioutil"
"os"
"path/filepath"
// CanGetenv reports whether key is a valid go/env configuration key.
func CanGetenv(key string) bool {
- return strings.Contains(knownEnv, "\t"+key+"\n")
+ return strings.Contains(cfg.KnownEnv, "\t"+key+"\n")
}
-var knownEnv = `
- AR
- CC
- CGO_CFLAGS
- CGO_CFLAGS_ALLOW
- CGO_CFLAGS_DISALLOW
- CGO_CPPFLAGS
- CGO_CPPFLAGS_ALLOW
- CGO_CPPFLAGS_DISALLOW
- CGO_CXXFLAGS
- CGO_CXXFLAGS_ALLOW
- CGO_CXXFLAGS_DISALLOW
- CGO_ENABLED
- CGO_FFLAGS
- CGO_FFLAGS_ALLOW
- CGO_FFLAGS_DISALLOW
- CGO_LDFLAGS
- CGO_LDFLAGS_ALLOW
- CGO_LDFLAGS_DISALLOW
- CXX
- FC
- GCCGO
- GO111MODULE
- GO386
- GOARCH
- GOARM
- GOBIN
- GOCACHE
- GOENV
- GOEXE
- GOFLAGS
- GOGCCFLAGS
- GOHOSTARCH
- GOHOSTOS
- GOMIPS
- GOMIPS64
- GONOPROXY
- GONOSUMDB
- GOOS
- GOPATH
- GOPPC64
- GOPRIVATE
- GOPROXY
- GOROOT
- GOSUMDB
- GOTMPDIR
- GOTOOLDIR
- GOWASM
- GO_EXTLINK_ENABLED
- PKG_CONFIG
-`
-
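
The lookup in CanGetenv depends only on that tab-and-newline layout, which now lives in internal/cfg as cfg.KnownEnv; a minimal self-contained sketch of the same membership test, using a hypothetical three-entry list:

	package main

	import (
		"fmt"
		"strings"
	)

	// knownKeys mimics the layout of the known-environment list above:
	// each key on its own line, preceded by a tab. (Hypothetical subset.)
	const knownKeys = "\n\tGOARCH\n\tGOOS\n\tGOPATH\n"

	// canGetenv reports whether key is in knownKeys, using the same
	// "\t"+key+"\n" containment check as CanGetenv.
	func canGetenv(key string) bool {
		return strings.Contains(knownKeys, "\t"+key+"\n")
	}

	func main() {
		fmt.Println(canGetenv("GOOS")) // true
		fmt.Println(canGetenv("GOSS")) // false: "GOSS" is not a known key
	}
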
var (
GOROOT = BuildContext.GOROOT
GOBIN = Getenv("GOBIN")
// See golang.org/issue/9032.
tagSyncDefault: []string{"submodule update --init --recursive"},
- scheme: []string{"git", "https", "http", "git+ssh", "ssh"},
- pingCmd: "ls-remote -- {scheme}://{repo}",
+ scheme: []string{"git", "https", "http", "git+ssh", "ssh"},
+
+ // Leave out the '--' separator in the ls-remote command: git 2.7.4 does not
+ // support such a separator for that command, and this use should be safe
+ // without it because the {scheme} value comes from the predefined list above.
+ // See golang.org/issue/33836.
+ pingCmd: "ls-remote {scheme}://{repo}",
+
remoteRepo: gitRemoteRepo,
}
// helpSuccess is the help command using as many args as possible that would succeed.
helpSuccess := "go help"
if i > 0 {
- helpSuccess = " " + strings.Join(args[:i], " ")
+ helpSuccess += " " + strings.Join(args[:i], " ")
}
fmt.Fprintf(os.Stderr, "go help %s: unknown help topic. Run '%s'.\n", strings.Join(args, " "), helpSuccess)
base.SetExitStatus(2) // failed at 'go help cmd'
GOCACHE
The directory where the go command will store cached
information for reuse in future builds.
+ GODEBUG
+ Enable various debugging facilities. See 'go doc runtime'
+ for details.
GOENV
The location of the Go environment configuration file.
Cannot be set using 'go env -w'.
if !*listE {
for _, m := range mods {
if m.Error != nil {
- base.Errorf("go list -m %s: %v", m.Path, m.Error.Err)
+ base.Errorf("go list -m: %v", m.Error.Err)
}
}
base.ExitIfErrors()
}
if pmain != nil {
pkgs = append(pkgs, pmain)
- data := pmain.Internal.TestmainGo
+ data := *pmain.Internal.TestmainGo
h := cache.NewHash("testmain")
h.Write([]byte("testmain\n"))
h.Write(data)
Doc string `json:",omitempty"` // package documentation string
Target string `json:",omitempty"` // installed target for this package (may be executable)
Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared)
- Root string `json:",omitempty"` // Go root or Go path dir containing this package
+ Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package
ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory
ForTest string `json:",omitempty"` // package is only for use in named test
Export string `json:",omitempty"` // file containing export data (set by go list -export)
OmitDebug bool // tell linker not to write debug information
GobinSubdir bool // install target would be subdir of GOBIN
BuildInfo string // add this info to package main
- TestinginitGo []byte // content for _testinginit.go
- TestmainGo []byte // content for _testmain.go
+ TestmainGo *[]byte // content for _testmain.go
Asmflags []string // -asmflags for this package
Gcflags []string // -gcflags for this package
buildMode = build.ImportComment
}
data.p, data.err = cfg.BuildContext.ImportDir(r.dir, buildMode)
+ if data.p.Root == "" && cfg.ModulesEnabled {
+ if info := ModPackageModuleInfo(path); info != nil {
+ data.p.Root = info.Dir
+ }
+ }
} else if r.err != nil {
data.p = new(build.Package)
- data.err = fmt.Errorf("unknown import path %q: %v", r.path, r.err)
+ data.err = r.err
} else if cfg.ModulesEnabled && path != "unsafe" {
data.p = new(build.Package)
data.err = fmt.Errorf("unknown import path %q: internal error: module loader did not resolve import", r.path)
var stk ImportStack
stk.Push(p.ImportPath + " (test)")
rawTestImports := str.StringList(p.TestImports)
- var ptestImportsTesting, pxtestImportsTesting bool
for i, path := range p.TestImports {
p1 := loadImport(pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport)
if str.Contains(p1.Deps, p.ImportPath) || p1.ImportPath == p.ImportPath {
}
p.TestImports[i] = p1.ImportPath
imports = append(imports, p1)
- if path == "testing" {
- ptestImportsTesting = true
- }
}
stk.Pop()
stk.Push(p.ImportPath + "_test")
ximports = append(ximports, p1)
}
p.XTestImports[i] = p1.ImportPath
- if path == "testing" {
- pxtestImportsTesting = true
- }
}
stk.Pop()
*ptest = *p
ptest.Error = ptestErr
ptest.ForTest = p.ImportPath
- if ptestImportsTesting {
- ptest.Internal.TestinginitGo = formatTestinginit(p)
- }
ptest.GoFiles = nil
ptest.GoFiles = append(ptest.GoFiles, p.GoFiles...)
ptest.GoFiles = append(ptest.GoFiles, p.TestGoFiles...)
Gccgoflags: p.Internal.Gccgoflags,
},
}
- if pxtestImportsTesting {
- pxtest.Internal.TestinginitGo = formatTestinginit(pxtest)
- }
if pxtestNeedsPtest {
pxtest.Internal.Imports = append(pxtest.Internal.Imports, ptest)
}
if err != nil && pmain.Error == nil {
pmain.Error = &PackageError{Err: err.Error()}
}
- pmain.Internal.TestmainGo = data
+ if data != nil {
+ pmain.Internal.TestmainGo = &data
+ }
return pmain, ptest, pxtest
}
return t, err
}
-// formatTestinginit returns the content of the _testinginit.go file for p.
-func formatTestinginit(p *Package) []byte {
- var buf bytes.Buffer
- if err := testinginitTmpl.Execute(&buf, p); err != nil {
- panic("testinginit template execution failed") // shouldn't be possible
- }
- return buf.Bytes()
-}
-
// formatTestmain returns the content of the _testmain.go file for t.
func formatTestmain(t *testFuncs) ([]byte, error) {
var buf bytes.Buffer
return nil
}
-var testinginitTmpl = lazytemplate.New("init", `
-package {{.Name}}
-
-import _go_testing "testing"
-
-{{/*
-Call testing.Init before any other user initialization code runs.
-(This file is passed to the compiler first.)
-This provides the illusion of the old behavior where testing flags
-were registered as part of the testing package's initialization.
-*/}}
-var _ = func() bool {
- _go_testing.Init()
- return true
-}()
-`)
-
var testmainTmpl = lazytemplate.New("main", `
// Code generated by 'go test'. DO NOT EDIT.
Dir string // absolute path to cached source root directory
Sum string // checksum for path, version (as in go.sum)
GoModSum string // checksum for go.mod (as in go.sum)
+ Latest bool // would @latest resolve to this version?
}
See 'go help modules' for more about module queries.
Dir string `json:",omitempty"`
Sum string `json:",omitempty"`
GoModSum string `json:",omitempty"`
+ Latest bool `json:",omitempty"`
}
func runDownload(cmd *base.Command, args []string) {
if info.Replace != nil {
info = info.Replace
}
- if info.Version == "" {
+ if info.Version == "" && info.Error == nil {
+ // main module
continue
}
m := &moduleJSON{
Version: info.Version,
}
mods = append(mods, m)
+ if info.Error != nil {
+ m.Error = info.Error.Err
+ continue
+ }
work.Add(m)
}
+ latest := map[string]string{} // path → version
+ if *downloadJSON {
+ // We need to populate the Latest field, but if the main module depends on a
+ // version newer than latest — or if the version requested on the command
+ // line is itself newer than latest — that's not trivial to determine from
+ // the info returned by ListModules. Instead, we issue a separate
+ // ListModules request for "latest", which should be inexpensive relative to
+ // downloading the modules.
+ var latestArgs []string
+ for _, m := range mods {
+ if m.Error != "" {
+ continue
+ }
+ latestArgs = append(latestArgs, m.Path+"@latest")
+ }
+
+ if len(latestArgs) > 0 {
+ for _, info := range modload.ListModules(latestArgs, listU, listVersions) {
+ if info.Version != "" {
+ latest[info.Path] = info.Version
+ }
+ }
+ }
+ }
+
work.Do(10, func(item interface{}) {
m := item.(*moduleJSON)
var err error
m.Error = err.Error()
return
}
+ if latest[m.Path] == m.Version {
+ m.Latest = true
+ }
})
if *downloadJSON {
} else {
for _, m := range mods {
if m.Error != "" {
- base.Errorf("%s@%s: %s\n", m.Path, m.Version, m.Error)
+ base.Errorf("%s", m.Error)
}
}
base.ExitIfErrors()
}
func (r *vcsRepo) fetch() {
- _, r.fetchErr = Run(r.dir, r.cmd.fetch)
+ if len(r.cmd.fetch) > 0 {
+ _, r.fetchErr = Run(r.dir, r.cmd.fetch)
+ }
}
func (r *vcsRepo) statLocal(rev string) (*RevInfo, error) {
codeRoot string
// codeDir is the directory (relative to root) at which we expect to find the module.
// If pathMajor is non-empty and codeRoot is not the full modPath,
- // then we look in both codeDir and codeDir+modPath
+ // then we look in both codeDir and codeDir/pathMajor[1:].
codeDir string
// pathMajor is the suffix of modPath that indicates its major version,
codeRev := r.revToRev(rev)
info, err := r.code.Stat(codeRev)
if err != nil {
- return nil, err
+ return nil, &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: rev,
+ Err: err,
+ },
+ }
}
return r.convert(info, rev)
}
// exist as required by info2.Version and the module path represented by r.
checkGoMod := func() (*RevInfo, error) {
// If r.codeDir is non-empty, then the go.mod file must exist: the module
- // author, not the module consumer, gets to decide how to carve up the repo
+		// author — not the module consumer — gets to decide how to carve up the repo
// into modules.
- if r.codeDir != "" {
- _, _, _, err := r.findDir(info2.Version)
- if err != nil {
- // TODO: It would be nice to return an error like "not a module".
- // Right now we return "missing go.mod", which is a little confusing.
- return nil, &module.ModuleError{
- Path: r.modPath,
- Err: &module.InvalidVersionError{
- Version: info2.Version,
- Err: notExistError(err.Error()),
- },
- }
+ //
+ // Conversely, if the go.mod file exists, the module author — not the module
+		// consumer — gets to determine the module's path.
+ //
+ // r.findDir verifies both of these conditions. Execute it now so that
+ // r.Stat will correctly return a notExistError if the go.mod location or
+ // declared module path doesn't match.
+ _, _, _, err := r.findDir(info2.Version)
+ if err != nil {
+ // TODO: It would be nice to return an error like "not a module".
+ // Right now we return "missing go.mod", which is a little confusing.
+ return nil, &module.ModuleError{
+ Path: r.modPath,
+ Err: &module.InvalidVersionError{
+ Version: info2.Version,
+ Err: notExistError(err.Error()),
+ },
}
}
return fmt.Errorf("does not match version-control timestamp (%s)", info.Time.UTC().Format(time.RFC3339))
}
+ tagPrefix := ""
+ if r.codeDir != "" {
+ tagPrefix = r.codeDir + "/"
+ }
+
// A pseudo-version should have a precedence just above its parent revisions,
// and no higher. Otherwise, it would be possible for library authors to "pin"
// dependency versions (and bypass the usual minimum version selection) by
return fmt.Errorf("major version without preceding tag must be v0, not v1")
}
return nil
- }
-
- tagPrefix := ""
- if r.codeDir != "" {
- tagPrefix = r.codeDir + "/"
+ } else {
+ for _, tag := range info.Tags {
+ versionOnly := strings.TrimPrefix(tag, tagPrefix)
+ if versionOnly == base {
+ // The base version is canonical, so if the version from the tag is
+ // literally equal (not just equivalent), then the tag is canonical too.
+ //
+ // We allow pseudo-versions to be derived from non-canonical tags on the
+ // same commit, so that tags like "v1.1.0+some-metadata" resolve as
+ // close as possible to the canonical version ("v1.1.0") while still
+ // enforcing a total ordering ("v1.1.1-0.[…]" with a unique suffix).
+ //
+ // However, canonical tags already have a total ordering, so there is no
+ // reason not to use the canonical tag directly, and we know that the
+ // canonical tag must already exist because the pseudo-version is
+ // derived from it. In that case, referring to the revision by a
+ // pseudo-version derived from its own canonical tag is just confusing.
+ return fmt.Errorf("tag (%s) found on revision %s is already canonical, so should not be replaced with a pseudo-version derived from that tag", tag, rev)
+ }
+ }
}
tags, err := r.code.Tags(tagPrefix + base)
return r.revToRev(version), nil
}
+// findDir locates the directory within the repo containing the module.
+//
+// If r.pathMajor is non-empty, this can be either r.codeDir or — if a go.mod
+// file exists — r.codeDir/r.pathMajor[1:].
func (r *codeRepo) findDir(version string) (rev, dir string, gomod []byte, err error) {
rev, err = r.versionToRev(version)
if err != nil {
"pkg/p.go",
},
},
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v0.0.0-20180219231006-80d85c5d4d17",
+ version: "v0.0.0-20180219231006-80d85c5d4d17",
+ name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
+ short: "80d85c5d4d17",
+ time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
+ zip: []string{
+ "LICENSE",
+ "README.md",
+ "pkg/p.go",
+ },
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ rev: "v0.0.1-0.20180219231006-80d85c5d4d17",
+ err: `github.com/rsc/vgotest1@v0.0.1-0.20180219231006-80d85c5d4d17: invalid pseudo-version: tag (v0.0.0) found on revision 80d85c5d4d17 is already canonical, so should not be replaced with a pseudo-version derived from that tag`,
+ },
{
vcs: "git",
path: "github.com/rsc/vgotest1",
name: "45f53230a74ad275c7127e117ac46914c8126160",
short: "45f53230a74a",
time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC),
- ziperr: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
},
{
vcs: "git",
},
},
{
- vcs: "git",
- path: "github.com/rsc/vgotest1/v2",
- rev: "45f53230a",
- version: "v2.0.0",
- name: "45f53230a74ad275c7127e117ac46914c8126160",
- short: "45f53230a74a",
- time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC),
- gomoderr: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
- ziperr: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "45f53230a",
+ version: "v2.0.0",
+ name: "45f53230a74ad275c7127e117ac46914c8126160",
+ short: "45f53230a74a",
+ time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC),
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0",
},
{
vcs: "git",
name: "80d85c5d4d17598a0e9055e7c175a32b415d6128",
short: "80d85c5d4d17",
time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC),
- ziperr: "missing github.com/rsc/vgotest1/go.mod and .../v54321/go.mod at revision 80d85c5d4d17",
+ err: "missing github.com/rsc/vgotest1/go.mod and .../v54321/go.mod at revision 80d85c5d4d17",
},
{
vcs: "git",
gomod: "module \"github.com/rsc/vgotest1/v2\" // root go.mod\n",
},
{
- vcs: "git",
- path: "github.com/rsc/vgotest1/v2",
- rev: "v2.0.3",
- version: "v2.0.3",
- name: "f18795870fb14388a21ef3ebc1d75911c8694f31",
- short: "f18795870fb1",
- time: time.Date(2018, 2, 19, 23, 16, 4, 0, time.UTC),
- gomoderr: "github.com/rsc/vgotest1/v2/go.mod has non-.../v2 module path \"github.com/rsc/vgotest\" at revision v2.0.3",
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.3",
+ version: "v2.0.3",
+ name: "f18795870fb14388a21ef3ebc1d75911c8694f31",
+ short: "f18795870fb1",
+ time: time.Date(2018, 2, 19, 23, 16, 4, 0, time.UTC),
+ err: "github.com/rsc/vgotest1/v2/go.mod has non-.../v2 module path \"github.com/rsc/vgotest\" at revision v2.0.3",
},
{
- vcs: "git",
- path: "github.com/rsc/vgotest1/v2",
- rev: "v2.0.4",
- version: "v2.0.4",
- name: "1f863feb76bc7029b78b21c5375644838962f88d",
- short: "1f863feb76bc",
- time: time.Date(2018, 2, 20, 0, 3, 38, 0, time.UTC),
- gomoderr: "github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision v2.0.4",
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ rev: "v2.0.4",
+ version: "v2.0.4",
+ name: "1f863feb76bc7029b78b21c5375644838962f88d",
+ short: "1f863feb76bc",
+ time: time.Date(2018, 2, 20, 0, 3, 38, 0, time.UTC),
+ err: "github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision v2.0.4",
},
{
vcs: "git",
tt.name = remap(tt.name, m)
tt.short = remap(tt.short, m)
tt.rev = remap(tt.rev, m)
+ tt.err = remap(tt.err, m)
tt.gomoderr = remap(tt.gomoderr, m)
tt.ziperr = remap(tt.ziperr, m)
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f(tt))
}
var hgmap = map[string]string{
- "github.com/rsc/vgotest1/": "vcs-test.golang.org/hg/vgotest1.hg/",
+ "github.com/rsc/vgotest1": "vcs-test.golang.org/hg/vgotest1.hg",
"f18795870fb14388a21ef3ebc1d75911c8694f31": "a9ad6d1d14eb544f459f446210c7eb3b009807c6",
"ea65f87c8f52c15ea68f3bdd9925ef17e20d91e9": "f1fc0f22021b638d073d31c752847e7bf385def7",
"b769f2de407a4db81af9c5de0a06016d60d2ea09": "92c7eb888b4fac17f1c6bd2e1060a1b881a3b832",
err: "no commits",
},
{
- vcs: "git",
- path: "github.com/rsc/vgotest1",
- version: "v0.0.0-20180219223237-a08abb797a67",
+ vcs: "git",
+ path: "github.com/rsc/vgotest1",
+ err: `github.com/rsc/vgotest1@v0.0.0-20180219223237-a08abb797a67: invalid version: go.mod has post-v0 module path "github.com/vgotest1/v2" at revision a08abb797a67`,
+ },
+ {
+ vcs: "git",
+ path: "github.com/rsc/vgotest1/v2",
+ err: `github.com/rsc/vgotest1/v2@v2.0.0-20180219223237-a08abb797a67: invalid version: github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision a08abb797a67`,
},
{
vcs: "git",
path: "github.com/rsc/vgotest1/subdir",
err: "github.com/rsc/vgotest1/subdir@v0.0.0-20180219223237-a08abb797a67: invalid version: missing github.com/rsc/vgotest1/subdir/go.mod at revision a08abb797a67",
},
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/commit-after-tag.git",
+ version: "v1.0.1-0.20190715211727-b325d8217783",
+ },
+ {
+ vcs: "git",
+ path: "vcs-test.golang.org/git/no-tags.git",
+ version: "v0.0.0-20190715212047-e706ba1d9f6d",
+ },
{
vcs: "mod",
path: "swtch.com/testmod",
GOSUMDB="sum.golang.org+<publickey>"
GOSUMDB="sum.golang.org+<publickey> https://sum.golang.org"
-The go command knows the public key of sum.golang.org; use of any other
-database requires giving the public key explicitly. The URL defaults to
-"https://" followed by the database name.
+The go command knows the public key of sum.golang.org, and also that the name
+sum.golang.google.cn (available inside mainland China) connects to the
+sum.golang.org checksum database; use of any other database requires giving
+the public key explicitly.
+The URL defaults to "https://" followed by the database name.
GOSUMDB defaults to "sum.golang.org", the Go checksum database run by Google.
See https://sum.golang.org/privacy for the service's privacy policy.
func (p *proxyRepo) Latest() (*RevInfo, error) {
data, err := p.getBytes("@latest")
if err != nil {
- // TODO return err if not 404
+ if !errors.Is(err, os.ErrNotExist) {
+ return nil, p.versionError("", err)
+ }
return p.latest()
}
info := new(RevInfo)
var (
errModVendor = errors.New("module lookup disabled by -mod=vendor")
- errProxyOff = errors.New("module lookup disabled by GOPROXY=off")
+ errProxyOff = notExistError("module lookup disabled by GOPROXY=off")
errNoproxy error = notExistError("disabled by GOPRIVATE/GONOPROXY")
errUseProxy error = notExistError("path does not match GOPRIVATE/GONOPROXY")
)
// $GOSUMDB can be "key" or "key url",
// and the key can be a full verifier key
// or a host on our list of known keys.
- key := strings.Fields(cfg.GOSUMDB)
+
+ // Special case: sum.golang.google.cn
+ // is an alias, reachable inside mainland China,
+ // for sum.golang.org. If there are more
+ // of these we should add a map like knownGOSUMDB.
+ gosumdb := cfg.GOSUMDB
+ if gosumdb == "sum.golang.google.cn" {
+ gosumdb = "sum.golang.org https://sum.golang.google.cn"
+ }
+
+ key := strings.Fields(gosumdb)
if len(key) >= 1 {
if k := knownGOSUMDB[key[0]]; k != "" {
key[0] = k
}
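
For orientation, the documented GOSUMDB forms are 'key' and 'key url', with the URL defaulting to "https://" plus the database name (the portion of the key before the first '+'); a hypothetical standalone sketch of that split, separate from the alias handling above:

	package main

	import (
		"fmt"
		"strings"
	)

	// splitGOSUMDB splits a GOSUMDB value into verifier key and database URL.
	// The URL defaults to "https://" plus the database name, which is the key
	// up to the first '+'. Hypothetical sketch of the documented behavior.
	func splitGOSUMDB(gosumdb string) (key, url string) {
		fields := strings.Fields(gosumdb)
		if len(fields) == 0 {
			return "", ""
		}
		key = fields[0]
		name := key
		if i := strings.Index(name, "+"); i >= 0 {
			name = name[:i]
		}
		if len(fields) >= 2 {
			url = fields[1]
		} else {
			url = "https://" + name
		}
		return key, url
	}

	func main() {
		fmt.Println(splitGOSUMDB("sum.golang.org"))
		fmt.Println(splitGOSUMDB("example.org+somekey https://example.org/sumdb"))
	}
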
// ReadCache reads cached lookups or tiles from
-// GOPATH/pkg/mod/download/cache/sumdb,
+// GOPATH/pkg/mod/cache/download/sumdb,
// which will be deleted by "go clean -modcache".
func (*dbClient) ReadCache(file string) ([]byte, error) {
- targ := filepath.Join(PkgMod, "download/cache/sumdb", file)
+ targ := filepath.Join(PkgMod, "cache/download/sumdb", file)
data, err := lockedfile.Read(targ)
// lockedfile.Write does not atomically create the file with contents.
// There is a moment between file creation and locking the file for writing,
// WriteCache updates cached lookups or tiles.
func (*dbClient) WriteCache(file string, data []byte) {
- targ := filepath.Join(PkgMod, "download/cache/sumdb", file)
+ targ := filepath.Join(PkgMod, "cache/download/sumdb", file)
os.MkdirAll(filepath.Dir(targ), 0777)
lockedfile.Write(targ, bytes.NewReader(data), 0666)
}
The first step is to resolve which dependencies to add.
For each named package or package pattern, get must decide which version of
-the corresponding module to use. By default, get chooses the latest tagged
+the corresponding module to use. By default, get looks up the latest tagged
release version, such as v0.4.5 or v1.2.3. If there are no tagged release
-versions, get chooses the latest tagged pre-release version, such as
-v0.0.1-pre1. If there are no tagged versions at all, get chooses the latest
-known commit.
+versions, get looks up the latest tagged pre-release version, such as
+v0.0.1-pre1. If there are no tagged versions at all, get looks up the latest
+known commit. If the module is not already required at a later version
+(for example, a pre-release newer than the latest release), get will use
+the version it looked up. Otherwise, get will use the currently
+required version.
This default version selection can be overridden by adding an @version
suffix to the package argument, as in 'go get golang.org/x/text@v0.3.0'.
+The version may be a prefix: @v1 denotes the latest available version starting
+with v1. See 'go help modules' under the heading 'Module queries' for the
+full query syntax.
+
For modules stored in source control repositories, the version suffix can
also be a commit hash, branch identifier, or other syntax known to the
-source control system, as in 'go get golang.org/x/text@master'.
+source control system, as in 'go get golang.org/x/text@master'. Note that
+branches with names that overlap with other module query syntax cannot be
+selected explicitly. For example, the suffix @v2 means the latest version
+starting with v2, not the branch named v2.
If a module under consideration is already a dependency of the current
development module, then get will update the required version.
depending on it as needed.
The version suffix @latest explicitly requests the latest minor release of the
-given path. The suffix @patch requests the latest patch release: if the path
-is already in the build list, the selected version will have the same minor
-version. If the path is not already in the build list, @patch is equivalent
-to @latest. Neither @latest nor @patch will cause 'go get' to downgrade a module
-in the build list if it is required at a newer pre-release version that is
-newer than the latest released version.
+module named by the given path. The suffix @upgrade is like @latest but
+will not downgrade a module if it is already required at a revision or
+pre-release version newer than the latest released version. The suffix
+@patch requests the latest patch release: the latest released version
+with the same major and minor version numbers as the currently required
+version. Like @upgrade, @patch will not downgrade a module already required
+at a newer version. If the path is not already required, @upgrade and @patch
+are equivalent to @latest.
Although get defaults to using the latest version of the module containing
a named package, it does not use the latest version of that module's
s = ""
}
if s == "true" {
- s = "latest"
+ s = "upgrade"
}
*v = upgradeFlag(s)
return nil
// if there is no "@"). path specifies the modules or packages to get.
path string
- // vers is the part of the argument after "@" (or "" if there is no "@").
- // vers specifies the module version to get.
+ // vers is the part of the argument after "@" or an implied
+ // "upgrade" or "patch" if there is no "@". vers specifies the
+ // module version to get.
vers string
}
}
switch getU {
- case "", "latest", "patch":
+ case "", "upgrade", "patch":
// ok
default:
base.Fatalf("go get: unknown upgrade flag -u=%s", getU)
// Parse command-line arguments and report errors. The command-line
// arguments are of the form path@version or simply path, with implicit
- // @latest. path@none is "downgrade away".
+ // @upgrade. path@none is "downgrade away".
var gets []getArg
var queries []*query
for _, arg := range search.CleanPatterns(args) {
- // Argument is module query path@vers, or else path with implicit @latest.
+ // Argument is path or path@vers.
path := arg
vers := ""
if i := strings.Index(arg, "@"); i >= 0 {
continue
}
- // If the user runs 'go get -u=patch some/module', update some/module to a
- // patch release, not a minor version.
- if vers == "" && getU != "" {
- vers = string(getU)
+ // If no version suffix is specified, assume @upgrade.
+ // If -u=patch was specified, assume @patch instead.
+ if vers == "" {
+ if getU != "" {
+ vers = string(getU)
+ } else {
+ vers = "upgrade"
+ }
}
gets = append(gets, getArg{raw: arg, path: path, vers: vers})
// The argument is a package path.
if pkgs := modload.TargetPackages(path); len(pkgs) != 0 {
// The path is in the main module. Nothing to query.
- if vers != "" && vers != "latest" && vers != "patch" {
+ if vers != "upgrade" && vers != "patch" {
base.Errorf("go get %s: can't request explicit version of path in main module", arg)
}
continue
continue
}
- // If we're querying "latest" or "patch", we need to know the current
- // version of the module. For "latest", we want to avoid accidentally
+ // If we're querying "upgrade" or "patch", we need to know the current
+ // version of the module. For "upgrade", we want to avoid accidentally
// downgrading from a newer prerelease. For "patch", we need to query
// the correct minor version.
// Here, we check if "path" is the name of a module in the build list
return byPath
}
-// getQuery evaluates the given package path, version pair
+// getQuery evaluates the given (package or module) path and version
// to determine the underlying module version being requested.
// If forceModulePath is set, getQuery must interpret path
// as a module path.
base.Fatalf("go get: internal error: prevM may be set if and only if forceModulePath is set")
}
- if vers == "" || vers == "patch" && prevM.Version == "" {
- vers = "latest"
- }
-
- if forceModulePath || !strings.Contains(path, "...") {
+ // If the query must be a module path, try only that module path.
+ if forceModulePath {
if path == modload.Target.Path {
if vers != "latest" {
return module.Version{}, fmt.Errorf("can't get a specific version of the main module")
}
}
- // If the path doesn't contain a wildcard, try interpreting it as a module path.
info, err := modload.Query(path, vers, prevM.Version, modload.Allowed)
if err == nil {
return module.Version{Path: path, Version: info.Version}, nil
}
- // If the query fails, and the path must be a real module, report the query error.
- if forceModulePath {
- return module.Version{}, err
+ // If the query was "upgrade" or "patch" and the current version has been
+ // replaced, check to see whether the error was for that same version:
+ // if so, the version was probably replaced because it is invalid,
+ // and we should keep that replacement without complaining.
+ if vers == "upgrade" || vers == "patch" {
+ var vErr *module.InvalidVersionError
+ if errors.As(err, &vErr) && vErr.Version == prevM.Version && modload.Replacement(prevM).Path != "" {
+ return prevM, nil
+ }
}
+
+ return module.Version{}, err
}
- // Otherwise, try a package path or pattern.
+ // If the query may be either a package or a module, try it as a package path.
+ // If it turns out to only exist as a module, we can detect the resulting
+ // PackageNotInModuleError and avoid a second round-trip through (potentially)
+ // all of the configured proxies.
results, err := modload.QueryPattern(path, vers, modload.Allowed)
if err != nil {
+ // If the path doesn't contain a wildcard, check whether it was actually a
+ // module path instead. If so, return that.
+ if !strings.Contains(path, "...") {
+ var modErr *modload.PackageNotInModuleError
+ if errors.As(err, &modErr) && modErr.Mod.Path == path {
+ return modErr.Mod, nil
+ }
+ }
+
return module.Version{}, err
}
+
return results[0].Mod, nil
}
// which may return a pseudoversion for the latest commit.
// Query "latest" returns the newest tagged version or the newest
// prerelease version if there are no non-prereleases, or repo.Latest
- // if there aren't any tagged versions. Since we're providing the previous
- // version, Query will confirm the latest version is actually newer
- // and will return the current version if not.
+ // if there aren't any tagged versions.
+ // If we're querying "upgrade" or "patch", Query will compare the current
+ // version against the chosen version and will return the current version
+ // if it is newer.
info, err := modload.Query(m.Path, string(getU), m.Version, modload.Allowed)
if err != nil {
// Report error but return m, to let version selection continue.
// (Reporting the error will fail the command at the next base.ExitIfErrors.)
+
+ // Special case: if the error is for m.Version itself and m.Version has a
+ // replacement, then keep it and don't report the error: the fact that the
+ // version is invalid is likely the reason it was replaced to begin with.
+ var vErr *module.InvalidVersionError
+ if errors.As(err, &vErr) && vErr.Version == m.Version && modload.Replacement(m).Path != "" {
+ return m, nil
+ }
+
// Special case: if the error is "no matching versions" then don't
// even report the error. Because Query does not consider pseudo-versions,
// it may happen that we have a pseudo-version but during -u=patch
return
}
- if info, err := Query(m.Path, "latest", m.Version, Allowed); err == nil && semver.Compare(info.Version, m.Version) > 0 {
+ if info, err := Query(m.Path, "upgrade", m.Version, Allowed); err == nil && semver.Compare(info.Version, m.Version) > 0 {
m.Update = &modinfo.ModulePublic{
Path: m.Path,
Version: info.Version,
The string "latest" matches the latest available tagged version,
or else the underlying source repository's latest untagged revision.
-A revision identifier for the underlying source repository,
-such as a commit hash prefix, revision tag, or branch name,
-selects that specific code revision. If the revision is
-also tagged with a semantic version, the query evaluates to
-that semantic version. Otherwise the query evaluates to a
-pseudo-version for the commit.
+The string "upgrade" is like "latest", but if the module is
+currently required at a later version than the version "latest"
+would select (for example, a newer pre-release version), "upgrade"
+will select the later version instead.
+
+The string "patch" matches the latest available tagged version
+of a module with the same major and minor version numbers as the
+currently required version. If no version is currently required,
+"patch" is equivalent to "latest".
+
+A revision identifier for the underlying source repository, such as
+a commit hash prefix, revision tag, or branch name, selects that
+specific code revision. If the revision is also tagged with a
+semantic version, the query evaluates to that semantic version.
+Otherwise the query evaluates to a pseudo-version for the commit.
+Note that branches and tags with names that are matched by other
+query syntax cannot be selected this way. For example, the query
+"v2" means the latest version starting with "v2", not the branch
+named "v2".
All queries prefer release versions to pre-release versions.
For example, "<v1.2.3" will prefer to return "v1.2.2"
"cmd/go/internal/par"
"cmd/go/internal/search"
"cmd/go/internal/semver"
+ "cmd/go/internal/str"
)
type ImportMissingError struct {
func (e *ImportMissingError) Error() string {
if e.Module.Path == "" {
+ if str.HasPathPrefix(e.ImportPath, "cmd") {
+ return fmt.Sprintf("package %s is not in GOROOT (%s)", e.ImportPath, filepath.Join(cfg.GOROOT, "src", e.ImportPath))
+ }
return "cannot find module providing package " + e.ImportPath
}
return "missing module for import: " + e.Module.Path + "@" + e.Module.Version + " provides " + e.ImportPath
dir := filepath.Join(cfg.GOROOT, "src", path)
return module.Version{}, dir, nil
}
+ if str.HasPathPrefix(path, "cmd") {
+ return module.Version{}, "", &ImportMissingError{ImportPath: path}
+ }
// -mod=vendor is special.
// Everything must be in the main module or the main module's vendor directory.
package modload
import (
+ "errors"
"fmt"
"os"
"strings"
mods = append(mods, &modinfo.ModulePublic{
Path: path,
Version: vers,
- Error: &modinfo.ModuleError{
- Err: err.Error(),
- },
+ Error: modinfoError(path, vers, err),
})
continue
}
mods = append(mods, moduleInfo(module.Version{Path: arg, Version: info.Version}, false))
} else {
mods = append(mods, &modinfo.ModulePublic{
- Path: arg,
- Error: &modinfo.ModuleError{
- Err: err.Error(),
- },
+ Path: arg,
+ Error: modinfoError(arg, "", err),
})
}
continue
}
mods = append(mods, &modinfo.ModulePublic{
- Path: arg,
- Error: &modinfo.ModuleError{
- Err: fmt.Sprintf("module %q is not a known dependency", arg),
- },
+ Path: arg,
+ Error: modinfoError(arg, "", errors.New("not a known dependency")),
})
} else {
fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg)
return mods
}
+
+// modinfoError wraps an error to create an error message in
+// modinfo.ModuleError with minimal redundancy.
+func modinfoError(path, vers string, err error) *modinfo.ModuleError {
+ var nerr *NoMatchingVersionError
+ var merr *module.ModuleError
+ if errors.As(err, &nerr) {
+ // NoMatchingVersionError contains the query, so we don't mention the
+ // query again in ModuleError.
+ err = &module.ModuleError{Path: path, Err: err}
+ } else if !errors.As(err, &merr) {
+ // If the error does not contain path and version, wrap it in a
+ // module.ModuleError.
+ err = &module.ModuleError{Path: path, Version: vers, Err: err}
+ }
+
+ return &modinfo.ModuleError{Err: err.Error()}
+}
dir = filepath.Clean(dir)
}
+ // golang.org/issue/32917: We should resolve a relative path to a
+ // package path only if the relative path actually contains the code
+ // for that package.
+ if !dirContainsPackage(dir) {
+ // If we're outside of a module, ensure that the failure mode
+ // indicates that.
+ ModRoot()
+
+ // If the directory is local but does not exist, don't return it
+ // while loader is iterating, since this might trigger a fetch.
+ // After loader is done iterating, we still need to return the
+ // path, so that "go list -e" produces valid output.
+ if !iterating {
+ // We don't have a valid path to resolve to, so report the
+ // unresolved path.
+ m.Pkgs = append(m.Pkgs, pkg)
+ }
+ continue
+ }
+
// Note: The checks for @ here are just to avoid misinterpreting
// the module cache directories (formerly GOPATH/src/mod/foo@v1.5.2/bar).
// It's not strictly necessary but helpful to keep the checks.
if modRoot != "" && dir == modRoot {
- pkg = Target.Path
+ pkg = targetPrefix
} else if modRoot != "" && strings.HasPrefix(dir, modRoot+string(filepath.Separator)) && !strings.Contains(dir[len(modRoot):], "@") {
suffix := filepath.ToSlash(dir[len(modRoot):])
if strings.HasPrefix(suffix, "/vendor/") {
continue
}
} else {
- pkg = Target.Path + suffix
+ modPkg := targetPrefix + suffix
+ if _, ok := dirInModule(modPkg, targetPrefix, modRoot, true); ok {
+ pkg = modPkg
+ } else if !iterating {
+ ModRoot()
+ base.Errorf("go: directory %s is outside main module", base.ShortPath(dir))
+ }
}
} else if sub := search.InDir(dir, cfg.GOROOTsrc); sub != "" && sub != "." && !strings.Contains(sub, "@") {
pkg = filepath.ToSlash(sub)
base.Errorf("go: directory %s outside available modules", base.ShortPath(dir))
}
}
- info, err := os.Stat(dir)
- if err != nil || !info.IsDir() {
- // If the directory is local but does not exist, don't return it
- // while loader is iterating, since this would trigger a fetch.
- // After loader is done iterating, we still need to return the
- // path, so that "go list -e" produces valid output.
- if iterating {
- continue
- }
- }
m.Pkgs = append(m.Pkgs, pkg)
}
return ""
}
-// warnPattern returns list, the result of matching pattern,
-// but if list is empty then first it prints a warning about
-// the pattern not matching any packages.
-func warnPattern(pattern string, list []string) []string {
- if len(list) == 0 {
- fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
+var dirContainsPackageCache sync.Map // absolute dir → bool
+
+func dirContainsPackage(dir string) bool {
+ isPkg, ok := dirContainsPackageCache.Load(dir)
+ if !ok {
+ _, err := cfg.BuildContext.ImportDir(dir, 0)
+ if err == nil {
+ isPkg = true
+ } else {
+ if fi, statErr := os.Stat(dir); statErr != nil || !fi.IsDir() {
+ // A non-directory or inaccessible directory is not a Go package.
+ isPkg = false
+ } else if _, noGo := err.(*build.NoGoError); noGo {
+ // A directory containing no Go source files is not a Go package.
+ isPkg = false
+ } else {
+ // An error other than *build.NoGoError indicates that the package exists
+ // but has some other problem (such as a syntax error).
+ isPkg = true
+ }
+ }
+ isPkg, _ = dirContainsPackageCache.LoadOrStore(dir, isPkg)
}
- return list
+ return isPkg.(bool)
}
// ImportFromFiles adds modules to the build list as needed
var paths []string
for _, pkg := range loaded.pkgs {
- if e, ok := pkg.err.(*ImportMissingError); ok && e.Module.Path == "" {
- continue // Package doesn't actually exist.
+ if pkg.err != nil {
+ base.Errorf("%s: %v", pkg.stackText(), pkg.err)
+ continue
}
paths = append(paths, pkg.path)
}
+ base.ExitIfErrors()
return paths
}
return nil, module.VersionError(mod, errors.New("parsing go.mod: missing module line"))
}
if mpath := f.Module.Mod.Path; mpath != origPath && mpath != mod.Path {
- return nil, module.VersionError(mod, fmt.Errorf("parsing go.mod: unexpected module path %q", mpath))
+ return nil, module.VersionError(mod, fmt.Errorf(`parsing go.mod:
+ module declares its path as: %s
+ but was required as: %s`, mpath, mod.Path))
}
if f.Go != nil {
r.versions.LoadOrStore(mod, f.Go.Version)
// tagged version, with non-prereleases preferred over prereleases.
// If there are no tagged versions in the repo, latest returns the most
// recent commit.
+// - the literal string "upgrade", equivalent to "latest" except that if
+// current is a newer version, current will be returned (see below).
// - the literal string "patch", denoting the latest available tagged version
-// with the same major and minor number as current. If current is "",
-// "patch" is equivalent to "latest".
+// with the same major and minor number as current (see below).
// - v1, denoting the latest available tagged version v1.x.x.
// - v1.2, denoting the latest available tagged version v1.2.x.
// - v1.2.3, a semantic version string denoting that tagged version.
// with non-prereleases preferred over prereleases.
// - a repository commit identifier or tag, denoting that commit.
//
-// current is optional, denoting the current version of the module.
-// If query is "latest" or "patch", current will be returned if it is a newer
-// semantic version or if it is a chronologically later pseudoversion. This
-// prevents accidental downgrades from newer prerelease or development
-// versions.
+// current denotes the current version of the module; it may be "" if the
+// current version is unknown or should not be considered. If query is
+// "upgrade" or "patch", current will be returned if it is a newer
+// semantic version or a chronologically later pseudo-version than the
+// version that would otherwise be chosen. This prevents accidental downgrades
+// from newer pre-release or development versions.
//
// If the allowed function is non-nil, Query excludes any versions for which
// allowed returns false.
ok = allowed
mayUseLatest = true
+ case query == "upgrade":
+ ok = allowed
+ mayUseLatest = true
+
case query == "patch":
if current == "" {
ok = allowed
return nil, err
}
- // For "latest" and "patch", make sure we don't accidentally downgrade
+ // For "upgrade" and "patch", make sure we don't accidentally downgrade
// from a newer prerelease or from a chronologically newer pseudoversion.
- if current != "" && (query == "latest" || query == "patch") {
+ if current != "" && (query == "upgrade" || query == "patch") {
currentTime, err := modfetch.PseudoVersionTime(current)
if semver.Compare(rev.Version, current) < 0 || (err == nil && rev.Time.Before(currentTime)) {
return repo.Stat(current)
}
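
The no-downgrade comparison applied here can be shown in isolation; below is a hypothetical sketch using golang.org/x/mod/semver (assumed here as a stand-in for the internal semver package, and ignoring the pseudo-version timestamp check) that reproduces the "keep a newer current version" behavior for the "upgrade" and "patch" queries:

	package main

	import (
		"fmt"

		"golang.org/x/mod/semver"
	)

	// preferCurrent returns current when it is semantically newer than the
	// version the query would otherwise choose, mirroring the guard above.
	// Hypothetical sketch; the real code also compares pseudo-version times.
	func preferCurrent(chosen, current string) string {
		if current != "" && semver.Compare(chosen, current) < 0 {
			return current
		}
		return chosen
	}

	func main() {
		// "upgrade" from a pre-release that is newer than the latest release
		// keeps the pre-release rather than downgrading to the release.
		fmt.Println(preferCurrent("v1.9.9", "v1.9.10-pre1")) // v1.9.10-pre1
		// Upgrading from an older release still moves forward as usual.
		fmt.Println(preferCurrent("v1.9.9", "v1.8.0")) // v1.9.9
	}
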
r.Packages = match(r.Mod, root, isLocal)
if len(r.Packages) == 0 {
- return r, &packageNotInModuleError{
- mod: r.Mod,
- query: query,
- pattern: pattern,
+ return r, &PackageNotInModuleError{
+ Mod: r.Mod,
+ Query: query,
+ Pattern: pattern,
}
}
return r, nil
wg.Wait()
// Classify the results. In case of failure, identify the error that the user
- // is most likely to find helpful.
+ // is most likely to find helpful: the most useful class of error at the
+ // longest matching path.
var (
+ noPackage *PackageNotInModuleError
noVersion *NoMatchingVersionError
- noPackage *packageNotInModuleError
notExistErr error
)
for _, r := range results {
switch rErr := r.err.(type) {
case nil:
found = append(found, r.QueryResult)
+ case *PackageNotInModuleError:
+ if noPackage == nil {
+ noPackage = rErr
+ }
case *NoMatchingVersionError:
if noVersion == nil {
noVersion = rErr
}
- case *packageNotInModuleError:
- if noPackage == nil {
- noPackage = rErr
- }
default:
if errors.Is(rErr, os.ErrNotExist) {
if notExistErr == nil {
notExistErr = rErr
}
- } else {
+ } else if err == nil {
err = r.err
}
}
func (e *NoMatchingVersionError) Error() string {
currentSuffix := ""
- if (e.query == "latest" || e.query == "patch") && e.current != "" {
+ if (e.query == "upgrade" || e.query == "patch") && e.current != "" {
currentSuffix = fmt.Sprintf(" (current version is %s)", e.current)
}
return fmt.Sprintf("no matching versions for query %q", e.query) + currentSuffix
}
-// A packageNotInModuleError indicates that QueryPattern found a candidate
+// A PackageNotInModuleError indicates that QueryPattern found a candidate
// module at the requested version, but that module did not contain any packages
// matching the requested pattern.
//
-// NOTE: packageNotInModuleError MUST NOT implement Is(os.ErrNotExist).
+// NOTE: PackageNotInModuleError MUST NOT implement Is(os.ErrNotExist).
//
// If the module came from a proxy, that proxy had to return a successful status
// code for the versions it knows about, and thus did not have the opportunity
// to return a non-400 status code to suppress fallback.
-type packageNotInModuleError struct {
- mod module.Version
- query string
- pattern string
+type PackageNotInModuleError struct {
+ Mod module.Version
+ Query string
+ Pattern string
}
-func (e *packageNotInModuleError) Error() string {
+func (e *PackageNotInModuleError) Error() string {
found := ""
- if e.query != e.mod.Version {
- found = fmt.Sprintf(" (%s)", e.mod.Version)
+ if e.Query != e.Mod.Version {
+ found = fmt.Sprintf(" (%s)", e.Mod.Version)
}
- if strings.Contains(e.pattern, "...") {
- return fmt.Sprintf("module %s@%s%s found, but does not contain packages matching %s", e.mod.Path, e.query, found, e.pattern)
+ if strings.Contains(e.Pattern, "...") {
+ return fmt.Sprintf("module %s@%s%s found, but does not contain packages matching %s", e.Mod.Path, e.Query, found, e.Pattern)
}
- return fmt.Sprintf("module %s@%s%s found, but does not contain package %s", e.mod.Path, e.query, found, e.pattern)
+ return fmt.Sprintf("module %s@%s%s found, but does not contain package %s", e.Mod.Path, e.Query, found, e.Pattern)
}
// ModuleHasRootPackage returns whether module m contains a package m.Path.
{path: queryRepo, query: ">=v0.0.0", vers: "v0.0.0"},
{path: queryRepo, query: "v0.0.1", vers: "v0.0.1"},
{path: queryRepo, query: "v0.0.1+foo", vers: "v0.0.1"},
- {path: queryRepo, query: "v0.0.99", err: `unknown revision v0.0.99`},
+ {path: queryRepo, query: "v0.0.99", err: `vcs-test.golang.org/git/querytest.git@v0.0.99: invalid version: unknown revision v0.0.99`},
{path: queryRepo, query: "v0", vers: "v0.3.0"},
{path: queryRepo, query: "v0.1", vers: "v0.1.2"},
{path: queryRepo, query: "v0.2", err: `no matching versions for query "v0.2"`},
// unconditionally).
{path: queryRepo, query: "42abcb6df8ee", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
- {path: queryRepo, query: "v1.9.10-pre2+wrongmetadata", err: `unknown revision v1.9.10-pre2+wrongmetadata`},
- {path: queryRepo, query: "v1.9.10-pre2", err: `unknown revision v1.9.10-pre2`},
+ {path: queryRepo, query: "v1.9.10-pre2+wrongmetadata", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2+wrongmetadata: invalid version: unknown revision v1.9.10-pre2+wrongmetadata`},
+ {path: queryRepo, query: "v1.9.10-pre2", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2: invalid version: unknown revision v1.9.10-pre2`},
{path: queryRepo, query: "latest", vers: "v1.9.9"},
- {path: queryRepo, query: "latest", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"},
- {path: queryRepo, query: "latest", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
- {path: queryRepo, query: "latest", current: "v0.0.0-20190513201126-42abcb6df8ee", vers: "v0.0.0-20190513201126-42abcb6df8ee"},
- {path: queryRepo, query: "latest", allow: "NOMATCH", err: `no matching versions for query "latest"`},
- {path: queryRepo, query: "latest", current: "v1.9.9", allow: "NOMATCH", err: `no matching versions for query "latest" (current version is v1.9.9)`},
- {path: queryRepo, query: "latest", current: "v1.99.99", err: `unknown revision v1.99.99`},
+ {path: queryRepo, query: "latest", current: "v1.9.10-pre1", vers: "v1.9.9"},
+ {path: queryRepo, query: "upgrade", vers: "v1.9.9"},
+ {path: queryRepo, query: "upgrade", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"},
+ {path: queryRepo, query: "upgrade", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "upgrade", current: "v0.0.0-20190513201126-42abcb6df8ee", vers: "v0.0.0-20190513201126-42abcb6df8ee"},
+ {path: queryRepo, query: "upgrade", allow: "NOMATCH", err: `no matching versions for query "upgrade"`},
+ {path: queryRepo, query: "upgrade", current: "v1.9.9", allow: "NOMATCH", err: `no matching versions for query "upgrade" (current version is v1.9.9)`},
+ {path: queryRepo, query: "upgrade", current: "v1.99.99", err: `vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`},
{path: queryRepo, query: "patch", current: "", vers: "v1.9.9"},
{path: queryRepo, query: "patch", current: "v0.1.0", vers: "v0.1.2"},
{path: queryRepo, query: "patch", current: "v1.9.0", vers: "v1.9.9"},
{path: queryRepoV2, query: "v2.6.0-pre1", vers: "v2.6.0-pre1"},
{path: queryRepoV2, query: "latest", vers: "v2.5.5"},
- {path: queryRepoV3, query: "e0cf3de987e6", vers: "v3.0.0-20180704024501-e0cf3de987e6"},
- {path: queryRepoV3, query: "latest", vers: "v3.0.0-20180704024501-e0cf3de987e6"},
+ // e0cf3de987e6 is the latest commit on the master branch, and it's actually
+ // v1.19.10-pre1, not anything resembling v3: attempting to query it as such
+ // should fail.
+ {path: queryRepoV3, query: "e0cf3de987e6", err: `vcs-test.golang.org/git/querytest.git/v3@v3.0.0-20180704024501-e0cf3de987e6: invalid version: go.mod has non-.../v3 module path "vcs-test.golang.org/git/querytest.git" (and .../v3/go.mod does not exist) at revision e0cf3de987e6`},
+ {path: queryRepoV3, query: "latest", err: `no matching versions for query "latest"`},
{path: emptyRepo, query: "latest", vers: "v0.0.0-20180704023549-7bb914627242"},
{path: emptyRepo, query: ">v0.0.0", err: `no matching versions for query ">v0.0.0"`},
ok, _ := path.Match(allow, m.Version)
return ok
}
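+	// Copy the range variable: the parallel subtest started below runs after
+	// the loop advances, so it must capture its own tt.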
+ tt := tt
t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.query+"/"+tt.current+"/"+allow, func(t *testing.T) {
+ t.Parallel()
+
info, err := Query(tt.path, tt.query, tt.current, allowed)
if tt.err != "" {
if err == nil {
}
}
- // Construct the list by traversing the graph again, replacing older
- // modules with required minimum versions.
+ // The final list is the minimum version of each module found in the graph.
+
if v := min[target.Path]; v != target.Version {
// TODO(jayconrod): there is a special case in modload.mvsReqs.Max
// that prevents us from selecting a newer version of a module
}
list := []module.Version{target}
- listed := map[string]bool{target.Path: true}
- for i := 0; i < len(list); i++ {
- n := modGraph[list[i]]
+ for path, vers := range min {
+ if path != target.Path {
+ list = append(list, module.Version{Path: path, Version: vers})
+ }
+
+ n := modGraph[module.Version{Path: path, Version: vers}]
required := n.required
for _, r := range required {
v := min[r.Path]
if r.Path != target.Path && reqs.Max(v, r.Version) != v {
panic(fmt.Sprintf("mistake: version %q does not satisfy requirement %+v", v, r)) // TODO: Don't panic.
}
- if !listed[r.Path] {
- list = append(list, module.Version{Path: r.Path, Version: v})
- listed[r.Path] = true
- }
}
}
}
// Walk modules in reverse post-order, only adding those not implied already.
- have := map[string]string{}
+ have := map[module.Version]bool{}
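+	// Keyed by (path, version) rather than by path alone: requirements that
+	// are reachable only through an older version of a module must still be
+	// walked.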
walk = func(m module.Version) error {
- if v, ok := have[m.Path]; ok && reqs.Max(m.Version, v) == v {
+ if have[m] {
return nil
}
- have[m.Path] = m.Version
+ have[m] = true
for _, m1 := range reqCache[m] {
walk(m1)
}
// Older version.
continue
}
- if have[m.Path] != m.Version {
+ if !have[m] {
min = append(min, m)
walk(m)
}
G1: C4
A2: B1 C4 D4
build A: A B1 C2 D4 E2 F1
-upgrade* A: A B1 C4 D5 E2 G1
+upgrade* A: A B1 C4 D5 E2 F1 G1
upgrade A C4: A B1 C4 D4 E2 F1 G1
downgrade A2 D2: A2 C4 D2
B1: D3
C2: B2
B2:
-build A: A B2 C2
+build A: A B2 C2 D3
# Cross-dependency between D and E.
# No matter how it arises, should get result of merging all build lists via max,
E1: D2
build A: A B C D2 E2
-# Upgrade from B1 to B2 should drop the transitive dep on D.
+# golang.org/issue/31248:
+# Even though we select X2, the requirement on I1
+# via X1 should be preserved.
+name: cross8
+M: A1 B1
+A1: X1
+B1: X2
+X1: I1
+X2:
+build M: M A1 B1 I1 X2
+
+# Upgrade from B1 to B2 should not drop the transitive dep on D.
name: drop
A: B1 C1
B1: D1
C2:
D2:
build A: A B1 C1 D1
-upgrade* A: A B2 C2
+upgrade* A: A B2 C2 D2
name: simplify
A: B1 C1
B1: C2
C1: D1
C2:
-build A: A B1 C2
+build A: A B1 C2 D1
name: up1
A: B1 C1
upgrade A B2: A B2
upgrade* A: A B3
+# golang.org/issue/29773:
# Requirements of older versions of the target
-# must not be carried over.
+# must be carried over.
name: cycle2
A: B1
A1: C1
C1: A2
C2:
D2:
-build A: A B1
-upgrade* A: A B2
+build A: A B1 C1 D1
+upgrade* A: A B2 C2 D2
# Requirement minimization.
req A: G1
req A G: G1
req A H: H1
+
+name: req3
+M: A1 B1
+A1: X1
+B1: X2
+X1: I1
+X2:
+req M: A1 B1
`
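
// The sketch below restates, with hypothetical names (it is not the cmd/go
// implementation), the rule the cases above exercise: every requirement edge
// reachable from the target is followed, including edges out of versions older
// than the one finally selected, and the build list keeps the highest version
// seen for each module path. Plain string comparison stands in for semver
// comparison, which is adequate for the single-digit versions in these cases.
// In the cross8 case this yields "M A1 B1 I1 X2": X1's edge to I1 is still
// followed even though X2 is the selected version of X.
type mod struct{ path, version string }

func buildList(target mod, reqs map[mod][]mod) []mod {
	highest := map[string]string{target.path: target.version}
	seen := map[mod]bool{}
	var walk func(mod)
	walk = func(m mod) {
		if seen[m] {
			return
		}
		seen[m] = true
		if m.version > highest[m.path] {
			highest[m.path] = m.version
		}
		for _, r := range reqs[m] {
			walk(r)
		}
	}
	walk(target)
	list := []mod{target}
	for p, v := range highest {
		if p != target.path {
			list = append(list, mod{path: p, version: v})
		}
	}
	return list
}
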
func Test(t *testing.T) {
if !cfg.BuildN {
// writeTestmain writes _testmain.go,
// using the test description gathered in t.
- if err := ioutil.WriteFile(testDir+"_testmain.go", pmain.Internal.TestmainGo, 0666); err != nil {
+ if err := ioutil.WriteFile(testDir+"_testmain.go", *pmain.Internal.TestmainGo, 0666); err != nil {
return nil, nil, nil, err
}
}
return false
}
+ if a.Package.Root == "" {
+ // Caching does not apply to tests outside of any module, GOPATH, or GOROOT.
+ if cache.DebugTest {
+ fmt.Fprintf(os.Stderr, "testcache: caching disabled for package outside of module root, GOPATH, or GOROOT: %s\n", a.Package.ImportPath)
+ }
+ c.disableCache = true
+ return false
+ }
+
var cacheArgs []string
for _, arg := range testArgs {
i := strings.Index(arg, "=")
if !filepath.IsAbs(name) {
name = filepath.Join(pwd, name)
}
- if !inDir(name, a.Package.Root) {
- // Do not recheck files outside the GOPATH or GOROOT root.
+ if a.Package.Root == "" || !inDir(name, a.Package.Root) {
+ // Do not recheck files outside the module, GOPATH, or GOROOT root.
break
}
fmt.Fprintf(h, "stat %s %x\n", name, hashStat(name))
if !filepath.IsAbs(name) {
name = filepath.Join(pwd, name)
}
- if !inDir(name, a.Package.Root) {
- // Do not recheck files outside the GOPATH or GOROOT root.
+ if a.Package.Root == "" || !inDir(name, a.Package.Root) {
+ // Do not recheck files outside the module, GOPATH, or GOROOT root.
break
}
fh, err := hashOpen(name)
}
func (x *elfExe) DataStart() uint64 {
+ for _, s := range x.f.Sections {
+ if s.Name == ".go.buildinfo" {
+ return s.Addr
+ }
+ }
for _, p := range x.f.Progs {
if p.Type == elf.PT_LOAD && p.Flags&(elf.PF_X|elf.PF_W) == elf.PF_W {
return p.Vaddr
}
func (x *machoExe) DataStart() uint64 {
- // Assume data is first non-empty writable segment.
+ // Look for section named "__go_buildinfo".
+ for _, sec := range x.f.Sections {
+ if sec.Name == "__go_buildinfo" {
+ return sec.Addr
+ }
+ }
+ // Try the first non-empty writable segment.
const RW = 3
for _, load := range x.f.Loads {
seg, ok := load.(*macho.Segment)
If the arguments to build are a list of .go files from a single directory,
build treats them as a list of source files specifying a single package.
+When compiling packages, build ignores files that end in '_test.go'.
+
When compiling a single main package, build writes
the resulting executable to an output file named after
the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
build compiles the packages but discards the resulting object,
serving only as a check that the packages can be built.
-When compiling packages, build ignores files that end in '_test.go'.
-
The -o flag forces build to write the resulting executable or object
to the named output file or directory, instead of the default behavior described
in the last two paragraphs. If the named output is a directory that exists,
// On the development branch, use the content ID part of the build ID.
id = contentID(f[len(f)-1])
} else {
- // For a release, the output is like: "compile version go1.9.1". Use the whole line.
- id = f[2]
+ // For a release, the output is like: "compile version go1.9.1 X:framepointer".
+ // Use the whole line.
+ id = strings.TrimSpace(line)
}
b.id.Lock()
}
}
- // Write out the _testinginit.go file for any test packages that import "testing".
- if a.Package.Internal.TestinginitGo != nil {
- initfile := objdir + "_testinginit.go"
- if err := b.writeFile(initfile, a.Package.Internal.TestinginitGo); err != nil {
- return err
- }
- gofiles = append([]string{initfile}, gofiles...)
- }
-
// Run cgo.
if a.Package.UsesCgo() || a.Package.UsesSwig() {
// In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
package flag_test
import (
"flag"
+ "log"
"testing"
)
var v = flag.Int("v", 0, "v flag")
-// Run this as go test pkg -args -v=7
+// Run this as go test pkg -v=7
func TestVFlagIsSet(t *testing.T) {
if *v != 7 {
- t.Fatal("v flag not set")
+ log.Fatal("v flag not set")
}
}
example.com/badchain/c v1.1.0
-- .mod --
-module example.com/badchain/wrong
+module badchain.example.com/c
-- .info --
{"Version":"v1.1.0"}
-- c.go --
env GO111MODULE=off
+env GODEBUG=gocachetest=1
[!gc] skip
[short] skip # clears cache, rebuilds too much
stderr '\d+ symbols' # from linker
# Running a test should run the compiler, linker, and the test the first time.
-go test -v -x -gcflags=-m -ldflags=-v p_test.go
+go test -v -x -gcflags=-m -ldflags=-v p
stderr 'compile( |\.exe"?)'
stderr 'p_test.go:.*can inline Test' # from compile of p_test
stderr 'testmain\.go:.*inlin' # from compile of testmain
stdout 'TEST' # from test
# ... but not the second, even though it still prints the compiler, linker, and test output.
-go test -v -x -gcflags=-m -ldflags=-v p_test.go
+go test -v -x -gcflags=-m -ldflags=-v p
! stderr 'compile( |\.exe"?)'
stderr 'p_test.go:.*can inline Test' # from compile of p_test
stderr 'testmain\.go:.*inlin' # from compile of testmain
package main
func main() {}
--- p_test.go --
+-- p/p_test.go --
package p
import "testing"
func Test(t *testing.T) {println("TEST")}
--- /dev/null
+env GO111MODULE=on
+
+# Regression test for golang.org/issue/31031:
+# Importing or loading a non-existent package in cmd/ should print
+# a clear error in module mode.
+
+! go list cmd/unknown
+stderr '^can''t load package: package cmd/unknown: package cmd/unknown is not in GOROOT \('$GOROOT'[/\\]src[/\\]cmd[/\\]unknown\)$'
+
+go list -f '{{range .DepsErrors}}{{.Err}}{{end}}' x.go
+stdout '^package cmd/unknown is not in GOROOT \('$GOROOT'[/\\]src[/\\]cmd[/\\]unknown\)$'
+
+-- x.go --
+package x
+
+import _ "cmd/unknown"
env GO111MODULE=on
[short] skip
+# Check when module x is inside GOPATH/src.
go doc y
stdout 'Package y is.*alphabet'
stdout 'import "x/y"'
go doc quote
stdout 'Package quote collects pithy sayings.'
-# Double-check go doc y when y is not in GOPATH/src.
-env GOPATH=$WORK/altgopath
+# Double-check when module x is outside GOPATH/src.
+env GOPATH=$WORK/emptygopath
go doc x/y
stdout 'Package y is.*alphabet'
go doc y
stdout 'Package y is.*alphabet'
+# Triple-check when module x is outside GOPATH/src,
+# but other packages with same import paths are in GOPATH/src.
+# Since go doc is running in module mode here, packages in active module
+# should be preferred over packages in GOPATH. See golang.org/issue/28992.
+env GOPATH=$WORK/gopath2
+go doc x/y
+! stdout 'Package y is.*GOPATH'
+stdout 'Package y is.*alphabet'
+go doc rsc.io/quote
+! stdout 'Package quote is located in a GOPATH workspace.'
+stdout 'Package quote collects pithy sayings.'
+
-- go.mod --
module x
require rsc.io/quote v1.5.2
-- x.go --
package x
+
+-- $WORK/gopath2/src/x/y/y.go --
+// Package y is located in a GOPATH workspace.
+package y
+-- $WORK/gopath2/src/rsc.io/quote/quote.go --
+// Package quote is located in a GOPATH workspace.
+package quote
+
+// Hello is located in a GOPATH workspace.
+func Hello() string { return "" }
--- /dev/null
+env GO111MODULE=on
+
+# golang.org/issue/32917 and golang.org/issue/28459: 'go build' and 'go test'
+# in an empty directory should refer to the path '.' and should not attempt
+# to resolve an external module.
+cd dir
+! go get .
+stderr 'go get \.: path .* is not a package in module rooted at .*[/\\]dir$'
+! go list
+! stderr 'cannot find module providing package'
+stderr '^can.t load package: package \.: no Go files in '$WORK'[/\\]gopath[/\\]src[/\\]dir$'
+
+cd subdir
+! go list
+! stderr 'cannot find module providing package'
+stderr '^can.t load package: package \.: no Go files in '$WORK'[/\\]gopath[/\\]src[/\\]dir[/\\]subdir$'
+cd ..
+
+# golang.org/issue/30590: if a package is found in the filesystem
+# but is not in the main module, the error message should not say
+# "cannot find module providing package", and we shouldn't try
+# to find a module providing the package.
+! go list ./othermodule
+! stderr 'cannot find module providing package'
+stderr 'go: directory othermodule is outside main module'
+
+-- dir/go.mod --
+module example.com
+go 1.13
+-- dir/subdir/README --
+There are no Go source files in this directory.
+-- dir/othermodule/go.mod --
+module example.com/othermodule
+go 1.13
+-- dir/othermodule/om.go --
+package othermodule
stdout '"Error": ".*this.domain.is.invalid.*"'
# download -json with version should print JSON
+# and download the .info file for the 'latest' version.
go mod download -json 'rsc.io/quote@<=v1.5.0'
stdout '^\t"Path": "rsc.io/quote"'
stdout '^\t"Version": "v1.5.0"'
stdout '^\t"GoModSum": "h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe\+TKr0="'
! stdout '"Error"'
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
+
# download queries above should not have added to go.mod.
go list -m all
! stdout rsc.io
# add to go.mod so we can test non-query downloads
go mod edit -require rsc.io/quote@v1.5.2
-! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
go mod download -json rsc.io/quote@v1.5.1
exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.1.zip
+# download reports errors encountered when locating modules
+! go mod download bad/path
+stderr '^module bad/path: not a known dependency$'
+! go mod download bad/path@latest
+stderr '^bad/path@latest: malformed module path "bad/path": missing dot in first path element$'
+! go mod download rsc.io/quote@v1.999.999
+stderr '^rsc.io/quote@v1.999.999: reading .*/v1.999.999.info: 404 Not Found$'
+! go mod download -json bad/path
+stdout '^\t"Error": "module bad/path: not a known dependency"'
+
# allow go mod download without go.mod
env GO111MODULE=auto
rm go.mod
--- /dev/null
+env GO111MODULE=on
+
+# If the module is the latest version of itself,
+# the Latest field should be set.
+go mod download -json rsc.io/quote@v1.5.2
+stdout '"Latest":\s*true'
+
+# If the module is older than latest, the field should be unset.
+go mod download -json rsc.io/quote@v1.5.1
+! stdout '"Latest":'
+
+# If the module is newer than "latest", the field should be unset...
+go mod download -json rsc.io/quote@v1.5.3-pre1
+! stdout '"Latest":'
+
+# ...even if that version is also what is required by the main module.
+go mod init example.com
+go mod edit -require rsc.io/quote@v1.5.3-pre1
+go mod download -json rsc.io/quote@v1.5.3-pre1
+! stdout '"Latest":'
! go build -mod=readonly ./nonexist
! stderr 'import lookup disabled'
-stderr 'unknown import path "m/nonexist": cannot find package'
+stderr '^can.t load package: package ./nonexist: cannot find package "." in:\n\t'$WORK'[/\\]gopath[/\\]src[/\\]x[/\\]nonexist$'
! go build -mod=readonly ./go.mod
! stderr 'import lookup disabled'
-stderr 'unknown import path "m/go.mod": cannot find package'
+stderr 'can.t load package: package ./go.mod: cannot find package'
-- x/go.mod --
module m
--- /dev/null
+env GO111MODULE=on
+
+[!net] skip
+
+env GOPROXY=https://proxy.golang.org,direct
+env GOSUMDB=off
+
+go get -x -v -d golang.org/x/tools/cmd/goimports
+stderr '# get https://proxy.golang.org/golang.org/x/tools/@latest'
+! stderr '# get https://golang.org'
# @patch and @latest within the main module refer to the current version.
# The main module won't be upgraded, but missing dependencies will be added.
cp go.mod.orig go.mod
-go get -d rsc.io/x@latest
+go get -d rsc.io/x
+grep 'rsc.io/quote v1.5.2' go.mod
+go get -d rsc.io/x@upgrade
grep 'rsc.io/quote v1.5.2' go.mod
cp go.mod.orig go.mod
go get -d rsc.io/x@patch
grep 'rsc.io/quote v1.5.2' go.mod
cp go.mod.orig go.mod
+# The main module cannot be updated to @latest, which is a specific version.
+! go get -d rsc.io/x@latest
+stderr '^go get rsc.io/x@latest: can.t request explicit version of path in main module$'
+
# The main module cannot be updated to a specific version.
! go get rsc.io/x@v0.1.0
stderr '^go get rsc.io/x@v0.1.0: can.t request explicit version of path in main module$'
env GO111MODULE=on
# Download modules to avoid stderr chatter
+go mod download example.com@v1.0.0
go mod download example.com/newcycle/a@v1.0.0
go mod download example.com/newcycle/a@v1.0.1
go mod download example.com/newcycle/b@v1.0.0
cmp stderr stderr-expected
-- stderr-expected --
+go: finding example.com/newcycle v1.0.0
go get: inconsistent versions:
example.com/newcycle/a@v1.0.0 requires example.com/newcycle/a@v1.0.1 (not example.com/newcycle/a@v1.0.0)
cp go.mod.orig go.mod
! go get -d rsc.io/quote/x...
-stderr 'go get rsc.io/quote/x...: module rsc.io/quote@latest \(v1.5.2\) found, but does not contain packages matching rsc.io/quote/x...'
+stderr 'go get rsc.io/quote/x...: module rsc.io/quote@upgrade \(v1.5.2\) found, but does not contain packages matching rsc.io/quote/x...'
! grep 'require rsc.io/quote' go.mod
! go get -d rsc.io/quote/x/...
-stderr 'go get rsc.io/quote/x/...: module rsc.io/quote@latest \(v1.5.2\) found, but does not contain packages matching rsc.io/quote/x/...'
+stderr 'go get rsc.io/quote/x/...: module rsc.io/quote@upgrade \(v1.5.2\) found, but does not contain packages matching rsc.io/quote/x/...'
! grep 'require rsc.io/quote' go.mod
# If a pattern matches no packages within a module, the module should not
[!exec:svn] skip
env GO111MODULE=on
-env GOPROXY=direct # obtain llvm.org directory, not via svn.
+env GOPROXY=direct
+env GOSUMDB=off
# Attempting to get a module zip using svn should fail with a reasonable
# message instead of a panic.
# TODO(golang.org/issue/26092): Really, it shouldn't fail at all.
-! go get -d llvm.org/llvm/bindings/go/llvm
+! go get -d vcs-test.golang.org/svn/hello.svn
stderr 'ReadZip not implemented for svn'
! go install .
stderr 'ReadZip not implemented for svn'
+# Attempting to get a nonexistent module using svn should fail with a
+# reasonable message instead of a panic.
+! go get -d vcs-test.golang.org/svn/nonexistent.svn
+! stderr panic
+stderr 'go get vcs-test.golang.org/svn/nonexistent.svn: no matching versions for query "upgrade"'
+
-- go.mod --
module golang/go/issues/28943/main
-- main.go --
package main
-import _ "llvm.org/llvm/bindings/go/llvm"
+import _ "vcs-test.golang.org/svn/hello.svn"
func main() {}
# The v0.1.1 pseudo-version is semantically higher than the latest tag.
# The v0.0.0 pseudo-version is chronologically newer.
-# 'get -u' should not downgrade to the (lower) tagged version.
+# Start at v0.1.1-0.20190429073117-b5426c86b553
go get -d example.com/pseudoupgrade@b5426c8
+go list -m -u all
+stdout '^example.com/pseudoupgrade v0.1.1-0.20190429073117-b5426c86b553$'
+
+# 'get -u' should not downgrade to the (lower) tagged version.
go get -d -u
go list -m -u all
stdout '^example.com/pseudoupgrade v0.1.1-0.20190429073117-b5426c86b553$'
-# 'get example.com/pseudoupgrade@latest' should not downgrade to
-# the (lower) tagged version.
-go get -d example.com/pseudoupgrade@latest
+# 'get example.com/pseudoupgrade@upgrade' should not downgrade.
+go get -d example.com/pseudoupgrade@upgrade
go list -m all
stdout '^example.com/pseudoupgrade v0.1.1-0.20190429073117-b5426c86b553$'
+# 'get example.com/pseudoupgrade' should not downgrade.
+# This is equivalent to 'get example.com/pseudoupgrade@upgrade'.
+go get -d example.com/pseudoupgrade
+go list -m all
+stdout '^example.com/pseudoupgrade v0.1.1-0.20190429073117-b5426c86b553$'
+
+# 'get example.com/pseudoupgrade@latest' should downgrade.
+# @latest should not consider the current version.
+go get -d example.com/pseudoupgrade@latest
+go list -m all
+stdout '^example.com/pseudoupgrade v0.1.0$'
+
# We should observe the same behavior with the newer pseudo-version.
go get -d example.com/pseudoupgrade@v0.0.0-20190430073000-30950c05d534
go list -m -u all
stdout '^example.com/pseudoupgrade v0.0.0-20190430073000-30950c05d534$'
-# 'get example.com/pseudoupgrade@latest' should not downgrade to the
-# chronologically older tagged version.
-go get -d example.com/pseudoupgrade@latest
+# 'get example.com/pseudoupgrade@upgrade' should not downgrade.
+go get -d example.com/pseudoupgrade@upgrade
go list -m -u all
stdout '^example.com/pseudoupgrade v0.0.0-20190430073000-30950c05d534$'
+# 'get example.com/pseudoupgrade' should not downgrade.
+go get -d example.com/pseudoupgrade
+go list -m -u all
+stdout '^example.com/pseudoupgrade v0.0.0-20190430073000-30950c05d534$'
+
+# 'get example.com/pseudoupgrade@latest' should downgrade.
+go get -d example.com/pseudoupgrade@latest
+go list -m -u all
+stdout '^example.com/pseudoupgrade v0.1.0$'
+
-- go.mod --
module x
"go/build"
"log"
"os"
+ "path/filepath"
"strings"
)
func main() {
- p, err := build.Import(os.Args[1], os.Args[2], 0)
+ // build.Import should support relative and absolute source dir paths.
+ path := os.Args[1]
+ srcDir := os.Args[2]
+ p1, err := build.Import(path, srcDir, 0)
if err != nil {
log.Fatal(err)
}
- fmt.Printf("%s\n%s\n", p.Dir, strings.Join(p.GoFiles, " "))
+ absSrcDir, err := filepath.Abs(srcDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+ p2, err := build.Import(path, absSrcDir, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if p1.Dir != p2.Dir {
+ log.Fatalf("different packages loaded with relative and absolute paths:\n\t%s\n\t%s", p1.Dir, p2.Dir)
+ }
+
+ fmt.Printf("%s\n%s\n", p1.Dir, strings.Join(p1.GoFiles, " "))
}
-- $GOPATH/other/go.mod --
--- /dev/null
+env GO111MODULE=on
+
+# golang.org/issue/31248: module requirements imposed by dependency versions
+# older than the selected version must still be taken into account.
+
+env GOFLAGS=-mod=readonly
+
+# Indirect dependencies required via older-than-selected versions must exist in
+# the module graph, but do not need to be listed explicitly in the go.mod file
+# (since they are implied).
+go mod graph
+stdout i@v0.1.0
+
+# The modules must also appear in the build list, not just the graph.
+go list -m all
+stdout '^i v0.1.0'
+
+# The packages provided by those dependencies must resolve.
+go list all
+stdout '^i$'
+
+-- go.mod --
+module main
+
+go 1.13
+
+require (
+ a v0.0.0
+ b v0.0.0
+ c v0.0.0
+)
+
+// Apply replacements so that the test can be self-contained.
+// (It's easier to see all of the modules here than to go
+// rooting around in testdata/mod.)
+replace (
+ a => ./a
+ b => ./b
+ c => ./c
+ x v0.1.0 => ./x1
+ x v0.2.0 => ./x2
+ i => ./i
+)
+-- main.go --
+package main
+
+import (
+ _ "a"
+ _ "b"
+ _ "c"
+)
+
+func main() {}
+-- a/go.mod --
+module a
+go 1.13
+require x v0.1.0
+-- a/a.go --
+package a
+-- b/go.mod --
+module b
+go 1.13
+require x v0.2.0
+-- b/b.go --
+package b
+-- c/go.mod --
+module c
+go 1.13
+-- c/c.go --
+package c
+import _ "i"
+-- x1/go.mod --
+module x
+go 1.13
+require i v0.1.0
+-- x2/go.mod --
+module x
+go 1.13
+-- i/go.mod --
+-- i/i.go --
+package i
--- /dev/null
+env GO111MODULE=on
+
+# Regression test for golang.org/issue/29773: 'go list -m' was not following
+# dependencies through older versions of the main module.
+
+go list -f '{{with .Module}}{{.Path}}{{with .Version}} {{.}}{{end}}{{end}}' all
+cmp stdout pkgmods.txt
+
+go list -m all
+cmp stdout mods.txt
+
+go mod graph
+cmp stdout graph.txt
+
+-- go.mod --
+module golang.org/issue/root
+
+go 1.12
+
+replace (
+ golang.org/issue/mirror v0.1.0 => ./mirror-v0.1.0
+ golang.org/issue/pkg v0.1.0 => ./pkg-v0.1.0
+ golang.org/issue/root v0.1.0 => ./root-v0.1.0
+)
+
+require golang.org/issue/mirror v0.1.0
+
+-- root.go --
+package root
+
+import _ "golang.org/issue/mirror"
+
+-- mirror-v0.1.0/go.mod --
+module golang.org/issue/mirror
+
+require golang.org/issue/root v0.1.0
+
+-- mirror-v0.1.0/mirror.go --
+package mirror
+
+import _ "golang.org/issue/pkg"
+
+-- pkg-v0.1.0/go.mod --
+module golang.org/issue/pkg
+
+-- pkg-v0.1.0/pkg.go --
+package pkg
+
+-- root-v0.1.0/go.mod --
+module golang.org/issue/root
+
+require golang.org/issue/pkg v0.1.0
+
+-- pkgmods.txt --
+golang.org/issue/mirror v0.1.0
+golang.org/issue/pkg v0.1.0
+golang.org/issue/root
+-- mods.txt --
+golang.org/issue/root
+golang.org/issue/mirror v0.1.0 => ./mirror-v0.1.0
+golang.org/issue/pkg v0.1.0 => ./pkg-v0.1.0
+-- graph.txt --
+golang.org/issue/root golang.org/issue/mirror@v0.1.0
+golang.org/issue/mirror@v0.1.0 golang.org/issue/root@v0.1.0
+golang.org/issue/root@v0.1.0 golang.org/issue/pkg@v0.1.0
--- /dev/null
+env GO111MODULE=on
+
+# golang.org/issue/31248: loading the build list must not add explicit entries
+# for indirect dependencies already implied by older-than-selected versions
+# already in the build list.
+
+cp go.mod.orig go.mod
+go mod tidy
+cmp go.mod go.mod.orig
+
+cp go.mod.orig go.mod
+go list -m all
+cmp go.mod go.mod.orig
+
+-- go.mod.orig --
+module main
+
+go 1.13
+
+require a v0.0.0
+
+replace (
+ a v0.0.0 => ./a
+ b v0.0.0 => ./b
+ i v0.0.0 => ./i
+ x v0.1.0 => ./x1
+ x v0.2.0 => ./x2
+)
+-- main.go --
+package main
+
+import _ "a"
+
+func main() {}
+-- a/go.mod --
+module a
+go 1.13
+require (
+ x v0.2.0
+ b v0.0.0
+)
+-- a/a.go --
+package a
+-- b/go.mod --
+module b
+go 1.13
+require x v0.1.0
+-- x1/go.mod --
+module x
+go 1.13
+require (
+ b v0.0.0
+ i v0.0.0
+)
+-- x2/go.mod --
+module x
+go 1.13
+-- i/go.mod --
+module i
+go 1.13
go list -m golang.org/x/text
stdout 'golang.org/x/text v0.0.0-0.20170915032832-14c0d48ead0c => golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c'
+# A 'replace' directive can replace an invalid 'latest' version, and
+# should suppress errors for that version in 'go get -u'
+cp go.mod.orig go.mod
+go mod edit -require golang.org/x/text@v1.999999.0
+go mod edit -replace golang.org/x/text@v1.999999.0=golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c
+cd outside
+! go get -d golang.org/x/text@upgrade
+stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v1.999999.0: reading golang.org/x/text/go.mod at revision v1.999999.0: unknown revision v1.999999.0'
+cd ..
+go get -d golang.org/x/text@upgrade
+go list -m golang.org/x/text
+stdout 'golang.org/x/text v1.999999.0 => golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c'
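+# (For reference, the 'go mod edit' commands above leave go.mod with:
+#   require golang.org/x/text v1.999999.0
+#   replace golang.org/x/text v1.999999.0 => golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c
+# so the invalid required version resolves through the replacement.)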
+
# A pseudo-version derived from a non-ancestor tag is invalid.
cp go.mod.orig go.mod
go mod edit -require golang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c
! go list -m golang.org/x/text
stderr 'golang.org/x/text@v0.2.1-0.20170915032832-14c0d48ead0c: invalid pseudo-version: revision 14c0d48ead0c is not a descendent of preceding tag \(v0.2.0\)'
+# A pseudo-version derived from a canonical tag on the same revision is invalid.
+cp go.mod.orig go.mod
+go mod edit -require golang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac
+cd outside
+! go list -m golang.org/x/text
+stderr 'go: example.com@v0.0.0 requires\n\tgolang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac: invalid pseudo-version: tag \(v0.2.0\) found on revision c4d099d611ac is already canonical, so should not be replaced with a pseudo-version derived from that tag'
+cd ..
+! go list -m golang.org/x/text
+stderr 'golang.org/x/text@v0.2.1-0.20171213102548-c4d099d611ac: invalid pseudo-version: tag \(v0.2.0\) found on revision c4d099d611ac is already canonical, so should not be replaced with a pseudo-version derived from that tag'
+
# A +incompatible suffix is not allowed on a version that is actually compatible.
cp go.mod.orig go.mod
go mod edit -require golang.org/x/text@v0.1.1-0.20170915032832-14c0d48ead0c+incompatible
stdout 'github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1'
cd ..
-# A +incompatible version for a module that has an explicit go.mod file is invalid.
+# A +incompatible pseudo-version for a module that has an explicit go.mod file is invalid.
cp go.mod.orig go.mod
-go mod edit -require github.com/pierrec/lz4@v2.0.9-0.20190131084431-473cd7ce01a1+incompatible
+go mod edit -require github.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d+incompatible
cd outside
! go list -m github.com/pierrec/lz4
-stderr 'go: example.com@v0.0.0 requires\n\tgithub.com/pierrec/lz4@v2.0.9-0.20190131084431-473cd7ce01a1\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required'
+stderr 'go: example.com@v0.0.0 requires\n\tgithub.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required'
cd ..
! go list -m github.com/pierrec/lz4
-stderr 'github.com/pierrec/lz4@v2.0.9-0.20190131084431-473cd7ce01a1\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required'
+stderr 'github.com/pierrec/lz4@v2.0.9-0.20190209155647-9a39efadad3d\+incompatible: invalid version: \+incompatible suffix not allowed: module contains a go.mod file, so semantic import versioning is required'
# A +incompatible pseudo-version is valid for a revision of the module
# that lacks a go.mod file.
# rsc.io/quote/buggy should not be listable as a module
go list -m -e -f '{{.Error.Err}}' nonexist rsc.io/quote/buggy
-stdout '^module "nonexist" is not a known dependency'
-stdout '^module "rsc.io/quote/buggy" is not a known dependency'
+stdout '^module nonexist: not a known dependency$'
+stdout '^module rsc.io/quote/buggy: not a known dependency$'
! go list -m nonexist rsc.io/quote/buggy
-stderr '^go list -m nonexist: module "nonexist" is not a known dependency'
-stderr '^go list -m rsc.io/quote/buggy: module "rsc.io/quote/buggy" is not a known dependency'
+stderr '^go list -m: module nonexist: not a known dependency'
+stderr '^go list -m: module rsc.io/quote/buggy: not a known dependency'
# Module loader does not interfere with list -e (golang.org/issue/24149).
go list -e -f '{{.Error.Err}}' database
--- /dev/null
+env GO111MODULE=on
+
+[short] skip
+
+# Regression test for golang.org/issue/29667:
+# spurious 'failed to cache compiled Go files' errors.
+# This test failed reliably when run with -count=10
+# on a Linux workstation.
+
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+go list -json -compiled -test=false -export=false -deps=true -- . &
+
+wait
+
+-- go.mod --
+module sandbox/bar
+-- bar.go --
+package bar
+
+import "C"
go list -f '{{.ImportPath}}' .
stdout ^x$
! go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
-stderr 'unknown import path "rsc.io/quote": cannot find package'
+stderr '^can.t load package: package '$WORK'[/\\]gopath/pkg/mod/rsc.io/quote@v1.5.2: can only use path@version syntax with .go get.'
+
go list -e -f '{{with .Error}}{{.}}{{end}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
-stdout 'unknown import path "rsc.io/quote": cannot find package'
+stdout '^package '$WORK'[/\\]gopath/pkg/mod/rsc.io/quote@v1.5.2: can only use path@version syntax with .go get.'
go mod download rsc.io/quote@v1.5.2
go list -f '{{.ImportPath}}' $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
stdout '^rsc.io/quote$'
--- /dev/null
+env GO111MODULE=on
+env GOPROXY=direct
+env GOSUMDB=off
+
+[!net] skip
+[!exec:git] skip
+
+# golang.org/issue/33099: if an import path ends in a major-version suffix,
+# ensure that 'direct' mode can resolve the package to the module.
+# For a while, (*modfetch.codeRepo).Stat was not checking for a go.mod file,
+# which would produce a hard error at the subsequent call to GoMod.
+
+go list all
+
+-- go.mod --
+module example.com
+go 1.13
+
+-- main.go --
+package main
+
+import _ "vcs-test.golang.org/git/v3pkg.git/v3"
+
+func main() {}
go mod download
! go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.2
-stderr 'outside available modules'
+stderr 'can only use path@version syntax with .go get.'
go list $GOPATH/pkg/mod/rsc.io/quote@v1.5.1
stdout 'rsc.io/quote'
env GO111MODULE=on
+# If the current version is not latest, 'go list -u' should include its upgrade.
go list -m -u all
stdout 'rsc.io/quote v1.2.0 \[v1\.5\.2\]'
+# If the current version is latest, 'go list -u' should omit the upgrade.
+go get -d rsc.io/quote@v1.5.2
+go list -m -u all
+stdout 'rsc.io/quote v1.5.2$'
+
+# If the current version is newer than latest, 'go list -u' should
+# omit the upgrade.
+go get -d rsc.io/quote@v1.5.3-pre1
+go list -m -u all
+stdout 'rsc.io/quote v1.5.3-pre1$'
+
+# If the current build list has a higher version and the user asks about
+# a lower one, -u should report the upgrade for the lower one
+# but leave the build list unchanged.
+go list -m -u rsc.io/quote@v1.5.1
+stdout 'rsc.io/quote v1.5.1 \[v1.5.2\]$'
+go list -m -u rsc.io/quote
+stdout 'rsc.io/quote v1.5.3-pre1$'
+
-- go.mod --
module x
require rsc.io/quote v1.2.0
func Test(t *testing.T) {}
-- update-main-expected --
go get: example.com/badchain/c@v1.0.0 updating to
- example.com/badchain/c@v1.1.0: parsing go.mod: unexpected module path "example.com/badchain/wrong"
+ example.com/badchain/c@v1.1.0: parsing go.mod:
+ module declares its path as: badchain.example.com/c
+ but was required as: example.com/badchain/c
-- update-a-expected --
go get: example.com/badchain/a@v1.1.0 requires
example.com/badchain/b@v1.1.0 requires
- example.com/badchain/c@v1.1.0: parsing go.mod: unexpected module path "example.com/badchain/wrong"
+ example.com/badchain/c@v1.1.0: parsing go.mod:
+ module declares its path as: badchain.example.com/c
+ but was required as: example.com/badchain/c
-- list-expected --
go: example.com/badchain/a@v1.1.0 requires
example.com/badchain/b@v1.1.0 requires
- example.com/badchain/c@v1.1.0: parsing go.mod: unexpected module path "example.com/badchain/wrong"
+ example.com/badchain/c@v1.1.0: parsing go.mod:
+ module declares its path as: badchain.example.com/c
+ but was required as: example.com/badchain/c
-- list-missing-expected --
go: m/use imports
- example.com/badchain/c: example.com/badchain/c@v1.1.0: parsing go.mod: unexpected module path "example.com/badchain/wrong"
+ example.com/badchain/c: example.com/badchain/c@v1.1.0: parsing go.mod:
+ module declares its path as: badchain.example.com/c
+ but was required as: example.com/badchain/c
-- list-missing-test-expected --
go: m/testuse tested by
m/testuse.test imports
- example.com/badchain/c: example.com/badchain/c@v1.1.0: parsing go.mod: unexpected module path "example.com/badchain/wrong"
+ example.com/badchain/c: example.com/badchain/c@v1.1.0: parsing go.mod:
+ module declares its path as: badchain.example.com/c
+ but was required as: example.com/badchain/c
stdout 'rsc.io/quote v1.5.2$'
! go list -m rsc.io/quote@>v1.5.3
-stderr 'go list -m rsc.io/quote: no matching versions for query ">v1.5.3"'
+stderr 'go list -m: module rsc.io/quote: no matching versions for query ">v1.5.3"'
go list -m -e -f '{{.Error.Err}}' rsc.io/quote@>v1.5.3
stdout 'no matching versions for query ">v1.5.3"'
! stdout 'example.com/join/subpkg'
stdout 'example.com/join v1.1.0'
+# If the proxy provides an empty @v/list but rejects @latest with
+# some other explicit error (for example, a "permission denied" error),
+# that error should be reported to the user (and override a successful
+# result for other possible module paths).
+#
+# Depending on how the specific platform enforces permissions, the 'go get' may
+# fail either due to the intended permission error or due to a parse error.
+# We accept either failure message.
+env GOPROXY=file:///$WORK/gatekeeper
+chmod 0000 $WORK/gatekeeper/example.com/join/subpkg/@latest
+cp go.mod.orig go.mod
+! go get -d example.com/join/subpkg
+stderr 'go get example.com/join/subpkg: module example.com/join/subpkg: (invalid character .+|reading file://.*/gatekeeper/example.com/join/subpkg/@latest: .+)'
+
-- go.mod.orig --
module example.com/othermodule
go 1.13
v1.1.0
-- $WORK/notfound/example.com/join/@v/v1.1.0.info --
{"Version": "v1.1.0"}
+-- $WORK/gatekeeper/example.com/join/subpkg/@v/list --
+-- $WORK/gatekeeper/example.com/join/subpkg/@latest --
+ERROR: Latest version is forbidden.
+-- $WORK/gatekeeper/example.com/join/@v/list --
+v1.1.0
+-- $WORK/gatekeeper/example.com/join/@v/v1.1.0.info --
+{"Version": "v1.1.0"}
cp go.mod.orig go.mod
env GOSUMDB=$sumdb' '$proxy/sumdb-wrong
! go get -d rsc.io/quote
-stderr 'verifying rsc.io/quote@v1.5.2/go.mod: checksum mismatch'
-stderr 'downloaded: h1:LzX7'
+stderr 'verifying rsc.io/quote@v1.5.2: checksum mismatch'
+stderr 'downloaded: h1:3fEy'
stderr 'localhost.localdev/sumdb: h1:wrong'
stderr 'SECURITY ERROR\nThis download does NOT match the one reported by the checksum server.'
! go get -d rsc.io/sampler
rm go.sum
env GOPROXY=off
go get -d rsc.io/quote@v1.5.2 # using cache
-rm $GOPATH/pkg/mod/download/cache/sumdb/localhost.localdev/sumdb/lookup/rsc.io/quote@v1.5.2
+rm $GOPATH/pkg/mod/cache/download/sumdb/localhost.localdev/sumdb/lookup/rsc.io/quote@v1.5.2
go get -d rsc.io/quote@v1.5.2 # using go.sum
# fetch fails once we lose access to both cache and go.sum
env GO111MODULE=on
env GOSUMDB=
+env GOPATH=$WORK/gopath1
# With a file-based proxy with an empty checksum directory,
# downloading a new module should fail, even if a subsequent
[!windows] env GOPROXY=file://$WORK/emptyproxy,https://proxy.golang.org
go get -d golang.org/x/text@v0.3.2
+# After a successful sumdb lookup, the lookup can be repeated
+# using the download cache as a proxy.
+cp supported $GOPATH/pkg/mod/cache/download/sumdb/sum.golang.org/supported
+[windows] env GOPROXY=file:///$WORK/gopath1/pkg/mod/cache/download,file:///$WORK/sumproxy
+[!windows] env GOPROXY=file://$WORK/gopath1/pkg/mod/cache/download,file://$WORK/sumproxy
+env GOPATH=$WORK/gopath2
+rm go.sum
+go get -d -x -v golang.org/x/text@v0.3.2
+
# Once the checksum is present in the go.sum file,
# an empty file-based sumdb can be used in conjunction with
# a fallback module mirror.
grep golang.org/x/text go.sum
-go clean -modcache
+env GOPATH=$WORK/gopath3
[windows] env GOPROXY=file:///$WORK/sumproxy
[!windows] env GOPROXY=file://$WORK/sumproxy
! go get -d golang.org/x/text@v0.3.2
[!windows] env GOPROXY=file://$WORK/sumproxy,https://proxy.golang.org
go get -d golang.org/x/text@v0.3.2
+-- supported --
+
-- go.mod --
module example.com
go 1.13
[!exec:git] skip
env GOSUMDB=sum.golang.org
env GOPROXY=direct
-go get -d rsc.io/quote
+go get -d rsc.io/quote@v1.5.2
+cp go.sum saved.sum
# download from proxy.golang.org with go.sum entry already
go clean -modcache
env GOSUMDB=
env GOPROXY=
-go get -x -d rsc.io/quote
+go get -x -d rsc.io/quote@v1.5.2
! stderr github
stderr proxy.golang.org/rsc.io/quote
! stderr sum.golang.org/tile
! stderr sum.golang.org/lookup/rsc.io/quote
+cmp go.sum saved.sum
-# download again, using checksum database to validate new go.sum lines
+# Download again.
+# Should use the checksum database to validate new go.sum lines,
+# but not need to fetch any new data from the proxy.
rm go.sum
-go get -x -d rsc.io/quote
+go get -x -d rsc.io/quote@v1.5.2
! stderr github
-stderr proxy.golang.org/rsc.io/quote
+! stderr proxy.golang.org/rsc.io/quote
stderr sum.golang.org/tile
stderr sum.golang.org/lookup/rsc.io/quote
+cmp go.sum saved.sum
# test fallback to direct
env TESTGOPROXY404=1
-go get -x -d rsc.io/quote
+go clean -modcache
+rm go.sum
+go get -x -d rsc.io/quote@v1.5.2
stderr 'proxy.golang.org.*404 testing'
stderr github.com/rsc
+cmp go.sum saved.sum
-- go.mod --
module m
# basic fetch (through proxy) works
cp go.mod.orig go.mod
go get -d rsc.io/fortune@v1.0.0 # note: must use test proxy, does not exist in real world
-rm $GOPATH/pkg/mod/download/cache/sumdb # rm sumdb cache but NOT package download cache
+rm $GOPATH/pkg/mod/cache/download/sumdb # rm sumdb cache but NOT package download cache
rm go.sum
# can fetch by explicit URL
cp go.mod.orig go.mod
env GOSUMDB=$sumdb' '$proxy/sumdb-direct
go get -d rsc.io/fortune@v1.0.0
-rm $GOPATH/pkg/mod/download/cache/sumdb
+rm $GOPATH/pkg/mod/cache/download/sumdb
rm go.sum
# direct access fails (because localhost.localdev does not exist)
env GOPROXY=direct
! go get -d rsc.io/fortune@v1.0.0
stderr 'verifying.*localhost.localdev'
-rm $GOPATH/pkg/mod/download/cache/sumdb
+rm $GOPATH/pkg/mod/cache/download/sumdb
rm go.sum
# proxy 404 falls back to direct access (which fails)
env GOPROXY=$proxy/sumdb-404
! go get -d rsc.io/fortune@v1.0.0
stderr 'verifying.*localhost.localdev'
-rm $GOPATH/pkg/mod/download/cache/sumdb
+rm $GOPATH/pkg/mod/cache/download/sumdb
rm go.sum
# proxy non-200/404/410 stops direct access
env GOPROXY=$proxy/sumdb-503
! go get -d rsc.io/fortune@v1.0.0
stderr '503 Service Unavailable'
-rm $GOPATH/pkg/mod/download/cache/sumdb
+rm $GOPATH/pkg/mod/cache/download/sumdb
rm go.sum
-- go.mod.orig --
--- /dev/null
+[short] skip
+
+env GO111MODULE=on
+env GOCACHE=$WORK/gocache
+env GODEBUG=gocachetest=1
+
+# The first run of a test should not be cached.
+# The second run should be.
+go test -run=WriteTmp .
+! stdout '(cached)'
+go test -run=WriteTmp .
+stdout '(cached)'
+
+# 'go test' without arguments should never be cached.
+go test -run=WriteTmp
+! stdout '(cached)'
+go test -run=WriteTmp
+! stdout '(cached)'
+
+# We should never cache a test run from command-line files.
+go test -run=WriteTmp ./foo_test.go
+! stdout '(cached)'
+go test -run=WriteTmp ./foo_test.go
+! stdout '(cached)'
+
+[!exec:sleep] stop
+# The go command refuses to cache access to files younger than 2s, so sleep that long.
+exec sleep 2
+
+# Touching a file that the test reads from within its testdata should invalidate the cache.
+go test -run=ReadTestdata .
+! stdout '(cached)'
+go test -run=ReadTestdata .
+stdout '(cached)'
+cp testdata/bar.txt testdata/foo.txt
+go test -run=ReadTestdata .
+! stdout '(cached)'
+
+-- go.mod --
+module golang.org/issue/29111/foo
+
+-- foo.go --
+package foo
+
+-- testdata/foo.txt --
+foo
+-- testdata/bar.txt --
+bar
+
+-- foo_test.go --
+package foo_test
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestWriteTmp(t *testing.T) {
+ dir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+ err = ioutil.WriteFile(filepath.Join(dir, "x"), nil, 0666)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestReadTestdata(t *testing.T) {
+ _, err := ioutil.ReadFile("testdata/foo.txt")
+ if err != nil {
+ t.Fatal(err)
+ }
+}
--- /dev/null
+env GO111MODULE=on
+
+# Regression test for golang.org/issue/27063:
+# 'go mod tidy' and 'go mod vendor' should not hide loading errors.
+
+! go mod tidy
+stderr '^issue27063 imports\n\tnonexist: malformed module path "nonexist": missing dot in first path element'
+stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com'
+stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist'
+
+! go mod vendor
+stderr '^issue27063 imports\n\tnonexist: malformed module path "nonexist": missing dot in first path element'
+stderr '^issue27063 imports\n\tnonexist.example.com: cannot find module providing package nonexist.example.com'
+stderr '^issue27063 imports\n\tissue27063/other imports\n\tother.example.com/nonexist: cannot find module providing package other.example.com/nonexist'
+
+-- go.mod --
+module issue27063
+
+go 1.13
+
+require issue27063/other v0.0.0
+replace issue27063/other => ./other
+-- x.go --
+package main
+
+import (
+ "nonexist"
+
+ "nonexist.example.com"
+ "issue27063/other"
+)
+
+func main() {}
+-- other/go.mod --
+module issue27063/other
+-- other/other.go --
+package other
+
+import "other.example.com/nonexist"
import _ "appengine"
import _ "appengine/datastore"
--- nonexistent.go --
-// +build alternatereality
-
-package m
-
-import _ "nonexistent.rsc.io"
-- mypkg/go.mod --
module me
-- mypkg/mydir/d.go --
--- /dev/null
+env GO111MODULE=on
+go mod init foo
+go test
+stdout ^ok\s+foo
+env GO111MODULE=off
+go test
+stdout ^ok\s+
+! stdout ^ok\s+(cache)$
+
+-- main_test.go --
+package main
+
+import "testing"
+
+func TestF(t *testing.T) {}
+++ /dev/null
-# Tests for automatic testing.Init calls when using 'go test'.
-
-env GO111MODULE=on
-
-# A TestMain should be able to access testing flags if it calls flag.Parse
-# without needing to use testing.Init.
-# Test code can use the name 'testing' without colliding with generated
-# testinginit code.
-# Tests running under 'go test' should observe that testing.Init is called
-# before any user package initialization code runs.
-go test
-stdout TestMain
-stdout TestInit
-stdout TestExt
-
--- go.mod --
-module m
-
--- init_test.go --
-package testinitflag
-
-import (
- "flag"
- "fmt"
- "os"
- Testing "testing"
-)
-
-func testFlagsInitialized() bool {
- found := false
- flag.VisitAll(func(f *flag.Flag) {
- if f.Name == "test.count" {
- found = true
- }
- })
- return found
-}
-
-var testing int
-var testingInitAtInitialization = testFlagsInitialized()
-
-func TestInit(t *Testing.T) {
- if !testingInitAtInitialization {
- t.Fatal("testing.Init not called before package initialization")
- }
- fmt.Printf("TestInit\n")
-}
-
-func TestMain(m *Testing.M) {
- fmt.Printf("TestMain\n")
- flag.Parse()
- if !testFlagsInitialized() {
- fmt.Println("testing flags not registered")
- os.Exit(1)
- }
- os.Exit(m.Run())
-}
-
--- external_test.go --
-package testinitflag_test
-
-import (
- "flag"
- "fmt"
- Testing "testing"
-)
-
-func testFlagsInitialized() bool {
- found := false
- flag.VisitAll(func(f *flag.Flag) {
- if f.Name == "test.count" {
- found = true
- }
- })
- return found
-}
-
-var testing int
-var testingInitAtInitialization = testFlagsInitialized()
-
-func TestExt(t *Testing.T) {
- fmt.Printf("TestExt\n")
- if !testingInitAtInitialization {
- t.Fatal("testing.Init not called before package initialization")
- }
-}
stdout '^\tpath\trsc.io/fortune'
stdout '^\tmod\trsc.io/fortune\tv1.0.0'
+go build -buildmode=pie -o external.exe rsc.io/fortune
+go version external.exe
+stdout '^external.exe: .+'
+go version -m external.exe
+stdout '^\tpath\trsc.io/fortune'
+stdout '^\tmod\trsc.io/fortune\tv1.0.0'
+
-- go.mod --
module m
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package standalone_testmain_flag_test
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ // A TestMain should be able to access testing flags if it calls
+ // flag.Parse without needing to use testing.Init.
+ flag.Parse()
+ found := false
+ flag.VisitAll(func(f *flag.Flag) {
+ if f.Name == "test.count" {
+ found = true
+ }
+ })
+ if !found {
+ fmt.Println("testing flags not registered")
+ os.Exit(1)
+ }
+ os.Exit(m.Run())
+}
--- /dev/null
+package p
+
+const (
+ // 0-octals
+ _ = 0
+ _ = 0123
+ _ = 0123456
+
+ _ = 0_123
+ _ = 0123_456
+
+ // decimals
+ _ = 1
+ _ = 1234
+ _ = 1234567
+
+ _ = 1_234
+ _ = 1_234_567
+
+ // hexadecimals
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xcafef00d
+
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xCAFEf00d
+
+ _ = 0x_0
+ _ = 0x_1234
+ _ = 0x_CAFE_f00d
+
+ // octals
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ // binaries
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0b_0
+ _ = 0b10_11
+ _ = 0b_0010_1101
+
+ // decimal floats
+ _ = 0.
+ _ = 123.
+ _ = 0123.
+
+ _ = .0
+ _ = .123
+ _ = .0123
+
+ _ = 0e0
+ _ = 123e+0
+ _ = 0123e-1
+
+ _ = 0e-0
+ _ = 123e+0
+ _ = 0123e123
+
+ _ = 0.e+1
+ _ = 123.e-10
+ _ = 0123.e123
+
+ _ = .0e-1
+ _ = .123e+10
+ _ = .0123e123
+
+ _ = 0.0
+ _ = 123.123
+ _ = 0123.0123
+
+ _ = 0.0e1
+ _ = 123.123e-10
+ _ = 0123.0123e+456
+
+ _ = 1_2_3.
+ _ = 0_123.
+
+ _ = 0_0e0
+ _ = 1_2_3e0
+ _ = 0_123e0
+
+ _ = 0e-0_0
+ _ = 1_2_3e+0
+ _ = 0123e1_2_3
+
+ _ = 0.e+1
+ _ = 123.e-1_0
+ _ = 01_23.e123
+
+ _ = .0e-1
+ _ = .123e+10
+ _ = .0123e123
+
+ _ = 1_2_3.123
+ _ = 0123.01_23
+
+ // hexadecimal floats
+ _ = 0x0.p+0
+ _ = 0xdeadcafe.p-10
+ _ = 0x1234.p123
+
+ _ = 0x.1p-0
+ _ = 0x.deadcafep2
+ _ = 0x.1234p+10
+
+ _ = 0x0p0
+ _ = 0xdeadcafep+1
+ _ = 0x1234p-10
+
+ _ = 0x0.0p0
+ _ = 0xdead.cafep+1
+ _ = 0x12.34p-10
+
+ _ = 0xdead_cafep+1
+ _ = 0x_1234p-10
+
+ _ = 0x_dead_cafe.p-10
+ _ = 0x12_34.p1_2_3
+ _ = 0x1_2_3_4.p-1_2_3
+
+ // imaginaries
+ _ = 0i
+ _ = 0i
+ _ = 8i
+ _ = 0i
+ _ = 123i
+ _ = 123i
+ _ = 56789i
+ _ = 1234i
+ _ = 1234567i
+
+ _ = 0i
+ _ = 0i
+ _ = 8i
+ _ = 0i
+ _ = 123i
+ _ = 123i
+ _ = 56_789i
+ _ = 1_234i
+ _ = 1_234_567i
+
+ _ = 0.i
+ _ = 123.i
+ _ = 0123.i
+ _ = 000123.i
+
+ _ = 0e0i
+ _ = 123e0i
+ _ = 0123e0i
+ _ = 000123e0i
+
+ _ = 0.e+1i
+ _ = 123.e-1_0i
+ _ = 01_23.e123i
+ _ = 00_01_23.e123i
+
+ _ = 0b1010i
+ _ = 0b1010i
+ _ = 0o660i
+ _ = 0o660i
+ _ = 0xabcDEFi
+ _ = 0xabcDEFi
+ _ = 0xabcDEFp0i
+ _ = 0xabcDEFp0i
+)
--- /dev/null
+package p
+
+const (
+ // 0-octals
+ _ = 0
+ _ = 0123
+ _ = 0123456
+
+ _ = 0_123
+ _ = 0123_456
+
+ // decimals
+ _ = 1
+ _ = 1234
+ _ = 1234567
+
+ _ = 1_234
+ _ = 1_234_567
+
+ // hexadecimals
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xcafef00d
+
+ _ = 0X0
+ _ = 0X1234
+ _ = 0XCAFEf00d
+
+ _ = 0X_0
+ _ = 0X_1234
+ _ = 0X_CAFE_f00d
+
+ // octals
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0O0
+ _ = 0O1234
+ _ = 0O01234567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ _ = 0O_0
+ _ = 0O_1234
+ _ = 0O0123_4567
+
+ // binaries
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0B0
+ _ = 0B1011
+ _ = 0B00101101
+
+ _ = 0b_0
+ _ = 0b10_11
+ _ = 0b_0010_1101
+
+ // decimal floats
+ _ = 0.
+ _ = 123.
+ _ = 0123.
+
+ _ = .0
+ _ = .123
+ _ = .0123
+
+ _ = 0e0
+ _ = 123e+0
+ _ = 0123E-1
+
+ _ = 0e-0
+ _ = 123E+0
+ _ = 0123E123
+
+ _ = 0.e+1
+ _ = 123.E-10
+ _ = 0123.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 0.0
+ _ = 123.123
+ _ = 0123.0123
+
+ _ = 0.0e1
+ _ = 123.123E-10
+ _ = 0123.0123e+456
+
+ _ = 1_2_3.
+ _ = 0_123.
+
+ _ = 0_0e0
+ _ = 1_2_3e0
+ _ = 0_123e0
+
+ _ = 0e-0_0
+ _ = 1_2_3E+0
+ _ = 0123E1_2_3
+
+ _ = 0.e+1
+ _ = 123.E-1_0
+ _ = 01_23.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 1_2_3.123
+ _ = 0123.01_23
+
+ // hexadecimal floats
+ _ = 0x0.p+0
+ _ = 0Xdeadcafe.p-10
+ _ = 0x1234.P123
+
+ _ = 0x.1p-0
+ _ = 0X.deadcafep2
+ _ = 0x.1234P+10
+
+ _ = 0x0p0
+ _ = 0Xdeadcafep+1
+ _ = 0x1234P-10
+
+ _ = 0x0.0p0
+ _ = 0Xdead.cafep+1
+ _ = 0x12.34P-10
+
+ _ = 0Xdead_cafep+1
+ _ = 0x_1234P-10
+
+ _ = 0X_dead_cafe.p-10
+ _ = 0x12_34.P1_2_3
+ _ = 0X1_2_3_4.P-1_2_3
+
+ // imaginaries
+ _ = 0i
+ _ = 00i
+ _ = 08i
+ _ = 0000000000i
+ _ = 0123i
+ _ = 0000000123i
+ _ = 0000056789i
+ _ = 1234i
+ _ = 1234567i
+
+ _ = 0i
+ _ = 0_0i
+ _ = 0_8i
+ _ = 0_000_000_000i
+ _ = 0_123i
+ _ = 0_000_000_123i
+ _ = 0_000_056_789i
+ _ = 1_234i
+ _ = 1_234_567i
+
+ _ = 0.i
+ _ = 123.i
+ _ = 0123.i
+ _ = 000123.i
+
+ _ = 0e0i
+ _ = 123e0i
+ _ = 0123E0i
+ _ = 000123E0i
+
+ _ = 0.e+1i
+ _ = 123.E-1_0i
+ _ = 01_23.e123i
+ _ = 00_01_23.e123i
+
+ _ = 0b1010i
+ _ = 0B1010i
+ _ = 0o660i
+ _ = 0O660i
+ _ = 0xabcDEFi
+ _ = 0XabcDEFi
+ _ = 0xabcDEFP0i
+ _ = 0XabcDEFp0i
+)
"math"
)
+import (
+ "fmt"
+ "math"
+)
+
import (
"fmt"
"io"
)
+import("fmt"
+"math")
+
import (
"fmt"
--- /dev/null
+//gofmt -r=a&&b!=2->a
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 18987.
+
+package p
+
+const _ = x != 1
--- /dev/null
+//gofmt -r=a&&b!=2->a
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 18987.
+
+package p
+
+const _ = x != 1 && x != 2
--- /dev/null
+package q
+
+import "p"
+
+type _ = int
+type a = struct{ x int }
+type b = p.B
+
+type (
+ _ = chan<- int
+ aa = interface{}
+ bb = p.BB
+)
+
+// TODO(gri) We may want to put the '=' into a separate column if
+// we have mixed (regular and alias) type declarations in a group.
+type (
+ _ chan<- int
+ _ = chan<- int
+ aa0 interface{}
+ aaa = interface{}
+ bb0 p.BB
+ bbb = p.BB
+)
--- /dev/null
+package q
+
+import "p"
+
+type _ = int
+type a = struct{ x int }
+type b = p.B
+
+type (
+ _ = chan<- int
+ aa = interface{}
+ bb = p.BB
+)
+
+// TODO(gri) We may want to put the '=' into a separate column if
+// we have mixed (regular and alias) type declarations in a group.
+type (
+ _ chan<- int
+ _ = chan<- int
+ aa0 interface{}
+ aaa = interface{}
+ bb0 p.BB
+ bbb = p.BB
+)
name = name[strings.LastIndex(name, `/`)+1:]
name = name[strings.LastIndex(name, `\`)+1:]
name = strings.TrimSuffix(name, ".exe")
+
+ // If there's an active experiment, include that,
+ // to distinguish go1.10.2 with an experiment
+ // from go1.10.2 without an experiment.
p := Expstring()
if p == DefaultExpstring() {
p = ""
// build ID of the binary, so that if the compiler is changed and
// rebuilt, we notice and rebuild all packages.
if s == "full" {
- // If there's an active experiment, include that,
- // to distinguish go1.10.2 with an experiment
- // from go1.10.2 without an experiment.
- if x := Expstring(); x != "" {
- p += " " + x
- }
if strings.HasPrefix(Version, "devel") {
p += " buildID=" + buildID
}
import (
"errors"
- "internal/oserror"
"internal/reflectlite"
"sync"
"time"
func (deadlineExceededError) Error() string { return "context deadline exceeded" }
func (deadlineExceededError) Timeout() bool { return true }
func (deadlineExceededError) Temporary() bool { return true }
-func (deadlineExceededError) Is(target error) bool {
- return target == oserror.ErrTimeout || target == oserror.ErrTemporary
-}
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
package context
import (
- "errors"
"fmt"
"math/rand"
- "os"
"runtime"
"strings"
"sync"
if !i.Timeout() {
t.Fatal("wrong value for timeout")
}
- if !errors.Is(DeadlineExceeded, os.ErrTimeout) {
- t.Fatal("errors.Is(DeadlineExceeded, os.ErrTimeout) = false, want true")
- }
}
}
// DecryptOAEP decrypts ciphertext using RSA-OAEP.
-
+//
// OAEP is parameterised by a hash function that is used as a random oracle.
// Encryption and decryption of a given message must use the same hash function
// and sha256.New() is a reasonable choice.
)
const (
- VersionSSL30 = 0x0300
VersionTLS10 = 0x0301
VersionTLS11 = 0x0302
VersionTLS12 = 0x0303
VersionTLS13 = 0x0304
+
+ // Deprecated: SSLv3 is cryptographically broken, and will be
+ // removed in Go 1.14. See golang.org/issue/32716.
+ VersionSSL30 = 0x0300
)
const (
func (c *Config) supportedVersions(isClient bool) []uint16 {
versions := make([]uint16, 0, len(supportedVersions))
for _, v := range supportedVersions {
+ // TLS 1.0 is the default minimum version.
+ if (c == nil || c.MinVersion == 0) && v < VersionTLS10 {
+ continue
+ }
if c != nil && c.MinVersion != 0 && v < c.MinVersion {
continue
}
}, "unsupported versions")
}
+func TestSSLv3OptIn(t *testing.T) {
+ config := testConfig.Clone()
+ config.MinVersion = 0
+ testClientHelloFailure(t, config, &clientHelloMsg{
+ vers: VersionSSL30,
+ random: make([]byte, 32),
+ }, "unsupported versions")
+ testClientHelloFailure(t, config, &clientHelloMsg{
+ vers: VersionTLS12,
+ supportedVersions: []uint16{VersionSSL30},
+ random: make([]byte, 32),
+ }, "unsupported versions")
+}
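To illustrate the opt-in behavior tested above: with the new default, SSL 3.0 is excluded whenever MinVersion is zero, and an application must set MinVersion explicitly to offer it. A minimal sketch (not recommended in practice, since SSLv3 is deprecated and slated for removal in Go 1.14):

package main

import "crypto/tls"

func main() {
	// Default: SSL 3.0 is never negotiated while MinVersion is zero.
	_ = &tls.Config{}

	// Explicit opt-in, only possible until SSLv3 support is removed.
	legacy := &tls.Config{MinVersion: tls.VersionSSL30}
	_ = legacy
}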
+
func TestNoSuiteOverlap(t *testing.T) {
clientHello := &clientHelloMsg{
vers: VersionTLS10,
// localListener is set up by TestMain and used by localPipe to create Conn
// pairs like net.Pipe, but connected by an actual buffered TCP connection.
var localListener struct {
- sync.Mutex
- net.Listener
+ mu sync.Mutex
+ addr net.Addr
+ ch chan net.Conn
+}
+
+const localFlakes = 0 // change to 1 or 2 to exercise localServer/localPipe handling of mismatches
+
+func localServer(l net.Listener) {
+ for n := 0; ; n++ {
+ c, err := l.Accept()
+ if err != nil {
+ return
+ }
+ if localFlakes == 1 && n%2 == 0 {
+ c.Close()
+ continue
+ }
+ localListener.ch <- c
+ }
}
func localPipe(t testing.TB) (net.Conn, net.Conn) {
- localListener.Lock()
- defer localListener.Unlock()
- c := make(chan net.Conn)
- go func() {
- conn, err := localListener.Accept()
+ localListener.mu.Lock()
+ defer localListener.mu.Unlock()
+
+ addr := localListener.addr
+
+Dialing:
+ // We expect a rare mismatch, but probably not 5 in a row.
+ for i := 0; i < 5; i++ {
+ tooSlow := time.NewTimer(1 * time.Second)
+ defer tooSlow.Stop()
+ c1, err := net.Dial(addr.Network(), addr.String())
if err != nil {
- t.Errorf("Failed to accept local connection: %v", err)
+ t.Fatalf("localPipe: %v", err)
+ }
+ if localFlakes == 2 && i == 0 {
+ c1.Close()
+ continue
+ }
+ for {
+ select {
+ case <-tooSlow.C:
+ t.Logf("localPipe: timeout waiting for %v", c1.LocalAddr())
+ c1.Close()
+ continue Dialing
+
+ case c2 := <-localListener.ch:
+ if c2.RemoteAddr().String() == c1.LocalAddr().String() {
+ return c1, c2
+ }
+ t.Logf("localPipe: unexpected connection: %v != %v", c2.RemoteAddr(), c1.LocalAddr())
+ c2.Close()
+ }
}
- c <- conn
- }()
- addr := localListener.Addr()
- c1, err := net.Dial(addr.Network(), addr.String())
- if err != nil {
- t.Fatalf("Failed to dial local connection: %v", err)
}
- c2 := <-c
- return c1, c2
+
+ t.Fatalf("localPipe: failed to connect")
+ panic("unreachable")
}
// zeroSource is an io.Reader that returns an unlimited number of zero bytes.
fmt.Fprintf(os.Stderr, "Failed to open local listener: %v", err)
os.Exit(1)
}
- localListener.Listener = l
- defer localListener.Close()
+ localListener.ch = make(chan net.Conn)
+ localListener.addr = l.Addr()
+ defer l.Close()
+ go localServer(l)
if err := checkOpenSSLVersion(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %v", err)
}
}
-func TestVerifyHostnameResumed(t *testing.T) {
- t.Run("TLSv12", func(t *testing.T) { testVerifyHostnameResumed(t, VersionTLS12) })
- t.Run("TLSv13", func(t *testing.T) { testVerifyHostnameResumed(t, VersionTLS13) })
-}
-
-func testVerifyHostnameResumed(t *testing.T, version uint16) {
- testenv.MustHaveExternalNetwork(t)
-
- config := &Config{
- MaxVersion: version,
- ClientSessionCache: NewLRUClientSessionCache(32),
- }
- for i := 0; i < 2; i++ {
- c, err := DialWithDialer(&net.Dialer{
- Timeout: 10 * time.Second,
- }, "tcp", "mail.google.com:https", config)
- if err != nil {
- t.Fatalf("Dial #%d: %v", i, err)
- }
- cs := c.ConnectionState()
- if i > 0 && !cs.DidResume {
- t.Fatalf("Subsequent connection unexpectedly didn't resume")
- }
- if cs.Version != version {
- t.Fatalf("Unexpectedly negotiated version %x", cs.Version)
- }
- if cs.VerifiedChains == nil {
- t.Fatalf("Dial #%d: cs.VerifiedChains == nil", i)
- }
- if err := c.VerifyHostname("mail.google.com"); err != nil {
- t.Fatalf("verify mail.google.com #%d: %v", i, err)
- }
- // Have the server send some data so session tickets are delivered.
- c.SetDeadline(time.Now().Add(5 * time.Second))
- if _, err := io.WriteString(c, "HEAD / HTTP/1.0\n\n"); err != nil {
- t.Fatal(err)
- }
- if _, err := c.Read(make([]byte, 1)); err != nil {
- t.Fatal(err)
- }
- c.Close()
- }
-}
-
func TestConnCloseBreakingWrite(t *testing.T) {
ln := newLocalListener(t)
defer ln.Close()
Info, Other byte
Section SectionIndex
Value, Size uint64
+
+ // Version and Library are present only for the dynamic symbol
+ // table.
+ Version string
+ Library string
}
/*
// DynamicSymbols returns the dynamic symbol table for f. The symbols
// will be listed in the order they appear in f.
//
+// If f has a symbol version table, the returned Symbols will have
+// initialized Version and Library fields.
+//
// For compatibility with Symbols, DynamicSymbols omits the null symbol at index 0.
// After retrieving the symbols as symtab, an externally supplied index x
// corresponds to symtab[x-1], not symtab[x].
func (f *File) DynamicSymbols() ([]Symbol, error) {
- sym, _, err := f.getSymbols(SHT_DYNSYM)
- return sym, err
+ sym, str, err := f.getSymbols(SHT_DYNSYM)
+ if err != nil {
+ return nil, err
+ }
+ if f.gnuVersionInit(str) {
+ for i := range sym {
+ sym[i].Library, sym[i].Version = f.gnuVersion(i)
+ }
+ }
+ return sym, nil
}
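A short sketch of how a caller could observe the newly populated fields; the binary path is just an example, and files without a GNU symbol version table leave both fields empty.

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("/bin/ls") // example dynamically linked binary
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	syms, err := f.DynamicSymbols()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range syms {
		fmt.Printf("%-30s %-15s %s\n", s.Name, s.Version, s.Library)
	}
}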
type ImportedSymbol struct {
for i, s := range sym {
if ST_BIND(s.Info) == STB_GLOBAL && s.Section == SHN_UNDEF {
all = append(all, ImportedSymbol{Name: s.Name})
- f.gnuVersion(i, &all[len(all)-1])
+ sym := &all[len(all)-1]
+ sym.Library, sym.Version = f.gnuVersion(i)
}
}
return all, nil
// gnuVersionInit parses the GNU version tables
// for use by calls to gnuVersion.
-func (f *File) gnuVersionInit(str []byte) {
+func (f *File) gnuVersionInit(str []byte) bool {
+ if f.gnuNeed != nil {
+ // Already initialized
+ return true
+ }
+
// Accumulate verneed information.
vn := f.SectionByType(SHT_GNU_VERNEED)
if vn == nil {
- return
+ return false
}
d, _ := vn.Data()
// Versym parallels symbol table, indexing into verneed.
vs := f.SectionByType(SHT_GNU_VERSYM)
if vs == nil {
- return
+ return false
}
d, _ = vs.Data()
f.gnuNeed = need
f.gnuVersym = d
+ return true
}
// gnuVersion returns the Library and Version information for the symbol,
// which came from offset i of the symbol table.
-func (f *File) gnuVersion(i int, sym *ImportedSymbol) {
+func (f *File) gnuVersion(i int) (library string, version string) {
// Each entry is two bytes.
i = (i + 1) * 2
if i >= len(f.gnuVersym) {
return
}
n := &f.gnuNeed[j]
- sym.Library = n.File
- sym.Version = n.Name
+ return n.File, n.Name
}
// ImportedLibraries returns the names of all libraries
Section: 0x0,
Value: 0x0,
Size: 0x18C,
+ Version: "GLIBC_2.2.5",
+ Library: "libc.so.6",
},
Symbol{
Name: "__libc_start_main",
Section: 0x0,
Value: 0x0,
Size: 0x1C2,
+ Version: "GLIBC_2.2.5",
+ Library: "libc.so.6",
},
},
"testdata/go-relocation-test-clang-x86.obj": {},
}
}
-// Writer writes a single CSV record to w along with any necessary quoting.
+// Write writes a single CSV record to w along with any necessary quoting.
// A record is a slice of strings with each string being one field.
// Writes are buffered, so Flush must eventually be called to ensure
// that the record is written to the underlying io.Writer.
savedError error
useNumber bool
disallowUnknownFields bool
- // safeUnquote is the number of current string literal bytes that don't
- // need to be unquoted. When negative, no bytes need unquoting.
- safeUnquote int
}
// readIndex returns the position of the last byte read.
Switch:
switch data[i-1] {
case '"': // string
- // safeUnquote is initialized at -1, which means that all bytes
- // checked so far can be unquoted at a later time with no work
- // at all. When reaching the closing '"', if safeUnquote is
- // still -1, all bytes can be unquoted with no work. Otherwise,
- // only those bytes up until the first '\\' or non-ascii rune
- // can be safely unquoted.
- safeUnquote := -1
for ; i < len(data); i++ {
- if c := data[i]; c == '\\' {
- if safeUnquote < 0 { // first unsafe byte
- safeUnquote = int(i - d.off)
- }
+ switch data[i] {
+ case '\\':
i++ // escaped char
- } else if c == '"' {
- d.safeUnquote = safeUnquote
+ case '"':
i++ // tokenize the closing quote too
break Switch
- } else if c >= utf8.RuneSelf {
- if safeUnquote < 0 { // first unsafe byte
- safeUnquote = int(i - d.off)
- }
}
}
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
start := d.readIndex()
d.rescanLiteral()
item := d.data[start:d.readIndex()]
- key, ok := d.unquoteBytes(item)
+ key, ok := unquoteBytes(item)
if !ok {
panic(phasePanicMsg)
}
d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
return nil
}
- s, ok := d.unquoteBytes(item)
+ s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
}
case '"': // string
- s, ok := d.unquoteBytes(item)
+ s, ok := unquoteBytes(item)
if !ok {
if fromQuoted {
return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
start := d.readIndex()
d.rescanLiteral()
item := d.data[start:d.readIndex()]
- key, ok := d.unquote(item)
+ key, ok := unquote(item)
if !ok {
panic(phasePanicMsg)
}
return c == 't'
case '"': // string
- s, ok := d.unquote(item)
+ s, ok := unquote(item)
if !ok {
panic(phasePanicMsg)
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules differ from Go's, so strconv.Unquote cannot be used.
-func (d *decodeState) unquote(s []byte) (t string, ok bool) {
- s, ok = d.unquoteBytes(s)
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
t = string(s)
return
}
-func (d *decodeState) unquoteBytes(s []byte) (t []byte, ok bool) {
- r := d.safeUnquote
- // The bytes have been scanned, so we know that the first and last bytes
- // are double quotes.
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
s = s[1 : len(s)-1]
- // If there are no unusual characters, no unquoting is needed, so return
- // a slice of the original bytes.
- if r == -1 {
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
return s, true
}
{`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
{`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
{`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
+ {`{"result":"\""}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"" into string`},
+ {`{"result":"\"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "\"foo" into string`},
}
// If people misuse the ,string modifier, the error message should be
// string, an integer type, or implement encoding.TextMarshaler. The map keys
// are sorted and used as JSON object keys by applying the following rules,
// subject to the UTF-8 coercion described for string values above:
-// - string keys are used directly
+// - keys of any string type are used directly
// - encoding.TextMarshalers are marshaled
// - integer keys are converted to strings
//
}
}
-func addrMarshalerEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
va := v.Addr()
if va.IsNil() {
e.WriteString("null")
b, err := m.MarshalJSON()
if err == nil {
// copy JSON into buffer, checking validity.
- err = compact(&e.Buffer, b, true)
+ err = compact(&e.Buffer, b, opts.escapeHTML)
}
if err != nil {
e.error(&MarshalerError{v.Type(), err})
err = Unmarshal(m, u)
if err != nil {
fmt.Printf("v=%#v\n", v)
- fmt.Println("m=%s\n", string(m))
+ fmt.Printf("m=%s\n", m)
panic(err)
}
}
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
-// Like Marshal, Compact applies HTMLEscape to any
-// string literals so that the JSON will be safe to embed
-// inside HTML <script> tags.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
}
}
+type strMarshaler string
+
+func (s strMarshaler) MarshalJSON() ([]byte, error) {
+ return []byte(s), nil
+}
+
+type strPtrMarshaler string
+
+func (s *strPtrMarshaler) MarshalJSON() ([]byte, error) {
+ return []byte(*s), nil
+}
+
func TestEncoderSetEscapeHTML(t *testing.T) {
var c C
var ct CText
Valid int `json:"<>&#! "`
Invalid int `json:"\\"`
}
+
+ // This case is particularly interesting, as we force the encoder to
+ // take the address of the Ptr field to use its MarshalJSON method. This
+ // is why the '&' is important.
+ marshalerStruct := &struct {
+ NonPtr strMarshaler
+ Ptr strPtrMarshaler
+ }{`"<str>"`, `"<str>"`}
+
for _, tt := range []struct {
name string
v interface{}
`{"\u003c\u003e\u0026#! ":0,"Invalid":0}`,
`{"<>&#! ":0,"Invalid":0}`,
},
+ {
+ `"<str>"`, marshalerStruct,
+ `{"NonPtr":"\u003cstr\u003e","Ptr":"\u003cstr\u003e"}`,
+ `{"NonPtr":"<str>","Ptr":"<str>"}`,
+ },
} {
var buf bytes.Buffer
enc := NewEncoder(&buf)
// license that can be found in the LICENSE file.
// Package errors implements functions to manipulate errors.
+//
+// The New function creates errors whose only content is a text message.
+//
+// The Unwrap, Is and As functions work on errors that may wrap other errors.
+// An error wraps another error if its type has the method
+//
+// Unwrap() error
+//
+// If e.Unwrap() returns a non-nil error w, then we say that e wraps w.
+//
+// A simple way to create wrapped errors is to call fmt.Errorf and apply the %w verb
+// to the error argument:
+//
+// fmt.Errorf("... %w ...", ..., err, ...).Unwrap()
+//
+// returns err.
+//
+// Unwrap unpacks wrapped errors. If its argument's type has an
+// Unwrap method, it calls the method once. Otherwise, it returns nil.
+//
+// Is unwraps its first argument sequentially looking for an error that matches the
+// second. It reports whether it finds a match. It should be used in preference to
+// simple equality checks:
+//
+// if errors.Is(err, os.ErrExist)
+//
+// is preferable to
+//
+// if err == os.ErrExist
+//
+// because the former will succeed if err wraps os.ErrExist.
+//
+// As unwraps its first argument sequentially looking for an error that can be
+// assigned to its second argument, which must be a pointer. If it succeeds, it
+// performs the assignment and returns true. Otherwise, it returns false. The form
+//
+// var perr *os.PathError
+// if errors.As(err, &perr) {
+// fmt.Println(perr.Path)
+// }
+//
+// is preferable to
+//
+// if perr, ok := err.(*os.PathError); ok {
+// fmt.Println(perr.Path)
+// }
+//
+// because the former will succeed if err wraps an *os.PathError.
package errors
// New returns an error that formats as the given text.
+// Each call to New returns a distinct error value even if the text is identical.
func New(text string) error {
return &errorString{text}
}
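A compact sketch of the wrapping behavior described in the package comment above; the file path is made up and assumed not to exist.

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/no/such/file")
	wrapped := fmt.Errorf("loading config: %w", err)

	// Is unwraps until it finds a match.
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true

	// As unwraps until it finds an assignable error value.
	var perr *os.PathError
	if errors.As(wrapped, &perr) {
		fmt.Println(perr.Path) // /no/such/file
	}

	// Unwrap peels off exactly one layer of wrapping.
	fmt.Println(errors.Unwrap(wrapped) == err) // true
}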
// Is reports whether any error in err's chain matches target.
//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
func Is(err, target error) bool {
// As finds the first error in err's chain that matches target, and if so, sets
// target to that error value and returns true.
//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
// An error matches target if the error's concrete value is assignable to the value
// pointed to by target, or if the error has a method As(interface{}) bool such that
// As(target) returns true. In the latter case, the As method is responsible for
}
}
-// Deletes the given key from the map.
+// Delete deletes the given key from the map.
func (v *Map) Delete(key string) {
v.keysMu.Lock()
defer v.keysMu.Unlock()
// If the format specifier includes a %w verb with an error operand,
// the returned error will implement an Unwrap method returning the operand. It is
// invalid to include more than one %w verb or to supply it with an operand
-// that does not implement the error innterface. The %w verb is otherwise
+// that does not implement the error interface. The %w verb is otherwise
// a synonym for %v.
func Errorf(format string, a ...interface{}) error {
p := newPrinter()
return r
}
-// scanBasePrefix reports whether the integer begins with a bas prefix
+// scanBasePrefix reports whether the integer begins with a base prefix
// and returns the base, digit string, and whether a zero was found.
// It is called only if the verb is %v.
func (s *ss) scanBasePrefix() (base int, digits string, zeroFound bool) {
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
- if j > i && lineAt(fset, s.Pos()) > 1+lineAt(fset, d.Specs[j-1].End()) {
+ if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
// j begins a new run. End this one.
specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
i = j
// Deduping can leave a blank line before the rparen; clean that up.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
- lastLine := lineAt(fset, lastSpec.Pos())
- rParenLine := lineAt(fset, d.Rparen)
+ lastLine := fset.Position(lastSpec.Pos()).Line
+ rParenLine := fset.Position(d.Rparen).Line
for rParenLine > lastLine+1 {
rParenLine--
fset.File(d.Rparen).MergeLine(rParenLine)
}
}
-func lineAt(fset *token.FileSet, pos token.Pos) int {
- return fset.PositionFor(pos, false).Line
-}
-
func importPath(s Spec) string {
t, err := strconv.Unquote(s.(*ImportSpec).Path.Value)
if err == nil {
End token.Pos
}
-type cgPos struct {
- left bool // true if comment is to the left of the spec, false otherwise.
- cg *CommentGroup
-}
-
func sortSpecs(fset *token.FileSet, f *File, specs []Spec) []Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
}
// Identify comments in this range.
- begSpecs := pos[0].Start
- endSpecs := pos[len(pos)-1].End
- beg := fset.File(begSpecs).LineStart(lineAt(fset, begSpecs))
- end := fset.File(endSpecs).LineStart(lineAt(fset, endSpecs) + 1) // beginning of next line
- first := len(f.Comments)
- last := -1
+ // Any comment from pos[0].Start to the final line counts.
+ lastLine := fset.Position(pos[len(pos)-1].End).Line
+ cstart := len(f.Comments)
+ cend := len(f.Comments)
for i, g := range f.Comments {
- if g.End() >= end {
- break
+ if g.Pos() < pos[0].Start {
+ continue
}
- // g.End() < end
- if beg <= g.Pos() {
- // comment is within the range [beg, end[ of import declarations
- if i < first {
- first = i
- }
- if i > last {
- last = i
- }
+ if i < cstart {
+ cstart = i
+ }
+ if fset.Position(g.End()).Line > lastLine {
+ cend = i
+ break
}
}
+ comments := f.Comments[cstart:cend]
- var comments []*CommentGroup
- if last >= 0 {
- comments = f.Comments[first : last+1]
- }
-
- // Assign each comment to the import spec on the same line.
- importComments := map[*ImportSpec][]cgPos{}
+ // Assign each comment to the import spec preceding it.
+ importComments := map[*ImportSpec][]*CommentGroup{}
specIndex := 0
for _, g := range comments {
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
specIndex++
}
- var left bool
- // A block comment can appear before the first import spec.
- if specIndex == 0 && pos[specIndex].Start > g.Pos() {
- left = true
- } else if specIndex+1 < len(specs) && // Or it can appear on the left of an import spec.
- lineAt(fset, pos[specIndex].Start)+1 == lineAt(fset, g.Pos()) {
- specIndex++
- left = true
- }
s := specs[specIndex].(*ImportSpec)
- importComments[s] = append(importComments[s], cgPos{left: left, cg: g})
+ importComments[s] = append(importComments[s], g)
}
// Sort the import specs by import path.
// Remove duplicates, when possible without data loss.
// Reassign the import paths to have the same position sequence.
- // Reassign each comment to the spec on the same line.
+ // Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
sort.Slice(specs, func(i, j int) bool {
ipath := importPath(specs[i])
deduped = append(deduped, s)
} else {
p := s.Pos()
- fset.File(p).MergeLine(lineAt(fset, p))
+ fset.File(p).MergeLine(fset.Position(p).Line)
}
}
specs = deduped
s.Path.ValuePos = pos[i].Start
s.EndPos = pos[i].End
for _, g := range importComments[s] {
- for _, c := range g.cg.List {
- if g.left {
- c.Slash = pos[i].Start - 1
- } else {
- // An import spec can have both block comment and a line comment
- // to its right. In that case, both of them will have the same pos.
- // But while formatting the AST, the line comment gets moved to
- // after the block comment.
- c.Slash = pos[i].End
- }
+ for _, c := range g.List {
+ c.Slash = pos[i].End
}
}
}
return errNoModules
}
+ // Find the absolute source directory. hasSubdir does not handle
+ // relative paths (and can't because the callbacks don't support this).
+ absSrcDir, err := filepath.Abs(srcDir)
+ if err != nil {
+ return errNoModules
+ }
+
// If modules are not enabled, then the in-process code works fine and we should keep using it.
- // TODO(bcmills): This assumes that the default is "auto" instead of "on".
switch os.Getenv("GO111MODULE") {
case "off":
return errNoModules
- case "on":
- // ok
- default: // "", "auto", anything else
- // Automatic mode: no module use in $GOPATH/src.
- for _, root := range gopath {
- sub, ok := ctxt.hasSubdir(root, srcDir)
- if ok && strings.HasPrefix(sub, "src/") {
- return errNoModules
- }
- }
+ default: // "", "on", "auto", anything else
+ // Maybe use modules.
}
// If the source directory is in GOROOT, then the in-process code works fine
// and we should keep using it. Moreover, the 'go list' approach below doesn't
// take standard-library vendoring into account and will fail.
- if _, ok := ctxt.hasSubdir(filepath.Join(ctxt.GOROOT, "src"), srcDir); ok {
+ if _, ok := ctxt.hasSubdir(filepath.Join(ctxt.GOROOT, "src"), absSrcDir); ok {
return errNoModules
}
}
// Look to see if there is a go.mod.
- abs, err := filepath.Abs(srcDir)
- if err != nil {
- return errNoModules
- }
+ // Since go1.13, it doesn't matter if we're inside GOPATH.
+ parent := absSrcDir
for {
- info, err := os.Stat(filepath.Join(abs, "go.mod"))
+ info, err := os.Stat(filepath.Join(parent, "go.mod"))
if err == nil && !info.IsDir() {
break
}
- d := filepath.Dir(abs)
- if len(d) >= len(abs) {
+ d := filepath.Dir(parent)
+ if len(d) >= len(parent) {
return errNoModules // reached top of file system, no go.mod
}
- abs = d
+ parent = d
}
cmd := exec.Command("go", "list", "-compiler="+ctxt.Compiler, "-tags="+strings.Join(ctxt.BuildTags, ","), "-installsuffix="+ctxt.InstallSuffix, "-f={{.Dir}}\n{{.ImportPath}}\n{{.Root}}\n{{.Goroot}}\n", path)
"syscall/js",
},
+ "internal/cfg": {"L0"},
"internal/poll": {"L0", "internal/oserror", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8", "internal/syscall/windows"},
"internal/testlog": {"L0"},
"os": {"L1", "os", "syscall", "time", "internal/oserror", "internal/poll", "internal/syscall/windows", "internal/syscall/unix", "internal/testlog"},
"testing": {"L2", "flag", "fmt", "internal/race", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"},
"testing/iotest": {"L2", "log"},
"testing/quick": {"L2", "flag", "fmt", "reflect", "time"},
- "internal/testenv": {"L2", "OS", "flag", "testing", "syscall"},
+ "internal/testenv": {"L2", "OS", "flag", "testing", "syscall", "internal/cfg"},
"internal/lazyregexp": {"L2", "OS", "regexp"},
"internal/lazytemplate": {"L2", "OS", "text/template"},
"compress/gzip": {"L4", "compress/flate"},
"compress/lzw": {"L4"},
"compress/zlib": {"L4", "compress/flate"},
- "context": {"errors", "internal/oserror", "internal/reflectlite", "sync", "time"},
+ "context": {"errors", "internal/reflectlite", "sync", "time"},
"database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
"database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
"debug/dwarf": {"L4"},
// To distinguish build constraints from package documentation, a series of
// build constraints must be followed by a blank line.
//
-// A build constraint is evaluated as the OR of space-separated options;
-// each option evaluates as the AND of its comma-separated terms;
-// and each term is an alphanumeric word or, preceded by !, its negation.
-// That is, the build constraint:
+// A build constraint is evaluated as the OR of space-separated options.
+// Each option evaluates as the AND of its comma-separated terms.
+// Each term consists of letters, digits, underscores, and dots.
+// A term may be negated with a preceding !.
+// For example, the build constraint:
//
// // +build linux,386 darwin,!cgo
//
+++ /dev/null
-package issue10858
-
-import "unsafe"
-
-// Should be ignored
-
-// First line
-//
-// Second line
-type Type interface {
- // Should be present
-
- // Align returns the alignment in bytes of a value of
- // this type when allocated in memory.
- Align() int
-
- // FieldAlign returns the alignment in bytes of a value of
- // this type when used as a field in a struct.
- FieldAlign() int // adjacent comment
-
- // Ptr: Elem
- // Slice: Elem
-
- // Bits returns the size of the type in bits.
-
- //
- // It panics if the type's Kind is not one of the
- // sized or unsized Int, Uint, Float, or Complex kinds.
- Bits() int
-
- // Should be ignored
-}
-
-// Should be ignored
-
-// NewType is a comment
-//
-// ending with this line.
-func NewType() Type {}
-
-// Ignore
-
-// First line
-//
-// Second line
-const (
- // Should be ignored
-
- // C1 comment
- C1 int = 1 << 0
-
- // Should
- //
- // be ignored
-
- C2 int = 1 << 1
-
- // C3 comment
- //
- // with a line gap
- C3 int = 1 << 2
-
- // Should be ignored
-)
-
-// Should be ignored
-
-// Should be ignored
-
-// TypeAlg is a
-// copy of runtime.typeAlg
-type TypeAlg struct {
- // function for hashing objects of this type
- //
- //
- // (ptr to object, seed) -> hash
- Hash func(unsafe.Pointer, uintptr) uintptr
-
- // include
- // include
-
- // include
-
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- Equal func(unsafe.Pointer, unsafe.Pointer) bool
- // Should be ignored
-}
-
-// Should be ignored
-
-// StructTag is a comment
-//
-//
-// with 2 connecting lines
-type StructTag string // adjacent comment
-
-// Should be ignored
-
-// Get returns the value associated with key in the tag string.
-func (tag StructTag) Get(key string) string {
-}
// (if the package API depends on cgo-defined entities, the type
// checker won't have access to those).
//
-// If lookup is nil, the default package lookup mechanism for the
-// given compiler is used, and the resulting importer attempts
-// to resolve relative and absolute import paths to canonical
-// import path IDs before finding the imported file.
+// The lookup function is called each time the resulting importer needs
+// to resolve an import path. In this mode the importer can only be
+// invoked with canonical import paths (not relative or absolute ones);
+// it is assumed that the translation to canonical import paths is being
+// done by the client of the importer.
//
-// If lookup is non-nil, then the returned importer calls lookup
-// each time it needs to resolve an import path. In this mode
-// the importer can only be invoked with canonical import paths
-// (not relative or absolute ones); it is assumed that the translation
-// to canonical import paths is being done by the client of the
-// importer.
+// A lookup function must be provided for correct module-aware operation.
+// Deprecated: If lookup is nil, for backwards-compatibility, the importer
+// will attempt to resolve imports in the $GOPATH workspace.
func ForCompiler(fset *token.FileSet, compiler string, lookup Lookup) types.Importer {
switch compiler {
case "gc":
topScope *ast.Scope // top-most scope; may be pkgScope
unresolved []*ast.Ident // unresolved identifiers
imports []*ast.ImportSpec // list of imports
- inStruct bool // if set, parser is parsing a struct or interface (for comment collection)
// Label scopes
// (maintained by open/close LabelScope)
// consume successor comments, if any
endline = -1
for p.tok == token.COMMENT {
- n := 1
- // When inside a struct (or interface), we don't want to lose comments
- // separated from individual field (or method) documentation by empty
- // lines. Allow for some white space in this case and collect those
- // comments as a group. See issue #10858 for details.
- if p.inStruct {
- n = 2
- }
- comment, endline = p.consumeCommentGroup(n)
+ comment, endline = p.consumeCommentGroup(1)
}
if endline+1 == p.file.Line(p.pos) {
}
pos := p.expect(token.STRUCT)
- p.inStruct = true
lbrace := p.expect(token.LBRACE)
scope := ast.NewScope(nil) // struct scope
var list []*ast.Field
list = append(list, p.parseFieldDecl(scope))
}
rbrace := p.expect(token.RBRACE)
- p.inStruct = false
return &ast.StructType{
Struct: pos,
}
pos := p.expect(token.INTERFACE)
- p.inStruct = true
lbrace := p.expect(token.LBRACE)
scope := ast.NewScope(nil) // interface scope
var list []*ast.Field
list = append(list, p.parseMethodSpec(scope))
}
rbrace := p.expect(token.RBRACE)
- p.inStruct = false
return &ast.InterfaceType{
Interface: pos,
// Parent returns the scope's containing (parent) scope.
func (s *Scope) Parent() *Scope { return s.parent }
-// Len() returns the number of scope elements.
+// Len returns the number of scope elements.
func (s *Scope) Len() int { return len(s.elems) }
// Names returns the scope's element names in sorted order.
return names
}
-// NumChildren() returns the number of scopes nested in s.
+// NumChildren returns the number of scopes nested in s.
func (s *Scope) NumChildren() int { return len(s.children) }
// Child returns the i'th child scope for 0 <= i < NumChildren().
//
type Qualifier func(*Package) string
-// RelativeTo(pkg) returns a Qualifier that fully qualifies members of
+// RelativeTo returns a Qualifier that fully qualifies members of
// all packages other than pkg.
func RelativeTo(pkg *Package) Qualifier {
if pkg == nil {
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_darwin.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_dragonfly.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_freebsd.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_freebsd.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_freebsd.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_netbsd.go
package route
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs defs_openbsd.go
package route
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cfg holds configuration shared by the Go command and internal/testenv.
+// Definitions that don't need to be exposed outside of cmd/go should be in
+// cmd/go/internal/cfg instead of this package.
+package cfg
+
+// KnownEnv is a list of environment variables that affect the operation
+// of the Go command.
+const KnownEnv = `
+ AR
+ CC
+ CGO_CFLAGS
+ CGO_CFLAGS_ALLOW
+ CGO_CFLAGS_DISALLOW
+ CGO_CPPFLAGS
+ CGO_CPPFLAGS_ALLOW
+ CGO_CPPFLAGS_DISALLOW
+ CGO_CXXFLAGS
+ CGO_CXXFLAGS_ALLOW
+ CGO_CXXFLAGS_DISALLOW
+ CGO_ENABLED
+ CGO_FFLAGS
+ CGO_FFLAGS_ALLOW
+ CGO_FFLAGS_DISALLOW
+ CGO_LDFLAGS
+ CGO_LDFLAGS_ALLOW
+ CGO_LDFLAGS_DISALLOW
+ CXX
+ FC
+ GCCGO
+ GO111MODULE
+ GO386
+ GOARCH
+ GOARM
+ GOBIN
+ GOCACHE
+ GOENV
+ GOEXE
+ GOFLAGS
+ GOGCCFLAGS
+ GOHOSTARCH
+ GOHOSTOS
+ GOMIPS
+ GOMIPS64
+ GONOPROXY
+ GONOSUMDB
+ GOOS
+ GOPATH
+ GOPPC64
+ GOPRIVATE
+ GOPROXY
+ GOROOT
+ GOSUMDB
+ GOTMPDIR
+ GOTOOLDIR
+ GOWASM
+ GO_EXTLINK_ENABLED
+ PKG_CONFIG
+`
ErrExist = errors.New("file already exists")
ErrNotExist = errors.New("file does not exist")
ErrClosed = errors.New("file already closed")
- ErrTemporary = temporaryError{}
- ErrTimeout = timeoutError{}
)
-
-type timeoutError struct{}
-
-func (timeoutError) Error() string { return "deadline exceeded" }
-func (timeoutError) Timeout() bool { return true }
-
-type temporaryError struct{}
-
-func (temporaryError) Error() string { return "temporary error" }
-func (temporaryError) Temporary() bool { return true }
-
-// IsTimeout reports whether err indicates a timeout.
-func IsTimeout(err error) bool {
- for err != nil {
- if err == ErrTimeout {
- return true
- }
- if x, ok := err.(interface{ Timeout() bool }); ok {
- return x.Timeout()
- }
- if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(ErrTimeout) {
- return true
- }
- err = errors.Unwrap(err)
- }
- return false
-}
-
-// IsTemporary reports whether err indicates a temporary condition.
-func IsTemporary(err error) bool {
- for err != nil {
- if err == ErrTemporary {
- return true
- }
- if x, ok := err.(interface{ Temporary() bool }); ok {
- return x.Temporary()
- }
- if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(ErrTemporary) {
- return true
- }
- err = errors.Unwrap(err)
- }
- return false
-}
+++ /dev/null
-package oserror_test
-
-import (
- "errors"
- "fmt"
- "internal/oserror"
- "os"
- "testing"
-)
-
-type ttError struct {
- timeout bool
- temporary bool
-}
-
-func (e ttError) Error() string {
- return fmt.Sprintf("ttError{timeout:%v temporary:%v}", e.timeout, e.temporary)
-}
-func (e ttError) Timeout() bool { return e.timeout }
-func (e ttError) Temporary() bool { return e.temporary }
-
-type isError struct {
- err error
-}
-
-func (e isError) Error() string { return fmt.Sprintf("isError(%v)", e.err) }
-func (e isError) Is(target error) bool { return e.err == target }
-
-func TestIsTimeout(t *testing.T) {
- for _, test := range []struct {
- want bool
- err error
- }{
- {true, ttError{timeout: true}},
- {true, isError{os.ErrTimeout}},
- {true, os.ErrTimeout},
- {true, fmt.Errorf("wrap: %w", os.ErrTimeout)},
- {false, ttError{timeout: false}},
- {false, errors.New("error")},
- } {
- if got, want := oserror.IsTimeout(test.err), test.want; got != want {
- t.Errorf("IsTimeout(err) = %v, want %v\n%+v", got, want, test.err)
- }
- }
-}
-
-func TestIsTemporary(t *testing.T) {
- for _, test := range []struct {
- want bool
- err error
- }{
- {true, ttError{temporary: true}},
- {true, isError{os.ErrTemporary}},
- {true, os.ErrTemporary},
- {true, fmt.Errorf("wrap: %w", os.ErrTemporary)},
- {false, ttError{temporary: false}},
- {false, errors.New("error")},
- } {
- if got, want := oserror.IsTemporary(test.err), test.want; got != want {
- t.Errorf("IsTemporary(err) = %v, want %v\n%+v", got, want, test.err)
- }
- }
-}
import (
"errors"
- "internal/oserror"
)
// ErrNetClosing is returned when a network descriptor is used after
func (e *TimeoutError) Timeout() bool { return true }
func (e *TimeoutError) Temporary() bool { return true }
-func (e *TimeoutError) Is(target error) bool {
- return target == oserror.ErrTimeout || target == oserror.ErrTemporary
-}
-
// ErrNotPollable is returned when the file or socket is not suitable
// for event notification.
var ErrNotPollable = errors.New("not pollable")
import (
"errors"
"flag"
+ "internal/cfg"
"os"
"os/exec"
"path/filepath"
if err != nil {
t.Fatal(err)
}
+ // Add all environment variables that affect the Go command to test metadata.
+ // Cached test results will be invalidated when these variables change.
+ // See golang.org/issue/32285.
+ for _, envVar := range strings.Fields(cfg.KnownEnv) {
+ os.Getenv(envVar)
+ }
return path
}
import (
"context"
- "os"
"syscall"
"unsafe"
)
func (eai addrinfoErrno) Temporary() bool { return eai == syscall.EAI_AGAIN }
func (eai addrinfoErrno) Timeout() bool { return false }
-func (eai addrinfoErrno) Is(target error) bool {
- switch target {
- case os.ErrTemporary:
- return eai.Temporary()
- case os.ErrTimeout:
- return eai.Timeout()
- }
- return false
-}
-
type portLookupResult struct {
port int
err error
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
// Error starting or closing listener:
- log.Printf("HTTP server ListenAndServe: %v", err)
+ log.Fatalf("HTTP server ListenAndServe: %v", err)
}
<-idleConnsClosed
return 0
}
-func (t *Transport) IdleConnChMapSizeForTesting() int {
+func (t *Transport) IdleConnWaitMapSizeForTesting() int {
t.idleMu.Lock()
defer t.idleMu.Unlock()
- return len(t.idleConnCh)
+ return len(t.idleConnWait)
}
func (t *Transport) IsIdleForTesting() bool {
t.idleMu.Lock()
defer t.idleMu.Unlock()
- return t.wantIdle
+ return t.closeIdle
}
-func (t *Transport) RequestIdleConnChForTesting() {
- t.getIdleConnCh(connectMethod{nil, "http", "example.com", false})
+func (t *Transport) QueueForIdleConnForTesting() {
+ t.queueForIdleConn(nil)
}
+// PutIdleTestConn reports whether it was able to insert a fresh
+// persistConn for scheme, addr into the idle connection pool.
func (t *Transport) PutIdleTestConn(scheme, addr string) bool {
c, _ := net.Pipe()
key := connectMethodKey{"", scheme, addr, false}
- select {
- case <-t.incHostConnCount(key):
- default:
- return false
+
+ if t.MaxConnsPerHost > 0 {
+ // Transport is tracking conns-per-host.
+ // Increment connection count to account
+ // for new persistConn created below.
+ t.connsPerHostMu.Lock()
+ if t.connsPerHost == nil {
+ t.connsPerHost = make(map[connectMethodKey]int)
+ }
+ t.connsPerHost[key]++
+ t.connsPerHostMu.Unlock()
}
+
return t.tryPutIdleConn(&persistConn{
t: t,
conn: c, // dummy
}
const (
- http2prefaceTimeout = 10 * time.Second
- http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- http2handlerChunkWriteSize = 4 << 10
- http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ http2prefaceTimeout = 10 * time.Second
+ http2firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ http2handlerChunkWriteSize = 4 << 10
+ http2defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ http2maxQueuedControlFrames = 10000
)
var (
return http2defaultMaxStreams
}
+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *http2Server) maxQueuedControlFrames() int {
+ // TODO: if anybody asks, add a Server field, and remember to define the
+ // behavior of negative values.
+ return http2maxQueuedControlFrames
+}
+
type http2serverInternalState struct {
mu sync.Mutex
activeConns map[*http2serverConn]struct{}
sawFirstSettings bool // got the initial SETTINGS frame after the preface
needToSendSettingsAck bool
unackedSettings int // how many SETTINGS have we sent without ACKs?
+ queuedControlFrames int // control frames in the writeSched queue
clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client
}
}
+ // If the peer is causing us to generate a lot of control frames,
+ // but not reading them from us, assume they are trying to make us
+ // run out of memory.
+ if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ sc.vlogf("http2: too many control frames in send queue, closing connection")
+ return
+ }
+
// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
// with no error code (graceful shutdown), don't start the timer until
// all open streams have been completed.
}
if !ignoreWrite {
+ if wr.isControl() {
+ sc.queuedControlFrames++
+ // For extra safety, detect wraparounds, which should not happen,
+ // and pull the plug.
+ if sc.queuedControlFrames < 0 {
+ sc.conn.Close()
+ }
+ }
sc.writeSched.Push(wr)
}
sc.scheduleFrameWrite()
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
-// If a frame isn't being written we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
}
if !sc.inGoAway || sc.goAwayCode == http2ErrCodeNo {
if wr, ok := sc.writeSched.Pop(); ok {
+ if wr.isControl() {
+ sc.queuedControlFrames--
+ }
sc.startFrameWrite(wr)
continue
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
+ // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+ // acknowledged individually, even if multiple are received before the ACK.
sc.needToSendSettingsAck = true
sc.scheduleFrameWrite()
return nil
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ // See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
- // order they are Push'd.
+ // order they are Push'd. No frames should be discarded except by CloseStream.
Pop() (wr http2FrameWriteRequest, ok bool)
}
return wr.stream.id
}
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr http2FrameWriteRequest) isControl() bool {
+ return wr.stream == nil
+}
+
// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr http2FrameWriteRequest) DataSize() int {
return h.writeSubset(w, nil, trace)
}
-// Clone returns a copy of h.
+// Clone returns a copy of h or nil if h is nil.
func (h Header) Clone() Header {
+ if h == nil {
+ return nil
+ }
+
// Find total number of values.
nv := 0
for _, vv := range h {
}
}
+func TestNilHeaderClone(t *testing.T) {
+ t1 := Header(nil)
+ t2 := t1.Clone()
+ if t2 != nil {
+ t.Errorf("cloned header does not match original: got: %+v; want: %+v", t2, nil)
+ }
+}
+
var testHeader = Header{
"Content-Length": {"123"},
"Content-Type": {"text/plain"},
if req.ContentLength == 0 {
outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
}
+ if outreq.Header == nil {
+ outreq.Header = make(http.Header) // Issue 33142: historical behavior was to always allocate
+ }
p.Director(outreq)
outreq.Close = false
}
}
+// Issue 33142: always allocate the request headers
+func TestReverseProxy_AllocatedHeader(t *testing.T) {
+ proxyHandler := new(ReverseProxy)
+ proxyHandler.ErrorLog = log.New(ioutil.Discard, "", 0) // quiet for tests
+ proxyHandler.Director = func(*http.Request) {} // noop
+ proxyHandler.Transport = RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+ if req.Header == nil {
+ t.Error("Header == nil; want a non-nil Header")
+ }
+ return nil, errors.New("done testing the interesting part; so force a 502 Gateway error")
+ })
+
+ proxyHandler.ServeHTTP(httptest.NewRecorder(), &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Host: "fake.tld", Path: "/"},
+ Proto: "HTTP/1.0",
+ ProtoMajor: 1,
+ })
+}
+
// Issue 14237. Test ModifyResponse and that an error from it
// causes the proxy to return StatusBadGateway, or StatusOK otherwise.
func TestReverseProxyModifyResponse(t *testing.T) {
Host string
// Form contains the parsed form data, including both the URL
- // field's query parameters and the POST or PUT form data.
+ // field's query parameters and the PATCH, POST, or PUT form data.
// This field is only available after ParseForm is called.
// The HTTP client ignores Form and uses Body instead.
Form url.Values
- // PostForm contains the parsed form data from POST, PATCH,
+ // PostForm contains the parsed form data from PATCH, POST,
// or PUT body parameters.
//
// This field is only available after ParseForm is called.
// multipartByReader is a sentinel value.
// Its presence in Request.MultipartForm indicates that parsing of the request
-// body has been handed off to a MultipartReader instead of ParseMultipartFrom.
+// body has been handed off to a MultipartReader instead of ParseMultipartForm.
var multipartByReader = &multipart.Form{
Value: make(map[string][]string),
File: make(map[string][]*multipart.FileHeader),
// The Body is automatically dechunked if the server replied
// with a "chunked" Transfer-Encoding.
//
- // As of Go 1.12, the Body will be also implement io.Writer
+ // As of Go 1.12, the Body will also implement io.Writer
// on a successful "101 Switching Protocols" response,
// as used by WebSockets and HTTP/2's "h2c" mode.
Body io.ReadCloser
}
// See issues 8209 and 8414.
+// Both issues involved panics in the implementation of TimeoutHandler.
func TestTimeoutHandlerRaceHeader(t *testing.T) {
setParallel(t)
defer afterTest(t)
defer func() { <-gate }()
res, err := c.Get(ts.URL)
if err != nil {
- t.Error(err)
+ // We see ECONNRESET from the connection occasionally,
+ // and that's OK: this test is checking that the server does not panic.
+ t.Log(err)
return
}
defer res.Body.Close()
if a1 != a2 {
t.Fatal("expected first two requests on same connection")
}
- var idle0 int
- if !waitCondition(2*time.Second, 10*time.Millisecond, func() bool {
- idle0 = tr.IdleConnKeyCountForTesting()
- return idle0 == 1
- }) {
- t.Fatalf("idle count before SetKeepAlivesEnabled called = %v; want 1", idle0)
+ addr := strings.TrimPrefix(ts.URL, "http://")
+
+ // The two requests should have used the same connection,
+ // and there should not have been a second connection that
+ // was created by racing dial against reuse.
+ // (The first get was completed when the second get started.)
+ n := tr.IdleConnCountForTesting("http", addr)
+ if n != 1 {
+ t.Fatalf("idle count for %q after 2 gets = %d, want 1", addr, n)
}
+ // SetKeepAlivesEnabled should discard idle conns.
ts.Config.SetKeepAlivesEnabled(false)
var idle1 int
if !waitCondition(2*time.Second, 10*time.Millisecond, func() bool {
- idle1 = tr.IdleConnKeyCountForTesting()
+ idle1 = tr.IdleConnCountForTesting("http", addr)
return idle1 == 0
}) {
t.Fatalf("idle count after SetKeepAlivesEnabled called = %v; want 0", idle1)
var (
// ServerContextKey is a context key. It can be used in HTTP
- // handlers with context.WithValue to access the server that
+ // handlers with Context.Value to access the server that
// started the handler. The associated value will be of
// type *Server.
ServerContextKey = &contextKey{"http-server"}
// LocalAddrContextKey is a context key. It can be used in
- // HTTP handlers with context.WithValue to access the local
+ // HTTP handlers with Context.Value to access the local
// address the connection arrived on.
// The associated value will be of type net.Addr.
LocalAddrContextKey = &contextKey{"local-addr"}
// After such a timeout, writes by h to its ResponseWriter will return
// ErrHandlerTimeout.
//
-// TimeoutHandler buffers all Handler writes to memory and does not
-// support the Hijacker or Flusher interfaces.
+// TimeoutHandler supports the Flusher and Pusher interfaces but does not
+// support the Hijacker interface.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
return &timeoutHandler{
handler: h,
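For context, a typical use of the handler described above; the timeout duration, address, and message are arbitrary.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(5 * time.Second)
		fmt.Fprintln(w, "done")
	})
	// Requests taking longer than 2s receive a 503 with the given message;
	// the wrapped handler's later writes return ErrHandlerTimeout.
	log.Fatal(http.ListenAndServe(":8080",
		http.TimeoutHandler(slow, 2*time.Second, "request timed out")))
}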
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
-//go:generate bundle -o socks_bundle.go -dst net/http -prefix socks -underscore golang.org/x/net/internal/socks
+//go:generate bundle -o socks_bundle.go -prefix socks golang.org/x/net/internal/socks
// Package socks provides a SOCKS version 5 client implementation.
//
// MaxIdleConnsPerHost.
const DefaultMaxIdleConnsPerHost = 2
-// connsPerHostClosedCh is a closed channel used by MaxConnsPerHost
-// for the property that receives from a closed channel return the
-// zero value.
-var connsPerHostClosedCh = make(chan struct{})
-
-func init() {
- close(connsPerHostClosedCh)
-}
-
// Transport is an implementation of RoundTripper that supports HTTP,
// HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT).
//
// request is treated as idempotent but the header is not sent on the
// wire.
type Transport struct {
- idleMu sync.Mutex
- wantIdle bool // user has requested to close all idle conns
- idleConn map[connectMethodKey][]*persistConn // most recently used at end
- idleConnCh map[connectMethodKey]chan *persistConn
- idleLRU connLRU
+ idleMu sync.Mutex
+ closeIdle bool // user has requested to close all idle conns
+ idleConn map[connectMethodKey][]*persistConn // most recently used at end
+ idleConnWait map[connectMethodKey]wantConnQueue // waiting getConns
+ idleLRU connLRU
reqMu sync.Mutex
reqCanceler map[*Request]func(error)
altMu sync.Mutex // guards changing altProto only
altProto atomic.Value // of nil or map[string]RoundTripper, key is URI scheme
- connCountMu sync.Mutex
- connPerHostCount map[connectMethodKey]int
- connPerHostAvailable map[connectMethodKey]chan struct{}
+ connsPerHostMu sync.Mutex
+ connsPerHost map[connectMethodKey]int
+ connsPerHostWait map[connectMethodKey]wantConnQueue // waiting getConns
// Proxy specifies a function to return a proxy for a given
// Request. If the function returns a non-nil error, the
// active, and idle states. On limit violation, dials will block.
//
// Zero means no limit.
- //
- // For HTTP/2, this currently only controls the number of new
- // connections being created at a time, instead of the total
- // number. In practice, hosts using HTTP/2 only have about one
- // idle connection, though.
MaxConnsPerHost int
// IdleConnTimeout is the maximum amount of time an idle
var resp *Response
if pconn.alt != nil {
// HTTP/2 path.
- t.putOrCloseIdleConn(pconn)
t.setReqCanceler(req, nil) // not cancelable with CancelRequest
resp, err = pconn.alt.RoundTrip(req)
} else {
}
if http2isNoCachedConnError(err) {
t.removeIdleConn(pconn)
- t.decHostConnCount(cm.key()) // clean up the persistent connection
} else if !pconn.shouldRetryRequest(req, err) {
// Issue 16465: return underlying net.Conn.Read error from peek,
// as we've historically done.
t.idleMu.Lock()
m := t.idleConn
t.idleConn = nil
- t.idleConnCh = nil
- t.wantIdle = true
+ t.closeIdle = true // close newly idle connections
t.idleLRU = connLRU{}
t.idleMu.Unlock()
for _, conns := range m {
}
func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) {
+ // TODO: the validPort check is redundant after CL 189258, as url.URL.Port
+ // only returns valid ports now. golang.org/issue/33600
if port := treq.URL.Port(); !validPort(port) {
return cm, fmt.Errorf("invalid URL port %q", port)
}
var (
errKeepAlivesDisabled = errors.New("http: putIdleConn: keep alives disabled")
errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
- errWantIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
+ errCloseIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
errCloseIdleConns = errors.New("http: CloseIdleConnections called")
return errConnBroken
}
pconn.markReused()
- key := pconn.cacheKey
t.idleMu.Lock()
defer t.idleMu.Unlock()
- waitingDialer := t.idleConnCh[key]
- select {
- case waitingDialer <- pconn:
- // We're done with this pconn and somebody else is
- // currently waiting for a conn of this type (they're
- // actively dialing, but this conn is ready
- // first). Chrome calls this socket late binding. See
- // https://insouciant.org/tech/connection-management-in-chromium/
+ // HTTP/2 (pconn.alt != nil) connections do not come out of the idle list,
+ // because multiple goroutines can use them simultaneously.
+ // If this is an HTTP/2 connection being “returned,” we're done.
+ if pconn.alt != nil && t.idleLRU.m[pconn] != nil {
return nil
- default:
- if waitingDialer != nil {
- // They had populated this, but their dial won
- // first, so we can clean up this map entry.
- delete(t.idleConnCh, key)
+ }
+
+ // Deliver pconn to goroutine waiting for idle connection, if any.
+ // (They may be actively dialing, but this conn is ready first.
+ // Chrome calls this socket late binding.
+ // See https://insouciant.org/tech/connection-management-in-chromium/.)
+ key := pconn.cacheKey
+ if q, ok := t.idleConnWait[key]; ok {
+ done := false
+ if pconn.alt == nil {
+ // HTTP/1.
+ // Loop over the waiting list until we find a w that isn't done already, and hand it pconn.
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.tryDeliver(pconn, nil) {
+ done = true
+ break
+ }
+ }
+ } else {
+ // HTTP/2.
+ // Can hand the same pconn to everyone in the waiting list,
+ // and we still won't be done: we want to put it in the idle
+ // list unconditionally, for any future clients too.
+ for q.len() > 0 {
+ w := q.popFront()
+ w.tryDeliver(pconn, nil)
+ }
+ }
+ if q.len() == 0 {
+ delete(t.idleConnWait, key)
+ } else {
+ t.idleConnWait[key] = q
+ }
+ if done {
+ return nil
}
}
- if t.wantIdle {
- return errWantIdle
+
+ if t.closeIdle {
+ return errCloseIdle
}
if t.idleConn == nil {
t.idleConn = make(map[connectMethodKey][]*persistConn)
oldest.close(errTooManyIdle)
t.removeIdleConnLocked(oldest)
}
- if t.IdleConnTimeout > 0 {
+
+ // Set idle timer, but only for HTTP/1 (pconn.alt == nil).
+ // The HTTP/2 implementation manages the idle timer itself
+ // (see idleConnTimeout in h2_bundle.go).
+ if t.IdleConnTimeout > 0 && pconn.alt == nil {
if pconn.idleTimer != nil {
pconn.idleTimer.Reset(t.IdleConnTimeout)
} else {
- // idleTimer does not apply to HTTP/2
- if pconn.alt == nil {
- pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
- }
+ pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
}
}
pconn.idleAt = time.Now()
return nil
}
-// getIdleConnCh returns a channel to receive and return idle
-// persistent connection for the given connectMethod.
-// It may return nil, if persistent connections are not being used.
-func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn {
+// queueForIdleConn queues w to receive the next idle connection for w.cm.
+// As an optimization hint to the caller, queueForIdleConn reports whether
+// it successfully delivered an already-idle connection.
+func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
if t.DisableKeepAlives {
- return nil
+ return false
}
- key := cm.key()
+
t.idleMu.Lock()
defer t.idleMu.Unlock()
- t.wantIdle = false
- if t.idleConnCh == nil {
- t.idleConnCh = make(map[connectMethodKey]chan *persistConn)
- }
- ch, ok := t.idleConnCh[key]
- if !ok {
- ch = make(chan *persistConn)
- t.idleConnCh[key] = ch
+
+ // Stop closing connections that become idle - we might want one.
+ // (That is, undo the effect of t.CloseIdleConnections.)
+ t.closeIdle = false
+
+ if w == nil {
+ // Happens in test hook.
+ return false
}
- return ch
-}
-func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn, idleSince time.Time) {
- key := cm.key()
- t.idleMu.Lock()
- defer t.idleMu.Unlock()
- for {
- pconns, ok := t.idleConn[key]
- if !ok {
- return nil, time.Time{}
+ // Look for most recently-used idle connection.
+ if list, ok := t.idleConn[w.key]; ok {
+ stop := false
+ delivered := false
+ for len(list) > 0 && !stop {
+ pconn := list[len(list)-1]
+ if pconn.isBroken() {
+ // persistConn.readLoop has marked the connection broken,
+ // but Transport.removeIdleConn has not yet removed it from the idle list.
+ // Drop on floor on behalf of Transport.removeIdleConn.
+ list = list[:len(list)-1]
+ continue
+ }
+ delivered = w.tryDeliver(pconn, nil)
+ if delivered {
+ if pconn.alt != nil {
+ // HTTP/2: multiple clients can share pconn.
+ // Leave it in the list.
+ } else {
+ // HTTP/1: only one client can use pconn.
+ // Remove it from the list.
+ t.idleLRU.remove(pconn)
+ list = list[:len(list)-1]
+ }
+ }
+ stop = true
}
- if len(pconns) == 1 {
- pconn = pconns[0]
- delete(t.idleConn, key)
+ if len(list) > 0 {
+ t.idleConn[w.key] = list
} else {
- // 2 or more cached connections; use the most
- // recently used one at the end.
- pconn = pconns[len(pconns)-1]
- t.idleConn[key] = pconns[:len(pconns)-1]
+ delete(t.idleConn, w.key)
}
- t.idleLRU.remove(pconn)
- if pconn.isBroken() {
- // There is a tiny window where this is
- // possible, between the connecting dying and
- // the persistConn readLoop calling
- // Transport.removeIdleConn. Just skip it and
- // carry on.
- continue
+ if stop {
+ return delivered
}
- return pconn, pconn.idleAt
}
+
+ // Register to receive next connection that becomes idle.
+ if t.idleConnWait == nil {
+ t.idleConnWait = make(map[connectMethodKey]wantConnQueue)
+ }
+ q := t.idleConnWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.idleConnWait[w.key] = q
+ return false
}
// removeIdleConn marks pconn as dead.
return zeroDialer.DialContext(ctx, network, addr)
}
+// A wantConn records state about a wanted connection
+// (that is, an active call to getConn).
+// The conn may be gotten by dialing or by finding an idle connection,
+// or a cancellation may make the conn no longer wanted.
+// These three options are racing against each other and use
+// wantConn to coordinate and agree about the winning outcome.
+type wantConn struct {
+ cm connectMethod
+ key connectMethodKey // cm.key()
+ ctx context.Context // context for dial
+ ready chan struct{} // closed when pc, err pair is delivered
+
+ // hooks for testing to know when dials are done
+ // beforeDial is called in the getConn goroutine when the dial is queued.
+	// afterDial is called when the dial is completed or canceled.
+ beforeDial func()
+ afterDial func()
+
+ mu sync.Mutex // protects pc, err, close(ready)
+ pc *persistConn
+ err error
+}
+
+// waiting reports whether w is still waiting for an answer (connection or error).
+func (w *wantConn) waiting() bool {
+ select {
+ case <-w.ready:
+ return false
+ default:
+ return true
+ }
+}
+
+// tryDeliver attempts to deliver pc, err to w and reports whether it succeeded.
+func (w *wantConn) tryDeliver(pc *persistConn, err error) bool {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.pc != nil || w.err != nil {
+ return false
+ }
+
+ w.pc = pc
+ w.err = err
+ if w.pc == nil && w.err == nil {
+ panic("net/http: internal error: misuse of tryDeliver")
+ }
+ close(w.ready)
+ return true
+}
+
+// cancel marks w as no longer wanting a result (for example, due to cancellation).
+// If a connection has been delivered already, cancel returns it with t.putOrCloseIdleConn.
+func (w *wantConn) cancel(t *Transport, err error) {
+ w.mu.Lock()
+ if w.pc == nil && w.err == nil {
+ close(w.ready) // catch misbehavior in future delivery
+ }
+ pc := w.pc
+ w.pc = nil
+ w.err = err
+ w.mu.Unlock()
+
+ if pc != nil {
+ t.putOrCloseIdleConn(pc)
+ }
+}
+
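
As an aside for readers new to this pattern: the wantConn above lets an idle connection, a fresh dial, and a cancellation race, with only the first delivery winning. A minimal standalone sketch of that first-delivery-wins idea, using a made-up result type rather than the real net/http internals:

package main

import (
	"fmt"
	"sync"
)

// result is a toy stand-in for the (pc, err) pair that a wantConn carries.
type result struct {
	mu    sync.Mutex
	val   string
	ready chan struct{} // closed exactly once, when val is set
}

// tryDeliver mirrors the shape of wantConn.tryDeliver: only the first
// caller records a value and closes ready; later callers report false.
func (r *result) tryDeliver(v string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.val != "" {
		return false
	}
	r.val = v
	close(r.ready)
	return true
}

func main() {
	r := &result{ready: make(chan struct{})}
	go r.tryDeliver("idle connection")
	go r.tryDeliver("freshly dialed connection")
	<-r.ready // wait for whichever delivery wins
	r.mu.Lock()
	fmt.Println("winner:", r.val)
	r.mu.Unlock()
}
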
+// A wantConnQueue is a queue of wantConns.
+type wantConnQueue struct {
+ // This is a queue, not a deque.
+ // It is split into two stages - head[headPos:] and tail.
+ // popFront is trivial (headPos++) on the first stage, and
+ // pushBack is trivial (append) on the second stage.
+ // If the first stage is empty, popFront can swap the
+ // first and second stages to remedy the situation.
+ //
+ // This two-stage split is analogous to the use of two lists
+ // in Okasaki's purely functional queue but without the
+ // overhead of reversing the list when swapping stages.
+ head []*wantConn
+ headPos int
+ tail []*wantConn
+}
+
+// len returns the number of items in the queue.
+func (q *wantConnQueue) len() int {
+ return len(q.head) - q.headPos + len(q.tail)
+}
+
+// pushBack adds w to the back of the queue.
+func (q *wantConnQueue) pushBack(w *wantConn) {
+ q.tail = append(q.tail, w)
+}
+
+// popFront removes and returns the wantConn at the front of the queue.
+func (q *wantConnQueue) popFront() *wantConn {
+ if q.headPos >= len(q.head) {
+ if len(q.tail) == 0 {
+ return nil
+ }
+ // Pick up tail as new head, clear tail.
+ q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
+ }
+ w := q.head[q.headPos]
+ q.head[q.headPos] = nil
+ q.headPos++
+ return w
+}
+
+// peekFront returns the wantConn at the front of the queue without removing it.
+func (q *wantConnQueue) peekFront() *wantConn {
+ if q.headPos < len(q.head) {
+ return q.head[q.headPos]
+ }
+ if len(q.tail) > 0 {
+ return q.tail[0]
+ }
+ return nil
+}
+
+// cleanFront pops any wantConns that are no longer waiting from the head of the
+// queue, reporting whether any were popped.
+func (q *wantConnQueue) cleanFront() (cleaned bool) {
+ for {
+ w := q.peekFront()
+ if w == nil || w.waiting() {
+ return cleaned
+ }
+ q.popFront()
+ cleaned = true
+ }
+}
+
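
For readers unfamiliar with the two-stage layout described above, here is a minimal sketch of the same queue idea specialized to ints; it is an illustration only, not the actual wantConnQueue:

package main

import "fmt"

type intQueue struct {
	head    []int
	headPos int
	tail    []int
}

func (q *intQueue) len() int { return len(q.head) - q.headPos + len(q.tail) }

// pushBack is a plain append onto the second stage.
func (q *intQueue) pushBack(v int) { q.tail = append(q.tail, v) }

// popFront advances headPos, swapping in the tail when the head runs out.
func (q *intQueue) popFront() (int, bool) {
	if q.headPos >= len(q.head) {
		if len(q.tail) == 0 {
			return 0, false
		}
		// Pick up tail as new head, clear tail.
		q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
	}
	v := q.head[q.headPos]
	q.headPos++
	return v, true
}

func main() {
	var q intQueue
	for i := 1; i <= 3; i++ {
		q.pushBack(i)
	}
	for q.len() > 0 {
		v, _ := q.popFront()
		fmt.Println(v) // 1, 2, 3 in FIFO order
	}
}
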
// getConn dials and creates a new persistConn to the target as
// specified in the connectMethod. This includes doing a proxy CONNECT
// and/or setting up TLS. If this doesn't return an error, the persistConn
// is ready to write requests to.
-func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (*persistConn, error) {
+func (t *Transport) getConn(treq *transportRequest, cm connectMethod) (pc *persistConn, err error) {
req := treq.Request
trace := treq.trace
ctx := req.Context()
if trace != nil && trace.GetConn != nil {
trace.GetConn(cm.addr())
}
- if pc, idleSince := t.getIdleConn(cm); pc != nil {
+
+ w := &wantConn{
+ cm: cm,
+ key: cm.key(),
+ ctx: ctx,
+ ready: make(chan struct{}, 1),
+ beforeDial: testHookPrePendingDial,
+ afterDial: testHookPostPendingDial,
+ }
+ defer func() {
+ if err != nil {
+ w.cancel(t, err)
+ }
+ }()
+
+ // Queue for idle connection.
+ if delivered := t.queueForIdleConn(w); delivered {
+ pc := w.pc
if trace != nil && trace.GotConn != nil {
- trace.GotConn(pc.gotIdleConnTrace(idleSince))
+ trace.GotConn(pc.gotIdleConnTrace(pc.idleAt))
}
// set request canceler to some non-nil function so we
// can detect whether it was cleared between now and when
return pc, nil
}
- type dialRes struct {
- pc *persistConn
- err error
- }
- dialc := make(chan dialRes)
- cmKey := cm.key()
-
- // Copy these hooks so we don't race on the postPendingDial in
- // the goroutine we launch. Issue 11136.
- testHookPrePendingDial := testHookPrePendingDial
- testHookPostPendingDial := testHookPostPendingDial
-
- handlePendingDial := func() {
- testHookPrePendingDial()
- go func() {
- if v := <-dialc; v.err == nil {
- t.putOrCloseIdleConn(v.pc)
- } else {
- t.decHostConnCount(cmKey)
- }
- testHookPostPendingDial()
- }()
- }
-
cancelc := make(chan error, 1)
t.setReqCanceler(req, func(err error) { cancelc <- err })
- if t.MaxConnsPerHost > 0 {
- select {
- case <-t.incHostConnCount(cmKey):
- // count below conn per host limit; proceed
- case pc := <-t.getIdleConnCh(cm):
- if trace != nil && trace.GotConn != nil {
- trace.GotConn(httptrace.GotConnInfo{Conn: pc.conn, Reused: pc.isReused()})
- }
- return pc, nil
- case <-req.Cancel:
- return nil, errRequestCanceledConn
- case <-req.Context().Done():
- return nil, req.Context().Err()
- case err := <-cancelc:
- if err == errRequestCanceled {
- err = errRequestCanceledConn
- }
- return nil, err
- }
- }
+ // Queue for permission to dial.
+ t.queueForDial(w)
- go func() {
- pc, err := t.dialConn(ctx, cm)
- dialc <- dialRes{pc, err}
- }()
-
- idleConnCh := t.getIdleConnCh(cm)
+ // Wait for completion or cancellation.
select {
- case v := <-dialc:
- // Our dial finished.
- if v.pc != nil {
- if trace != nil && trace.GotConn != nil && v.pc.alt == nil {
- trace.GotConn(httptrace.GotConnInfo{Conn: v.pc.conn})
+ case <-w.ready:
+ // Trace success but only for HTTP/1.
+ // HTTP/2 calls trace.GotConn itself.
+ if w.pc != nil && w.pc.alt == nil && trace != nil && trace.GotConn != nil {
+ trace.GotConn(httptrace.GotConnInfo{Conn: w.pc.conn, Reused: w.pc.isReused()})
+ }
+ if w.err != nil {
+			// If the request has been canceled, that's probably
+ // what caused w.err; if so, prefer to return the
+ // cancellation error (see golang.org/issue/16049).
+ select {
+ case <-req.Cancel:
+ return nil, errRequestCanceledConn
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ case err := <-cancelc:
+ if err == errRequestCanceled {
+ err = errRequestCanceledConn
+ }
+ return nil, err
+ default:
+ // return below
}
- return v.pc, nil
}
- // Our dial failed. See why to return a nicer error
- // value.
- t.decHostConnCount(cmKey)
- select {
- case <-req.Cancel:
- // It was an error due to cancellation, so prioritize that
- // error value. (Issue 16049)
- return nil, errRequestCanceledConn
- case <-req.Context().Done():
- return nil, req.Context().Err()
- case err := <-cancelc:
- if err == errRequestCanceled {
- err = errRequestCanceledConn
- }
- return nil, err
- default:
- // It wasn't an error due to cancellation, so
- // return the original error message:
- return nil, v.err
- }
- case pc := <-idleConnCh:
- // Another request finished first and its net.Conn
- // became available before our dial. Or somebody
- // else's dial that they didn't use.
- // But our dial is still going, so give it away
- // when it finishes:
- handlePendingDial()
- if trace != nil && trace.GotConn != nil {
- trace.GotConn(httptrace.GotConnInfo{Conn: pc.conn, Reused: pc.isReused()})
- }
- return pc, nil
+ return w.pc, w.err
case <-req.Cancel:
- handlePendingDial()
return nil, errRequestCanceledConn
case <-req.Context().Done():
- handlePendingDial()
return nil, req.Context().Err()
case err := <-cancelc:
- handlePendingDial()
if err == errRequestCanceled {
err = errRequestCanceledConn
}
}
}
-// incHostConnCount increments the count of connections for a
-// given host. It returns an already-closed channel if the count
-// is not at its limit; otherwise it returns a channel which is
-// notified when the count is below the limit.
-func (t *Transport) incHostConnCount(cmKey connectMethodKey) <-chan struct{} {
+// queueForDial queues w to wait for permission to begin dialing.
+// Once w receives permission to dial, it will do so in a separate goroutine.
+func (t *Transport) queueForDial(w *wantConn) {
+ w.beforeDial()
if t.MaxConnsPerHost <= 0 {
- return connsPerHostClosedCh
+ go t.dialConnFor(w)
+ return
}
- t.connCountMu.Lock()
- defer t.connCountMu.Unlock()
- if t.connPerHostCount[cmKey] == t.MaxConnsPerHost {
- if t.connPerHostAvailable == nil {
- t.connPerHostAvailable = make(map[connectMethodKey]chan struct{})
- }
- ch, ok := t.connPerHostAvailable[cmKey]
- if !ok {
- ch = make(chan struct{})
- t.connPerHostAvailable[cmKey] = ch
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+
+ if n := t.connsPerHost[w.key]; n < t.MaxConnsPerHost {
+ if t.connsPerHost == nil {
+ t.connsPerHost = make(map[connectMethodKey]int)
}
- return ch
+ t.connsPerHost[w.key] = n + 1
+ go t.dialConnFor(w)
+ return
}
- if t.connPerHostCount == nil {
- t.connPerHostCount = make(map[connectMethodKey]int)
+
+ if t.connsPerHostWait == nil {
+ t.connsPerHostWait = make(map[connectMethodKey]wantConnQueue)
}
- t.connPerHostCount[cmKey]++
- // return a closed channel to avoid race: if decHostConnCount is called
- // after incHostConnCount and during the nil check, decHostConnCount
- // will delete the channel since it's not being listened on yet.
- return connsPerHostClosedCh
+ q := t.connsPerHostWait[w.key]
+ q.cleanFront()
+ q.pushBack(w)
+ t.connsPerHostWait[w.key] = q
}
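
From the public API side, this queue only comes into play when Transport.MaxConnsPerHost is set. A plausible configuration, with limits chosen arbitrarily for illustration:

package main

import (
	"net/http"
	"time"
)

func main() {
	// With MaxConnsPerHost set, requests beyond the limit now wait in the
	// per-host queue for either an idle connection or dial permission,
	// instead of racing unbounded dials.
	tr := &http.Transport{
		MaxConnsPerHost:     2,
		MaxIdleConnsPerHost: 2,
		IdleConnTimeout:     30 * time.Second,
	}
	client := &http.Client{Transport: tr, Timeout: 10 * time.Second}
	_ = client // use client.Get / client.Do as usual
}
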
-// decHostConnCount decrements the count of connections
-// for a given host.
-// See Transport.MaxConnsPerHost.
-func (t *Transport) decHostConnCount(cmKey connectMethodKey) {
+// dialConnFor dials on behalf of w and delivers the result to w.
+// dialConnFor has received permission to dial w.cm and is counted in t.connsPerHost[w.cm.key()].
+// If the dial is canceled or unsuccessful, dialConnFor decrements t.connsPerHost[w.cm.key()].
+func (t *Transport) dialConnFor(w *wantConn) {
+ defer w.afterDial()
+
+ pc, err := t.dialConn(w.ctx, w.cm)
+ delivered := w.tryDeliver(pc, err)
+ if err == nil && (!delivered || pc.alt != nil) {
+ // pconn was not passed to w,
+ // or it is HTTP/2 and can be shared.
+ // Add to the idle connection pool.
+ t.putOrCloseIdleConn(pc)
+ }
+ if err != nil {
+ t.decConnsPerHost(w.key)
+ }
+}
+
+// decConnsPerHost decrements the per-host connection count for key,
+// which may in turn give a different waiting goroutine permission to dial.
+func (t *Transport) decConnsPerHost(key connectMethodKey) {
if t.MaxConnsPerHost <= 0 {
return
}
- t.connCountMu.Lock()
- defer t.connCountMu.Unlock()
- t.connPerHostCount[cmKey]--
- select {
- case t.connPerHostAvailable[cmKey] <- struct{}{}:
- default:
- // close channel before deleting avoids getConn waiting forever in
- // case getConn has reference to channel but hasn't started waiting.
- // This could lead to more than MaxConnsPerHost in the unlikely case
- // that > 1 go routine has fetched the channel but none started waiting.
- if t.connPerHostAvailable[cmKey] != nil {
- close(t.connPerHostAvailable[cmKey])
+
+ t.connsPerHostMu.Lock()
+ defer t.connsPerHostMu.Unlock()
+ n := t.connsPerHost[key]
+ if n == 0 {
+ // Shouldn't happen, but if it does, the counting is buggy and could
+ // easily lead to a silent deadlock, so report the problem loudly.
+ panic("net/http: internal error: connCount underflow")
+ }
+
+ // Can we hand this count to a goroutine still waiting to dial?
+ // (Some goroutines on the wait list may have timed out or
+ // gotten a connection another way. If they're all gone,
+ // we don't want to kick off any spurious dial operations.)
+ if q := t.connsPerHostWait[key]; q.len() > 0 {
+ done := false
+ for q.len() > 0 {
+ w := q.popFront()
+ if w.waiting() {
+ go t.dialConnFor(w)
+ done = true
+ break
+ }
+ }
+ if q.len() == 0 {
+ delete(t.connsPerHostWait, key)
+ } else {
+ // q is a value (like a slice), so we have to store
+ // the updated q back into the map.
+ t.connsPerHostWait[key] = q
+ }
+ if done {
+ return
}
- delete(t.connPerHostAvailable, cmKey)
- }
- if t.connPerHostCount[cmKey] == 0 {
- delete(t.connPerHostCount, cmKey)
}
-}
-// connCloseListener wraps a connection, the transport that dialed it
-// and the connected-to host key so the host connection count can be
-// transparently decremented by whatever closes the embedded connection.
-type connCloseListener struct {
- net.Conn
- t *Transport
- cmKey connectMethodKey
- didClose int32
-}
-
-func (c *connCloseListener) Close() error {
- if atomic.AddInt32(&c.didClose, 1) != 1 {
- return nil
+ // Otherwise, decrement the recorded count.
+ if n--; n == 0 {
+ delete(t.connsPerHost, key)
+ } else {
+ t.connsPerHost[key] = n
}
- err := c.Conn.Close()
- c.t.decHostConnCount(c.cmKey)
- return err
}
// The connect method and the transport can both specify a TLS
return nil
}
-func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (*persistConn, error) {
- pconn := &persistConn{
+func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *persistConn, err error) {
+ pconn = &persistConn{
t: t,
cacheKey: cm.key(),
reqch: make(chan requestAndChan, 1),
}
}
- if t.MaxConnsPerHost > 0 {
- pconn.conn = &connCloseListener{Conn: pconn.conn, t: t, cmKey: pconn.cacheKey}
- }
pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize())
pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize())
return pc.canceledErr
}
-// isReused reports whether this connection is in a known broken state.
+// isReused reports whether this connection has been used before.
func (pc *persistConn) isReused() bool {
pc.mu.Lock()
r := pc.reused
// but the server has already replied. In this case, we don't
// want to wait too long, and we want to return false so this
// connection isn't re-used.
+ t := time.NewTimer(maxWriteWaitBeforeConnReuse)
+ defer t.Stop()
select {
case err := <-pc.writeErrCh:
return err == nil
- case <-time.After(maxWriteWaitBeforeConnReuse):
+ case <-t.C:
return false
}
}
func (e *httpError) Timeout() bool { return e.timeout }
func (e *httpError) Temporary() bool { return true }
-func (e *httpError) Is(target error) bool {
- switch target {
- case os.ErrTimeout:
- return e.timeout
- case os.ErrTemporary:
- return true
- }
- return false
-}
-
var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true}
// errRequestCanceled is set to be identical to the one from h2 to facilitate
pc.broken = true
if pc.closed == nil {
pc.closed = err
- if pc.alt != nil {
- // Clean up any host connection counting.
- pc.t.decHostConnCount(pc.cacheKey)
- } else {
+ pc.t.decConnsPerHost(pc.cacheKey)
+ // Close HTTP/1 (pc.alt == nil) connection.
+ // HTTP/2 closes its connection itself.
+ if pc.alt == nil {
if err != errCallerOwnsConn {
pc.conn.Close()
}
func (tlsHandshakeTimeoutError) Temporary() bool { return true }
func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" }
-func (tlsHandshakeTimeoutError) Is(target error) bool {
- return target == os.ErrTimeout || target == os.ErrTemporary
-}
-
// fakeLocker is a sync.Locker which does nothing. It's used to guard
// test-only fields when not under test, to avoid runtime atomic
// overhead.
expected := int32(tr.MaxConnsPerHost)
if dialCnt != expected {
- t.Errorf("Too many dials (%s): %d", scheme, dialCnt)
+ t.Errorf("round 1: too many dials (%s): %d != %d", scheme, dialCnt, expected)
}
if gotConnCnt != expected {
- t.Errorf("Too many get connections (%s): %d", scheme, gotConnCnt)
+ t.Errorf("round 1: too many get connections (%s): %d != %d", scheme, gotConnCnt, expected)
}
if ts.TLS != nil && tlsHandshakeCnt != expected {
- t.Errorf("Too many tls handshakes (%s): %d", scheme, tlsHandshakeCnt)
+ t.Errorf("round 1: too many tls handshakes (%s): %d != %d", scheme, tlsHandshakeCnt, expected)
+ }
+
+ if t.Failed() {
+ t.FailNow()
}
(<-connCh).Close()
doReq()
expected++
if dialCnt != expected {
- t.Errorf("Too many dials (%s): %d", scheme, dialCnt)
+		t.Errorf("round 2: too many dials (%s): %d != %d", scheme, dialCnt, expected)
}
if gotConnCnt != expected {
- t.Errorf("Too many get connections (%s): %d", scheme, gotConnCnt)
+ t.Errorf("round 2: too many get connections (%s): %d != %d", scheme, gotConnCnt, expected)
}
if ts.TLS != nil && tlsHandshakeCnt != expected {
- t.Errorf("Too many tls handshakes (%s): %d", scheme, tlsHandshakeCnt)
+ t.Errorf("round 2: too many tls handshakes (%s): %d != %d", scheme, tlsHandshakeCnt, expected)
}
}
}
}
+// A countedConn is a net.Conn that decrements its dialer's count of live connections when finalized.
+type countedConn struct {
+ net.Conn
+}
+
+// A countingDialer dials connections and counts the number that remain reachable.
+type countingDialer struct {
+ dialer net.Dialer
+ mu sync.Mutex
+ total, live int64
+}
+
+func (d *countingDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ conn, err := d.dialer.DialContext(ctx, network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ counted := new(countedConn)
+ counted.Conn = conn
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.total++
+ d.live++
+
+ runtime.SetFinalizer(counted, d.decrement)
+ return counted, nil
+}
+
+func (d *countingDialer) decrement(*countedConn) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.live--
+}
+
+func (d *countingDialer) Read() (total, live int64) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.total, d.live
+}
+
+func TestTransportPersistConnLeakNeverIdle(t *testing.T) {
+ defer afterTest(t)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ // Close every connection so that it cannot be kept alive.
+ conn, _, err := w.(Hijacker).Hijack()
+ if err != nil {
+ t.Errorf("Hijack failed unexpectedly: %v", err)
+ return
+ }
+ conn.Close()
+ }))
+ defer ts.Close()
+
+ var d countingDialer
+ c := ts.Client()
+ c.Transport.(*Transport).DialContext = d.DialContext
+
+ body := []byte("Hello")
+ for i := 0; ; i++ {
+ total, live := d.Read()
+ if live < total {
+ break
+ }
+ if i >= 1<<12 {
+ t.Fatalf("Count of live client net.Conns (%d) not lower than total (%d) after %d Do / GC iterations.", live, total, i)
+ }
+
+ req, err := NewRequest("POST", ts.URL, bytes.NewReader(body))
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = c.Do(req)
+ if err == nil {
+ t.Fatal("expected broken connection")
+ }
+
+ runtime.GC()
+ }
+}
+
+type countedContext struct {
+ context.Context
+}
+
+type contextCounter struct {
+ mu sync.Mutex
+ live int64
+}
+
+func (cc *contextCounter) Track(ctx context.Context) context.Context {
+ counted := new(countedContext)
+ counted.Context = ctx
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.live++
+ runtime.SetFinalizer(counted, cc.decrement)
+ return counted
+}
+
+func (cc *contextCounter) decrement(*countedContext) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.live--
+}
+
+func (cc *contextCounter) Read() (live int64) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.live
+}
+
+func TestTransportPersistConnContextLeakMaxConnsPerHost(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("fails with conservative stack GC")
+ }
+
+ defer afterTest(t)
+
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ runtime.Gosched()
+ w.WriteHeader(StatusOK)
+ }))
+ defer ts.Close()
+
+ c := ts.Client()
+ c.Transport.(*Transport).MaxConnsPerHost = 1
+
+ ctx := context.Background()
+ body := []byte("Hello")
+ doPosts := func(cc *contextCounter) {
+ var wg sync.WaitGroup
+ for n := 64; n > 0; n-- {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ ctx := cc.Track(ctx)
+ req, err := NewRequest("POST", ts.URL, bytes.NewReader(body))
+ if err != nil {
+ t.Error(err)
+ }
+
+ _, err = c.Do(req.WithContext(ctx))
+ if err != nil {
+ t.Errorf("Do failed with error: %v", err)
+ }
+ }()
+ }
+ wg.Wait()
+ }
+
+ var initialCC contextCounter
+ doPosts(&initialCC)
+
+ // flushCC exists only to put pressure on the GC to finalize the initialCC
+ // contexts: the flushCC allocations should eventually displace the initialCC
+ // allocations.
+ var flushCC contextCounter
+ for i := 0; ; i++ {
+ live := initialCC.Read()
+ if live == 0 {
+ break
+ }
+ if i >= 100 {
+ t.Fatalf("%d Contexts still not finalized after %d GC cycles.", live, i)
+ }
+ doPosts(&flushCC)
+ runtime.GC()
+ }
+}
+
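
The countingDialer and contextCounter helpers above both rely on finalizers to observe when objects become unreachable. A rough standalone sketch of that counting technique, with a made-up tracked type; finalizer timing is inherently approximate, so the final count is only expected, not guaranteed, to reach zero:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

// tracked is a throwaway type whose finalizer decrements the live count.
type tracked struct{ _ [16]byte }

func main() {
	var live int64
	alloc := func() *tracked {
		t := new(tracked)
		atomic.AddInt64(&live, 1)
		runtime.SetFinalizer(t, func(*tracked) { atomic.AddInt64(&live, -1) })
		return t
	}
	for i := 0; i < 100; i++ {
		_ = alloc() // drop each value immediately so it can be collected
	}
	// Finalizers run asynchronously after GC, so poll a few times.
	for i := 0; i < 10 && atomic.LoadInt64(&live) > 0; i++ {
		runtime.GC()
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("still live:", atomic.LoadInt64(&live)) // usually near 0
}
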
// This used to crash; https://golang.org/issue/3266
func TestTransportIdleConnCrash(t *testing.T) {
defer afterTest(t)
<-didRead
}
- if got := tr.IdleConnChMapSizeForTesting(); got != 0 {
- t.Fatalf("ForDisableKeepAlives = %v, map size = %d; want 0", disableKeep, got)
+ if got := tr.IdleConnWaitMapSizeForTesting(); got != 0 {
+ t.Fatalf("for DisableKeepAlives = %v, map size = %d; want 0", disableKeep, got)
}
}
}
}
wantIdle("after second put", 0)
- tr.RequestIdleConnChForTesting() // should toggle the transport out of idle mode
+ tr.QueueForIdleConnForTesting() // should toggle the transport out of idle mode
if tr.IsIdleForTesting() {
- t.Error("shouldn't be idle after RequestIdleConnChForTesting")
+ t.Error("shouldn't be idle after QueueForIdleConnForTesting")
}
if !tr.PutIdleTestConn("http", "example.com") {
t.Fatal("after re-activation")
ln := newLocalListener(t)
defer ln.Close()
- handledPendingDial := make(chan bool, 1)
- SetPendingDialHooks(nil, func() { handledPendingDial <- true })
+ var wg sync.WaitGroup
+ SetPendingDialHooks(func() { wg.Add(1) }, wg.Done)
defer SetPendingDialHooks(nil, nil)
testDone := make(chan struct{})
doReturned <- true
<-madeRoundTripper
- <-handledPendingDial
+ wg.Wait()
}
func TestTransportReuseConnection_Gzip_Chunked(t *testing.T) {
t.Fatalf("got %#v; want *url.Error", err)
}
got := ue.Err.Error()
- want := `invalid URL port "123foo"`
+ want := `invalid port ":123foo" after host`
if got != want {
t.Errorf("got error %q; want %q", got, want)
}
// Network returns the address's network name, "ip+net".
func (n *IPNet) Network() string { return "ip+net" }
-// String returns the CIDR notation of n like "192.0.2.1/24"
+// String returns the CIDR notation of n like "192.0.2.0/24"
// or "2001:db8::/48" as defined in RFC 4632 and RFC 4291.
// If the mask is not in the canonical form, it returns the
// string which consists of an IP address, followed by a slash
// character and a mask expressed as hexadecimal form with no
-// punctuation like "198.51.100.1/c000ff00".
+// punctuation like "198.51.100.0/c000ff00".
func (n *IPNet) String() string {
nn, m := networkNumberAndMask(n)
if nn == nil || m == nil {
Address string // user@domain
}
-// Parses a single RFC 5322 address, e.g. "Barry Gibbs <bg@example.com>"
+// ParseAddress parses a single RFC 5322 address, e.g. "Barry Gibbs <bg@example.com>"
func ParseAddress(address string) (*Address, error) {
return (&addrParser{s: address}).parseSingleAddress()
}
// the deadline after successful Read or Write calls.
//
// A zero value for t means I/O operations will not time out.
+ //
+ // Note that if a TCP connection has keep-alive turned on,
+ // which is the default unless overridden by Dialer.KeepAlive
+ // or ListenConfig.KeepAlive, then a keep-alive failure may
+ // also return a timeout error. On Unix systems a keep-alive
+ // failure on I/O can be detected using
+ // errors.Is(err, syscall.ETIMEDOUT).
SetDeadline(t time.Time) error
// SetReadDeadline sets the deadline for future Read calls
return ok && t.Temporary()
}
-func (e *OpError) Is(target error) bool {
- switch target {
- case os.ErrTemporary:
- return e.Temporary()
- case os.ErrTimeout:
- return e.Timeout()
- }
- return false
-}
-
// A ParseError is the error type of literal network address parsers.
type ParseError struct {
// Type is the type of string that was expected, such as
// error and return a DNSError for which Temporary returns false.
func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary }
-func (e *DNSError) Is(target error) bool {
- switch target {
- case os.ErrTemporary:
- return e.Temporary()
- case os.ErrTimeout:
- return e.Timeout()
- }
- return false
-}
-
type writerOnly struct {
io.Writer
}
if runtime.GOOS == "freebsd" {
testenv.SkipFlaky(t, 25289)
}
+ if runtime.GOOS == "aix" {
+ testenv.SkipFlaky(t, 29685)
+ }
t.Parallel()
server := func(cs *TCPConn) error {
cs.SetLinger(0)
import (
"io"
- "os"
"sync"
"time"
)
func (timeoutError) Timeout() bool { return true }
func (timeoutError) Temporary() bool { return true }
-func (timeoutError) Is(target error) bool {
- return target == os.ErrTemporary || target == os.ErrTimeout
-}
-
type pipeAddr struct{}
func (pipeAddr) Network() string { return "pipe" }
package net
import (
- "errors"
"fmt"
- "internal/oserror"
"internal/poll"
"internal/testenv"
"io"
if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
t.Fatalf("#%d: %v", i, err)
}
- if !errors.Is(err, oserror.ErrTimeout) {
- t.Fatalf("#%d: Dial error is not os.ErrTimeout: %v", i, err)
- }
}
}
}
d time.Duration
}
- ch := make(chan error, 1)
- pasvch := make(chan result)
handler := func(ls *localServer, ln Listener) {
for {
c, err := ln.Accept()
if err != nil {
- ch <- err
- return
+ break
}
- // The server, with no timeouts of its own,
- // sending bytes to clients as fast as it can.
- go func() {
- t0 := time.Now()
- n, err := io.Copy(c, neverEnding('a'))
- dt := time.Since(t0)
- c.Close()
- pasvch <- result{n, err, dt}
- }()
+ c.Read(make([]byte, 1)) // wait for client to close connection
+ c.Close()
}
}
ls, err := newLocalServer("tcp")
}
}
for run := 0; run < numRuns; run++ {
- name := fmt.Sprintf("%v run %d/%d", timeout, run+1, numRuns)
+ name := fmt.Sprintf("%v %d/%d", timeout, run, numRuns)
t.Log(name)
+ tooSlow := time.NewTimer(5 * time.Second)
+ defer tooSlow.Stop()
+
c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
- tooLong := 5 * time.Second
- max := time.NewTimer(tooLong)
- defer max.Stop()
- actvch := make(chan result)
+ ch := make(chan result, 1)
go func() {
t0 := time.Now()
if err := c.SetDeadline(t0.Add(timeout)); err != nil {
n, err := io.Copy(ioutil.Discard, c)
dt := time.Since(t0)
c.Close()
- actvch <- result{n, err, dt}
+ ch <- result{n, err, dt}
}()
select {
- case res := <-actvch:
+ case res := <-ch:
if nerr, ok := res.err.(Error); ok && nerr.Timeout() {
- t.Logf("for %v, good client timeout after %v, reading %d bytes", name, res.d, res.n)
+ t.Logf("%v: good timeout after %v; %d bytes", name, res.d, res.n)
} else {
- t.Fatalf("for %v, client Copy = %d, %v; want timeout", name, res.n, res.err)
+ t.Fatalf("%v: Copy = %d, %v; want timeout", name, res.n, res.err)
}
- case <-max.C:
- t.Fatalf("for %v, timeout (%v) waiting for client to timeout (%v) reading", name, tooLong, timeout)
- }
-
- select {
- case res := <-pasvch:
- t.Logf("for %v, server in %v wrote %d: %v", name, res.d, res.n, res.err)
- case err := <-ch:
- t.Fatalf("for %v, Accept = %v", name, err)
- case <-max.C:
- t.Fatalf("for %v, timeout waiting for server to finish writing", name)
+ case <-tooSlow.C:
+ t.Fatalf("%v: client stuck in Dial+Copy", name)
}
}
}
import (
"errors"
"fmt"
- "internal/oserror"
"sort"
"strconv"
"strings"
Err error
}
-func (e *Error) Unwrap() error { return e.Err }
-func (e *Error) Error() string { return e.Op + " " + e.URL + ": " + e.Err.Error() }
-func (e *Error) Timeout() bool { return oserror.IsTimeout(e.Err) }
-func (e *Error) Temporary() bool { return oserror.IsTemporary(e.Err) }
+func (e *Error) Unwrap() error { return e.Err }
+func (e *Error) Error() string { return e.Op + " " + e.URL + ": " + e.Err.Error() }
+
+func (e *Error) Timeout() bool {
+ t, ok := e.Err.(interface {
+ Timeout() bool
+ })
+ return ok && t.Timeout()
+}
+
+func (e *Error) Temporary() bool {
+ t, ok := e.Err.(interface {
+ Temporary() bool
+ })
+ return ok && t.Temporary()
+}
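
The net effect of the new methods is that url.Error simply forwards Timeout and Temporary to whatever the wrapped error reports. A small sketch with a hypothetical wrapped error type:

package main

import (
	"fmt"
	"net/url"
)

// timeoutErr is a hypothetical wrapped error that reports a timeout
// but does not implement Temporary.
type timeoutErr struct{}

func (timeoutErr) Error() string { return "deadline exceeded" }
func (timeoutErr) Timeout() bool { return true }

func main() {
	err := &url.Error{Op: "Get", URL: "https://example.invalid/", Err: timeoutErr{}}
	fmt.Println(err.Timeout())   // true: forwarded from the wrapped error
	fmt.Println(err.Temporary()) // false: wrapped error has no Temporary method
}
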
func ishex(c byte) bool {
switch {
}
return host1 + host2 + host3, nil
}
+ } else if i := strings.LastIndex(host, ":"); i != -1 {
+ colonPort := host[i:]
+ if !validOptionalPort(colonPort) {
+ return "", fmt.Errorf("invalid port %q after host", colonPort)
+ }
}
var err error
return result
}
-// Hostname returns u.Host, without any port number.
+// Hostname returns u.Host, stripping any valid port number if present.
//
-// If Host is an IPv6 literal with a port number, Hostname returns the
-// IPv6 literal without the square brackets. IPv6 literals may include
-// a zone identifier.
+// If the result is enclosed in square brackets, as literal IPv6 addresses are,
+// the square brackets are removed from the result.
func (u *URL) Hostname() string {
- return stripPort(u.Host)
+ host, _ := splitHostPort(u.Host)
+ return host
}
// Port returns the port part of u.Host, without the leading colon.
-// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// If u.Host doesn't contain a valid numeric port, Port returns an empty string.
func (u *URL) Port() string {
- return portOnly(u.Host)
+ _, port := splitHostPort(u.Host)
+ return port
}
-func stripPort(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return hostport
- }
- if i := strings.IndexByte(hostport, ']'); i != -1 {
- return strings.TrimPrefix(hostport[:i], "[")
- }
- return hostport[:colon]
-}
+// splitHostPort separates host and port. If the port is not valid, it returns
+// the entire input as host, and it doesn't check the validity of the host.
+// Unlike net.SplitHostPort, but per RFC 3986, it requires ports to be numeric.
+func splitHostPort(hostport string) (host, port string) {
+ host = hostport
-func portOnly(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return ""
+ colon := strings.LastIndexByte(host, ':')
+ if colon != -1 && validOptionalPort(host[colon:]) {
+ host, port = host[:colon], host[colon+1:]
}
- if i := strings.Index(hostport, "]:"); i != -1 {
- return hostport[i+len("]:"):]
- }
- if strings.Contains(hostport, "]") {
- return ""
+
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ host = host[1 : len(host)-1]
}
- return hostport[colon+len(":"):]
+
+ return
}
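
Seen through the public API, the rewritten splitHostPort means Hostname and Port now agree on what counts as a port. For example, with an illustrative URL:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://[2001:db8::1]:8443/path")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Hostname()) // 2001:db8::1  (brackets stripped)
	fmt.Println(u.Port())     // 8443         (only valid numeric ports)
}
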
// Marshaling interface implementations.
},
// worst case host, still round trips
{
- "scheme://!$&'()*+,;=hello!:port/path",
+ "scheme://!$&'()*+,;=hello!:1/path",
&URL{
Scheme: "scheme",
- Host: "!$&'()*+,;=hello!:port",
+ Host: "!$&'()*+,;=hello!:1",
Path: "/path",
},
"",
{"http://[::1]", false},
{"http://[::1]:80", false},
{"http://[::1]:namedport", true}, // rfc3986 3.2.3
+ {"http://x:namedport", true}, // rfc3986 3.2.3
{"http://[::1]/", false},
{"http://[::1]a", true},
{"http://[::1]%23", true},
{"http://[::1%25en0]", false}, // valid zone id
{"http://[::1]:", false}, // colon, but no port OK
+ {"http://x:", false}, // colon, but no port OK
{"http://[::1]:%38%30", true}, // not allowed: % encoding only for non-ASCII
{"http://[::1%25%41]", false}, // RFC 6874 allows over-escaping in zone
{"http://[%10::1]", true}, // no %xx escapes in IP address
}
}
-func TestURLHostname(t *testing.T) {
+func TestURLHostnameAndPort(t *testing.T) {
tests := []struct {
- host string // URL.Host field
- want string
+ in string // URL.Host field
+ host string
+ port string
}{
- {"foo.com:80", "foo.com"},
- {"foo.com", "foo.com"},
- {"FOO.COM", "FOO.COM"}, // no canonicalization (yet?)
- {"1.2.3.4", "1.2.3.4"},
- {"1.2.3.4:80", "1.2.3.4"},
- {"[1:2:3:4]", "1:2:3:4"},
- {"[1:2:3:4]:80", "1:2:3:4"},
- {"[::1]:80", "::1"},
- {"[::1]", "::1"},
- {"localhost", "localhost"},
- {"localhost:443", "localhost"},
- {"some.super.long.domain.example.org:8080", "some.super.long.domain.example.org"},
- {"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:17000", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"},
- {"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"},
+ {"foo.com:80", "foo.com", "80"},
+ {"foo.com", "foo.com", ""},
+ {"foo.com:", "foo.com", ""},
+ {"FOO.COM", "FOO.COM", ""}, // no canonicalization
+ {"1.2.3.4", "1.2.3.4", ""},
+ {"1.2.3.4:80", "1.2.3.4", "80"},
+ {"[1:2:3:4]", "1:2:3:4", ""},
+ {"[1:2:3:4]:80", "1:2:3:4", "80"},
+ {"[::1]:80", "::1", "80"},
+ {"[::1]", "::1", ""},
+ {"[::1]:", "::1", ""},
+ {"localhost", "localhost", ""},
+ {"localhost:443", "localhost", "443"},
+ {"some.super.long.domain.example.org:8080", "some.super.long.domain.example.org", "8080"},
+ {"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:17000", "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "17000"},
+ {"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]", "2001:0db8:85a3:0000:0000:8a2e:0370:7334", ""},
+
+ // Ensure that even when not valid, Host is one of "Hostname",
+ // "Hostname:Port", "[Hostname]" or "[Hostname]:Port".
+ // See https://golang.org/issue/29098.
+ {"[google.com]:80", "google.com", "80"},
+ {"google.com]:80", "google.com]", "80"},
+ {"google.com:80_invalid_port", "google.com:80_invalid_port", ""},
+ {"[::1]extra]:80", "::1]extra", "80"},
+ {"google.com]extra:extra", "google.com]extra:extra", ""},
}
for _, tt := range tests {
- u := &URL{Host: tt.host}
- got := u.Hostname()
- if got != tt.want {
- t.Errorf("Hostname for Host %q = %q; want %q", tt.host, got, tt.want)
+ u := &URL{Host: tt.in}
+ host, port := u.Hostname(), u.Port()
+ if host != tt.host {
+ t.Errorf("Hostname for Host %q = %q; want %q", tt.in, host, tt.host)
}
- }
-}
-
-func TestURLPort(t *testing.T) {
- tests := []struct {
- host string // URL.Host field
- want string
- }{
- {"foo.com", ""},
- {"foo.com:80", "80"},
- {"1.2.3.4", ""},
- {"1.2.3.4:80", "80"},
- {"[1:2:3:4]", ""},
- {"[1:2:3:4]:80", "80"},
- }
- for _, tt := range tests {
- u := &URL{Host: tt.host}
- got := u.Port()
- if got != tt.want {
- t.Errorf("Port for Host %q = %q; want %q", tt.host, got, tt.want)
+ if port != tt.port {
+ t.Errorf("Port for Host %q = %q; want %q", tt.in, port, tt.port)
}
}
}
return f.readdir(n)
}
-// Readdirnames reads and returns a slice of names from the directory f.
+// Readdirnames reads the contents of the directory associated with file
+// and returns a slice of up to n names of files in the directory,
+// in directory order. Subsequent calls on the same file will yield
+// further names.
//
// If n > 0, Readdirnames returns at most n names. In this case, if
// Readdirnames returns an empty slice, it will return a non-nil error
ErrExist = errExist() // "file already exists"
ErrNotExist = errNotExist() // "file does not exist"
ErrClosed = errClosed() // "file already closed"
- ErrTimeout = errTimeout() // "deadline exceeded"
- ErrTemporary = errTemporary() // "temporary error"
ErrNoDeadline = errNoDeadline() // "file type does not support deadline"
)
func errExist() error { return oserror.ErrExist }
func errNotExist() error { return oserror.ErrNotExist }
func errClosed() error { return oserror.ErrClosed }
-func errTimeout() error { return oserror.ErrTimeout }
-func errTemporary() error { return oserror.ErrTemporary }
func errNoDeadline() error { return poll.ErrNoDeadline }
type timeout interface {
if err == target {
return true
}
- e, ok := err.(interface{ Is(error) bool })
+ // To preserve prior behavior, only examine syscall errors.
+ e, ok := err.(syscallErrorType)
return ok && e.Is(target)
}
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package os
+
+import "syscall"
+
+type syscallErrorType = syscall.Errno
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package os
+
+import "syscall"
+
+type syscallErrorType = syscall.ErrorString
t.Error("errors.Is failed, wanted success")
}
}
+
+type myErrorIs struct{ error }
+
+func (e myErrorIs) Is(target error) bool { return target == e.error }
+
+func TestErrorIsMethods(t *testing.T) {
+ if os.IsPermission(myErrorIs{os.ErrPermission}) {
+ t.Error("os.IsPermission(err) = true when err.Is(os.ErrPermission), wanted false")
+ }
+}
// }
// fmt.Printf("read %d bytes: %q\n", count, data[:count])
//
+// Note: The maximum number of concurrent operations on a File may be limited by
+// the OS or the system. The number should be high, but exceeding it may degrade
+// performance or cause other issues.
+//
package os
import (
t.Skip("skipping on Plan 9; does not support runtime poller")
case "js":
t.Skip("skipping on js; no support for os.Pipe")
+ case "darwin":
+ t.Skip("skipping on Darwin; issue 33953")
}
threads := 100
var r int
for {
var e error
- r, e = unix.Openat(dirfd, name, O_RDONLY, 0)
+ r, e = unix.Openat(dirfd, name, O_RDONLY|syscall.O_CLOEXEC, 0)
if e == nil {
break
}
Ctty: int(slave.Fd()),
}
- // Test ctty management by sending enough child fd to overlap the
- // parent's fd intended for child's ctty.
- for 2+len(cmd.ExtraFiles) < cmd.SysProcAttr.Ctty {
- dummy, err := os.Open(os.DevNull)
- if err != nil {
- t.Fatal(err)
- }
- defer dummy.Close()
- cmd.ExtraFiles = append(cmd.ExtraFiles, dummy)
- }
-
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
func TestJoin(t *testing.T) {
for _, test := range jointests {
if p := Join(test.elem...); p != test.path {
- t.Errorf("join(%q) = %q, want %q", test.elem, p, test.path)
+ t.Errorf("Join(%q) = %q, want %q", test.elem, p, test.path)
}
}
}
return tot
}
+// This will be index 5.
+func (p *Point) Int64Method(x int64) int64 {
+ return x
+}
+
+// This will be index 6.
+func (p *Point) Int32Method(x int32) int32 {
+ return x
+}
+
func TestMethod(t *testing.T) {
// Non-curried method of type.
p := Point{3, 4}
if i != 425 {
t.Errorf("Interface MethodByName returned %d; want 425", i)
}
+
+ // For issue #33628: method args are not stored at the right offset
+ // on amd64p32.
+ m64 := ValueOf(&p).MethodByName("Int64Method").Interface().(func(int64) int64)
+ if x := m64(123); x != 123 {
+ t.Errorf("Int64Method returned %d; want 123", x)
+ }
+ m32 := ValueOf(&p).MethodByName("Int32Method").Interface().(func(int32) int32)
+ if x := m32(456); x != 456 {
+ t.Errorf("Int32Method returned %d; want 456", x)
+ }
}
func TestVariadicMethodValue(t *testing.T) {
// 300 words per second.
// Hopefully a normal Go thread will get the profiling
// signal at least once every few seconds.
- extra [1000]uintptr
- numExtra int
- lostExtra uint64 // count of frames lost because extra is full
+ extra [1000]uintptr
+ numExtra int
+ lostExtra uint64 // count of frames lost because extra is full
+ lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
}
var cpuprof cpuProfile
}
if prof.hz != 0 { // implies cpuprof.log != nil
- if p.numExtra > 0 || p.lostExtra > 0 {
+ if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 {
p.addExtra()
}
hdr := [1]uint64{1}
_LostExternalCodePC + sys.PCQuantum,
_ExternalCodePC + sys.PCQuantum,
}
- cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
+ p.log.write(nil, 0, hdr[:], lostStk[:])
p.lostExtra = 0
}
-}
-func (p *cpuProfile) addLostAtomic64(count uint64) {
- hdr := [1]uint64{count}
- lostStk := [2]uintptr{
- _LostSIGPROFDuringAtomic64PC + sys.PCQuantum,
- _SystemPC + sys.PCQuantum,
+ if p.lostAtomic > 0 {
+ hdr := [1]uint64{p.lostAtomic}
+ lostStk := [2]uintptr{
+ _LostSIGPROFDuringAtomic64PC + sys.PCQuantum,
+ _SystemPC + sys.PCQuantum,
+ }
+ p.log.write(nil, 0, hdr[:], lostStk[:])
+ p.lostAtomic = 0
}
- cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
+
}
// CPUProfile panics.
t.mTreap.treap.walkTreap(checkTreapNode)
t.mTreap.treap.validateInvariants()
}
+
+func RunGetgThreadSwitchTest() {
+ // Test that getg works correctly with thread switch.
+ // With gccgo, if we generate getg inlined, the backend
+ // may cache the address of the TLS variable, which
+ // will become invalid after a thread switch. This test
+ // checks that the bad caching doesn't happen.
+
+ ch := make(chan int)
+ go func(ch chan int) {
+ ch <- 5
+ LockOSThread()
+ }(ch)
+
+ g1 := getg()
+
+ // Block on a receive. This is likely to get us a thread
+ // switch. If we yield to the sender goroutine, it will
+ // lock the thread, forcing us to resume on a different
+ // thread.
+ <-ch
+
+ g2 := getg()
+ if g1 != g2 {
+ panic("g1 != g2")
+ }
+
+ // Also test getg after some control flow, as the
+ // backend is sensitive to control flow.
+ g3 := getg()
+ if g1 != g3 {
+ panic("g1 != g3")
+ }
+}
var physPageSize uintptr
// physHugePageSize is the size in bytes of the OS's default physical huge
-// page size whose allocation is opaque to the application.
+// page size whose allocation is opaque to the application. It is assumed
+// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
-var physHugePageSize uintptr
+//
+// Since physHugePageSize is always assumed to be a power of two,
+// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
+// The purpose of physHugePageShift is to avoid doing divisions in
+// performance critical functions.
+var (
+ physHugePageSize uintptr
+ physHugePageShift uint
+)
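
The reason for carrying physHugePageShift alongside physHugePageSize is that, for a power of two, division can be replaced by a shift. A tiny sketch of the equivalence, assuming a typical 2 MiB huge page size:

package main

import "fmt"

func main() {
	// Assume a 2 MiB huge page size, as on typical x86-64 Linux systems.
	const hugePageSize = 2 << 20

	// Derive the shift the same way the init code does for a power of two.
	shift := uint(0)
	for 1<<shift != hugePageSize {
		shift++
	}

	totalBytes := uintptr(13 << 20)
	// For powers of two, a right shift is equivalent to dividing by the size.
	fmt.Println(totalBytes/hugePageSize == totalBytes>>shift) // true
	fmt.Println(totalBytes >> shift)                          // 6 huge pages
}
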
// OS memory management abstraction layer
//
print("system page size (", physPageSize, ") must be a power of 2\n")
throw("bad system page size")
}
+ if physHugePageSize&(physHugePageSize-1) != 0 {
+ print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
+ throw("bad system huge page size")
+ }
+ if physHugePageSize != 0 {
+ // Since physHugePageSize is a power of 2, it suffices to increase
+ // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
+ for 1<<physHugePageShift != physHugePageSize {
+ physHugePageShift++
+ }
+ }
// Initialize the heap.
mheap_.init()
if debug.sbrk != 0 {
align := uintptr(16)
if typ != nil {
- align = uintptr(typ.align)
+ // TODO(austin): This should be just
+ // align = uintptr(typ.align)
+ // but that's only 4 on 32-bit platforms,
+ // even if there's a uint64 field in typ (see #599).
+ // This causes 64-bit atomic accesses to panic.
+ // Hence, we use stricter alignment that matches
+ // the normal allocator better.
+ if size&7 == 0 {
+ align = 8
+ } else if size&3 == 0 {
+ align = 4
+ } else if size&1 == 0 {
+ align = 2
+ } else {
+ align = 1
+ }
}
return persistentalloc(size, align, &memstats.other_sys)
}
}
if rate := MemProfileRate; rate > 0 {
- if rate != 1 && int32(size) < c.next_sample {
- c.next_sample -= int32(size)
+ if rate != 1 && size < c.next_sample {
+ c.next_sample -= size
} else {
mp := acquirem()
profilealloc(mp, x, size)
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
-func nextSample() int32 {
+func nextSample() uintptr {
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
if g := getg(); g == g.m.gsignal {
}
}
- return fastexprand(MemProfileRate)
+ return uintptr(fastexprand(MemProfileRate))
}
// fastexprand returns a random number from an exponential distribution with
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
-func nextSampleNoFP() int32 {
+func nextSampleNoFP() uintptr {
// Set first allocation sample size.
rate := MemProfileRate
if rate > 0x3fffffff { // make 2*rate not overflow
rate = 0x3fffffff
}
if rate != 0 {
- return int32(fastrand() % uint32(2*rate))
+ return uintptr(fastrand() % uint32(2*rate))
}
return 0
}
type mcache struct {
// The following members are accessed on every malloc,
// so they are grouped here for better caching.
- next_sample int32 // trigger heap sample after allocating this many bytes
+ next_sample uintptr // trigger heap sample after allocating this many bytes
local_scan uintptr // bytes of scannable heap allocated
// Allocator cache for tiny objects w/o pointers.
if physHugePageSize != 0 {
// Start by computing the amount of free memory we have in huge pages
// in total. Trivially, this is all the huge page work we need to do.
- hugeWork := uint64(mheap_.free.unscavHugePages * physHugePageSize)
+ hugeWork := uint64(mheap_.free.unscavHugePages) << physHugePageShift
// ...but it could turn out that there's more huge work to do than
// total work, so cap it at total work. This might happen for very large
// that there are free chunks of memory larger than a huge page that we don't want
// to scavenge.
if hugeWork >= totalWork {
- hugePages := totalWork / uint64(physHugePageSize)
- hugeWork = hugePages * uint64(physHugePageSize)
+ hugePages := totalWork >> physHugePageShift
+ hugeWork = hugePages << physHugePageShift
}
// Everything that's not huge work is regular work. At this point we
// know huge work so we can calculate how much time that will take
// based on scavengePageRate (which applies to pages of any size).
regularWork = totalWork - hugeWork
- hugeTime = hugeWork / uint64(physHugePageSize) * scavengeHugePagePeriod
+ hugeTime = (hugeWork >> physHugePageShift) * scavengeHugePagePeriod
}
// Finally, we can compute how much time it'll take to do the regular work
// and the total time to do all the work.
h.free.insert(other)
}
- hpBefore := s.hugePages()
+ hpMiddle := s.hugePages()
// Coalesce with earlier, later spans.
+ var hpBefore uintptr
if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
if s.scavenged == before.scavenged {
+ hpBefore = before.hugePages()
merge(before, s, before)
} else {
realign(before, s, before)
}
// Now check to see if next (greater addresses) span is free and can be coalesced.
+ var hpAfter uintptr
if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
if s.scavenged == after.scavenged {
+ hpAfter = after.hugePages()
merge(s, after, after)
} else {
realign(s, after, after)
}
}
-
- if !s.scavenged && s.hugePages() > hpBefore {
+ if !s.scavenged && s.hugePages() > hpBefore+hpMiddle+hpAfter {
// If s has grown such that it now may contain more huge pages than it
- // did before, then mark the whole region as huge-page-backable.
+ // and its now-coalesced neighbors did before, then mark the whole region
+ // as huge-page-backable.
//
// Otherwise, on systems where we break up huge pages (like Linux)
// s may not be backed by huge pages because it could be made up of
// pieces which are broken up in the underlying VMA. The primary issue
// with this is that it can lead to a poor estimate of the amount of
// free memory backed by huge pages for determining the scavenging rate.
+ //
+ // TODO(mknyszek): Measure the performance characteristics of sysHugePage
+ // and determine whether it makes sense to only sysHugePage on the pages
+ // that matter, or if it's better to just mark the whole region.
sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
}
}
end &^= physHugePageSize - 1
}
if start < end {
- return (end - start) / physHugePageSize
+ return (end - start) >> physHugePageShift
}
return 0
}
}
// Same as above, but calling from the runtime is allowed.
+//
+// Using this function is necessary for any panic that may be
+// generated by runtime.sigpanic, since those are always called by the
+// runtime.
func panicCheck2(err string) {
+ // panic allocates, so to avoid recursive malloc, turn panics
+ // during malloc into throws.
gp := getg()
if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
throw(err)
}
}
+// Many of the following panic entry-points turn into throws when they
+// happen in various runtime contexts. These should never happen in
+// the runtime, and if they do, they indicate a serious issue and
+// should not be caught by user code.
+//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
func runtime_getProfLabel() unsafe.Pointer
// SetGoroutineLabels sets the current goroutine's labels to match ctx.
+// A new goroutine inherits the labels of the goroutine that created it.
// This is a lower-level API than Do, which should be used instead when possible.
func SetGoroutineLabels(ctx context.Context) {
ctxLabels, _ := ctx.Value(labelContextKey{}).(*labelMap)
// Do calls f with a copy of the parent context with the
// given labels added to the parent's label map.
+// Goroutines spawned while executing f will inherit the augmented label-set.
// Each key/value pair in labels is inserted into the label map in the
// order provided, overriding any previous value for the same key.
// The augmented label map will be set for the duration of the call to f
func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
func _VDSO() { _VDSO() }
-// Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
-var lostAtomic64Count uint64
-
var _SystemPC = funcPC(_System)
var _ExternalCodePC = funcPC(_ExternalCode)
var _LostExternalCodePC = funcPC(_LostExternalCode)
}
if prof.hz != 0 {
- if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm") && lostAtomic64Count > 0 {
- cpuprof.addLostAtomic64(lostAtomic64Count)
- lostAtomic64Count = 0
- }
cpuprof.add(gp, stk[:n])
}
getg().m.mallocing--
})
}
}
+
+func TestGetgThreadSwitch(t *testing.T) {
+ runtime.RunGetgThreadSwitchTest()
+}
break Send
case sigReceiving:
if atomic.Cas(&sig.state, sigReceiving, sigIdle) {
+ if GOOS == "darwin" {
+ sigNoteWakeup(&sig.note)
+ break Send
+ }
notewakeup(&sig.note)
break Send
}
throw("signal_recv: inconsistent state")
case sigIdle:
if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
+ if GOOS == "darwin" {
+ sigNoteSleep(&sig.note)
+ break Receive
+ }
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
break Receive
// to use for initialization. It does not pass
// signal information in m.
sig.inuse = true // enable reception of signals; cannot disable
+ if GOOS == "darwin" {
+ sigNoteSetup(&sig.note)
+ return
+ }
noteclear(&sig.note)
return
}
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The current implementation of notes on Darwin is not async-signal-safe,
+// so on Darwin the sigqueue code uses different functions to wake up the
+// signal_recv thread. This file holds the non-Darwin implementations of
+// those functions. These functions will never be called.
+
+// +build !darwin
+// +build !plan9
+
+package runtime
+
+func sigNoteSetup(*note) {
+ throw("sigNoteSetup")
+}
+
+func sigNoteSleep(*note) {
+ throw("sigNoteSleep")
+}
+
+func sigNoteWakeup(*note) {
+ throw("sigNoteWakeup")
+}
// ParseFloat returns the nearest floating-point number rounded
// using IEEE754 unbiased rounding.
// (Parsing a hexadecimal floating-point value only rounds when
-// there are more bits in the hexadecimal representatiton than
+// there are more bits in the hexadecimal representation than
// will fit in the mantissa.)
//
// The errors that ParseFloat returns have concrete type *NumError
//
// If base == 0, the base is implied by the string's prefix:
// base 2 for "0b", base 8 for "0" or "0o", base 16 for "0x",
-// and base 10 otherwise.
+// and base 10 otherwise. Also, for base == 0 only, underscore
+// characters are permitted per the Go integer literal syntax.
// If base is below 0, is 1, or is above 36, an error is returned.
//
// The bitSize argument specifies the integer type
}
func ExampleTitle() {
+ // Compare this example to the ToTitle example.
fmt.Println(strings.Title("her royal highness"))
- // Output: Her Royal Highness
+ fmt.Println(strings.Title("loud noises"))
+ fmt.Println(strings.Title("хлеб"))
+ // Output:
+ // Her Royal Highness
+ // Loud Noises
+ // Хлеб
}
func ExampleToTitle() {
+ // Compare this example to the Title example.
+ fmt.Println(strings.ToTitle("her royal highness"))
fmt.Println(strings.ToTitle("loud noises"))
fmt.Println(strings.ToTitle("хлеб"))
// Output:
+ // HER ROYAL HIGHNESS
// LOUD NOISES
// ХЛЕБ
}
// NewReplacer returns a new Replacer from a list of old, new string
// pairs. Replacements are performed in the order they appear in the
-// target string, without overlapping matches.
+// target string, without overlapping matches. The old string
+// comparisons are done in argument order.
//
// NewReplacer panics if given an odd number of arguments.
func NewReplacer(oldnew ...string) *Replacer {
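
To make the added sentence about argument order concrete, a small example where two old strings could match at the same position; the expected output is noted in the comment:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "a" comes before "ab" in the argument list, so it wins at position 0.
	r := strings.NewReplacer("a", "1", "ab", "2")
	fmt.Println(r.Replace("ab")) // 1b
}
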
return Map(unicode.ToLower, s)
}
-// ToTitle returns a copy of the string s with all Unicode letters mapped to their title case.
+// ToTitle returns a copy of the string s with all Unicode letters mapped to
+// their Unicode title case.
func ToTitle(s string) string { return Map(unicode.ToTitle, s) }
// ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their
}
// ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their
-// title case, giving priority to the special casing rules.
+// Unicode title case, giving priority to the special casing rules.
func ToTitleSpecial(c unicode.SpecialCase, s string) string {
return Map(c.ToTitle, s)
}
}
// Title returns a copy of the string s with all Unicode letters that begin words
-// mapped to their title case.
+// mapped to their Unicode title case.
//
// BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly.
func Title(s string) string {
}
func NewPoolDequeue(n int) PoolDequeue {
- return &poolDequeue{
+ d := &poolDequeue{
vals: make([]eface, n),
}
+ // For testing purposes, set the head and tail indexes close
+ // to wrapping around.
+ d.headTail = d.pack(1<<dequeueBits-500, 1<<dequeueBits-500)
+ return d
}
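
For context, a minimal sketch of how two ring-buffer indexes can be packed into one 64-bit word so they are read and updated atomically together; the 32-bit split is an assumption for illustration, mirroring what the dequeueBits constant used above suggests.

	package main

	import "fmt"

	const dequeueBits = 32

	// pack stores head in the high 32 bits and tail in the low 32 bits.
	func pack(head, tail uint32) uint64 {
		const mask = 1<<dequeueBits - 1
		return uint64(head)<<dequeueBits | uint64(tail)&mask
	}

	// unpack recovers the two indexes from a packed word.
	func unpack(ptrs uint64) (head, tail uint32) {
		const mask = 1<<dequeueBits - 1
		return uint32(ptrs >> dequeueBits & mask), uint32(ptrs & mask)
	}

	func main() {
		// Start both indexes near the wrap-around point, as the test hook does.
		ht := pack(1<<dequeueBits-500, 1<<dequeueBits-500)
		h, t := unpack(ht)
		fmt.Println(h, t) // 4294966796 4294966796
	}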
func (d *poolDequeue) PushHead(val interface{}) bool {
// without calling f.
//
func (o *Once) Do(f func()) {
+ // Note: Here is an incorrect implementation of Do:
+ //
+ // if atomic.CompareAndSwapUint32(&o.done, 0, 1) {
+ // f()
+ // }
+ //
+ // Do guarantees that when it returns, f has finished.
+ // This implementation would not implement that guarantee:
+ // given two simultaneous calls, the winner of the cas would
+ // call f, and the second would return immediately, without
+ // waiting for the first's call to f to complete.
+ // This is why the slow path falls back to a mutex, and why
+ // the atomic.StoreUint32 must be delayed until after f returns.
+
if atomic.LoadUint32(&o.done) == 0 {
// Outlined slow-path to allow inlining of the fast-path.
o.doSlow(f)
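
For reference, a minimal standalone sketch of the mutex-based slow path the comment above argues for (written here as a separate type, with field names assumed for illustration): the atomic store of done is deferred until f has returned, so a caller that loses the race blocks on the mutex until the winning call to f completes.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// onceSketch mirrors the shape of a Once-like type for illustration only.
	type onceSketch struct {
		done uint32
		m    sync.Mutex
	}

	func (o *onceSketch) Do(f func()) {
		if atomic.LoadUint32(&o.done) == 0 {
			o.doSlow(f)
		}
	}

	func (o *onceSketch) doSlow(f func()) {
		o.m.Lock()
		defer o.m.Unlock()
		if o.done == 0 {
			// Mark done only after f returns: callers that lose the race
			// block on m above until f has finished, preserving the
			// "f has returned when Do returns" guarantee.
			defer atomic.StoreUint32(&o.done, 1)
			f()
		}
	}

	func main() {
		var once onceSketch
		for i := 0; i < 3; i++ {
			once.Do(func() { fmt.Println("initialized once") })
		}
	}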
func testPoolDequeue(t *testing.T, d PoolDequeue) {
const P = 10
- // In long mode, do enough pushes to wrap around the 21-bit
- // indexes.
- N := 1<<21 + 1000
+ var N int = 2e6
if testing.Short() {
N = 1e3
}
have := make([]int32, N)
var stop int32
var wg WaitGroup
+ record := func(val int) {
+ atomic.AddInt32(&have[val], 1)
+ if val == N-1 {
+ atomic.StoreInt32(&stop, 1)
+ }
+ }
// Start P-1 consumers.
for i := 1; i < P; i++ {
val, ok := d.PopTail()
if ok {
fail = 0
- atomic.AddInt32(&have[val.(int)], 1)
- if val.(int) == N-1 {
- atomic.StoreInt32(&stop, 1)
- }
+ record(val.(int))
} else {
// Speed up the test by
// allowing the pusher to run.
val, ok := d.PopHead()
if ok {
nPopHead++
- atomic.AddInt32(&have[val.(int)], 1)
+ record(val.(int))
}
}
}
t.Errorf("expected have[%d] = 1, got %d", i, count)
}
}
- if nPopHead == 0 {
- // In theory it's possible in a valid schedule for
- // popHead to never succeed, but in practice it almost
- // always succeeds, so this is unlikely to flake.
+ // Check that at least some PopHeads succeeded. We skip this
+ // check in short mode because it's common enough that the
+ // queue will stay nearly empty all the time and a PopTail
+ // will happen during the window between every PushHead and
+ // PopHead.
+ if !testing.Short() && nPopHead == 0 {
t.Errorf("popHead never succeeded")
}
}
}
}
- // Detach fd 0 from tty
- if sys.Noctty {
- _, err1 = raw_ioctl(0, TIOCNOTTY, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set the controlling TTY to Ctty
- if sys.Setctty {
- if TIOCSCTTY == 0 {
- err1 = ENOSYS
- goto childerror
- }
- _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
// Pass 1: look for fd[i] < i and move those up above len(fd)
// so that pass 2 won't stomp on an fd it needs later.
if pipe < nextfd {
raw_close(i)
}
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, err1 = raw_ioctl(0, TIOCNOTTY, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ if TIOCSCTTY == 0 {
+ err1 = ENOSYS
+ goto childerror
+ }
+ _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
// Time to exec.
err1 = raw_execve(argv0, &argv[0], &envv[0])
}
}
- // Detach fd 0 from tty
- if sys.Noctty {
- _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), 0, uintptr(TIOCNOTTY), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set the controlling TTY to Ctty
- if sys.Setctty {
- _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
// Pass 1: look for fd[i] < i and move those up above len(fd)
// so that pass 2 won't stomp on an fd it needs later.
if pipe < nextfd {
rawSyscall(funcPC(libc_close_trampoline), uintptr(i), 0, 0)
}
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), 0, uintptr(TIOCNOTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ _, _, err1 = rawSyscall(funcPC(libc_ioctl_trampoline), uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
// Time to exec.
_, _, err1 = rawSyscall(funcPC(libc_execve_trampoline),
uintptr(unsafe.Pointer(argv0)),
}
}
- // Detach fd 0 from tty
- if sys.Noctty {
- _, err1 = raw_ioctl(0, TIOCNOTTY, 0)
- if err1 != 0 {
- goto childerror
- }
- }
-
- // Set the controlling TTY to Ctty
- if sys.Setctty {
- _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, 1)
- if err1 != 0 {
- goto childerror
- }
- }
-
// Pass 1: look for fd[i] < i and move those up above len(fd)
// so that pass 2 won't stomp on an fd it needs later.
if pipe < nextfd {
raw_close(i)
}
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, err1 = raw_ioctl(0, TIOCNOTTY, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ _, err1 = raw_ioctl(sys.Ctty, TIOCSCTTY, 1)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
// Enable tracing if requested.
// Do this right before exec so that we don't unnecessarily trace the runtime
// setting up after the fork. See issue #21428.
func (e Errno) Is(target error) bool {
switch target {
- case oserror.ErrTemporary:
- return e.Temporary()
- case oserror.ErrTimeout:
- return e.Timeout()
case oserror.ErrPermission:
return e == EACCES || e == EPERM
case oserror.ErrExist:
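
A small usage sketch of how this mapping surfaces to callers (a standalone example, not part of this change): errors.Is can match a wrapped Errno against the portable sentinel errors.

	package main

	import (
		"errors"
		"fmt"
		"os"
		"syscall"
	)

	func main() {
		// A typical wrapped syscall error, as returned by the os package.
		err := &os.PathError{Op: "open", Path: "/etc/shadow", Err: syscall.EACCES}

		// Errno.Is maps EACCES and EPERM to the permission sentinel.
		fmt.Println(errors.Is(err, os.ErrPermission)) // true
		fmt.Println(errors.Is(err, os.ErrNotExist))   // false
	}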
const (
_SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); }
- _SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \
- _SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \
- _SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \
- _SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \
- _SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \
- _SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \
+ _SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, _Out_ struct stat *buf, int flag); }
+ _SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, _Out_writes_bytes_(count) char *buf, size_t count, _Out_ off_t *basep); }
+ _SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, _Out_ struct statfs *buf); }
+ _SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, _Out_ struct statfs *buf); }
+ _SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat(_Out_writes_bytes_opt_(bufsize) struct statfs *buf, long bufsize, int mode); }
+ _SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, mode_t mode, dev_t dev); }
)
// See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html.
func (e Errno) Is(target error) bool {
switch target {
- case oserror.ErrTemporary:
- return e.Temporary()
- case oserror.ErrTimeout:
- return e.Timeout()
case oserror.ErrPermission:
return e == EACCES || e == EPERM
case oserror.ErrExist:
// It is not meant to be called directly and is not subject to the Go 1 compatibility document.
// It may change signature from release to release.
func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M {
- // In most cases, Init has already been called by the testinginit code
- // that 'go test' injects into test packages.
- // Call it again here to handle cases such as:
- // - test packages that don't import "testing" (such as example-only packages)
- // - direct use of MainStart (though that isn't well-supported)
Init()
return &M{
deps: deps,
// For instance, if the mode is ScanIdents (not ScanStrings), the string
// "foo" is scanned as the token sequence '"' Ident '"'.
//
+// Use GoTokens to configure the Scanner such that it accepts all Go
+// literal tokens including Go identifiers. Comments will be skipped.
+//
const (
- ScanIdents = 1 << -Ident
- ScanInts = 1 << -Int
- ScanFloats = 1 << -Float // includes Ints and hexadecimal floats
- ScanChars = 1 << -Char
- ScanStrings = 1 << -String
- ScanRawStrings = 1 << -RawString
- ScanComments = 1 << -Comment
- SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
- AllowDigitSeparators = 1 << -allowDigitSeparators // if set, number literals may contain underscores as digit separators
- GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments | AllowDigitSeparators
+ ScanIdents = 1 << -Ident
+ ScanInts = 1 << -Int
+ ScanFloats = 1 << -Float // includes Ints and hexadecimal floats
+ ScanChars = 1 << -Char
+ ScanStrings = 1 << -String
+ ScanRawStrings = 1 << -RawString
+ ScanComments = 1 << -Comment
+ SkipComments = 1 << -skipComment // if set with ScanComments, comments become white space
+ GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
)
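
A brief usage sketch of the GoTokens configuration described above (standalone example): identifiers, numbers (including underscores as digit separators), runes, and strings come back as tokens, while comments are skipped.

	package main

	import (
		"fmt"
		"strings"
		"text/scanner"
	)

	func main() {
		const src = `count 1_000 3.14 'x' "hi" // this comment is skipped`

		var s scanner.Scanner
		s.Init(strings.NewReader(src))
		s.Mode = scanner.GoTokens // also the default mode

		for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
			fmt.Printf("%-8s %q\n", scanner.TokenString(tok), s.TokenText())
		}
	}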
// The result of Scan is one of these tokens or a Unicode character.
// internal use only
skipComment
- allowDigitSeparators
)
var tokenString = map[rune]string{
func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
-// digits accepts the sequence { digit } (if AllowDigitSeparators is not set)
-// or { digit | '_' } (if AllowDigitSeparators is set), starting with ch0.
+// digits accepts the sequence { digit | '_' } starting with ch0.
// If base <= 10, digits accepts any decimal digit but records
// the first invalid digit >= base in *invalid if *invalid == 0.
// digits returns the first rune that is not part of the sequence
ch = ch0
if base <= 10 {
max := rune('0' + base)
- for isDecimal(ch) || ch == '_' && s.Mode&AllowDigitSeparators != 0 {
+ for isDecimal(ch) || ch == '_' {
ds := 1
if ch == '_' {
ds = 2
ch = s.next()
}
} else {
- for isHex(ch) || ch == '_' && s.Mode&AllowDigitSeparators != 0 {
+ for isHex(ch) || ch == '_' {
ds := 1
if ch == '_' {
ds = 2
{"foo01.bar31.xx-0-1-1-0", "01 31 0 1 1 0", ScanInts},
{"foo0/12/0/5.67", "0 12 0 5 67", ScanInts},
{"xxx1e0yyy", "1 0", ScanInts},
- {"1_2", "1 2", ScanInts}, // don't consume _ as part of a number if not explicitly enabled
- {"1_2", "1_2", ScanInts | AllowDigitSeparators},
+ {"1_2", "1_2", ScanInts},
{"xxx1.0yyy2e3ee", "1 0 2 3", ScanInts},
{"xxx1.0yyy2e3ee", "1.0 2e3", ScanFloats},
} {
// Stop does not close the channel, to prevent a read from the channel succeeding
// incorrectly.
//
-// To prevent a timer created with NewTimer from firing after a call to Stop,
-// check the return value and drain the channel.
+// To ensure the channel is empty after a call to Stop, check the
+// return value and drain the channel.
// For example, assuming the program has not received from t.C already:
//
// if !t.Stop() {
// It returns true if the timer had been active, false if the timer had
// expired or been stopped.
//
-// Resetting a timer must take care not to race with the send into t.C
-// that happens when the current timer expires.
+// Reset should be invoked only on stopped or expired timers with drained channels.
// If a program has already received a value from t.C, the timer is known
-// to have expired, and t.Reset can be used directly.
+// to have expired and the channel drained, so t.Reset can be used directly.
// If a program has not yet received a value from t.C, however,
// the timer must be stopped and—if Stop reports that the timer expired
// before being stopped—the channel explicitly drained:
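
A compact sketch of the stop-drain-reset pattern described above, assuming (as the text says) that the program has not already received from t.C:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		t := time.NewTimer(50 * time.Millisecond)
		time.Sleep(80 * time.Millisecond) // the timer has likely fired by now

		// Stop the timer; if it already fired, drain the value so the
		// channel is empty before Reset.
		if !t.Stop() {
			<-t.C
		}
		t.Reset(100 * time.Millisecond)

		fmt.Println(<-t.C) // fires about 100ms after Reset
	}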
}
return d
}
-
- ts, us := t.sec(), u.sec()
-
- var sec, nsec, d int64
-
- ssub := ts - us
- if (ssub < ts) != (us > 0) {
- goto overflow
- }
-
- if ssub < int64(minDuration/Second) || ssub > int64(maxDuration/Second) {
- goto overflow
- }
- sec = ssub * int64(Second)
-
- nsec = int64(t.nsec() - u.nsec())
- d = sec + nsec
- if (d > sec) != (nsec > 0) {
- goto overflow
- }
- return Duration(d)
-
-overflow:
- if t.Before(u) {
+ d := Duration(t.sec()-u.sec())*Second + Duration(t.nsec()-u.nsec())
+ // Check for overflow or underflow.
+ switch {
+ case u.Add(d).Equal(t):
+ return d // d is correct
+ case t.Before(u):
return minDuration // t - u is negative out of range
+ default:
+ return maxDuration // t - u is positive out of range
}
- return maxDuration // t - u is positive out of range
}
// Since returns the time elapsed since t.
{Date(2300, 1, 1, 0, 0, 0, 0, UTC), Date(2000, 1, 1, 0, 0, 0, 0, UTC), Duration(maxDuration)},
{Date(2000, 1, 1, 0, 0, 0, 0, UTC), Date(2290, 1, 1, 0, 0, 0, 0, UTC), -290*365*24*Hour - 71*24*Hour},
{Date(2000, 1, 1, 0, 0, 0, 0, UTC), Date(2300, 1, 1, 0, 0, 0, 0, UTC), Duration(minDuration)},
+ {Date(2311, 11, 26, 02, 16, 47, 63535996, UTC), Date(2019, 8, 16, 2, 29, 30, 268436582, UTC), 9223372036795099414},
{MinMonoTime, MaxMonoTime, minDuration},
{MaxMonoTime, MinMonoTime, maxDuration},
}
}
}
-func BenchmarkSub(b *testing.B) {
- for i := 0; i < b.N; i++ {
- for _, st := range subTests {
- st.t.Sub(st.u)
- }
- }
-}
-
var nsDurationTests = []struct {
d Duration
want int64
image/png
index/suffixarray
internal/bytealg
+internal/cfg
internal/cpu
internal/fmtsort
internal/goroot
"issue18452.go",
"issue18889.go",
"issue28721.go",
+ "issue33061.go",
} {
check(t, file)
}
// Storing a Go pointer into C memory should fail.
name: "barrier",
c: `#include <stdlib.h>
- char **f14a() { return malloc(sizeof(char*)); }
- void f14b(char **p) {}`,
+ char **f14a() { return malloc(sizeof(char*)); }
+ void f14b(char **p) {}`,
body: `p := C.f14a(); *p = new(C.char); C.f14b(p)`,
fail: true,
expensive: true,
// large value should fail.
name: "barrierstruct",
c: `#include <stdlib.h>
- struct s15 { char *a[10]; };
- struct s15 *f15() { return malloc(sizeof(struct s15)); }
- void f15b(struct s15 *p) {}`,
+ struct s15 { char *a[10]; };
+ struct s15 *f15() { return malloc(sizeof(struct s15)); }
+ void f15b(struct s15 *p) {}`,
body: `p := C.f15(); p.a = [10]*C.char{new(C.char)}; C.f15b(p)`,
fail: true,
expensive: true,
// copy should fail.
name: "barrierslice",
c: `#include <stdlib.h>
- struct s16 { char *a[10]; };
- struct s16 *f16() { return malloc(sizeof(struct s16)); }
- void f16b(struct s16 *p) {}`,
+ struct s16 { char *a[10]; };
+ struct s16 *f16() { return malloc(sizeof(struct s16)); }
+ void f16b(struct s16 *p) {}`,
body: `p := C.f16(); copy(p.a[:], []*C.char{new(C.char)}); C.f16b(p)`,
fail: true,
expensive: true,
// different code path.
name: "barriergcprogarray",
c: `#include <stdlib.h>
- struct s17 { char *a[32769]; };
- struct s17 *f17() { return malloc(sizeof(struct s17)); }
- void f17b(struct s17 *p) {}`,
+ struct s17 { char *a[32769]; };
+ struct s17 *f17() { return malloc(sizeof(struct s17)); }
+ void f17b(struct s17 *p) {}`,
body: `p := C.f17(); p.a = [32769]*C.char{new(C.char)}; C.f17b(p)`,
fail: true,
expensive: true,
// Similar case, with a source on the heap.
name: "barriergcprogarrayheap",
c: `#include <stdlib.h>
- struct s18 { char *a[32769]; };
- struct s18 *f18() { return malloc(sizeof(struct s18)); }
- void f18b(struct s18 *p) {}
- void f18c(void *p) {}`,
+ struct s18 { char *a[32769]; };
+ struct s18 *f18() { return malloc(sizeof(struct s18)); }
+ void f18b(struct s18 *p) {}
+ void f18c(void *p) {}`,
imports: []string{"unsafe"},
body: `p := C.f18(); n := &[32769]*C.char{new(C.char)}; p.a = *n; C.f18b(p); n[0] = nil; C.f18c(unsafe.Pointer(n))`,
fail: true,
// A GC program with a struct.
name: "barriergcprogstruct",
c: `#include <stdlib.h>
- struct s19a { char *a[32769]; };
- struct s19b { struct s19a f; };
- struct s19b *f19() { return malloc(sizeof(struct s19b)); }
- void f19b(struct s19b *p) {}`,
+ struct s19a { char *a[32769]; };
+ struct s19b { struct s19a f; };
+ struct s19b *f19() { return malloc(sizeof(struct s19b)); }
+ void f19b(struct s19b *p) {}`,
body: `p := C.f19(); p.f = C.struct_s19a{[32769]*C.char{new(C.char)}}; C.f19b(p)`,
fail: true,
expensive: true,
// Similar case, with a source on the heap.
name: "barriergcprogstructheap",
c: `#include <stdlib.h>
- struct s20a { char *a[32769]; };
- struct s20b { struct s20a f; };
- struct s20b *f20() { return malloc(sizeof(struct s20b)); }
- void f20b(struct s20b *p) {}
- void f20c(void *p) {}`,
+ struct s20a { char *a[32769]; };
+ struct s20b { struct s20a f; };
+ struct s20b *f20() { return malloc(sizeof(struct s20b)); }
+ void f20b(struct s20b *p) {}
+ void f20c(void *p) {}`,
imports: []string{"unsafe"},
body: `p := C.f20(); n := &C.struct_s20a{[32769]*C.char{new(C.char)}}; p.f = *n; C.f20b(p); n.a[0] = nil; C.f20c(unsafe.Pointer(n))`,
fail: true,
name: "export1",
c: `extern unsigned char *GoFn21();`,
support: `//export GoFn21
- func GoFn21() *byte { return new(byte) }`,
+ func GoFn21() *byte { return new(byte) }`,
body: `C.GoFn21()`,
fail: true,
},
// Returning a C pointer is fine.
name: "exportok",
c: `#include <stdlib.h>
- extern unsigned char *GoFn22();`,
+ extern unsigned char *GoFn22();`,
support: `//export GoFn22
- func GoFn22() *byte { return (*byte)(C.malloc(1)) }`,
+ func GoFn22() *byte { return (*byte)(C.malloc(1)) }`,
body: `C.GoFn22()`,
},
{
// Passing a Go string is fine.
name: "passstring",
c: `#include <stddef.h>
- typedef struct { const char *p; ptrdiff_t n; } gostring23;
- gostring23 f23(gostring23 s) { return s; }`,
+ typedef struct { const char *p; ptrdiff_t n; } gostring23;
+ gostring23 f23(gostring23 s) { return s; }`,
imports: []string{"unsafe"},
body: `s := "a"; r := C.f23(*(*C.gostring23)(unsafe.Pointer(&s))); if *(*string)(unsafe.Pointer(&r)) != s { panic(r) }`,
},
c: `extern void f25();`,
imports: []string{"strings"},
support: `//export GoStr25
- func GoStr25() string { return strings.Repeat("a", 2) }`,
+ func GoStr25() string { return strings.Repeat("a", 2) }`,
body: `C.f25()`,
c1: `#include <stddef.h>
- typedef struct { const char *p; ptrdiff_t n; } gostring25;
- extern gostring25 GoStr25();
- void f25() { GoStr25(); }`,
+ typedef struct { const char *p; ptrdiff_t n; } gostring25;
+ extern gostring25 GoStr25();
+ void f25() { GoStr25(); }`,
fail: true,
},
{
// that is, we are testing something that is not unsafe.
name: "ptrdata1",
c: `#include <stdlib.h>
- void f26(void* p) {}`,
+ void f26(void* p) {}`,
imports: []string{"unsafe"},
support: `type S26 struct { p *int; a [8*8]byte; u uintptr }`,
body: `i := 0; p := &S26{u:uintptr(unsafe.Pointer(&i))}; q := (*S26)(C.malloc(C.size_t(unsafe.Sizeof(*p)))); *q = *p; C.f26(unsafe.Pointer(q))`,
// Like ptrdata1, but with a type that uses a GC program.
name: "ptrdata2",
c: `#include <stdlib.h>
- void f27(void* p) {}`,
+ void f27(void* p) {}`,
imports: []string{"unsafe"},
support: `type S27 struct { p *int; a [32769*8]byte; q *int; u uintptr }`,
body: `i := 0; p := S27{u:uintptr(unsafe.Pointer(&i))}; q := (*S27)(C.malloc(C.size_t(unsafe.Sizeof(p)))); *q = p; C.f27(unsafe.Pointer(q))`,
body: `t := reflect.StructOf([]reflect.StructField{{Name: "MyInt38", Type: reflect.TypeOf(MyInt38(0)), Anonymous: true}}); v := reflect.New(t).Elem(); v.Interface().(Getter38).Get()`,
fail: false,
},
+ {
+ // Test that a converted address of a struct field results
+ // in a check for just that field and not the whole struct.
+ name: "structfieldcast",
+ c: `struct S40i { int i; int* p; }; void f40(struct S40i* p) {}`,
+ support: `type S40 struct { p *int; a C.struct_S40i }`,
+ body: `s := &S40{p: new(int)}; C.f40((*C.struct_S40i)(&s.a))`,
+ fail: false,
+ },
}
func TestPointerChecks(t *testing.T) {
if err := os.MkdirAll(src, 0777); err != nil {
t.Fatal(err)
}
+ if err := ioutil.WriteFile(filepath.Join(src, "go.mod"), []byte("module ptrtest"), 0666); err != nil {
+ t.Fatal(err)
+ }
// Prepare two cgo inputs: one for standard cgo and one for //export cgo.
// (The latter cannot have C definitions, only declarations.)
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// cgo shouldn't crash if there is an extra argument with a C reference.
+
+package main
+
+// void F(void* p) {};
+import "C"
+
+import "unsafe"
+
+func F() {
+ var i int
+ C.F(unsafe.Pointer(&i), C.int(0)) // ERROR HERE
+}
func TestGeneratedMethod(t *testing.T) {
goCmd(t, "install", "-buildmode=shared", "-linkshared", "./issue25065")
}
+
+// Test use of a struct defined in a shared library that requires a
+// generated hash function.
+// Issue 30768.
+func TestGeneratedHash(t *testing.T) {
+ goCmd(nil, "install", "-buildmode=shared", "-linkshared", "./issue30768/issue30768lib")
+ goCmd(nil, "test", "-linkshared", "./issue30768")
+}
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue30768lib
+
+// S is a struct that requires a generated hash function.
+type S struct {
+ A string
+ B int
+}
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue30768_test
+
+import (
+ "testing"
+
+ "testshared/issue30768/issue30768lib"
+)
+
+type s struct {
+ s issue30768lib.S
+}
+
+func Test30768(t *testing.T) {
+ // Calling t.Log will convert S to an empty interface,
+ // which will force a reference to the generated hash function,
+ // defined in the shared library.
+ t.Log(s{})
+}