-bfe3a9b26c8b2e1b9ef34a7232a2d1529e639bbf
+6743db0ed81e313acf66c00a4ed0e2dcaaca2c9f
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
-71bdbf431b79dff61944f22c25c7e085ccfc25d5
+fe8a0d12b14108cbe2408b417afcaab722b0727c
The first line of this file holds the git revision number of the
last merge done from the master library sources.
continue
}
if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+ // Ignore VCS type "mod", which is new Go modules.
+ // This code is for old go get and must ignore the new mod lines.
+ // Otherwise matchGoImport will complain about two
+ // different metaImport lines for the same Prefix.
+ if f[1] == "mod" {
+ continue
+ }
imports = append(imports, metaImport{
Prefix: f[0],
VCS: f[1],
// download runs the download half of the get command
// for the package named by the argument.
func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) {
- if mode&load.UseVendor != 0 {
+ if mode&load.ResolveImport != 0 {
// Caller is responsible for expanding vendor paths.
panic("internal error: download mode has useVendor set")
}
if parent == nil {
return load.LoadPackage(path, stk)
}
- return load.LoadImport(path, parent.Dir, parent, stk, nil, mode)
+ return load.LoadImport(path, parent.Dir, parent, stk, nil, mode|load.ResolveModule)
}
p := load1(arg, mode)
base.Errorf("%s", err)
continue
}
- // If this is a test import, apply vendor lookup now.
- // We cannot pass useVendor to download, because
+ // If this is a test import, apply module and vendor lookup now.
+ // We cannot pass ResolveImport to download, because
// download does caching based on the value of path,
// so it must be the fully qualified path already.
if i >= len(p.Imports) {
- path = load.VendoredImportPath(p, path)
+ path = load.ResolveImportPath(p, path)
}
download(path, p, stk, 0)
}
{"baz/quux", "git", "http://github.com/rsc/baz/quux"},
},
},
+ {
+ `<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
+ <meta name="go-import" content="foo/bar mod http://github.com/rsc/baz/quux">`,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ },
+ },
+ {
+ `<meta name="go-import" content="foo/bar mod http://github.com/rsc/baz/quux">
+ <meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">`,
+ []metaImport{
+ {"foo/bar", "git", "https://github.com/rsc/foo/bar"},
+ },
+ },
{
`<head>
<meta name="go-import" content="foo/bar git https://github.com/rsc/foo/bar">
for _, pkg := range pkgs {
// Show vendor-expanded paths in listing
- pkg.TestImports = pkg.Vendored(pkg.TestImports)
- pkg.XTestImports = pkg.Vendored(pkg.XTestImports)
+ pkg.TestImports = pkg.Resolve(pkg.TestImports)
+ pkg.XTestImports = pkg.Resolve(pkg.XTestImports)
do(&pkg.PackagePublic)
}
+++ /dev/null
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package load
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "io/ioutil"
-)
-
-// DebugDeprecatedImportcfg is installed as the undocumented -debug-deprecated-importcfg build flag.
-// It is useful for debugging subtle problems in the go command logic but not something
-// we want users to depend on. The hope is that the "deprecated" will make that clear.
-// We intend to remove this flag in Go 1.11.
-var DebugDeprecatedImportcfg debugDeprecatedImportcfgFlag
-
-type debugDeprecatedImportcfgFlag struct {
- enabled bool
- Import map[string]string
- Pkg map[string]*debugDeprecatedImportcfgPkg
-}
-
-type debugDeprecatedImportcfgPkg struct {
- Dir string
- Import map[string]string
-}
-
-var (
- debugDeprecatedImportcfgMagic = []byte("# debug-deprecated-importcfg\n")
- errImportcfgSyntax = errors.New("malformed syntax")
-)
-
-func (f *debugDeprecatedImportcfgFlag) String() string { return "" }
-
-func (f *debugDeprecatedImportcfgFlag) Set(x string) error {
- if x == "" {
- *f = debugDeprecatedImportcfgFlag{}
- return nil
- }
- data, err := ioutil.ReadFile(x)
- if err != nil {
- return err
- }
-
- if !bytes.HasPrefix(data, debugDeprecatedImportcfgMagic) {
- return errImportcfgSyntax
- }
- data = data[len(debugDeprecatedImportcfgMagic):]
-
- f.Import = nil
- f.Pkg = nil
- if err := json.Unmarshal(data, &f); err != nil {
- return errImportcfgSyntax
- }
- f.enabled = true
- return nil
-}
-
-func (f *debugDeprecatedImportcfgFlag) lookup(parent *Package, path string) (dir, newPath string) {
- newPath = path
- if p := f.Import[path]; p != "" {
- newPath = p
- }
- if parent != nil {
- if p1 := f.Pkg[parent.ImportPath]; p1 != nil {
- if p := p1.Import[path]; p != "" {
- newPath = p
- }
- }
- }
- if p2 := f.Pkg[newPath]; p2 != nil {
- return p2.Dir, newPath
- }
- return "", ""
-}
package load
import (
+ "bytes"
"fmt"
"go/build"
"go/token"
pathpkg "path"
"path/filepath"
"sort"
+ "strconv"
"strings"
"unicode"
"unicode/utf8"
return "no Go files in " + e.Package.Dir
}
-// Vendored returns the vendor-resolved version of imports,
+// Resolve returns the resolved version of imports,
// which should be p.TestImports or p.XTestImports, NOT p.Imports.
// The imports in p.TestImports and p.XTestImports are not recursively
// loaded during the initial load of p, so they list the imports found in
// can produce better error messages if it starts with the original paths.
// The initial load of p loads all the non-test imports and rewrites
// the vendored paths, so nothing should ever call p.Resolve(p.Imports).
-func (p *Package) Vendored(imports []string) []string {
+func (p *Package) Resolve(imports []string) []string {
if len(imports) > 0 && len(p.Imports) > 0 && &imports[0] == &p.Imports[0] {
- panic("internal error: p.vendored(p.Imports) called")
+ panic("internal error: p.Resolve(p.Imports) called")
}
seen := make(map[string]bool)
var all []string
for _, path := range imports {
- path = VendoredImportPath(p, path)
+ path = ResolveImportPath(p, path)
if !seen[path] {
seen[path] = true
all = append(all, path)
// Mode flags for loadImport and download (in get.go).
const (
- // UseVendor means that loadImport should do vendor expansion
- // (provided the vendoring experiment is enabled).
- // That is, useVendor means that the import path came from
- // a source file and has not been vendor-expanded yet.
- // Every import path should be loaded initially with useVendor,
- // and then the expanded version (with the /vendor/ in it) gets
- // recorded as the canonical import path. At that point, future loads
- // of that package must not pass useVendor, because
+ // ResolveImport means that loadImport should do import path expansion.
+ // That is, ResolveImport means that the import path came from
+ // a source file and has not been expanded yet to account for
+ // vendoring or possible module adjustment.
+ // Every import path should be loaded initially with ResolveImport,
+ // and then the expanded version (for example with the /vendor/ in it)
+ // gets recorded as the canonical import path. At that point, future loads
+ // of that package must not pass ResolveImport, because
// disallowVendor will reject direct use of paths containing /vendor/.
- UseVendor = 1 << iota
+ ResolveImport = 1 << iota
+
+ // ResolveModule is for download (part of "go get") and indicates
+ // that the module adjustment should be done, but not vendor adjustment.
+ ResolveModule
// GetTestDeps is for download (part of "go get") and indicates
// that test dependencies should be fetched too.
importPath := path
origPath := path
isLocal := build.IsLocalImport(path)
- var debugDeprecatedImportcfgDir string
if isLocal {
importPath = dirToImportPath(filepath.Join(srcDir, path))
- } else if DebugDeprecatedImportcfg.enabled {
- if d, i := DebugDeprecatedImportcfg.lookup(parent, path); d != "" {
- debugDeprecatedImportcfgDir = d
- importPath = i
- }
- } else if mode&UseVendor != 0 {
- // We do our own vendor resolution, because we want to
+ } else if mode&ResolveImport != 0 {
+ // We do our own path resolution, because we want to
// find out the key to use in packageCache without the
// overhead of repeated calls to buildContext.Import.
// The code is also needed in a few other places anyway.
- path = VendoredImportPath(parent, path)
+ path = ResolveImportPath(parent, path)
+ importPath = path
+ } else if mode&ResolveModule != 0 {
+ path = ModuleImportPath(parent, path)
importPath = path
}
// Load package.
// Import always returns bp != nil, even if an error occurs,
// in order to return partial information.
- var bp *build.Package
- var err error
- if debugDeprecatedImportcfgDir != "" {
- bp, err = cfg.BuildContext.ImportDir(debugDeprecatedImportcfgDir, 0)
- } else if DebugDeprecatedImportcfg.enabled {
- bp = new(build.Package)
- err = fmt.Errorf("unknown import path %q: not in import cfg", importPath)
- } else {
- buildMode := build.ImportComment
- if mode&UseVendor == 0 || path != origPath {
- // Not vendoring, or we already found the vendored path.
- buildMode |= build.IgnoreVendor
- }
- bp, err = cfg.BuildContext.Import(path, srcDir, buildMode)
+ buildMode := build.ImportComment
+ if mode&ResolveImport == 0 || path != origPath {
+ // Not vendoring, or we already found the vendored path.
+ buildMode |= build.IgnoreVendor
}
+ bp, err := cfg.BuildContext.Import(path, srcDir, buildMode)
bp.ImportPath = importPath
if cfg.GOBIN != "" {
bp.BinDir = cfg.GOBIN
}
- if debugDeprecatedImportcfgDir == "" && err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
+ if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path &&
!strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") {
err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment)
}
p = setErrorPos(p, importPos)
}
- if debugDeprecatedImportcfgDir == "" && origPath != cleanImport(origPath) {
+ if origPath != cleanImport(origPath) {
p.Error = &PackageError{
ImportStack: stk.Copy(),
Err: fmt.Sprintf("non-canonical import path: %q should be %q", origPath, pathpkg.Clean(origPath)),
if perr := disallowInternal(srcDir, p, stk); perr != p {
return setErrorPos(perr, importPos)
}
- if mode&UseVendor != 0 {
+ if mode&ResolveImport != 0 {
if perr := disallowVendor(srcDir, origPath, p, stk); perr != p {
return setErrorPos(perr, importPos)
}
return result
}
-// VendoredImportPath returns the expansion of path when it appears in parent.
-// If parent is x/y/z, then path might expand to x/y/z/vendor/path, x/y/vendor/path,
-// x/vendor/path, vendor/path, or else stay path if none of those exist.
-// VendoredImportPath returns the expanded path or, if no expansion is found, the original.
-func VendoredImportPath(parent *Package, path string) (found string) {
- if DebugDeprecatedImportcfg.enabled {
- if d, i := DebugDeprecatedImportcfg.lookup(parent, path); d != "" {
- return i
- }
- return path
- }
-
- if parent == nil || parent.Root == "" {
- return path
- }
+// ResolveImportPath returns the true meaning of path when it appears in parent.
+// There are two different resolutions applied.
+// First, there is Go 1.5 vendoring (golang.org/s/go15vendor).
+// If vendor expansion doesn't trigger, then the path is also subject to
+// Go 1.11 vgo legacy conversion (golang.org/issue/25069).
+func ResolveImportPath(parent *Package, path string) (found string) {
+ found = VendoredImportPath(parent, path)
+ if found != path {
+ return found
+ }
+ return ModuleImportPath(parent, path)
+}
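+
+// Illustrative behavior (editor's sketch, not part of this change):
+//
+//	ResolveImportPath(parent, "a/b")    // a vendored path such as "x/vendor/a/b", if parent's tree vendors a/b
+//	ResolveImportPath(parent, "m/v2/p") // "m/p", if GOPATH/src/m/go.mod says module m/v2 and m/v2/p does not exist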
- dir := filepath.Clean(parent.Dir)
- root := filepath.Join(parent.Root, "src")
- if !str.HasFilePathPrefix(dir, root) || parent.ImportPath != "command-line-arguments" && filepath.Join(root, parent.ImportPath) != dir {
+// dirAndRoot returns the source directory and workspace root
+// for the package p, guaranteeing that root is a path prefix of dir.
+func dirAndRoot(p *Package) (dir, root string) {
+ dir = filepath.Clean(p.Dir)
+ root = filepath.Join(p.Root, "src")
+ if !str.HasFilePathPrefix(dir, root) || p.ImportPath != "command-line-arguments" && filepath.Join(root, p.ImportPath) != dir {
// Look for symlinks before reporting error.
dir = expandPath(dir)
root = expandPath(root)
}
- if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || parent.ImportPath != "command-line-arguments" && !parent.Internal.Local && filepath.Join(root, parent.ImportPath) != dir {
+ if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || p.ImportPath != "command-line-arguments" && !p.Internal.Local && filepath.Join(root, p.ImportPath) != dir {
base.Fatalf("unexpected directory layout:\n"+
" import path: %s\n"+
" root: %s\n"+
" expand root: %s\n"+
" expand dir: %s\n"+
" separator: %s",
- parent.ImportPath,
- filepath.Join(parent.Root, "src"),
- filepath.Clean(parent.Dir),
+ p.ImportPath,
+ filepath.Join(p.Root, "src"),
+ filepath.Clean(p.Dir),
root,
dir,
string(filepath.Separator))
}
+ return dir, root
+}
+
+// VendoredImportPath returns the vendor-expansion of path when it appears in parent.
+// If parent is x/y/z, then path might expand to x/y/z/vendor/path, x/y/vendor/path,
+// x/vendor/path, vendor/path, or else stay path if none of those exist.
+// VendoredImportPath returns the expanded path or, if no expansion is found, the original.
+func VendoredImportPath(parent *Package, path string) (found string) {
+ if parent == nil || parent.Root == "" {
+ return path
+ }
+
+ dir, root := dirAndRoot(parent)
+
vpath := "vendor/" + path
for i := len(dir); i >= len(root); i-- {
if i < len(dir) && dir[i] != filepath.Separator {
return path
}
+var (
+ modulePrefix = []byte("\nmodule ")
+ goModPathCache = make(map[string]string)
+)
+
+// goModPath returns the module path in the go.mod in dir, if any.
+func goModPath(dir string) (path string) {
+ path, ok := goModPathCache[dir]
+ if ok {
+ return path
+ }
+ defer func() {
+ goModPathCache[dir] = path
+ }()
+
+ data, err := ioutil.ReadFile(filepath.Join(dir, "go.mod"))
+ if err != nil {
+ return ""
+ }
+ var i int
+ if bytes.HasPrefix(data, modulePrefix[1:]) {
+ i = 0
+ } else {
+ i = bytes.Index(data, modulePrefix)
+ if i < 0 {
+ return ""
+ }
+ i++
+ }
+ line := data[i:]
+
+ // Cut line at \n, drop trailing \r if present.
+ if j := bytes.IndexByte(line, '\n'); j >= 0 {
+ line = line[:j]
+ }
+ if line[len(line)-1] == '\r' {
+ line = line[:len(line)-1]
+ }
+ line = line[len("module "):]
+
+ // If quoted, unquote.
+ path = strings.TrimSpace(string(line))
+ if path != "" && path[0] == '"' {
+ s, err := strconv.Unquote(path)
+ if err != nil {
+ return ""
+ }
+ path = s
+ }
+ return path
+}
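+
+// Illustrative examples (editor's sketch, not part of this change), showing
+// what goModPath returns for a few assumed go.mod contents:
+//
+//	module foo/v2          -> "foo/v2"
+//	module "foo/v2"        -> "foo/v2" (quoted paths are unquoted)
+//	(no module directive)  -> ""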
+
+// findVersionElement returns the slice indices of the final version element /vN in path.
+// If there is no such element, it returns -1, -1.
+func findVersionElement(path string) (i, j int) {
+ j = len(path)
+ for i = len(path) - 1; i >= 0; i-- {
+ if path[i] == '/' {
+ if isVersionElement(path[i:j]) {
+ return i, j
+ }
+ j = i
+ }
+ }
+ return -1, -1
+}
+
+// isVersionElement reports whether s is a well-formed path version element:
+// v2, v3, v10, etc, but not v0, v05, v1.
+func isVersionElement(s string) bool {
+ if len(s) < 3 || s[0] != '/' || s[1] != 'v' || s[2] == '0' || s[2] == '1' && len(s) == 3 {
+ return false
+ }
+ for i := 2; i < len(s); i++ {
+ if s[i] < '0' || '9' < s[i] {
+ return false
+ }
+ }
+ return true
+}
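+
+// Illustrative examples (editor's sketch, not part of this change):
+//
+//	isVersionElement("/v2")  == true
+//	isVersionElement("/v10") == true
+//	isVersionElement("/v1")  == false
+//	isVersionElement("/v05") == false
+//
+//	findVersionElement("m/v2/api/v1/foo") // i=1, j=4: picks "/v2"; "/v1" is not well-formed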
+
+// ModuleImportPath translates import paths found in go modules
+// back down to paths that can be resolved in ordinary builds.
+//
+// Define “new” code as code with a go.mod file in the same directory
+// or a parent directory. If an import in new code says x/y/v2/z but
+// x/y/v2/z does not exist and x/y/go.mod says “module x/y/v2”,
+// then go build will read the import as x/y/z instead.
+// See golang.org/issue/25069.
+func ModuleImportPath(parent *Package, path string) (found string) {
+ if parent == nil || parent.Root == "" {
+ return path
+ }
+
+ // If there are no vN elements in path, leave it alone.
+ // (The code below would do the same, but only after
+ // some other file system accesses that we can avoid
+ // here by returning early.)
+ if i, _ := findVersionElement(path); i < 0 {
+ return path
+ }
+
+ dir, root := dirAndRoot(parent)
+
+ // Consider dir and parents, up to and including root.
+ for i := len(dir); i >= len(root); i-- {
+ if i < len(dir) && dir[i] != filepath.Separator {
+ continue
+ }
+ if goModPath(dir[:i]) != "" {
+ goto HaveGoMod
+ }
+ }
+ // This code is not in a tree with a go.mod,
+ // so apply no changes to the path.
+ return path
+
+HaveGoMod:
+ // This import is in a tree with a go.mod.
+ // Allow it to refer to code in GOPATH/src/x/y/z as x/y/v2/z
+ // if GOPATH/src/x/y/go.mod says module "x/y/v2".
+
+ // If x/y/v2/z exists, use it unmodified.
+ if bp, _ := cfg.BuildContext.Import(path, "", build.IgnoreVendor); bp.Dir != "" {
+ return path
+ }
+
+ // Otherwise look for a go.mod supplying a version element.
+ // Some version-like elements may appear in paths but not
+ // be module versions; we skip over those to look for module
+ // versions. For example the module m/v2 might have a
+ // package m/v2/api/v1/foo.
+ limit := len(path)
+ for limit > 0 {
+ i, j := findVersionElement(path[:limit])
+ if i < 0 {
+ return path
+ }
+ if bp, _ := cfg.BuildContext.Import(path[:i], "", build.IgnoreVendor); bp.Dir != "" {
+ if mpath := goModPath(bp.Dir); mpath != "" {
+ // Found a valid go.mod file, so we're stopping the search.
+ // If the path is m/v2/p and we found m/go.mod that says
+ // "module m/v2", then we return "m/p".
+ if mpath == path[:j] {
+ return path[:i] + path[j:]
+ }
+ // Otherwise just return the original path.
+ // We didn't find anything worth rewriting,
+ // and the go.mod indicates that we should
+ // not consider parent directories.
+ return path
+ }
+ }
+ limit = i
+ }
+ return path
+}
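+
+// Illustrative example (editor's sketch), matching the modlegacy test data in
+// this change: with GOPATH/src/new/go.mod declaring module "new/v2" and no
+// GOPATH/src/new/v2 directory, an import of "new/v2/p2" in new code resolves
+// to "new/p2"; likewise "new/sub/v2/x/v1/y" resolves to "new/sub/x/v1/y",
+// because new/sub/go.mod declares module new/sub/v2 and the inner /v1 is an
+// ordinary directory.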
+
// hasGoFiles reports whether dir contains any files with names ending in .go.
// For a vendor check we must exclude directories that contain no .go files.
// Otherwise it is not possible to vendor just a/b/c and still import the
if path == "C" {
continue
}
- p1 := LoadImport(path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], UseVendor)
+ p1 := LoadImport(path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport)
if p.Standard && p.Error == nil && !p1.Standard && p1.Error == nil {
p.Error = &PackageError{
ImportStack: stk.Copy(),
stk.Push(p.ImportPath + " (test)")
rawTestImports := str.StringList(p.TestImports)
for i, path := range p.TestImports {
- p1 := LoadImport(path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], UseVendor)
+ p1 := LoadImport(path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport)
if p1.Error != nil {
return nil, nil, p1.Error
}
pxtestNeedsPtest := false
rawXTestImports := str.StringList(p.XTestImports)
for i, path := range p.XTestImports {
- p1 := LoadImport(path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], UseVendor)
+ p1 := LoadImport(path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport)
if p1.Error != nil {
return nil, nil, p1.Error
}
for _, path := range p.Imports {
deps[path] = true
}
- for _, path := range p.Vendored(p.TestImports) {
+ for _, path := range p.Resolve(p.TestImports) {
deps[path] = true
}
- for _, path := range p.Vendored(p.XTestImports) {
+ for _, path := range p.Resolve(p.XTestImports) {
deps[path] = true
}
}
// Undocumented, unstable debugging flags.
cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "")
- cmd.Flag.Var(&load.DebugDeprecatedImportcfg, "debug-deprecated-importcfg", "")
}
// fileExtSplit expects a filename and returns the name
var validCompilerFlags = []*regexp.Regexp{
re(`-D([A-Za-z_].*)`),
+ re(`-F([^@\-].*)`),
re(`-I([^@\-].*)`),
re(`-O`),
re(`-O([^@\-].*)`),
re(`-W`),
re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
re(`-Wa,-mbig-obj`),
+ re(`-Wp,-D([A-Za-z_].*)`),
re(`-ansi`),
+ re(`-f(no-)?asynchronous-unwind-tables`),
re(`-f(no-)?blocks`),
+ re(`-f(no-)?builtin-[a-zA-Z0-9_]*`),
re(`-f(no-)?common`),
re(`-f(no-)?constant-cfstrings`),
re(`-fdiagnostics-show-note-include-stack`),
+ re(`-f(no-)?eliminate-unused-debug-types`),
re(`-f(no-)?exceptions`),
+ re(`-f(no-)?fast-math`),
re(`-f(no-)?inline-functions`),
re(`-finput-charset=([^@\-].*)`),
re(`-f(no-)?fat-lto-objects`),
+ re(`-f(no-)?keep-inline-dllexport`),
re(`-f(no-)?lto`),
re(`-fmacro-backtrace-limit=(.+)`),
re(`-fmessage-length=(.+)`),
re(`-f(no-)?modules`),
re(`-f(no-)?objc-arc`),
+ re(`-f(no-)?objc-nonfragile-abi`),
+ re(`-f(no-)?objc-legacy-dispatch`),
re(`-f(no-)?omit-frame-pointer`),
re(`-f(no-)?openmp(-simd)?`),
re(`-f(no-)?permissive`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
+ re(`-f(no-)?plt`),
re(`-f(no-)?rtti`),
re(`-f(no-)?split-stack`),
re(`-f(no-)?stack-(.+)`),
re(`-f(no-)?strict-aliasing`),
re(`-f(un)signed-char`),
re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
+ re(`-f(no-)?visibility-inlines-hidden`),
re(`-fsanitize=(.+)`),
re(`-ftemplate-depth-(.+)`),
re(`-fvisibility=(.+)`),
re(`-g([^@\-].*)?`),
re(`-m32`),
re(`-m64`),
- re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-marm`),
+ re(`-mfloat-abi=([^@\-].*)`),
+ re(`-mfpmath=[0-9a-z,+]*`),
re(`-m(no-)?avx[0-9a-z.]*`),
re(`-m(no-)?ms-bitfields`),
re(`-m(no-)?stack-(.+)`),
re(`-miphoneos-version-min=(.+)`),
re(`-mnop-fun-dllimport`),
re(`-m(no-)?sse[0-9.]*`),
+ re(`-mthumb(-interwork)?`),
+ re(`-mthreads`),
re(`-mwindows`),
+ re(`--param=ssp-buffer-size=[0-9]*`),
re(`-pedantic(-errors)?`),
re(`-pipe`),
re(`-pthread`),
re(`-?-std=([^@\-].*)`),
re(`-?-stdlib=([^@\-].*)`),
+ re(`--sysroot=([^@\-].*)`),
re(`-w`),
re(`-x([^@\-].*)`),
}
re(`-O`),
re(`-O([^@\-].*)`),
re(`-f(no-)?(pic|PIC|pie|PIE)`),
+ re(`-f(no-)?openmp(-simd)?`),
re(`-fsanitize=([^@\-].*)`),
re(`-g([^@\-].*)?`),
- re(`-m(arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-headerpad_max_install_names`),
+ re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
+ re(`-mfloat-abi=([^@\-].*)`),
re(`-mmacosx-(.+)`),
re(`-mios-simulator-version-min=(.+)`),
re(`-miphoneos-version-min=(.+)`),
+ re(`-mthreads`),
re(`-mwindows`),
re(`-(pic|PIC|pie|PIE)`),
re(`-pthread`),
+ re(`-rdynamic`),
re(`-shared`),
re(`-?-static([-a-z0-9+]*)`),
re(`-?-stdlib=([^@\-].*)`),
// in a wildcard would allow tunnelling arbitrary additional
// linker arguments through one of these.
re(`-Wl,--(no-)?allow-multiple-definition`),
+ re(`-Wl,--(no-)?allow-shlib-undefined`),
re(`-Wl,--(no-)?as-needed`),
re(`-Wl,-Bdynamic`),
re(`-Wl,-Bstatic`),
+ re(`-Wl,-O([^@,\-][^,]*)?`),
re(`-Wl,-d[ny]`),
re(`-Wl,--disable-new-dtags`),
+ re(`-Wl,-e[=,][a-zA-Z0-9]*`),
re(`-Wl,--enable-new-dtags`),
re(`-Wl,--end-group`),
re(`-Wl,-framework,[^,@\-][^,]+`),
re(`-Wl,-headerpad_max_install_names`),
re(`-Wl,--no-undefined`),
- re(`-Wl,-rpath[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
+ re(`-Wl,-s`),
re(`-Wl,-search_paths_first`),
re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
re(`-Wl,--start-group`),
re(`-Wl,-?-static`),
- re(`-Wl,--subsystem,(native|windows|console|posix|xbox)`),
+ re(`-Wl,-?-subsystem,(native|windows|console|posix|xbox)`),
+ re(`-Wl,-syslibroot[=,]([^,@\-][^,]+)`),
re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
re(`-Wl,-?-unresolved-symbols=[^,]+`),
re(`-Wl,--(no-)?warn-([^,]+)`),
re(`-Wl,-z,relro`),
re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
+ re(`\./.*\.(a|o|obj|dll|dylib|so)`),
}
var validLinkerFlagsWithNextArg = []string{
var goodCompilerFlags = [][]string{
{"-DFOO"},
{"-Dfoo=bar"},
+ {"-F/Qt"},
{"-I/"},
{"-I/etc/passwd"},
{"-I."},
var badCompilerFlags = [][]string{
{"-D@X"},
{"-D-X"},
+ {"-F@dir"},
+ {"-F-dir"},
{"-I@dir"},
{"-I-dir"},
{"-O@1"},
{"-Wl,--no-warn-error"},
{"foo.so"},
{"_世界.dll"},
+ {"./x.o"},
{"libcgosotest.dylib"},
{"-F", "framework"},
{"-l", "."},
{"-x", "--c"},
{"-x", "@obj"},
{"-Wl,-rpath,@foo"},
+ {"../x.o"},
}
func TestCheckLinkerFlags(t *testing.T) {
--- /dev/null
+module "new/v2"
--- /dev/null
+package new
+
+import _ "new/v2/p2"
--- /dev/null
+package p1
+
+import _ "old/p2"
+import _ "new/v2"
+import _ "new/v2/p2"
+import _ "new/sub/v2/x/v1/y" // v2 is module, v1 is directory in module
+import _ "new/sub/inner/x" // new/sub/inner/go.mod overrides new/sub/go.mod
--- /dev/null
+package p2
--- /dev/null
+module new/sub/v2
--- /dev/null
+module new/sub/inner
--- /dev/null
+package p1
+
+import _ "old/p2"
+import _ "new/p1"
+import _ "new"
--- /dev/null
+package p2
"bytes"
"fmt"
"internal/testenv"
+ "os"
"path/filepath"
"regexp"
"strings"
tg.grepStderrNot("panic", "panicked")
tg.grepStderr(`cannot find package "x"`, "wrong error")
}
+
+// Module legacy support does path rewriting very similar to vendoring.
+
+func TestModLegacy(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata/modlegacy"))
+ tg.run("list", "-f", "{{.Imports}}", "old/p1")
+ tg.grepStdout("new/p1", "old/p1 should import new/p1")
+ tg.run("list", "-f", "{{.Imports}}", "new/p1")
+ tg.grepStdout("new/p2", "new/p1 should import new/p2 (not new/v2/p2)")
+ tg.grepStdoutNot("new/v2", "new/p1 should NOT import new/v2*")
+ tg.grepStdout("new/sub/x/v1/y", "new/p1 should import new/sub/x/v1/y (not new/sub/v2/x/v1/y)")
+ tg.grepStdoutNot("new/sub/v2", "new/p1 should NOT import new/sub/v2*")
+ tg.grepStdout("new/sub/inner/x", "new/p1 should import new/sub/inner/x (no rewrites)")
+ tg.run("build", "old/p1", "new/p1")
+}
+
+func TestModLegacyGet(t *testing.T) {
+ testenv.MustHaveExternalNetwork(t)
+
+ tg := testgo(t)
+ defer tg.cleanup()
+ tg.makeTempdir()
+ tg.setenv("GOPATH", tg.path("d1"))
+ tg.run("get", "vcs-test.golang.org/git/modlegacy1-old.git/p1")
+ tg.run("list", "-f", "{{.Deps}}", "vcs-test.golang.org/git/modlegacy1-old.git/p1")
+ tg.grepStdout("new.git/p2", "old/p1 should depend on new/p2")
+ tg.grepStdoutNot("new.git/v2/p2", "old/p1 should NOT depend on new/v2/p2")
+ tg.run("build", "vcs-test.golang.org/git/modlegacy1-old.git/p1", "vcs-test.golang.org/git/modlegacy1-new.git/p1")
+
+ tg.setenv("GOPATH", tg.path("d2"))
+
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", "github.com/rsc/vgotest5")
+ tg.run("get", "github.com/rsc/vgotest4")
+ tg.run("get", "github.com/myitcv/vgo_example_compat")
+
+ if testing.Short() {
+ return
+ }
+
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", "github.com/rsc/vgotest4")
+ tg.run("get", "github.com/rsc/vgotest5")
+ tg.run("get", "github.com/myitcv/vgo_example_compat")
+
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", "github.com/rsc/vgotest4", "github.com/rsc/vgotest5")
+ tg.run("get", "github.com/myitcv/vgo_example_compat")
+
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", "github.com/rsc/vgotest5", "github.com/rsc/vgotest4")
+ tg.run("get", "github.com/myitcv/vgo_example_compat")
+
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", "github.com/myitcv/vgo_example_compat")
+ tg.run("get", "github.com/rsc/vgotest4", "github.com/rsc/vgotest5")
+
+ pkgs := []string{"github.com/myitcv/vgo_example_compat", "github.com/rsc/vgotest4", "github.com/rsc/vgotest5"}
+ for i := 0; i < 3; i++ {
+ for j := 0; j < 3; j++ {
+ for k := 0; k < 3; k++ {
+ if i == j || i == k || k == j {
+ continue
+ }
+ tg.must(os.RemoveAll(tg.path("d2")))
+ tg.run("get", pkgs[i], pkgs[j], pkgs[k])
+ }
+ }
+ }
+}
},
},
- // #63: A specified key usage in an intermediate forbids other usages
- // in the leaf.
+ // #63: An intermediate with enumerated EKUs causes a failure if we
+ // test for an EKU not in that set. (ServerAuth is required by
+ // default.)
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
sans: []string{"dns:example.com"},
ekus: []string{"serverAuth"},
},
- expectedError: "EKU not permitted",
+ expectedError: "incompatible key usage",
},
- // #64: A specified key usage in an intermediate forbids other usages
- // in the leaf, even if we don't recognise them.
+ // #64: an unknown EKU in the leaf doesn't break anything, even if it's not
+ // correctly nested.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{},
sans: []string{"dns:example.com"},
ekus: []string{"other"},
},
- expectedError: "EKU not permitted",
+ requestedEKUs: []ExtKeyUsage{ExtKeyUsageAny},
},
// #65: trying to add extra permitted key usages in an intermediate
},
},
- // #66: EKUs in roots are ignored.
+ // #66: EKUs in roots are not ignored.
nameConstraintsTest{
roots: []constraintsSpec{
constraintsSpec{
- ekus: []string{"serverAuth"},
+ ekus: []string{"email"},
},
},
intermediates: [][]constraintsSpec{
[]constraintsSpec{
constraintsSpec{
- ekus: []string{"serverAuth", "email"},
+ ekus: []string{"serverAuth"},
},
},
},
leaf: leafSpec{
sans: []string{"dns:example.com"},
- ekus: []string{"serverAuth", "email"},
+ ekus: []string{"serverAuth"},
},
+ expectedError: "incompatible key usage",
},
// #67: in order to support COMODO chains, SGC key usages permit
expectedError: "\"https://example.com/test\" is excluded",
},
- // #75: While serverAuth in a CA certificate permits clientAuth in a leaf,
- // serverAuth in a leaf shouldn't permit clientAuth when requested in
+ // #75: serverAuth in a leaf shouldn't permit clientAuth when requested in
// VerifyOptions.
nameConstraintsTest{
roots: []constraintsSpec{
},
requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection},
},
+
+ // #81: EKUs that are not asserted in VerifyOptions are not required to be
+ // nested.
+ nameConstraintsTest{
+ roots: []constraintsSpec{
+ constraintsSpec{},
+ },
+ intermediates: [][]constraintsSpec{
+ []constraintsSpec{
+ constraintsSpec{
+ ekus: []string{"serverAuth"},
+ },
+ },
+ },
+ leaf: leafSpec{
+ sans: []string{"dns:example.com"},
+ // There's no email EKU in the intermediate. This would be rejected if
+ // full nesting were required.
+ ekus: []string{"email", "serverAuth"},
+ },
+ },
}
func makeConstraintsCACert(constraints constraintsSpec, name string, key *ecdsa.PrivateKey, parent *Certificate, parentKey *ecdsa.PrivateKey) (*Certificate, error) {
return nil
}
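+
+// _CertChainPolicyPara mirrors syscall.CertChainPolicyPara, but declares
+// ExtraPolicyPara as unsafe.Pointer instead of uintptr so that the pointer
+// it carries remains visible to the garbage collector while the policy
+// check below runs.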
+type _CertChainPolicyPara struct {
+ Size uint32
+ Flags uint32
+ ExtraPolicyPara unsafe.Pointer
+}
+
// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
// use as a certificate chain for a SSL/TLS server.
func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
}
sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
- para := &syscall.CertChainPolicyPara{
- ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)),
+ para := &_CertChainPolicyPara{
+ ExtraPolicyPara: unsafe.Pointer(sslPara),
}
para.Size = uint32(unsafe.Sizeof(*para))
status := syscall.CertChainPolicyStatus{}
- err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
+ err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, (*syscall.CertChainPolicyPara)(unsafe.Pointer(para)), &status)
if err != nil {
return err
}
// CPU time to verify.
TooManyConstraints
// CANotAuthorizedForExtKeyUsage results when an intermediate or root
- // certificate does not permit an extended key usage that is claimed by
- // the leaf certificate.
+ // certificate does not permit a requested extended key usage.
CANotAuthorizedForExtKeyUsage
)
case TooManyIntermediates:
return "x509: too many intermediates for path length constraint"
case IncompatibleUsage:
- return "x509: certificate specifies an incompatible key usage: " + e.Detail
+ return "x509: certificate specifies an incompatible key usage"
case NameMismatch:
return "x509: issuer name does not match subject from issuing certificate"
case NameConstraintsWithoutSANs:
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
- // Certificate chains are required to nest extended key usage values,
- // irrespective of this value. This matches the Windows CryptoAPI behavior,
- // but not the spec.
+ // Certificate chains are required to nest these extended key usage values.
+ // (This matches the Windows CryptoAPI behavior, but not the spec.)
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
return nil
}
-const (
- checkingAgainstIssuerCert = iota
- checkingAgainstLeafCert
-)
-
-// ekuPermittedBy returns true iff the given extended key usage is permitted by
-// the given EKU from a certificate. Normally, this would be a simple
-// comparison plus a special case for the “any” EKU. But, in order to support
-// existing certificates, some exceptions are made.
-func ekuPermittedBy(eku, certEKU ExtKeyUsage, context int) bool {
- if certEKU == ExtKeyUsageAny || eku == certEKU {
- return true
- }
-
- // Some exceptions are made to support existing certificates. Firstly,
- // the ServerAuth and SGC EKUs are treated as a group.
- mapServerAuthEKUs := func(eku ExtKeyUsage) ExtKeyUsage {
- if eku == ExtKeyUsageNetscapeServerGatedCrypto || eku == ExtKeyUsageMicrosoftServerGatedCrypto {
- return ExtKeyUsageServerAuth
- }
- return eku
- }
-
- eku = mapServerAuthEKUs(eku)
- certEKU = mapServerAuthEKUs(certEKU)
-
- if eku == certEKU {
- return true
- }
-
- // If checking a requested EKU against the list in a leaf certificate there
- // are fewer exceptions.
- if context == checkingAgainstLeafCert {
- return false
- }
-
- // ServerAuth in a CA permits ClientAuth in the leaf.
- return (eku == ExtKeyUsageClientAuth && certEKU == ExtKeyUsageServerAuth) ||
- // Any CA may issue an OCSP responder certificate.
- eku == ExtKeyUsageOCSPSigning ||
- // Code-signing CAs can use Microsoft's commercial and
- // kernel-mode EKUs.
- (eku == ExtKeyUsageMicrosoftCommercialCodeSigning || eku == ExtKeyUsageMicrosoftKernelCodeSigning) && certEKU == ExtKeyUsageCodeSigning
-}
-
// isValid performs validity checks on c given that it is a candidate to append
// to the chain in currentChain.
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
}
}
- checkEKUs := certType == intermediateCertificate
-
- // If no extended key usages are specified, then all are acceptable.
- if checkEKUs && (len(c.ExtKeyUsage) == 0 && len(c.UnknownExtKeyUsage) == 0) {
- checkEKUs = false
- }
-
- // If the “any” key usage is permitted, then no more checks are needed.
- if checkEKUs {
- for _, caEKU := range c.ExtKeyUsage {
- comparisonCount++
- if caEKU == ExtKeyUsageAny {
- checkEKUs = false
- break
- }
- }
- }
-
- if checkEKUs {
- NextEKU:
- for _, eku := range leaf.ExtKeyUsage {
- if comparisonCount > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- for _, caEKU := range c.ExtKeyUsage {
- comparisonCount++
- if ekuPermittedBy(eku, caEKU, checkingAgainstIssuerCert) {
- continue NextEKU
- }
- }
-
- oid, _ := oidFromExtKeyUsage(eku)
- return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", oid)}
- }
-
- NextUnknownEKU:
- for _, eku := range leaf.UnknownExtKeyUsage {
- if comparisonCount > maxConstraintComparisons {
- return CertificateInvalidError{c, TooManyConstraints, ""}
- }
-
- for _, caEKU := range c.UnknownExtKeyUsage {
- comparisonCount++
- if caEKU.Equal(eku) {
- continue NextUnknownEKU
- }
- }
-
- return CertificateInvalidError{c, CANotAuthorizedForExtKeyUsage, fmt.Sprintf("EKU not permitted: %#v", eku)}
- }
- }
-
// KeyUsage status flags are ignored. From Engineering Security, Peter
// Gutmann: A European government CA marked its signing certificates as
// being valid for encryption only, but no-one noticed. Another
}
}
- requestedKeyUsages := make([]ExtKeyUsage, len(opts.KeyUsages))
- copy(requestedKeyUsages, opts.KeyUsages)
- if len(requestedKeyUsages) == 0 {
- requestedKeyUsages = append(requestedKeyUsages, ExtKeyUsageServerAuth)
+ var candidateChains [][]*Certificate
+ if opts.Roots.contains(c) {
+ candidateChains = append(candidateChains, []*Certificate{c})
+ } else {
+ if candidateChains, err = c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts); err != nil {
+ return nil, err
+ }
}
- // If no key usages are specified, then any are acceptable.
- checkEKU := len(c.ExtKeyUsage) > 0
-
- for _, eku := range requestedKeyUsages {
- if eku == ExtKeyUsageAny {
- checkEKU = false
- break
- }
+ keyUsages := opts.KeyUsages
+ if len(keyUsages) == 0 {
+ keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
}
- if checkEKU {
- foundMatch := false
- NextUsage:
- for _, eku := range requestedKeyUsages {
- for _, leafEKU := range c.ExtKeyUsage {
- if ekuPermittedBy(eku, leafEKU, checkingAgainstLeafCert) {
- foundMatch = true
- break NextUsage
- }
- }
+ // If any key usage is acceptable then we're done.
+ for _, usage := range keyUsages {
+ if usage == ExtKeyUsageAny {
+ return candidateChains, nil
}
+ }
- if !foundMatch {
- msg := "leaf contains the following, recognized EKUs: "
-
- for i, leafEKU := range c.ExtKeyUsage {
- oid, ok := oidFromExtKeyUsage(leafEKU)
- if !ok {
- continue
- }
-
- if i > 0 {
- msg += ", "
- }
- msg += formatOID(oid)
- }
-
- return nil, CertificateInvalidError{c, IncompatibleUsage, msg}
+ for _, candidate := range candidateChains {
+ if checkChainForKeyUsage(candidate, keyUsages) {
+ chains = append(chains, candidate)
}
}
- var candidateChains [][]*Certificate
- if opts.Roots.contains(c) {
- candidateChains = append(candidateChains, []*Certificate{c})
- } else {
- if candidateChains, err = c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts); err != nil {
- return nil, err
- }
+ if len(chains) == 0 {
+ return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
}
- return candidateChains, nil
+ return chains, nil
}
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
return HostnameError{c, h}
}
+
+func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
+ usages := make([]ExtKeyUsage, len(keyUsages))
+ copy(usages, keyUsages)
+
+ if len(chain) == 0 {
+ return false
+ }
+
+ usagesRemaining := len(usages)
+
+ // We walk down the list and cross out any usages that aren't supported
+ // by each certificate. If we cross out all the usages, then the chain
+ // is unacceptable.
+
+NextCert:
+ for i := len(chain) - 1; i >= 0; i-- {
+ cert := chain[i]
+ if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
+ // The certificate doesn't have any extended key usage specified.
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if usage == ExtKeyUsageAny {
+ // The certificate is explicitly good for any usage.
+ continue NextCert
+ }
+ }
+
+ const invalidUsage ExtKeyUsage = -1
+
+ NextRequestedUsage:
+ for i, requestedUsage := range usages {
+ if requestedUsage == invalidUsage {
+ continue
+ }
+
+ for _, usage := range cert.ExtKeyUsage {
+ if requestedUsage == usage {
+ continue NextRequestedUsage
+ } else if requestedUsage == ExtKeyUsageServerAuth &&
+ (usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+ usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+ // In order to support COMODO
+ // certificate chains, we have to
+ // accept Netscape or Microsoft SGC
+ // usages as equal to ServerAuth.
+ continue NextRequestedUsage
+ }
+ }
+
+ usages[i] = invalidUsage
+ usagesRemaining--
+ if usagesRemaining == 0 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
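+
+// Illustrative example (editor's sketch, not part of this change): for a
+// chain [leaf, intermediate, root] in which the root lists no EKUs, the
+// intermediate lists only ExtKeyUsageServerAuth, and the leaf lists
+// ExtKeyUsageServerAuth and ExtKeyUsageEmailProtection:
+//
+//	checkChainForKeyUsage(chain, []ExtKeyUsage{ExtKeyUsageServerAuth})      // true
+//	checkChainForKeyUsage(chain, []ExtKeyUsage{ExtKeyUsageEmailProtection}) // false: crossed out at the intermediate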
b = make([]byte, len(s)+utf8.UTFMax)
nbytes = copy(b, s[:i])
if r >= 0 {
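+ // Note: utf8.RuneSelf (0x80) itself needs two bytes in UTF-8,
+ // so only runes strictly below it fit in a single byte.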
- if r <= utf8.RuneSelf {
+ if r < utf8.RuneSelf {
b[nbytes] = byte(r)
nbytes++
} else {
r := mapping(c)
// common case
- if (0 <= r && r <= utf8.RuneSelf) && nbytes < len(b) {
+ if (0 <= r && r < utf8.RuneSelf) && nbytes < len(b) {
b[nbytes] = byte(r)
nbytes++
continue
{"longStrinGwitHmixofsmaLLandcAps", "LONGSTRINGWITHMIXOFSMALLANDCAPS"},
{"long\u0250string\u0250with\u0250nonascii\u2C6Fchars", "LONG\u2C6FSTRING\u2C6FWITH\u2C6FNONASCII\u2C6FCHARS"},
{"\u0250\u0250\u0250\u0250\u0250", "\u2C6F\u2C6F\u2C6F\u2C6F\u2C6F"}, // grows one byte per char
+ {"a\u0080\U0010FFFF", "A\u0080\U0010FFFF"}, // test utf8.RuneSelf and utf8.MaxRune
}
var lowerTests = []StringTest{
{"longStrinGwitHmixofsmaLLandcAps", "longstringwithmixofsmallandcaps"},
{"LONG\u2C6FSTRING\u2C6FWITH\u2C6FNONASCII\u2C6FCHARS", "long\u0250string\u0250with\u0250nonascii\u0250chars"},
{"\u2C6D\u2C6D\u2C6D\u2C6D\u2C6D", "\u0251\u0251\u0251\u0251\u0251"}, // shrinks one byte per char
+ {"A\u0080\U0010FFFF", "a\u0080\U0010FFFF"}, // test utf8.RuneSelf and utf8.MaxRune
}
const space = "\t\v\r\f\n\u0085\u00a0\u2000\u3000"
if m != expect {
t.Errorf("replace invalid sequence: expected %q got %q", expect, m)
}
+
+ // 8. Check utf8.RuneSelf and utf8.MaxRune encoding
+ encode := func(r rune) rune {
+ switch r {
+ case utf8.RuneSelf:
+ return unicode.MaxRune
+ case unicode.MaxRune:
+ return utf8.RuneSelf
+ }
+ return r
+ }
+ s := string(utf8.RuneSelf) + string(utf8.MaxRune)
+ r := string(utf8.MaxRune) + string(utf8.RuneSelf) // reverse of s
+ m = Map(encode, s)
+ if m != r {
+ t.Errorf("encoding not handled correctly: expected %q got %q", r, m)
+ }
+ m = Map(encode, r)
+ if m != s {
+ t.Errorf("encoding not handled correctly: expected %q got %q", s, m)
+ }
}
func TestToUpper(t *testing.T) { runStringTests(t, ToUpper, "ToUpper", upperTests) }
#include <time.h>
#include <unistd.h>
-#define fd (100)
+#define fd (30)
// Tests libgo2.so, which does not export any functions.
// Read a string from the file descriptor and print it.
// that the C code can also use.
const (
- fd = 100
+ fd = 30
)
func init() {