package main
-import (
- "net"
-)
+import "os"
+
+// Issue 481: closures and var declarations
+// with multiple variables assigned from one
+// function call.
func main() {
- var listen, _ = net.Listen("tcp", "127.0.0.1:0")
+ var listen, _ = Listen("tcp", "127.0.0.1:0")
go func() {
for {
}
}()
- var conn, _ = net.Dial("tcp", "", listen.Addr().String())
+ var conn, _ = Dial("tcp", "", listen.Addr().String())
_ = conn
}
+
+// Simulated net interface to exercise the bug
+// without involving a real network.
+type T chan int
+
+var global T
+
+func Listen(x, y string) (T, string) {
+ global = make(chan int)
+ return global, y
+}
+
+func (t T) Addr() os.Error {
+ return os.ErrorString("stringer")
+}
+
+func (t T) Accept() (int, string) {
+ return <-t, ""
+}
+
+func Dial(x, y, z string) (int, string) {
+ global <- 1
+ return 0, ""
+}
+
-31d7feb9281b
+342e3b11f21a
The first line of this file holds the Mercurial revision number of the
last merge done from the master library sources.
endif # !LIBGO_IS_RTEMS
go_net_files = \
+ go/net/cgo_stub.go \
go/net/dial.go \
go/net/dnsclient.go \
go/net/dnsconfig.go \
$(go_net_newpollserver_file) \
go/net/fd.go \
$(go_net_fd_os_file) \
+ go/net/file.go \
go/net/hosts.go \
go/net/ip.go \
go/net/iprawsock.go \
go/net/ipsock.go \
+ go/net/lookup.go \
go/net/net.go \
go/net/parse.go \
go/net/pipe.go \
go/go/ast/ast.go \
go/go/ast/filter.go \
go/go/ast/print.go \
+ go/go/ast/resolve.go \
go/go/ast/scope.go \
go/go/ast/walk.go
go_go_doc_files = \
$(CHECK)
.PHONY: exp/eval/check
-go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \
- reflect.gox unicode.gox utf8.gox
+go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/scanner.gox go/token.gox \
+ io.gox os.gox reflect.gox unicode.gox utf8.gox
$(BUILDPACKAGE)
go/ast/check: $(CHECK_DEPS)
@$(MKDIR_P) go/ast
@LIBGO_IS_LINUX_TRUE@@LIBGO_IS_RTEMS_FALSE@go_net_newpollserver_file = go/net/newpollserver.go
@LIBGO_IS_RTEMS_TRUE@go_net_newpollserver_file = go/net/newpollserver_rtems.go
go_net_files = \
+ go/net/cgo_stub.go \
go/net/dial.go \
go/net/dnsclient.go \
go/net/dnsconfig.go \
$(go_net_newpollserver_file) \
go/net/fd.go \
$(go_net_fd_os_file) \
+ go/net/file.go \
go/net/hosts.go \
go/net/ip.go \
go/net/iprawsock.go \
go/net/ipsock.go \
+ go/net/lookup.go \
go/net/net.go \
go/net/parse.go \
go/net/pipe.go \
go/go/ast/ast.go \
go/go/ast/filter.go \
go/go/ast/print.go \
+ go/go/ast/resolve.go \
go/go/ast/scope.go \
go/go/ast/walk.go
$(CHECK)
.PHONY: exp/eval/check
-go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \
- reflect.gox unicode.gox utf8.gox
+go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/scanner.gox go/token.gox \
+ io.gox os.gox reflect.gox unicode.gox utf8.gox
$(BUILDPACKAGE)
go/ast/check: $(CHECK_DEPS)
@$(MKDIR_P) go/ast
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
i, bytediff(expected, actual))
}
+ if testing.Short() { // The second test is expensive.
+ break
+ }
}
}
if err != nil {
return
}
+ // We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
+ // that a sequence of them can be parsed into a []string.
+ if t.tag == tagGeneralString {
+ t.tag = tagPrintableString
+ }
if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
err = StructuralError{"sequence tag mismatch"}
return
return
}
if params.explicit {
- if t.class == classContextSpecific && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ expectedClass := classContextSpecific
+ if params.application {
+ expectedClass = classApplication
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
if t.length > 0 {
t, offset, err = parseTagAndLength(bytes, offset)
if err != nil {
if universalTag == tagPrintableString && t.tag == tagIA5String {
universalTag = tagIA5String
}
+ // Likewise for GeneralString
+ if universalTag == tagPrintableString && t.tag == tagGeneralString {
+ universalTag = tagGeneralString
+ }
// Special case for time: UTCTime and GeneralizedTime both map to the
// Go type time.Time.
expectedTag = *params.tag
}
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = classApplication
+ expectedTag = *params.tag
+ }
+
// We have unwrapped any explicit tagging at this point.
if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
// Tags don't match. Again, it could be an optional element.
v, err = parseIA5String(innerBytes)
case tagT61String:
v, err = parseT61String(innerBytes)
+ case tagGeneralString:
+ // GeneralString is specified in ISO-2022/ECMA-35.
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
default:
err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
}
// Other ASN.1 types are not supported; if it encounters them,
// Unmarshal returns a parse error.
func Unmarshal(b []byte, val interface{}) (rest []byte, err os.Error) {
+ return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err os.Error) {
v := reflect.NewValue(val).(*reflect.PtrValue).Elem()
- offset, err := parseField(v, b, 0, fieldParameters{})
+ offset, err := parseField(v, b, 0, parseFieldParameters(params))
if err != nil {
return nil, err
}
{"printable", fieldParameters{stringType: tagPrintableString}},
{"optional", fieldParameters{optional: true}},
{"explicit", fieldParameters{explicit: true, tag: new(int)}},
+ {"application", fieldParameters{application: true, tag: new(int)}},
{"optional,explicit", fieldParameters{optional: true, explicit: true, tag: new(int)}},
{"default:42", fieldParameters{defaultValue: newInt64(42)}},
{"tag:17", fieldParameters{tag: newInt(17)}},
{"optional,explicit,default:42,tag:17", fieldParameters{optional: true, explicit: true, defaultValue: newInt64(42), tag: newInt(17)}},
- {"optional,explicit,default:42,tag:17,rubbish1", fieldParameters{true, true, newInt64(42), newInt(17), 0, false}},
+ {"optional,explicit,default:42,tag:17,rubbish1", fieldParameters{true, true, false, newInt64(42), newInt(17), 0, false}},
{"set", fieldParameters{set: true}},
}
tagIA5String = 22
tagUTCTime = 23
tagGeneralizedTime = 24
+ tagGeneralString = 27
)
const (
// fieldParameters is the parsed representation of tag string from a structure field.
type fieldParameters struct {
optional bool // true iff the field is OPTIONAL
- explicit bool // true iff and EXPLICIT tag is in use.
+ explicit bool // true iff an EXPLICIT tag is in use.
+ application bool // true iff an APPLICATION tag is in use.
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
stringType int // the string tag to use when marshaling.
ret.explicit = true
if ret.tag == nil {
ret.tag = new(int)
- *ret.tag = 0
}
case part == "ia5":
ret.stringType = tagIA5String
}
case part == "set":
ret.set = true
+ case part == "application":
+ ret.application = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
}
}
return
func TestProbablyPrime(t *testing.T) {
+ nreps := 20
+ if testing.Short() {
+ nreps = 1
+ }
for i, s := range primes {
p, _ := new(Int).SetString(s, 10)
- if !ProbablyPrime(p, 20) {
+ if !ProbablyPrime(p, nreps) {
t.Errorf("#%d prime found to be non-prime (%s)", i, s)
}
}
for i, s := range composites {
c, _ := new(Int).SetString(s, 10)
- if ProbablyPrime(c, 20) {
+ if ProbablyPrime(c, nreps) {
t.Errorf("#%d composite found to be prime (%s)", i, s)
}
+ if testing.Short() {
+ break
+ }
}
}
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
- for i := 3; i < 30; i += 3 {
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
- for i := 3; i < 30; i += 3 {
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, bytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
}
}
b := make([]byte, n)
- bp := 0
- for i := 0; i < len(a); i++ {
- s := a[i]
- for j := 0; j < len(s); j++ {
- b[bp] = s[j]
- bp++
- }
- if i+1 < len(a) {
- s = sep
- for j := 0; j < len(s); j++ {
- b[bp] = s[j]
- bp++
- }
- }
+ bp := copy(b, a[0])
+ for _, s := range a[1:] {
+ bp += copy(b[bp:], sep)
+ bp += copy(b[bp:], s)
}
return b
}
// test a larger buffer with different sizes and alignments
func TestIndexByteBig(t *testing.T) {
- const n = 1024
+ var n = 1024
+ if testing.Short() {
+ n = 128
+ }
b := make([]byte, n)
for i := 0; i < n; i++ {
// different start alignments
func TestVectorNums(t *testing.T) {
+ if testing.Short() {
+ return
+ }
var v Vector
c := int(0)
runtime.GC()
func TestIntVectorNums(t *testing.T) {
+ if testing.Short() {
+ return
+ }
var v IntVector
c := int(0)
runtime.GC()
func TestStringVectorNums(t *testing.T) {
+ if testing.Short() {
+ return
+ }
var v StringVector
c := ""
runtime.GC()
// NewCTR returns a Stream which encrypts/decrypts using the given Block in
// counter mode. The length of iv must be the same as the Block's block size.
func NewCTR(block Block, iv []byte) Stream {
+ if len(iv) != block.BlockSize() {
+ panic("cipher.NewCTR: iv length must equal block size")
+ }
+
return &ctr{
b: block,
ctr: dup(iv),
return
}
if !c.IsOnCurve(priv.PublicKey.X, priv.PublicKey.Y) {
- t.Errorf("%s: public key invalid", tag, err)
+ t.Errorf("%s: public key invalid: %s", tag, err)
}
}
func TestKeyGeneration(t *testing.T) {
testKeyGeneration(t, elliptic.P224(), "p224")
+ if testing.Short() {
+ return
+ }
testKeyGeneration(t, elliptic.P256(), "p256")
testKeyGeneration(t, elliptic.P384(), "p384")
testKeyGeneration(t, elliptic.P521(), "p521")
func TestSignAndVerify(t *testing.T) {
testSignAndVerify(t, elliptic.P224(), "p224")
+ if testing.Short() {
+ return
+ }
testSignAndVerify(t, elliptic.P256(), "p256")
testSignAndVerify(t, elliptic.P384(), "p384")
testSignAndVerify(t, elliptic.P521(), "p521")
if Verify(&pub, hashed, r, s) != test.ok {
t.Errorf("%d: bad result", i)
}
+ if testing.Short() {
+ break
+ }
}
}
if fmt.Sprintf("%x", x) != e.x || fmt.Sprintf("%x", y) != e.y {
t.Errorf("%d: bad output for k=%s: got (%x, %s), want (%s, %s)", i, e.k, x, y, e.x, e.y)
}
+ if testing.Short() && i > 5 {
+ break
+ }
}
}
if !bytes.Equal(out, expected) {
t.Errorf("%d: output got: %x want: %x", i, out, expected)
}
+ if testing.Short() {
+ break
+ }
}
}
)
func TestRead(t *testing.T) {
- b := make([]byte, 4e6)
+ var n int = 4e6
+ if testing.Short() {
+ n = 1e5
+ }
+ b := make([]byte, n)
n, err := Read(b)
if n != len(b) || err != nil {
t.Fatalf("Read(buf) = %d, %s", n, err)
return true
}
- quick.Check(tryEncryptDecrypt, nil)
+ config := new(quick.Config)
+ if testing.Short() {
+ config.MaxCount = 10
+ }
+ quick.Check(tryEncryptDecrypt, config)
}
// These test vectors were generated with `openssl rsautl -pkcs -encrypt`
func TestKeyGeneration(t *testing.T) {
random := rand.Reader
- priv, err := GenerateKey(random, 1024)
+ size := 1024
+ if testing.Short() {
+ size = 128
+ }
+ priv, err := GenerateKey(random, size)
if err != nil {
t.Errorf("failed to generate key")
}
t.Errorf("#%d,%d (blind) bad result: %#v (want %#v)", i, j, out, message.in)
}
}
+ if testing.Short() {
+ break
+ }
}
}
// ConnectionState records basic TLS details about the connection.
type ConnectionState struct {
- HandshakeComplete bool
- CipherSuite uint16
- NegotiatedProtocol string
+ HandshakeComplete bool
+ CipherSuite uint16
+ NegotiatedProtocol string
+ NegotiatedProtocolIsMutual bool
// the certificate chain that was presented by the other side
PeerCertificates []*x509.Certificate
RootCAs *CASet
// NextProtos is a list of supported, application level protocols.
- // Currently only server-side handling is supported.
NextProtos []string
// ServerName is included in the client's handshake to support virtual
ocspResponse []byte // stapled OCSP response
peerCertificates []*x509.Certificate
- clientProtocol string
+ clientProtocol string
+ clientProtocolFallback bool
// first permanent error
errMutex sync.Mutex
state.HandshakeComplete = c.handshakeComplete
if c.handshakeComplete {
state.NegotiatedProtocol = c.clientProtocol
+ state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback
state.CipherSuite = c.cipherSuite
state.PeerCertificates = c.peerCertificates
}
serverName: c.config.ServerName,
supportedCurves: []uint16{curveP256, curveP384, curveP521},
supportedPoints: []uint8{pointFormatUncompressed},
+ nextProtoNeg: len(c.config.NextProtos) > 0,
}
t := uint32(c.config.time())
return c.sendAlert(alertUnexpectedMessage)
}
+ if !hello.nextProtoNeg && serverHello.nextProtoNeg {
+ c.sendAlert(alertHandshakeFailure)
+ return os.ErrorString("server advertised unrequested NPN")
+ }
+
suite, suiteId := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite)
if suite == nil {
return c.sendAlert(alertHandshakeFailure)
c.out.prepareCipherSpec(clientCipher, clientHash)
c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+ if serverHello.nextProtoNeg {
+ nextProto := new(nextProtoMsg)
+ proto, fallback := mutualProtocol(c.config.NextProtos, serverHello.nextProtos)
+ nextProto.proto = proto
+ c.clientProtocol = proto
+ c.clientProtocolFallback = fallback
+
+ finishedHash.Write(nextProto.marshal())
+ c.writeRecord(recordTypeHandshake, nextProto.marshal())
+ }
+
finished := new(finishedMsg)
finished.verifyData = finishedHash.clientSum(masterSecret)
finishedHash.Write(finished.marshal())
c.cipherSuite = suiteId
return nil
}
+
+// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the
+// set of client and server supported protocols. The set of client supported
+// protocols must not be empty. It returns the resulting protocol and a flag
+// indicating if the fallback case was reached.
+func mutualProtocol(clientProtos, serverProtos []string) (string, bool) {
+ for _, s := range serverProtos {
+ for _, c := range clientProtos {
+ if s == c {
+ return s, false
+ }
+ }
+ }
+
+ return clientProtos[0], true
+}
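
A client-side sketch, not part of the patch, of the new NPN surface: Config.NextProtos now has a client-side meaning, and ConnectionState reports whether the negotiated protocol was mutual. The address, protocol name, and helper name are placeholders; the two-argument Dial form is the one introduced above.

package npnclient

import (
	"crypto/tls"
	"os"
)

// dialNPN dials addr, requests "http/1.1" via NPN, and reports whether
// the server actually shares that protocol (as opposed to the fallback
// case handled by mutualProtocol above).
func dialNPN(addr string) (*tls.Conn, bool, os.Error) {
	config := &tls.Config{NextProtos: []string{"http/1.1"}}
	conn, err := tls.Dial("tcp", addr, config)
	if err != nil {
		return nil, false, err
	}
	if err := conn.Handshake(); err != nil {
		conn.Close()
		return nil, false, err
	}
	state := conn.ConnectionState()
	return conn, state.NegotiatedProtocolIsMutual, nil
}
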
testConfig.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_RC4_128_SHA}
- conn, err := Dial("tcp", "", "127.0.0.1:10443", testConfig)
+ conn, err := Dial("tcp", "127.0.0.1:10443", testConfig)
if err != nil {
t.Fatal(err)
}
for i, iface := range tests {
ty := reflect.NewValue(iface).Type()
- for j := 0; j < 100; j++ {
+ n := 100
+ if testing.Short() {
+ n = 5
+ }
+ for j := 0; j < n; j++ {
v, ok := quick.Value(ty, rand)
if !ok {
t.Errorf("#%d: failed to create value", i)
// Dial interprets a nil configuration as equivalent to
// the zero configuration; see the documentation of Config
// for the defaults.
-func Dial(network, laddr, raddr string, config *Config) (*Conn, os.Error) {
- c, err := net.Dial(network, laddr, raddr)
+func Dial(network, addr string, config *Config) (*Conn, os.Error) {
+ raddr := addr
+ c, err := net.Dial(network, raddr)
if err != nil {
return nil, err
}
KeyUsageDecipherOnly
)
+// RFC 5280, 4.2.1.12 Extended Key Usage
+//
+// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
+//
+// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
+//
+// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
+// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
+// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
+// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
+// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
+// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
+var (
+ oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
+ oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
+ oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
+ oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
+ oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
+ oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
+ oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
+)
+
+// ExtKeyUsage represents an extended set of actions that are valid for a given key.
+// Each of the ExtKeyUsage* constants defines a unique action.
+type ExtKeyUsage int
+
+const (
+ ExtKeyUsageAny ExtKeyUsage = iota
+ ExtKeyUsageServerAuth
+ ExtKeyUsageClientAuth
+ ExtKeyUsageCodeSigning
+ ExtKeyUsageEmailProtection
+ ExtKeyUsageTimeStamping
+ ExtKeyUsageOCSPSigning
+)
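
A consumer-side sketch, not part of the patch, of the new ExtKeyUsage field; allowsServerAuth is a hypothetical helper, and treating an empty list as unrestricted is a simplifying assumption (usages unknown to the package land in UnknownExtKeyUsage instead).

package certcheck

import "crypto/x509"

// allowsServerAuth reports whether the parsed certificate lists TLS
// server authentication, or the anyExtendedKeyUsage wildcard, among
// its extended key usages.
func allowsServerAuth(cert *x509.Certificate) bool {
	if len(cert.ExtKeyUsage) == 0 {
		// No recognized EKU values; treated here as unrestricted.
		return true
	}
	for _, u := range cert.ExtKeyUsage {
		if u == x509.ExtKeyUsageServerAuth || u == x509.ExtKeyUsageAny {
			return true
		}
	}
	return false
}
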
+
// A Certificate represents an X.509 certificate.
type Certificate struct {
Raw []byte // Raw ASN.1 DER contents.
NotBefore, NotAfter *time.Time // Validity bounds.
KeyUsage KeyUsage
+ ExtKeyUsage []ExtKeyUsage // Sequence of extended key usages.
+ UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
BasicConstraintsValid bool // if true then the next two fields are valid.
IsCA bool
MaxPathLen int
out.AuthorityKeyId = a.Id
continue
+ case 37:
+ // RFC 5280, 4.2.1.12. Extended Key Usage
+
+ // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
+ //
+ // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
+ //
+ // KeyPurposeId ::= OBJECT IDENTIFIER
+
+ var keyUsage []asn1.ObjectIdentifier
+ _, err = asn1.Unmarshal(e.Value, &keyUsage)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, u := range keyUsage {
+ switch {
+ case u.Equal(oidExtKeyUsageAny):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageAny)
+ case u.Equal(oidExtKeyUsageServerAuth):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageServerAuth)
+ case u.Equal(oidExtKeyUsageClientAuth):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageClientAuth)
+ case u.Equal(oidExtKeyUsageCodeSigning):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageCodeSigning)
+ case u.Equal(oidExtKeyUsageEmailProtection):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageEmailProtection)
+ case u.Equal(oidExtKeyUsageTimeStamping):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageTimeStamping)
+ case u.Equal(oidExtKeyUsageOCSPSigning):
+ out.ExtKeyUsage = append(out.ExtKeyUsage, ExtKeyUsageOCSPSigning)
+ default:
+ out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
+ }
+ }
+
+ continue
+
case 14:
// RFC 5280, 4.2.1.2
var keyid []byte
}
}
-// gotest: if [ "$(uname)-$(uname -m)" = Linux-x86_64 -a "$GOARCH" = amd64 ]; then
-// gotest: mkdir -p _test && $AS pclinetest.s && $LD -E main -o _test/pclinetest pclinetest.$O
-// gotest: fi
func TestPCLine(t *testing.T) {
if !dotest() {
return
var grammars = []string{
-`Program = .
-`,
-
-`Program = foo .
-foo = "foo" .
-`,
-
-`Program = "a" | "b" "c" .
-`,
-
-`Program = "a" ... "z" .
-`,
-
-`Program = Song .
- Song = { Note } .
- Note = Do | (Re | Mi | Fa | So | La) | Ti .
- Do = "c" .
- Re = "d" .
- Mi = "e" .
- Fa = "f" .
- So = "g" .
- La = "a" .
- Ti = ti .
- ti = "b" .
-`,
+ `Program = .`,
+
+ `Program = foo .
+ foo = "foo" .`,
+
+ `Program = "a" | "b" "c" .`,
+
+ `Program = "a" ... "z" .`,
+
+ `Program = Song .
+ Song = { Note } .
+ Note = Do | (Re | Mi | Fa | So | La) | Ti .
+ Do = "c" .
+ Re = "d" .
+ Mi = "e" .
+ Fa = "f" .
+ So = "g" .
+ La = "a" .
+ Ti = ti .
+ ti = "b" .`,
}
scanner scanner.Scanner
pos token.Pos // token position
tok token.Token // one token look-ahead
- lit []byte // token literal
+ lit string // token literal
}
// make the error message more specific
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
- msg += " " + string(p.lit)
+ msg += " " + p.lit
}
}
p.error(pos, msg)
func (p *parser) parseIdentifier() *Name {
pos := p.pos
- name := string(p.lit)
+ name := p.lit
p.expect(token.IDENT)
return &Name{pos, name}
}
pos := p.pos
value := ""
if p.tok == token.STRING {
- value, _ = strconv.Unquote(string(p.lit))
+ value, _ = strconv.Unquote(p.lit)
// Unquote may fail with an error, but only if the scanner found
// an illegal string in the first place. In this case the error
// has already been reported.
file *token.File
pos token.Pos // token position
tok token.Token // one token look-ahead
- lit []byte // token literal
+ lit string // token literal
packs map[string]string // PackageName -> ImportPath
rules map[string]expr // RuleName -> Expression
// make the error message more specific
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
- msg += " " + string(p.lit)
+ msg += " " + p.lit
}
}
p.error(pos, msg)
func (p *parser) parseIdentifier() string {
- name := string(p.lit)
+ name := p.lit
p.expect(token.IDENT)
return name
}
func (p *parser) parseString() string {
s := ""
if p.tok == token.STRING {
- s, _ = strconv.Unquote(string(p.lit))
+ s, _ = strconv.Unquote(p.lit)
// Unquote may fail with an error, but only if the scanner found
// an illegal string in the first place. In this case the error
// has already been reported.
var fname string
switch p.tok {
case token.ILLEGAL:
- if string(p.lit) != "@" {
+ if p.lit != "@" {
return nil
}
fname = "@"
}
// Make the connection.
if socket != "" {
- conn, err = net.Dial("unix", "", socket+":"+displayStr)
+ conn, err = net.Dial("unix", socket+":"+displayStr)
} else if host != "" {
- conn, err = net.Dial(protocol, "", host+":"+strconv.Itoa(6000+displayInt))
+ conn, err = net.Dial(protocol, host+":"+strconv.Itoa(6000+displayInt))
} else {
- conn, err = net.Dial("unix", "", "/tmp/.X11-unix/X"+displayStr)
+ conn, err = net.Dial("unix", "/tmp/.X11-unix/X"+displayStr)
}
if err != nil {
return nil, "", os.NewError("cannot connect to " + display + ": " + err.String())
}
func runTests(t *testing.T, baseName string, tests []test) {
- for i, test := range tests {
+ delta := 1
+ if testing.Short() {
+ delta = 16
+ }
+ for i := 0; i < len(tests); i += delta {
name := fmt.Sprintf("%s[%d]", baseName, i)
- test.run(t, name)
+ tests[i].run(t, name)
}
}
sc, ev := newScanner(args)
var toks [4]token.Token
- var lits [4][]byte
+ var lits [4]string
for i := range toks {
_, toks[i], lits[i] = sc.Scan()
}
flag.Bool(...) // global options
flag.Parse() // parse leading command
- subcmd := flag.Arg[0]
+ subcmd := flag.Arg(0)
switch subcmd {
// add per-subcommand options
}
}
func TestCountMallocs(t *testing.T) {
+ if testing.Short() {
+ return
+ }
mallocs := 0 - runtime.MemStats.Mallocs
for i := 0; i < 100; i++ {
Sprintf("")
// A Comment node represents a single //-style or /*-style comment.
type Comment struct {
Slash token.Pos // position of "/" starting the comment
- Text []byte // comment text (excluding '\n' for //-style comments)
+ Text string // comment text (excluding '\n' for //-style comments)
}
BasicLit struct {
ValuePos token.Pos // literal position
Kind token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING
- Value []byte // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
+ Value string // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o`
}
// A FuncLit node represents a function literal.
ImportSpec struct {
Doc *CommentGroup // associated documentation; or nil
Name *Ident // local package name (including "."); or nil
- Path *BasicLit // package path
+ Path *BasicLit // import path
Comment *CommentGroup // line comments; or nil
}
Package token.Pos // position of "package" keyword
Name *Ident // package name
Decls []Decl // top-level declarations; or nil
- Scope *Scope // package scope
- Unresolved []*Ident // unresolved global identifiers
+ Scope *Scope // package scope (this file only)
+ Imports []*ImportSpec // imports in this file
+ Unresolved []*Ident // unresolved identifiers in this file
Comments []*CommentGroup // list of all comments in the source file
}
// collectively building a Go package.
//
type Package struct {
- Name string // package name
- Scope *Scope // package scope
- Files map[string]*File // Go source files by filename
+ Name string // package name
+ Scope *Scope // package scope
+ Imports map[string]*Scope // map of import path -> package scope across all files
+ Files map[string]*File // Go source files by filename
}
// separator is an empty //-style comment that is interspersed between
// different comment groups when they are concatenated into a single group
//
-var separator = &Comment{noPos, []byte("//")}
+var separator = &Comment{noPos, "//"}
// MergePackageFiles creates a file AST by merging the ASTs of the
}
// TODO(gri) need to compute pkgScope and unresolved identifiers!
- return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, comments}
+ // TODO(gri) need to compute imports!
+ return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, nil, comments}
}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements NewPackage.
+
+package ast
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+ "os"
+)
+
+
+type pkgBuilder struct {
+ scanner.ErrorVector
+ fset *token.FileSet
+}
+
+
+func (p *pkgBuilder) error(pos token.Pos, msg string) {
+ p.Error(p.fset.Position(pos), msg)
+}
+
+
+func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
+ p.error(pos, fmt.Sprintf(format, args...))
+}
+
+
+func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
+ alt := scope.Insert(obj)
+ if alt == nil && altScope != nil {
+ // see if there is a conflicting declaration in altScope
+ alt = altScope.Lookup(obj.Name)
+ }
+ if alt != nil {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.fset.Position(pos))
+ }
+ p.error(obj.Pos(), fmt.Sprintf("%s redeclared in this block%s", obj.Name, prevDecl))
+ }
+}
+
+
+func resolve(scope *Scope, ident *Ident) bool {
+ for ; scope != nil; scope = scope.Outer {
+ if obj := scope.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return true
+ }
+ }
+ return false
+}
+
+
+// NewPackage uses an Importer to resolve imports. Given an importPath,
+// an importer returns the imported package's name, its scope of exported
+// objects, and an error, if any.
+//
+type Importer func(path string) (name string, scope *Scope, err os.Error)
+
+
+// NewPackage creates a new Package node from a set of File nodes. It resolves
+// unresolved identifiers across files and updates each file's Unresolved list
+// accordingly. If a non-nil importer and universe scope are provided, they are
+// used to resolve identifiers not declared in any of the package files. Any
+// remaining unresolved identifiers are reported as undeclared. If the files
+// belong to different packages, one package name is selected and files with
+// different package names are reported and then ignored.
+// The result is a package node and a scanner.ErrorList if there were errors.
+//
+func NewPackage(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, os.Error) {
+ var p pkgBuilder
+ p.fset = fset
+
+ // complete package scope
+ pkgName := ""
+ pkgScope := NewScope(universe)
+ for _, file := range files {
+ // package names must match
+ switch name := file.Name.Name; {
+ case pkgName == "":
+ pkgName = name
+ case name != pkgName:
+ p.errorf(file.Package, "package %s; expected %s", name, pkgName)
+ continue // ignore this file
+ }
+
+ // collect top-level file objects in package scope
+ for _, obj := range file.Scope.Objects {
+ p.declare(pkgScope, nil, obj)
+ }
+ }
+
+ // imports maps import paths to package names and scopes
+ // TODO(gri): Eventually we'd like to get to the import scope from
+ // a package object. Then we can have a map path -> Obj.
+ type importedPkg struct {
+ name string
+ scope *Scope
+ }
+ imports := make(map[string]*importedPkg)
+
+ // complete file scopes with imports and resolve identifiers
+ for _, file := range files {
+ // ignore file if it belongs to a different package
+ // (error has already been reported)
+ if file.Name.Name != pkgName {
+ continue
+ }
+
+ // build file scope by processing all imports
+ importErrors := false
+ fileScope := NewScope(pkgScope)
+ for _, spec := range file.Imports {
+ // add import to global map of imports
+ path := string(spec.Path.Value)
+ path = path[1 : len(path)-1] // strip ""'s
+ pkg := imports[path]
+ if pkg == nil {
+ if importer == nil {
+ importErrors = true
+ continue
+ }
+ name, scope, err := importer(path)
+ if err != nil {
+ p.errorf(spec.Path.Pos(), "could not import %s (%s)", path, err)
+ importErrors = true
+ continue
+ }
+ pkg = &importedPkg{name, scope}
+ imports[path] = pkg
+ // TODO(gri) If a local package name != "." is provided,
+ // global identifier resolution could proceed even if the
+ // import failed. Consider adjusting the logic here a bit.
+ }
+ // local name overrides imported package name
+ name := pkg.name
+ if spec.Name != nil {
+ name = spec.Name.Name
+ }
+ // add import to file scope
+ if name == "." {
+ // merge imported scope with file scope
+ for _, obj := range pkg.scope.Objects {
+ p.declare(fileScope, pkgScope, obj)
+ }
+ } else {
+ // declare imported package object in file scope
+ obj := NewObj(Pkg, name)
+ obj.Decl = spec
+ p.declare(fileScope, pkgScope, obj)
+ }
+ }
+
+ // resolve identifiers
+ if importErrors {
+ // don't use the universe scope without correct imports
+ // (objects in the universe may be shadowed by imports;
+ // with missing imports identifiers might get resolved
+ // wrongly)
+ pkgScope.Outer = nil
+ }
+ i := 0
+ for _, ident := range file.Unresolved {
+ if !resolve(fileScope, ident) {
+ p.errorf(ident.Pos(), "undeclared name: %s", ident.Name)
+ file.Unresolved[i] = ident
+ i++
+ }
+ }
+ file.Unresolved = file.Unresolved[0:i]
+ pkgScope.Outer = universe // reset universe scope
+ }
+
+ // collect all import paths and respective package scopes
+ importedScopes := make(map[string]*Scope)
+ for path, pkg := range imports {
+ importedScopes[path] = pkg.scope
+ }
+
+ return &Package{pkgName, pkgScope, importedScopes, files}, p.GetError(scanner.Sorted)
+}
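
A minimal sketch, not part of the patch, of driving the new resolver; resolveFiles is a hypothetical helper that resolves a set of already-parsed files with no importer and no universe scope, so only identifiers declared within the package are resolved and everything else is reported as undeclared and left in each file's Unresolved list.

package resolver

import (
	"go/ast"
	"go/token"
	"os"
)

func resolveFiles(fset *token.FileSet, files map[string]*ast.File) (*ast.Package, os.Error) {
	// A nil Importer disables import resolution; a nil universe scope
	// leaves predeclared identifiers unresolved as well.
	return ast.NewPackage(fset, files, nil, nil)
}
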
}
-// Insert attempts to insert a named object into the scope s.
-// If the scope does not contain an object with that name yet,
-// Insert inserts the object and returns it. Otherwise, Insert
-// leaves the scope unchanged and returns the object found in
-// the scope instead.
+// Insert attempts to insert a named object obj into the scope s.
+// If the scope already contains an object alt with the same name,
+// Insert leaves the scope unchanged and returns alt. Otherwise
+// it inserts obj and returns nil.
//
func (s *Scope) Insert(obj *Object) (alt *Object) {
if alt = s.Objects[obj.Name]; alt == nil {
s.Objects[obj.Name] = obj
- alt = obj
}
return
}
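
The revised Insert contract in miniature, not part of the patch: a nil result now means the object was added, while a non-nil result is the previously declared object with the same name. declareVar is a hypothetical helper.

package scopes

import "go/ast"

func declareVar(scope *ast.Scope, name string) (obj *ast.Object, redeclared bool) {
	obj = ast.NewObj(ast.Var, name)
	if alt := scope.Insert(obj); alt != nil {
		return alt, true // name already declared in this scope
	}
	return obj, false
}
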
return n.Pos()
}
}
+ case *ImportSpec:
+ if d.Name != nil && d.Name.Name == name {
+ return d.Name.Pos()
+ }
+ return d.Path.Pos()
case *ValueSpec:
for _, n := range d.Names {
if n.Name == name {
// nor to have trailing spaces at the end of lines.
// The comment markers have already been removed.
//
-// Turn each run of multiple \n into </p><p>
+// Turn each run of multiple \n into </p><p>.
// Turn each run of indented lines into a <pre> block without indent.
//
// URLs in the comment text are converted into links; if the URL also appears
n2 := len(comments.List)
list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line
copy(list, doc.doc.List)
- list[n1] = &ast.Comment{token.NoPos, []byte("//")} // separator line
+ list[n1] = &ast.Comment{token.NoPos, "//"} // separator line
copy(list[n1+1:], comments.List)
doc.doc = &ast.CommentGroup{list}
}
// if the type is not exported, the effect to
// a client is as if there were no type name
if t.IsExported() {
- return string(t.Name)
+ return t.Name
}
case *ast.StarExpr:
return baseTypeName(t.X)
// collect BUG(...) comments
for _, c := range src.Comments {
text := c.List[0].Text
- if m := bug_markers.FindIndex(text); m != nil {
+ if m := bug_markers.FindStringIndex(text); m != nil {
// found a BUG comment; maybe empty
- if btxt := text[m[1]:]; bug_content.Match(btxt) {
+ if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
// non-empty BUG comment; collect comment without BUG prefix
list := copyCommentList(c.List)
list[0].Text = text[m[1]:]
var p parser
p.init(fset, filename, data, 0)
- x := p.parseExpr()
+ x := p.parseRhs()
if p.tok == token.SEMICOLON {
p.next() // consume automatically inserted semicolon, if any
}
name := src.Name.Name
pkg, found := pkgs[name]
if !found {
- pkg = &ast.Package{name, nil, make(map[string]*ast.File)}
+ // TODO(gri) Use NewPackage here; reconsider ParseFiles API.
+ pkg = &ast.Package{name, nil, nil, make(map[string]*ast.File)}
pkgs[name] = pkg
}
pkg.Files[filename] = src
lineComment *ast.CommentGroup // last line comment
// Next token
- pos token.Pos // token position
- tok token.Token // one token look-ahead
- lit_ []byte // token literal (slice into original source, don't hold on to it)
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
// Non-syntactic parser control
exprLev int // < 0: in control clause, >= 0: in expression
// Ordinary identifer scopes
- pkgScope *ast.Scope // pkgScope.Outer == nil
- topScope *ast.Scope // top-most scope; may be pkgScope
- unresolved []*ast.Ident // unresolved global identifiers
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
// Label scope
// (maintained by open/close LabelScope)
}
-func (p *parser) lit() []byte {
- // make a copy of p.lit_ so that we don't hold on to
- // a copy of the entire source indirectly in the AST
- t := make([]byte, len(p.lit_))
- copy(t, p.lit_)
- return t
-}
-
-
// ----------------------------------------------------------------------------
// Scoping support
func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name != "_" {
obj := ast.NewObj(kind, ident.Name)
// remember the corresponding declaration for redeclaration
// errors and global variable resolution/typechecking phase
obj.Decl = decl
- alt := scope.Insert(obj)
- if alt != obj && p.mode&DeclarationErrors != 0 {
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
prevDecl := ""
if pos := alt.Pos(); pos.IsValid() {
prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
// the same type, and at least one of the non-blank variables is new.
n := 0 // number of new variables
for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name != "_" {
obj := ast.NewObj(ast.Var, ident.Name)
// short var declarations cannot have redeclaration errors
// and are not global => no need to remember the respective
// declaration
alt := p.topScope.Insert(obj)
- if alt == obj {
+ if alt == nil {
n++ // new declaration
+ alt = obj
}
ident.Obj = alt
}
}
-func (p *parser) resolve(ident *ast.Ident) {
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+
+func (p *parser) resolve(x ast.Expr) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
if ident.Name == "_" {
return
}
return
}
}
- // collect unresolved global identifiers; ignore the others
- if p.topScope == p.pkgScope {
- p.unresolved = append(p.unresolved, ident)
- }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
}
s := p.tok.String()
switch {
case p.tok.IsLiteral():
- p.printTrace(s, string(p.lit_))
+ p.printTrace(s, p.lit)
case p.tok.IsOperator(), p.tok.IsKeyword():
p.printTrace("\"" + s + "\"")
default:
}
}
- p.pos, p.tok, p.lit_ = p.scanner.Scan()
+ p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
// /*-style comments may end on a different line than where they start.
// Scan the comment for '\n' chars and adjust endline accordingly.
endline = p.file.Line(p.pos)
- if p.lit_[1] == '*' {
- for _, b := range p.lit_ {
- if b == '\n' {
+ if p.lit[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
endline++
}
}
}
- comment = &ast.Comment{p.pos, p.lit()}
+ comment = &ast.Comment{p.pos, p.lit}
p.next0()
return
if pos == p.pos {
// the error happened at the current position;
// make the error message more specific
- if p.tok == token.SEMICOLON && p.lit_[0] == '\n' {
+ if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
msg += ", found newline"
} else {
msg += ", found '" + p.tok.String() + "'"
if p.tok.IsLiteral() {
- msg += " " + string(p.lit_)
+ msg += " " + p.lit
}
}
}
}
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Identifiers
pos := p.pos
name := "_"
if p.tok == token.IDENT {
- name = string(p.lit_)
+ name = p.lit
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
// ----------------------------------------------------------------------------
// Common productions
-func (p *parser) parseExprList() (list []ast.Expr) {
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ExpressionList"))
}
- list = append(list, p.parseExpr())
+ list = append(list, p.parseExpr(lhs))
for p.tok == token.COMMA {
p.next()
- list = append(list, p.parseExpr())
+ list = append(list, p.parseExpr(lhs))
}
return
}
+func (p *parser) parseLhsList() []ast.Expr {
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ p.shortVarDecl(p.makeIdentList(list))
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ return list
+}
+
+
+func (p *parser) parseRhsList() []ast.Expr {
+ return p.parseExprList(false)
+}
+
+
// ----------------------------------------------------------------------------
// Types
}
-func (p *parser) parseQualifiedIdent() ast.Expr {
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
if p.trace {
- defer un(trace(p, "QualifiedIdent"))
+ defer un(trace(p, "TypeName"))
}
ident := p.parseIdent()
- p.resolve(ident)
- var x ast.Expr = ident
+ // don't resolve ident yet - it may be a parameter or field name
+
if p.tok == token.PERIOD {
- // first identifier is a package identifier
+ // ident is a package name
p.next()
+ p.resolve(ident)
sel := p.parseIdent()
- x = &ast.SelectorExpr{x, sel}
+ return &ast.SelectorExpr{ident, sel}
}
- return x
-}
-
-
-func (p *parser) parseTypeName() ast.Expr {
- if p.trace {
- defer un(trace(p, "TypeName"))
- }
-
- return p.parseQualifiedIdent()
+ return ident
}
len = &ast.Ellipsis{p.pos, nil}
p.next()
} else if p.tok != token.RBRACK {
- len = p.parseExpr()
+ len = p.parseRhs()
}
p.expect(token.RBRACK)
elt := p.parseType()
}
-func (p *parser) parseFieldDecl() *ast.Field {
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "FieldDecl"))
}
// optional tag
var tag *ast.BasicLit
if p.tok == token.STRING {
- tag = &ast.BasicLit{p.pos, p.tok, p.lit()}
+ tag = &ast.BasicLit{p.pos, p.tok, p.lit}
p.next()
}
} else {
// ["*"] TypeName (AnonymousField)
typ = list[0] // we always have at least one element
+ p.resolve(typ)
if n := len(list); n > 1 || !isTypeName(deref(typ)) {
pos := typ.Pos()
p.errorExpected(pos, "anonymous field")
p.expectSemi() // call before accessing p.linecomment
- return &ast.Field{doc, idents, typ, tag, p.lineComment}
+ field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ p.declare(field, scope, ast.Var, idents...)
+
+ return field
}
pos := p.expect(token.STRUCT)
lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
var list []*ast.Field
for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
// a field declaration cannot start with a '(' but we accept
// it here for more robust parsing and better error messages
// (parseFieldDecl will check and complain if necessary)
- list = append(list, p.parseFieldDecl())
+ list = append(list, p.parseFieldDecl(scope))
}
rbrace := p.expect(token.RBRACE)
+ // TODO(gri): store struct scope in AST
return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
if isParam && p.tok == token.ELLIPSIS {
pos := p.pos
p.next()
- typ := p.tryType() // don't use parseType so we can provide better error message
+ typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
if typ == nil {
p.error(pos, "'...' parameter is missing type")
typ = &ast.BadExpr{pos, p.pos}
}
return &ast.Ellipsis{pos, typ}
}
- return p.tryType()
+ return p.tryIdentOrType(false)
}
// if we had a list of identifiers, it must be followed by a type
typ = p.tryVarType(isParam)
+ if typ != nil {
+ p.resolve(typ)
+ }
return
}
// Type { "," Type } (anonymous parameters)
params = make([]*ast.Field, len(list))
for i, x := range list {
+ p.resolve(x)
params[i] = &ast.Field{Type: x}
}
}
}
-func (p *parser) parseMethodSpec() *ast.Field {
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"))
}
doc := p.leadComment
var idents []*ast.Ident
var typ ast.Expr
- x := p.parseQualifiedIdent()
+ x := p.parseTypeName()
if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
// method
idents = []*ast.Ident{ident}
}
p.expectSemi() // call before accessing p.linecomment
- return &ast.Field{doc, idents, typ, nil, p.lineComment}
+ spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ p.declare(spec, scope, ast.Fun, idents...)
+
+ return spec
}
pos := p.expect(token.INTERFACE)
lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
var list []*ast.Field
for p.tok == token.IDENT {
- list = append(list, p.parseMethodSpec())
+ list = append(list, p.parseMethodSpec(scope))
}
rbrace := p.expect(token.RBRACE)
+ // TODO(gri): store interface scope in AST
return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
}
-func (p *parser) tryRawType(ellipsisOk bool) ast.Expr {
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
switch p.tok {
case token.IDENT:
return p.parseTypeName()
}
-func (p *parser) tryType() ast.Expr { return p.tryRawType(false) }
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType(false)
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
// ----------------------------------------------------------------------------
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
//
-func (p *parser) parseOperand() ast.Expr {
+func (p *parser) parseOperand(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "Operand"))
}
switch p.tok {
case token.IDENT:
- ident := p.parseIdent()
- p.resolve(ident)
- return ident
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
- x := &ast.BasicLit{p.pos, p.tok, p.lit()}
+ x := &ast.BasicLit{p.pos, p.tok, p.lit}
p.next()
return x
lparen := p.pos
p.next()
p.exprLev++
- x := p.parseExpr()
+ x := p.parseRhs()
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{lparen, x, rparen}
return p.parseFuncTypeOrLit()
default:
- t := p.tryRawType(true) // could be type for composite literal or conversion
- if t != nil {
- return t
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
}
}
}
-func (p *parser) parseSelectorOrTypeAssertion(x ast.Expr) ast.Expr {
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
if p.trace {
- defer un(trace(p, "SelectorOrTypeAssertion"))
+ defer un(trace(p, "Selector"))
}
- p.expect(token.PERIOD)
- if p.tok == token.IDENT {
- // selector
- sel := p.parseIdent()
- return &ast.SelectorExpr{x, sel}
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{x, sel}
+}
+
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
}
- // type assertion
p.expect(token.LPAREN)
var typ ast.Expr
if p.tok == token.TYPE {
var low, high ast.Expr
isSlice := false
if p.tok != token.COLON {
- low = p.parseExpr()
+ low = p.parseRhs()
}
if p.tok == token.COLON {
isSlice = true
p.next()
if p.tok != token.RBRACK {
- high = p.parseExpr()
+ high = p.parseRhs()
}
}
p.exprLev--
var list []ast.Expr
var ellipsis token.Pos
for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
- list = append(list, p.parseExpr())
+ list = append(list, p.parseRhs())
if p.tok == token.ELLIPSIS {
ellipsis = p.pos
p.next()
return p.parseLiteralValue(nil)
}
- x := p.parseExpr()
- if keyOk && p.tok == token.COLON {
- colon := p.pos
- p.next()
- x = &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ x := p.parseExpr(keyOk) // don't resolve if map key
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ }
+ p.resolve(x) // not a map key
}
+
return x
}
}
-func (p *parser) parsePrimaryExpr() ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "PrimaryExpr"))
}
- x := p.parseOperand()
+ x := p.parseOperand(lhs)
L:
for {
switch p.tok {
case token.PERIOD:
- x = p.parseSelectorOrTypeAssertion(p.checkExpr(x))
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.next() // make progress
+ p.errorExpected(pos, "selector or type assertion")
+ x = &ast.BadExpr{pos, p.pos}
+ }
case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseIndexOrSlice(p.checkExpr(x))
case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseCallOrConversion(p.checkExprOrType(x))
case token.LBRACE:
if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
x = p.parseLiteralValue(x)
} else {
break L
default:
break L
}
+ lhs = false // no need to try to resolve again
}
return x
}
-func (p *parser) parseUnaryExpr() ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "UnaryExpr"))
}
case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
pos, op := p.pos, p.tok
p.next()
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
case token.ARROW:
return &ast.ChanType{pos, ast.RECV, value}
}
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
case token.MUL:
// pointer type or unary "*" expression
pos := p.pos
p.next()
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(false)
return &ast.StarExpr{pos, p.checkExprOrType(x)}
}
- return p.parsePrimaryExpr()
+ return p.parsePrimaryExpr(lhs)
}
-func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
if p.trace {
defer un(trace(p, "BinaryExpr"))
}
- x := p.parseUnaryExpr()
+ x := p.parseUnaryExpr(lhs)
for prec := p.tok.Precedence(); prec >= prec1; prec-- {
for p.tok.Precedence() == prec {
pos, op := p.pos, p.tok
p.next()
- y := p.parseBinaryExpr(prec + 1)
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
}
}
}
+// If lhs is set and the result is an identifier, it is not resolved.
// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
// should reject when a type/raw type is obviously not allowed
-func (p *parser) parseExpr() ast.Expr {
+func (p *parser) parseExpr(lhs bool) ast.Expr {
if p.trace {
defer un(trace(p, "Expression"))
}
- return p.parseBinaryExpr(token.LowestPrec + 1)
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+
+func (p *parser) parseRhs() ast.Expr {
+ return p.parseExpr(false)
}
defer un(trace(p, "SimpleStmt"))
}
- x := p.parseExprList()
+ x := p.parseLhsList()
switch p.tok {
case
// assignment statement
pos, tok := p.pos, p.tok
p.next()
- y := p.parseExprList()
- if tok == token.DEFINE {
- p.shortVarDecl(p.makeIdentList(x))
- }
+ y := p.parseRhsList()
return &ast.AssignStmt{x, pos, tok, y}
}
// send statement
arrow := p.pos
p.next() // consume "<-"
- y := p.parseExpr()
+ y := p.parseRhs()
return &ast.SendStmt{x[0], arrow, y}
case token.INC, token.DEC:
func (p *parser) parseCallExpr() *ast.CallExpr {
- x := p.parseExpr()
+ x := p.parseRhs()
if call, isCall := x.(*ast.CallExpr); isCall {
return call
}
p.expect(token.RETURN)
var x []ast.Expr
if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
- x = p.parseExprList()
+ x = p.parseRhsList()
}
p.expectSemi()
p.exprLev = -1
if p.tok == token.SEMICOLON {
p.next()
- x = p.parseExpr()
+ x = p.parseRhs()
} else {
s = p.parseSimpleStmt(false)
if p.tok == token.SEMICOLON {
p.next()
- x = p.parseExpr()
+ x = p.parseRhs()
} else {
x = p.makeExpr(s)
s = nil
if p.tok == token.CASE {
p.next()
if exprSwitch {
- list = p.parseExprList()
+ list = p.parseRhsList()
} else {
list = p.parseTypeList()
}
var comm ast.Stmt
if p.tok == token.CASE {
p.next()
- lhs := p.parseExprList()
+ lhs := p.parseLhsList()
if p.tok == token.ARROW {
// SendStmt
if len(lhs) > 1 {
}
arrow := p.pos
p.next()
- rhs := p.parseExpr()
+ rhs := p.parseRhs()
comm = &ast.SendStmt{lhs[0], arrow, rhs}
} else {
// RecvStmt
lhs = lhs[0:2]
}
p.next()
- rhs = p.parseExpr()
- if tok == token.DEFINE {
- p.shortVarDecl(p.makeIdentList(lhs))
- }
+ rhs = p.parseRhs()
} else {
// rhs must be single receive operation
if len(lhs) > 1 {
var path *ast.BasicLit
if p.tok == token.STRING {
- path = &ast.BasicLit{p.pos, p.tok, p.lit()}
+ path = &ast.BasicLit{p.pos, p.tok, p.lit}
p.next()
} else {
p.expect(token.STRING) // use expect() error handling
}
p.expectSemi() // call before accessing p.linecomment
- return &ast.ImportSpec{doc, ident, path, p.lineComment}
+ // collect imports
+ spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ p.imports = append(p.imports, spec)
+
+ return spec
}
var values []ast.Expr
if typ != nil || p.tok == token.ASSIGN || iota == 0 {
p.expect(token.ASSIGN)
- values = p.parseExprList()
+ values = p.parseRhsList()
}
p.expectSemi() // call before accessing p.linecomment
var values []ast.Expr
if typ == nil || p.tok == token.ASSIGN {
p.expect(token.ASSIGN)
- values = p.parseExprList()
+ values = p.parseRhsList()
}
p.expectSemi() // call before accessing p.linecomment
}
}
- if p.topScope != p.pkgScope {
- panic("internal error: imbalanced scopes")
- }
+ assert(p.topScope == p.pkgScope, "imbalanced scopes")
// resolve global identifiers within the same file
i := 0
for _, ident := range p.unresolved {
// i <= index for current ident
- ident.Obj = p.pkgScope.Lookup(ident.Name)
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
if ident.Obj == nil {
p.unresolved[i] = ident
i++
}
}
- return &ast.File{doc, pos, ident, decls, p.pkgScope, p.unresolved[0:i], p.comments}
+ // TODO(gri): store p.imports in AST
+ return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
}
// the first linebreak is always a formfeed since this section must not
// depend on any previous formatting
prevBreak := -1 // index of last expression that was followed by a linebreak
- linebreakMin := 1
- if mode&periodSep != 0 {
- // Make fragments like
- //
- // a.Bar(1,
- // 2).Foo
- //
- // format correctly (a linebreak shouldn't be added before Foo) when
- // doing period-separated expr lists by setting minimum linebreak to 0
- // lines for them.
- linebreakMin = 0
- }
- if prev.IsValid() && prev.Line < line && p.linebreak(line, linebreakMin, ws, true) {
+ if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) {
ws = ignore
*multiLine = true
prevBreak = 0
// lines are broken using newlines so comments remain aligned
// unless forceFF is set or there are multiple expressions on
// the same line in which case formfeed is used
- if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) {
+ if p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) {
ws = ignore
*multiLine = true
prevBreak = i
func (p *printer) setLineComment(text string) {
- p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, []byte(text)}}})
+ p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, text}}})
}
}
case *ast.StarExpr:
- if e.Op.String() == "/" {
+ if e.Op == token.QUO { // `*/`
maxProblem = 5
}
)
-const (
- esc2 = '\xfe' // an escape byte that cannot occur in regular UTF-8
- _ = 1 / (esc2 - tabwriter.Escape) // cause compiler error if esc2 == tabwriter.Escape
-)
-
-
var (
esc = []byte{tabwriter.Escape}
htab = []byte{'\t'}
mode pmode // current printer mode
lastTok token.Token // the last token printed (token.ILLEGAL if it's whitespace)
- // Buffered whitespace
- buffer []whiteSpace
+ // Reused buffers
+ wsbuf []whiteSpace // delayed white space
+ litbuf bytes.Buffer // for creation of escaped literals and comments
// The (possibly estimated) position in the generated output;
// in AST space (i.e., pos is set whenever a token position is
p.Config = *cfg
p.fset = fset
p.errors = make(chan os.Error)
- p.buffer = make([]whiteSpace, 0, 16) // whitespace sequences are short
+ p.wsbuf = make([]whiteSpace, 0, 16) // whitespace sequences are short
p.nodeSizes = nodeSizes
}
}
+// escape escapes string s by bracketing it with tabwriter.Escape.
+// Escaped strings pass through tabwriter unchanged. (Note that
+// valid Go programs cannot contain tabwriter.Escape bytes since
+// they do not appear in legal UTF-8 sequences).
+//
+func (p *printer) escape(s string) string {
+ p.litbuf.Reset()
+ p.litbuf.WriteByte(tabwriter.Escape)
+ p.litbuf.WriteString(s)
+ p.litbuf.WriteByte(tabwriter.Escape)
+ return p.litbuf.String()
+}
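
A standalone sketch, not part of the patch, of the bracketing trick escape relies on; escapeSegment mirrors it outside the printer. Per the tabwriter documentation, a tab inside an escaped segment is treated as literal text rather than a cell separator, which is why literals and comments are escaped before being handed to the tabwriter.

package escdemo

import (
	"bytes"
	"tabwriter"
)

// escapeSegment brackets s with tabwriter.Escape bytes so that a
// downstream tabwriter treats the segment as opaque text.
func escapeSegment(s string) string {
	var b bytes.Buffer
	b.WriteByte(tabwriter.Escape)
	b.WriteString(s)
	b.WriteByte(tabwriter.Escape)
	return b.String()
}
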
+
+
// nlines returns the adjusted number of linebreaks given the desired number
// of breaks n such that min <= result <= max where max depends on the current
// nesting level.
// source text. writeItem updates p.last to the position immediately following
// the data.
//
-func (p *printer) writeItem(pos token.Position, data []byte) {
+func (p *printer) writeItem(pos token.Position, data string) {
if pos.IsValid() {
// continue with previous position if we don't have a valid pos
if p.last.IsValid() && p.last.Filename != pos.Filename {
// e.g., the result of ast.MergePackageFiles)
p.indent = 0
p.mode = 0
- p.buffer = p.buffer[0:0]
+ p.wsbuf = p.wsbuf[0:0]
}
p.pos = pos
}
_, filename := filepath.Split(pos.Filename)
p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column)))
}
- p.write(data)
+ p.write([]byte(data))
p.last = p.pos
}
if prev == nil {
// first comment of a comment group
j := 0
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank:
// ignore any blanks before a comment
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
continue
case vtab:
// respect existing tabs - important
if prev == nil {
// first comment of a comment group
j := 0
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore any horizontal whitespace before line breaks
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
continue
case indent:
// apply pending indentation
}
case newline, formfeed:
// TODO(gri): may want to keep formfeed info in some cases
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
}
j = i
break
}
-func (p *printer) writeCommentLine(comment *ast.Comment, pos token.Position, line []byte) {
- // line must pass through unchanged, bracket it with tabwriter.Escape
- line = bytes.Join([][]byte{esc, line, esc}, nil)
- p.writeItem(pos, line)
-}
-
+// TODO(gri): It should be possible to convert the code below from using
+// []byte to string and in the process eliminate some conversions.
// Split comment text into lines
func split(text []byte) [][]byte {
// shortcut common case of //-style comments
if text[1] == '/' {
- p.writeCommentLine(comment, p.fset.Position(comment.Pos()), text)
+ p.writeItem(p.fset.Position(comment.Pos()), p.escape(text))
return
}
// for /*-style comments, print line by line and let the
// write function take care of the proper indentation
- lines := split(text)
+ lines := split([]byte(text))
stripCommonPrefix(lines)
// write comment lines, separated by formfeed,
pos = p.pos
}
if len(line) > 0 {
- p.writeCommentLine(comment, pos, line)
+ p.writeItem(pos, p.escape(string(line)))
}
}
}
// formfeed was dropped from the whitespace buffer.
//
func (p *printer) writeCommentSuffix(needsLinebreak bool) (droppedFF bool) {
- for i, ch := range p.buffer {
+ for i, ch := range p.wsbuf {
switch ch {
case blank, vtab:
// ignore trailing whitespace
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
case indent, unindent:
// don't lose indentation information
case newline, formfeed:
if ch == formfeed {
droppedFF = true
}
- p.buffer[i] = ignore
+ p.wsbuf[i] = ignore
}
}
}
- p.writeWhitespace(len(p.buffer))
+ p.writeWhitespace(len(p.wsbuf))
// make sure we have a line break
if needsLinebreak {
// write entries
var data [1]byte
for i := 0; i < n; i++ {
- switch ch := p.buffer[i]; ch {
+ switch ch := p.wsbuf[i]; ch {
case ignore:
// ignore!
case indent:
// the line break and the label, the unindent is not
// part of the comment whitespace prefix and the comment
// will be positioned correctly indented.
- if i+1 < n && p.buffer[i+1] == unindent {
+ if i+1 < n && p.wsbuf[i+1] == unindent {
// Use a formfeed to terminate the current section.
// Otherwise, a long label name on the next line leading
// to a wide column may increase the indentation column
// of lines before the label; effectively leading to wrong
// indentation.
- p.buffer[i], p.buffer[i+1] = unindent, formfeed
+ p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
i-- // do it again
continue
}
// shift remaining entries down
i := 0
- for ; n < len(p.buffer); n++ {
- p.buffer[i] = p.buffer[n]
+ for ; n < len(p.wsbuf); n++ {
+ p.wsbuf[i] = p.wsbuf[n]
i++
}
- p.buffer = p.buffer[0:i]
+ p.wsbuf = p.wsbuf[0:i]
}
func (p *printer) print(args ...interface{}) {
for _, f := range args {
next := p.pos // estimated position of next item
- var data []byte
+ var data string
var tok token.Token
switch x := f.(type) {
// LabeledStmt)
break
}
- i := len(p.buffer)
- if i == cap(p.buffer) {
+ i := len(p.wsbuf)
+ if i == cap(p.wsbuf) {
// Whitespace sequences are very short so this should
// never happen. Handle gracefully (but possibly with
// bad comment placement) if it does happen.
p.writeWhitespace(i)
i = 0
}
- p.buffer = p.buffer[0 : i+1]
- p.buffer[i] = x
+ p.wsbuf = p.wsbuf[0 : i+1]
+ p.wsbuf[i] = x
case *ast.Ident:
- data = []byte(x.Name)
+ data = x.Name
tok = token.IDENT
case *ast.BasicLit:
- // escape all literals so they pass through unchanged
- // (note that valid Go programs cannot contain
- // tabwriter.Escape bytes since they do not appear in
- // legal UTF-8 sequences)
- data = make([]byte, 0, len(x.Value)+2)
- data = append(data, tabwriter.Escape)
- data = append(data, x.Value...)
- data = append(data, tabwriter.Escape)
+ data = p.escape(x.Value)
tok = x.Kind
- // If we have a raw string that spans multiple lines and
- // the opening quote (`) is on a line preceded only by
- // indentation, we don't want to write that indentation
- // because the following lines of the raw string are not
- // indented. It's easiest to correct the output at the end
- // via the trimmer (because of the complex handling of
- // white space).
- // Mark multi-line raw strings by replacing the opening
- // quote with esc2 and have the trimmer take care of fixing
- // it up. (Do this _after_ making a copy of data!)
- if data[1] == '`' && bytes.IndexByte(data, '\n') > 0 {
- data[1] = esc2
- }
case token.Token:
s := x.String()
if mayCombine(p.lastTok, s[0]) {
// (except for token.INT followed by a '.' this
// should never happen because it is taken care
// of via binary expression formatting)
- if len(p.buffer) != 0 {
+ if len(p.wsbuf) != 0 {
p.internalError("whitespace buffer not empty")
}
- p.buffer = p.buffer[0:1]
- p.buffer[0] = ' '
+ p.wsbuf = p.wsbuf[0:1]
+ p.wsbuf[0] = ' '
}
- data = []byte(s)
+ data = s
tok = x
case token.Pos:
if x.IsValid() {
p.lastTok = tok
p.pos = next
- if data != nil {
+ if data != "" {
droppedFF := p.flush(next, tok)
// intersperse extra newlines if present in the source
droppedFF = p.intersperseComments(next, tok)
} else {
// otherwise, write any leftover whitespace
- p.writeWhitespace(len(p.buffer))
+ p.writeWhitespace(len(p.wsbuf))
}
return
}
// through unchanged.
//
type trimmer struct {
- output io.Writer
- state int
- space bytes.Buffer
- hasText bool
+ output io.Writer
+ state int
+ space bytes.Buffer
}
// It can be in one of the following states:
const (
inSpace = iota // inside space
- atEscape // inside space and the last char was an opening tabwriter.Escape
inEscape // inside text bracketed by tabwriter.Escapes
inText // inside text
)
-var backquote = []byte{'`'}
-
-
// Design note: It is tempting to eliminate extra blanks occurring in
// whitespace in this function as it could simplify some
// of the blanks logic in the node printing functions.
func (p *trimmer) Write(data []byte) (n int, err os.Error) {
// invariants:
- // p.state == inSpace, atEscape:
+ // p.state == inSpace:
// p.space is unwritten
- // p.hasText indicates if there is any text on this line
// p.state == inEscape, inText:
// data[m:n] is unwritten
m := 0
case '\n', '\f':
p.space.Reset() // discard trailing space
_, err = p.output.Write(newlines[0:1]) // write newline
- p.hasText = false
case tabwriter.Escape:
- p.state = atEscape
+ _, err = p.output.Write(p.space.Bytes())
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
default:
_, err = p.output.Write(p.space.Bytes())
p.state = inText
m = n
}
- case atEscape:
- // discard indentation if we have a multi-line raw string
- // (see printer.print for details)
- if b != esc2 || p.hasText {
- _, err = p.output.Write(p.space.Bytes())
- }
- p.state = inEscape
- m = n
- if b == esc2 {
- _, err = p.output.Write(backquote) // convert back
- m++
- }
case inEscape:
if b == tabwriter.Escape {
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
- p.hasText = true
}
case inText:
switch b {
p.state = inSpace
p.space.Reset()
p.space.WriteByte(b) // WriteByte returns no errors
- p.hasText = true
case '\n', '\f':
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
_, err = p.output.Write(newlines[0:1]) // write newline
- p.hasText = false
case tabwriter.Escape:
_, err = p.output.Write(data[m:n])
- p.state = atEscape
- p.space.Reset()
- p.hasText = true
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
}
+ default:
+ panic("unreachable")
}
if err != nil {
return
_, err = p.output.Write(data[m:n])
p.state = inSpace
p.space.Reset()
- p.hasText = true
}
return
// start a timer to produce a time-out signal
tc := make(chan int)
go func() {
- time.Sleep(20e9) // plenty of a safety margin, even for very slow machines
+ time.Sleep(10e9) // plenty of safety margin, even for very slow machines
tc <- 0
}()
func TestFiles(t *testing.T) {
- for _, e := range data {
+ for i, e := range data {
source := filepath.Join(dataDir, e.source)
golden := filepath.Join(dataDir, e.golden)
check(t, source, golden, e.mode)
// TODO(gri) check that golden is idempotent
- //check(t, golden, golden, e.mode);
+ //check(t, golden, golden, e.mode)
+ if testing.Short() && i >= 3 {
+ break
+ }
}
}
var _ = ``
var _ = `foo`
var _ =
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = // comment
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = /* comment */ ``
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var board = []int(
-`...........
+ `...........
...........
....●●●....
....●●●....
var state = S{
"foo",
- // the next line should not be indented
-`...........
+ // the next line should remain indented
+ `...........
...........
....●●●....
....●●●....
b.(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC,0666); err != nil {
+ log.Fatal(err)
+ }
+}
var _ =
`foo`
var _ =
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
+ // the next line should remain indented
`foo
bar`
var state = S{
"foo",
- // the next line should not be indented
+ // the next line should remain indented
`...........
...........
....●●●....
(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
_ = `foo
bar`
_ = `three spaces before the end of the line starting here:
-they must not be removed`\f}
+they must not be removed`
+}
func _() {
var _ = ``
var _ = `foo`
var _ =
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = // comment
var _ = // comment
`foo`
var _ = // comment
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var _ = /* comment */ ``
var _ = /* comment */
`foo`
var _ = /* comment */
- // the next line should not be indented
-`foo
+ // the next line should remain indented
+ `foo
bar`
var board = []int(
-`...........
+ `...........
...........
....●●●....
....●●●....
var state = S{
"foo",
- // the next line should not be indented
-`...........
+ // the next line should remain indented
+ `...........
...........
....●●●....
....●●●....
b.(T).
c
}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC,0666); err != nil {
+ log.Fatal(err)
+ }
+}
}
-var newline = []byte{'\n'}
-
-// Scan scans the next token and returns the token position pos,
-// the token tok, and the literal text lit corresponding to the
+// Scan scans the next token and returns the token position,
+// the token, and the literal string corresponding to the
// token. The source end is indicated by token.EOF.
//
// If the returned token is token.SEMICOLON, the corresponding
-// literal value is ";" if the semicolon was present in the source,
+// literal string is ";" if the semicolon was present in the source,
// and "\n" if the semicolon was inserted because of a newline or
// at EOF.
//
// set with Init. Token positions are relative to that file
// and thus relative to the file set.
//
-func (S *Scanner) Scan() (token.Pos, token.Token, []byte) {
+func (S *Scanner) Scan() (token.Pos, token.Token, string) {
scanAgain:
S.skipWhitespace()
case -1:
if S.insertSemi {
S.insertSemi = false // EOF consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
}
tok = token.EOF
case '\n':
// set in the first place and exited early
// from S.skipWhitespace()
S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
case '"':
insertSemi = true
tok = token.STRING
S.offset = offs
S.rdOffset = offs + 1
S.insertSemi = false // newline consumed
- return S.file.Pos(offs), token.SEMICOLON, newline
+ return S.file.Pos(offs), token.SEMICOLON, "\n"
}
S.scanComment()
if S.mode&ScanComments == 0 {
if S.mode&InsertSemis != 0 {
S.insertSemi = insertSemi
}
- return S.file.Pos(offs), tok, S.src[offs:S.offset]
+
+ // TODO(gri): The scanner API should change such that the literal string
+ // is only valid if an actual literal was scanned. This will
+ // permit a more efficient implementation.
+ return S.file.Pos(offs), tok, string(S.src[offs:S.offset])
}
index := 0
epos := token.Position{"", 0, 1, 1} // expected position
for {
- pos, tok, litb := s.Scan()
+ pos, tok, lit := s.Scan()
e := elt{token.EOF, "", special}
if index < len(tokens) {
e = tokens[index]
}
- lit := string(litb)
if tok == token.EOF {
lit = "<EOF>"
epos.Line = src_linecount
}
epos.Offset += len(lit) + len(whitespace)
epos.Line += newlineCount(lit) + whitespace_linecount
- if tok == token.COMMENT && litb[1] == '/' {
+ if tok == token.COMMENT && lit[1] == '/' {
// correct for unaccounted '\n' in //-style comment
epos.Offset++
epos.Line++
semiPos.Column++
pos, tok, lit = S.Scan()
if tok == token.SEMICOLON {
- if string(lit) != semiLit {
+ if lit != semiLit {
t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
}
checkPos(t, line, pos, semiPos)
for _, s := range segments {
p, _, lit := S.Scan()
pos := file.Position(p)
- checkPos(t, string(lit), p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
+ checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
}
if S.ErrorCount != 0 {
for offs, ch := range src {
pos, tok, lit := s.Scan()
if poffs := file.Offset(pos); poffs != offs {
- t.Errorf("bad position for %s: got %d, expected %d", string(lit), poffs, offs)
+ t.Errorf("bad position for %s: got %d, expected %d", lit, poffs, offs)
}
- if tok == token.ILLEGAL && string(lit) != string(ch) {
- t.Errorf("bad token: got %s, expected %s", string(lit), string(ch))
+ if tok == token.ILLEGAL && lit != string(ch) {
+ t.Errorf("bad token: got %s, expected %s", lit, string(ch))
}
}
)
-// At the moment we have no array literal syntax that lets us describe
-// the index for each element - use a map for now to make sure they are
-// in sync.
-var tokens = map[Token]string{
+var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
// constant name (e.g. for the token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
- if str, exists := tokens[tok]; exists {
- return str
+ s := ""
+ if 0 <= tok && tok < Token(len(tokens)) {
+ s = tokens[tok]
}
- return "token(" + strconv.Itoa(int(tok)) + ")"
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(tok)) + ")"
+ }
+ return s
}
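A quick illustration (not part of the patch) of the table lookup with fallback implemented above; go/token is the only import assumed.

package main

import (
	"fmt"
	"go/token"
)

func main() {
	fmt.Println(token.IDENT.String()) // "IDENT": found in the tokens table
	fmt.Println(token.Token(9999))    // "token(9999)": out-of-range fallback
}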
//obj.N = n
name.Obj = obj
if name.Name != "_" {
- if alt := scope.Insert(obj); alt != obj {
+ if alt := scope.Insert(obj); alt != nil {
tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, tc.fset.Position(alt.Pos()).String())
}
}
//
func CheckFile(fset *token.FileSet, file *ast.File, importer Importer) os.Error {
// create a single-file dummy package
- pkg := &ast.Package{file.Name.Name, nil, map[string]*ast.File{fset.Position(file.Name.NamePos).Filename: file}}
+ pkg := &ast.Package{file.Name.Name, nil, nil, map[string]*ast.File{fset.Position(file.Name.NamePos).Filename: file}}
return CheckPackage(fset, pkg, importer)
}
if ftype != nil {
for _, par := range ftype.Params.Objects {
if par.Name != "_" {
- obj := tc.topScope.Insert(par)
- assert(obj == par) // ftype has no double declarations
+ alt := tc.topScope.Insert(par)
+ assert(alt == nil) // ftype has no double declarations
}
}
}
case token.EOF:
break loop
case token.COMMENT:
- s := errRx.FindSubmatch(lit)
+ s := errRx.FindStringSubmatch(lit)
if len(s) == 2 {
list = append(list, &scanner.Error{fset.Position(prev), string(s[1])})
}
func def(obj *ast.Object) {
alt := Universe.Insert(obj)
- if alt != obj {
+ if alt != nil {
panic("object declared twice")
}
}
func newStructType(name string) *structType {
s := &structType{CommonType{Name: name}, nil}
// For historical reasons we set the id here rather than init.
- // Se the comment in newTypeObject for details.
+ // See the comment in newTypeObject for details.
setTypeId(s)
return s
}
// getType returns the Gob type describing the given reflect.Type.
// Should be called only when handling GobEncoders/Decoders,
// which may be pointers. All other types are handled through the
-// base type, never a pointer.
+// base type, never a pointer.
// typeLock must be held.
func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) {
typ, present := types[rt]
func checkId(want, got typeId) {
if want != got {
- fmt.Fprintf(os.Stderr, "checkId: %d should be %d\n", int(want), int(got))
+ fmt.Fprintf(os.Stderr, "checkId: %d should be %d\n", int(got), int(want))
panic("bootstrap type wrong id: " + got.name() + " " + got.string() + " not " + want.string())
}
}
// handle Content-Range header.
// TODO(adg): handle multiple ranges
ranges, err := parseRange(r.Header.Get("Range"), size)
- if err != nil || len(ranges) > 1 {
+ if err == nil && len(ranges) > 1 {
+ err = os.ErrorString("multiple ranges not supported")
+ }
+ if err != nil {
Error(w, err.String(), StatusRequestedRangeNotSatisfiable)
return
}
//
// pprof http://localhost:6060/debug/pprof/heap
//
+// Or to look at a 30-second CPU profile:
+//
+// pprof http://localhost:6060/debug/pprof/profile
+//
package pprof
import (
"runtime/pprof"
"strconv"
"strings"
+ "time"
)
func init() {
http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline))
+ http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile))
http.Handle("/debug/pprof/heap", http.HandlerFunc(Heap))
http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol))
}
// command line, with arguments separated by NUL bytes.
// The package initialization registers it as /debug/pprof/cmdline.
func Cmdline(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("content-type", "text/plain; charset=utf-8")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, strings.Join(os.Args, "\x00"))
}
// Heap responds with the pprof-formatted heap profile.
// The package initialization registers it as /debug/pprof/heap.
func Heap(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("content-type", "text/plain; charset=utf-8")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
pprof.WriteHeapProfile(w)
}
+// Profile responds with the pprof-formatted CPU profile.
+// The package initialization registers it as /debug/pprof/profile.
+func Profile(w http.ResponseWriter, r *http.Request) {
+ sec, _ := strconv.Atoi64(r.FormValue("seconds"))
+ if sec == 0 {
+ sec = 30
+ }
+
+ // Set Content Type assuming StartCPUProfile will work,
+ // because if it does it starts writing.
+ w.Header().Set("Content-Type", "application/octet-stream")
+ if err := pprof.StartCPUProfile(w); err != nil {
+ // StartCPUProfile failed, so no writes yet.
+ // Can change header back to text content
+ // and send error code.
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err)
+ return
+ }
+ time.Sleep(sec * 1e9)
+ pprof.StopCPUProfile()
+}
+
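A usage sketch (not part of the change): a server exposing the handlers registered above, after which a profile can be fetched with, for example, pprof http://localhost:6060/debug/pprof/profile?seconds=10. The pre-Go1 import paths "http" and "http/pprof" and the localhost:6060 address are assumptions.

package main

import (
	"http"         // "net/http" in later trees
	_ "http/pprof" // init() registers the /debug/pprof/* handlers
	"log"
)

func main() {
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}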
// Symbol looks up the program counters listed in the request,
// responding with a table mapping program counters to function names.
// The package initialization registers it as /debug/pprof/symbol.
func Symbol(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("content-type", "text/plain; charset=utf-8")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
// We don't know how many symbols we have, but we
// do have symbol information. Pprof only cares whether
ts := httptest.NewServer(nil)
defer ts.Close()
- conn, err := net.Dial("tcp", "", ts.Listener.Addr().String())
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatal(err)
}
// Slow client that should timeout.
t1 := time.Nanoseconds()
- conn, err := net.Dial("tcp", "", fmt.Sprintf("localhost:%d", addr.Port))
+ conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", addr.Port))
if err != nil {
t.Fatalf("Dial: %v", err)
}
}
// Verify that the connection is closed when the declared Content-Length
// is larger than what the handler wrote.
- conn, err := net.Dial("tcp", "", ts.Listener.Addr().String())
+ conn, err := net.Dial("tcp", ts.Listener.Addr().String())
if err != nil {
t.Fatalf("error dialing: %v", err)
}
}))
defer s.Close()
- conn, err := net.Dial("tcp", "", s.Listener.Addr().String())
+ conn, err := net.Dial("tcp", s.Listener.Addr().String())
if err != nil {
t.Fatal("dial error:", err)
}
return pc, nil
}
- conn, err := net.Dial("tcp", "", cm.addr())
+ conn, err := net.Dial("tcp", cm.addr())
if err != nil {
return nil, err
}
"basn6a16",
}
+var filenamesShort = []string{
+ "basn0g01",
+ "basn0g04-31",
+ "basn6a16",
+}
+
func readPng(filename string) (image.Image, os.Error) {
f, err := os.Open(filename, os.O_RDONLY, 0444)
if err != nil {
}
func TestReader(t *testing.T) {
- for _, fn := range filenames {
+ names := filenames
+ if testing.Short() {
+ names = filenamesShort
+ }
+ for _, fn := range names {
// Read the .png file.
img, err := readPng("testdata/pngsuite/" + fn + ".png")
if err != nil {
func TestWriter(t *testing.T) {
// The filenames variable is declared in reader_test.go.
- for _, fn := range filenames {
+ names := filenames
+ if testing.Short() {
+ names = filenamesShort
+ }
+ for _, fn := range names {
qfn := "testdata/pngsuite/" + fn + ".png"
// Read the image.
m0, err := readPng(qfn)
}
func TestUnmarshalMarshal(t *testing.T) {
+ initBig()
var v interface{}
if err := Unmarshal(jsonBig, &v); err != nil {
t.Fatalf("Unmarshal: %v", err)
// Tests of a large random structure.
func TestCompactBig(t *testing.T) {
+ initBig()
var buf bytes.Buffer
if err := Compact(&buf, jsonBig); err != nil {
t.Fatalf("Compact: %v", err)
}
func TestIndentBig(t *testing.T) {
+ initBig()
var buf bytes.Buffer
if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
t.Fatalf("Indent1: %v", err)
}
func TestNextValueBig(t *testing.T) {
+ initBig()
var scan scanner
item, rest, err := nextValue(jsonBig, &scan)
if err != nil {
}
func BenchmarkSkipValue(b *testing.B) {
+ initBig()
var scan scanner
for i := 0; i < b.N; i++ {
nextValue(jsonBig, &scan)
var jsonBig []byte
-func init() {
- b, err := Marshal(genValue(10000))
- if err != nil {
- panic(err)
+const (
+ big = 10000
+ small = 100
+)
+
+func initBig() {
+ n := big
+ if testing.Short() {
+ n = small
+ }
+ if len(jsonBig) != n {
+ b, err := Marshal(genValue(n))
+ if err != nil {
+ panic(err)
+ }
+ jsonBig = b
}
- jsonBig = b
}
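The guard pattern used throughout these test changes, shown once in isolation. This is a hypothetical test, not part of the patch; the reduced size is chosen when the test binary runs with -test.short.

package example

import "testing"

// TestExpensive picks a smaller problem size in -test.short mode.
func TestExpensive(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	sum := 0
	for i := 0; i < n; i++ {
		sum += i
	}
	t.Logf("worked through %d items (sum %d)", n, sum)
}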
func genValue(n int) interface{} {
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Stub cgo routines for systems that do not use cgo to do network lookups.
+
+package net
+
+import "os"
+
+func cgoLookupHost(name string) (addrs []string, err os.Error, completed bool) {
+ return nil, nil, false
+}
+
+func cgoLookupPort(network, service string) (port int, err os.Error, completed bool) {
+ return 0, nil, false
+}
+
+func cgoLookupIP(name string) (addrs []IP, err os.Error, completed bool) {
+ return nil, nil, false
+}
import "os"
-// Dial connects to the remote address raddr on the network net.
-// If the string laddr is not empty, it is used as the local address
-// for the connection.
+// Dial connects to the address addr on the network net.
//
// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only),
// "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4"
//
// For IP networks, addresses have the form host:port. If host is
// a literal IPv6 address, it must be enclosed in square brackets.
+// The functions JoinHostPort and SplitHostPort manipulate
+// addresses in this form.
//
// Examples:
-// Dial("tcp", "", "12.34.56.78:80")
-// Dial("tcp", "", "google.com:80")
-// Dial("tcp", "", "[de:ad:be:ef::ca:fe]:80")
-// Dial("tcp", "127.0.0.1:123", "127.0.0.1:88")
+// Dial("tcp", "12.34.56.78:80")
+// Dial("tcp", "google.com:80")
+// Dial("tcp", "[de:ad:be:ef::ca:fe]:80")
//
-func Dial(net, laddr, raddr string) (c Conn, err os.Error) {
+func Dial(net, addr string) (c Conn, err os.Error) {
+ raddr := addr
+ if raddr == "" {
+ return nil, &OpError{"dial", net, nil, errMissingAddress}
+ }
switch net {
case "tcp", "tcp4", "tcp6":
- var la, ra *TCPAddr
- if laddr != "" {
- if la, err = ResolveTCPAddr(laddr); err != nil {
- goto Error
- }
- }
- if raddr != "" {
- if ra, err = ResolveTCPAddr(raddr); err != nil {
- goto Error
- }
+ var ra *TCPAddr
+ if ra, err = ResolveTCPAddr(raddr); err != nil {
+ goto Error
}
- c, err := DialTCP(net, la, ra)
+ c, err := DialTCP(net, nil, ra)
if err != nil {
return nil, err
}
return c, nil
case "udp", "udp4", "udp6":
- var la, ra *UDPAddr
- if laddr != "" {
- if la, err = ResolveUDPAddr(laddr); err != nil {
- goto Error
- }
- }
- if raddr != "" {
- if ra, err = ResolveUDPAddr(raddr); err != nil {
- goto Error
- }
+ var ra *UDPAddr
+ if ra, err = ResolveUDPAddr(raddr); err != nil {
+ goto Error
}
- c, err := DialUDP(net, la, ra)
+ c, err := DialUDP(net, nil, ra)
if err != nil {
return nil, err
}
return c, nil
case "unix", "unixgram", "unixpacket":
- var la, ra *UnixAddr
- if raddr != "" {
- if ra, err = ResolveUnixAddr(net, raddr); err != nil {
- goto Error
- }
- }
- if laddr != "" {
- if la, err = ResolveUnixAddr(net, laddr); err != nil {
- goto Error
- }
+ var ra *UnixAddr
+ if ra, err = ResolveUnixAddr(net, raddr); err != nil {
+ goto Error
}
- c, err = DialUnix(net, la, ra)
+ c, err = DialUnix(net, nil, ra)
if err != nil {
return nil, err
}
return c, nil
case "ip", "ip4", "ip6":
- var la, ra *IPAddr
- if laddr != "" {
- if la, err = ResolveIPAddr(laddr); err != nil {
- goto Error
- }
- }
- if raddr != "" {
- if ra, err = ResolveIPAddr(raddr); err != nil {
- goto Error
- }
+ var ra *IPAddr
+ if ra, err = ResolveIPAddr(raddr); err != nil {
+ goto Error
}
- c, err := DialIP(net, la, ra)
+ c, err := DialIP(net, nil, ra)
if err != nil {
return nil, err
}
}
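For reference, a minimal client using the two-argument Dial shown above (not part of the change; the remote address is a placeholder taken from the doc comment's examples).

package main

import (
	"log"
	"net"
)

func main() {
	// Network and remote address only; the local address is chosen automatically.
	c, err := net.Dial("tcp", "google.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	// use c ...
}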
func doDial(t *testing.T, network, addr string) {
- fd, err := Dial(network, "", addr)
+ fd, err := Dial(network, addr)
if err != nil {
t.Errorf("Dial(%q, %q) = _, %v", network, addr, err)
return
"[2001:4860:0:2001::68]:80", // ipv6.google.com; removed if ipv6 flag not set
}
+func TestLookupCNAME(t *testing.T) {
+ cname, err := LookupCNAME("www.google.com")
+ if cname != "www.l.google.com." || err != nil {
+ t.Errorf(`LookupCNAME("www.google.com") = %q, %v, want "www.l.google.com.", nil`, cname, err)
+ }
+}
+
func TestDialGoogle(t *testing.T) {
// If no ipv6 tunnel, don't try the last address.
if !*ipv6 {
// Insert an actual IP address for google.com
// into the table.
- _, addrs, err := LookupHost("www.google.com")
+ addrs, err := LookupIP("www.google.com")
if err != nil {
t.Fatalf("lookup www.google.com: %v", err)
}
if len(addrs) == 0 {
t.Fatalf("no addresses for www.google.com")
}
- ip := ParseIP(addrs[0]).To4()
+ ip := addrs[0].To4()
for i, s := range googleaddrs {
if strings.Contains(s, "%") {
// all the cfg.servers[i] are IP addresses, which
// Dial will use without a DNS lookup.
server := cfg.servers[i] + ":53"
- c, cerr := Dial("udp", "", server)
+ c, cerr := Dial("udp", server)
if cerr != nil {
err = cerr
continue
return
}
-func convertRR_A(records []dnsRR) []string {
- addrs := make([]string, len(records))
+func convertRR_A(records []dnsRR) []IP {
+ addrs := make([]IP, len(records))
for i := 0; i < len(records); i++ {
rr := records[i]
a := rr.(*dnsRR_A).A
- addrs[i] = IPv4(byte(a>>24), byte(a>>16), byte(a>>8), byte(a)).String()
+ addrs[i] = IPv4(byte(a>>24), byte(a>>16), byte(a>>8), byte(a))
+ }
+ return addrs
+}
+
+func convertRR_AAAA(records []dnsRR) []IP {
+ addrs := make([]IP, len(records))
+ for i := 0; i < len(records); i++ {
+ rr := records[i]
+ a := make(IP, 16)
+ copy(a, rr.(*dnsRR_AAAA).AAAA[:])
+ addrs[i] = a
}
return addrs
}
return
}
-// LookupHost looks for name using the local hosts file and DNS resolver.
-// It returns the canonical name for the host and an array of that
-// host's addresses.
-func LookupHost(name string) (cname string, addrs []string, err os.Error) {
+// goLookupHost is the native Go implementation of LookupHost.
+func goLookupHost(name string) (addrs []string, err os.Error) {
onceLoadConfig.Do(loadConfig)
if dnserr != nil || cfg == nil {
err = dnserr
// Use entries from /etc/hosts if they match.
addrs = lookupStaticHost(name)
if len(addrs) > 0 {
- cname = name
+ return
+ }
+ ips, err := goLookupIP(name)
+ if err != nil {
+ return
+ }
+ addrs = make([]string, 0, len(ips))
+ for _, ip := range ips {
+ addrs = append(addrs, ip.String())
+ }
+ return
+}
+
+// goLookupIP is the native Go implementation of LookupIP.
+func goLookupIP(name string) (addrs []IP, err os.Error) {
+ onceLoadConfig.Do(loadConfig)
+ if dnserr != nil || cfg == nil {
+ err = dnserr
return
}
var records []dnsRR
+ var cname string
cname, records, err = lookup(name, dnsTypeA)
if err != nil {
return
}
addrs = convertRR_A(records)
+ if cname != "" {
+ name = cname
+ }
+ _, records, err = lookup(name, dnsTypeAAAA)
+ if err != nil && len(addrs) > 0 {
+ // Ignore error because A lookup succeeded.
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ addrs = append(addrs, convertRR_AAAA(records)...)
+ return
+}
+
+// LookupCNAME returns the canonical DNS host for the given name.
+// Callers that do not care about the canonical name can call
+// LookupHost or LookupIP directly; both take care of resolving
+// the canonical name as part of the lookup.
+func LookupCNAME(name string) (cname string, err os.Error) {
+ onceLoadConfig.Do(loadConfig)
+ if dnserr != nil || cfg == nil {
+ err = dnserr
+ return
+ }
+ _, rr, err := lookup(name, dnsTypeCNAME)
+ if err != nil {
+ return
+ }
+ if len(rr) > 0 {
+ cname = rr[0].(*dnsRR_CNAME).Cname
+ }
return
}
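A small usage sketch of the new lookup entry points (not part of the change; the host name is a placeholder, and the native Go resolver path added above is assumed to be in use).

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	cname, err := net.LookupCNAME("www.google.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("canonical name:", cname)

	ips, err := net.LookupIP("www.google.com")
	if err != nil {
		log.Fatal(err)
	}
	for _, ip := range ips {
		fmt.Println(ip) // A and AAAA results combined
	}
}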
+// An SRV represents a single DNS SRV record.
type SRV struct {
Target string
Port uint16
return
}
+// An MX represents a single DNS MX record.
type MX struct {
Host string
Pref uint16
}
+// LookupMX returns the DNS MX records associated with name.
func LookupMX(name string) (entries []*MX, err os.Error) {
var records []dnsRR
_, records, err = lookup(name, dnsTypeMX)
dnsTypeMINFO = 14
dnsTypeMX = 15
dnsTypeTXT = 16
+ dnsTypeAAAA = 28
dnsTypeSRV = 33
// valid dnsQuestion.qtype only
A uint32 "ipv4"
}
-func (rr *dnsRR_A) Header() *dnsRR_Header { return &rr.Hdr }
+func (rr *dnsRR_A) Header() *dnsRR_Header {
+ return &rr.Hdr
+}
+
+type dnsRR_AAAA struct {
+ Hdr dnsRR_Header
+ AAAA [16]byte "ipv6"
+}
+func (rr *dnsRR_AAAA) Header() *dnsRR_Header {
+ return &rr.Hdr
+}
// Packing and unpacking.
//
dnsTypeTXT: func() dnsRR { return new(dnsRR_TXT) },
dnsTypeSRV: func() dnsRR { return new(dnsRR_SRV) },
dnsTypeA: func() dnsRR { return new(dnsRR_A) },
+ dnsTypeAAAA: func() dnsRR { return new(dnsRR_AAAA) },
}
// Pack a domain name s into msg[off:].
// TODO(rsc): Move into generic library?
// Pack a reflect.StructValue into msg. Struct members can only be uint16, uint32, string,
-// and other (often anonymous) structs.
+// [n]byte, and other (often anonymous) structs.
func packStructValue(val *reflect.StructValue, msg []byte, off int) (off1 int, ok bool) {
for i := 0; i < val.NumField(); i++ {
f := val.Type().(*reflect.StructType).Field(i)
msg[off+3] = byte(i)
off += 4
}
+ case *reflect.ArrayValue:
+ if fv.Type().(*reflect.ArrayType).Elem().Kind() != reflect.Uint8 {
+ goto BadType
+ }
+ n := fv.Len()
+ if off+n > len(msg) {
+ return len(msg), false
+ }
+ reflect.Copy(reflect.NewValue(msg[off:off+n]).(*reflect.SliceValue), fv)
+ off += n
case *reflect.StringValue:
// There are multiple string encodings.
// The tag distinguishes ordinary strings from domain names.
fv.Set(uint64(i))
off += 4
}
+ case *reflect.ArrayValue:
+ if fv.Type().(*reflect.ArrayType).Elem().Kind() != reflect.Uint8 {
+ goto BadType
+ }
+ n := fv.Len()
+ if off+n > len(msg) {
+ return len(msg), false
+ }
+ reflect.Copy(fv, reflect.NewValue(msg[off:off+n]).(*reflect.SliceValue))
+ off += n
case *reflect.StringValue:
var s string
switch f.Tag {
// Generic struct printer.
// Doesn't care about the string tag "domain-name",
-// but does look for an "ipv4" tag on uint32 variables,
+// but does look for an "ipv4" tag on uint32 variables
+// and the "ipv6" tag on array variables,
// printing them as IP addresses.
func printStructValue(val *reflect.StructValue) string {
s := "{"
} else if fv, ok := fval.(*reflect.UintValue); ok && f.Tag == "ipv4" {
i := fv.Get()
s += IPv4(byte(i>>24), byte(i>>16), byte(i>>8), byte(i)).String()
+ } else if fv, ok := fval.(*reflect.ArrayValue); ok && f.Tag == "ipv6" {
+ i := fv.Interface().([]byte)
+ s += IP(i).String()
} else {
s += fmt.Sprint(fval.Interface())
}
pollserver = p
}
-func newFD(fd, family, proto int, net string, laddr, raddr Addr) (f *netFD, err os.Error) {
+func newFD(fd, family, proto int, net string) (f *netFD, err os.Error) {
onceStartServer.Do(startServer)
if e := syscall.SetNonblock(fd, true); e != 0 {
- return nil, &OpError{"setnonblock", net, laddr, os.Errno(e)}
+ return nil, os.Errno(e)
}
f = &netFD{
sysfd: fd,
family: family,
proto: proto,
net: net,
- laddr: laddr,
- raddr: raddr,
}
+ f.cr = make(chan bool, 1)
+ f.cw = make(chan bool, 1)
+ return f, nil
+}
+
+func (fd *netFD) setAddr(laddr, raddr Addr) {
+ fd.laddr = laddr
+ fd.raddr = raddr
var ls, rs string
if laddr != nil {
ls = laddr.String()
if raddr != nil {
rs = raddr.String()
}
- f.sysfile = os.NewFile(fd, net+":"+ls+"->"+rs)
- f.cr = make(chan bool, 1)
- f.cw = make(chan bool, 1)
- return f, nil
+ fd.sysfile = os.NewFile(fd.sysfd, fd.net+":"+ls+"->"+rs)
+}
+
+func (fd *netFD) connect(ra syscall.Sockaddr) (err os.Error) {
+ e := syscall.Connect(fd.sysfd, ra)
+ if e == syscall.EINPROGRESS {
+ var errno int
+ pollserver.WaitWrite(fd)
+ e, errno = syscall.GetsockoptInt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_ERROR)
+ if errno != 0 {
+ return os.NewSyscallError("getsockopt", errno)
+ }
+ }
+ if e != 0 {
+ return os.Errno(e)
+ }
+ return nil
}
// Add a reference to this fd.
syscall.CloseOnExec(s)
syscall.ForkLock.RUnlock()
- if nfd, err = newFD(s, fd.family, fd.proto, fd.net, fd.laddr, toAddr(sa)); err != nil {
+ if nfd, err = newFD(s, fd.family, fd.proto, fd.net); err != nil {
syscall.Close(s)
return nil, err
}
+ nfd.setAddr(fd.laddr, toAddr(sa))
return nfd, nil
}
wio sync.Mutex
}
-func allocFD(fd, family, proto int, net string, laddr, raddr Addr) (f *netFD) {
+func allocFD(fd, family, proto int, net string) (f *netFD) {
f = &netFD{
sysfd: fd,
family: family,
proto: proto,
net: net,
- laddr: laddr,
- raddr: raddr,
}
runtime.SetFinalizer(f, (*netFD).Close)
return f
}
-func newFD(fd, family, proto int, net string, laddr, raddr Addr) (f *netFD, err os.Error) {
+func newFD(fd, family, proto int, net string) (f *netFD, err os.Error) {
if initErr != nil {
return nil, initErr
}
onceStartServer.Do(startServer)
// Associate our socket with resultsrv.iocp.
if _, e := syscall.CreateIoCompletionPort(int32(fd), resultsrv.iocp, 0, 0); e != 0 {
- return nil, &OpError{"CreateIoCompletionPort", net, laddr, os.Errno(e)}
+ return nil, os.Errno(e)
+ }
+ return allocFD(fd, family, proto, net), nil
+}
+
+func (fd *netFD) setAddr(laddr, raddr Addr) {
+ fd.laddr = laddr
+ fd.raddr = raddr
+}
+
+func (fd *netFD) connect(ra syscall.Sockaddr) (err os.Error) {
+ e := syscall.Connect(fd.sysfd, ra)
+ if e != 0 {
+ return os.Errno(e)
}
- return allocFD(fd, family, proto, net, laddr, raddr), nil
+ return nil
}
// Add a reference to this fd.
lsa, _ := lrsa.Sockaddr()
rsa, _ := rrsa.Sockaddr()
- return allocFD(s, fd.family, fd.proto, fd.net, toAddr(lsa), toAddr(rsa)), nil
+ nfd = allocFD(s, fd.family, fd.proto, fd.net)
+ nfd.setAddr(toAddr(lsa), toAddr(rsa))
+ return nfd, nil
}
// Not implemented functions.
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "os"
+ "syscall"
+)
+
+func newFileFD(f *os.File) (nfd *netFD, err os.Error) {
+ fd, errno := syscall.Dup(f.Fd())
+ if errno != 0 {
+ return nil, os.NewSyscallError("dup", errno)
+ }
+
+ proto, errno := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_TYPE)
+ if errno != 0 {
+ return nil, os.NewSyscallError("getsockopt", errno)
+ }
+
+ toAddr := sockaddrToTCP
+ sa, _ := syscall.Getsockname(fd)
+ switch sa.(type) {
+ default:
+ closesocket(fd)
+ return nil, os.EINVAL
+ case *syscall.SockaddrInet4:
+ if proto == syscall.SOCK_DGRAM {
+ toAddr = sockaddrToUDP
+ } else if proto == syscall.SOCK_RAW {
+ toAddr = sockaddrToIP
+ }
+ case *syscall.SockaddrInet6:
+ if proto == syscall.SOCK_DGRAM {
+ toAddr = sockaddrToUDP
+ } else if proto == syscall.SOCK_RAW {
+ toAddr = sockaddrToIP
+ }
+ case *syscall.SockaddrUnix:
+ toAddr = sockaddrToUnix
+ if proto == syscall.SOCK_DGRAM {
+ toAddr = sockaddrToUnixgram
+ } else if proto == syscall.SOCK_SEQPACKET {
+ toAddr = sockaddrToUnixpacket
+ }
+ }
+ laddr := toAddr(sa)
+ sa, _ = syscall.Getpeername(fd)
+ raddr := toAddr(sa)
+
+ if nfd, err = newFD(fd, 0, proto, laddr.Network()); err != nil {
+ return nil, err
+ }
+ nfd.setAddr(laddr, raddr)
+ return nfd, nil
+}
+
+// FileConn returns a copy of the network connection corresponding to
+// the open file f. It is the caller's responsibility to close f when
+// finished. Closing c does not affect f, and closing f does not
+// affect c.
+func FileConn(f *os.File) (c Conn, err os.Error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch fd.laddr.(type) {
+ case *TCPAddr:
+ return newTCPConn(fd), nil
+ case *UDPAddr:
+ return newUDPConn(fd), nil
+ case *UnixAddr:
+ return newUnixConn(fd), nil
+ case *IPAddr:
+ return newIPConn(fd), nil
+ }
+ fd.Close()
+ return nil, os.EINVAL
+}
+
+// FileListener returns a copy of the network listener corresponding
+// to the open file f. It is the caller's responsibility to close l
+// when finished. Closing c does not affect l, and closing l does not
+// affect c.
+func FileListener(f *os.File) (l Listener, err os.Error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch laddr := fd.laddr.(type) {
+ case *TCPAddr:
+ return &TCPListener{fd}, nil
+ case *UnixAddr:
+ return &UnixListener{fd, laddr.Name}, nil
+ }
+ fd.Close()
+ return nil, os.EINVAL
+}
+
+// FilePacketConn returns a copy of the packet network connection
+// corresponding to the open file f. It is the caller's
+// responsibility to close f when finished. Closing c does not affect
+// f, and closing f does not affect c.
+func FilePacketConn(f *os.File) (c PacketConn, err os.Error) {
+ fd, err := newFileFD(f)
+ if err != nil {
+ return nil, err
+ }
+ switch fd.laddr.(type) {
+ case *UDPAddr:
+ return newUDPConn(fd), nil
+ case *UnixAddr:
+ return newUnixConn(fd), nil
+ }
+ fd.Close()
+ return nil, os.EINVAL
+}
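A hedged sketch of the intended round trip (not part of the change): it assumes TCPListener exposes a File method in this tree, as the tests below rely on.

package main

import (
	"log"
	"net"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	tl := l.(*net.TCPListener)
	f, err := tl.File() // duplicate the listening socket as an *os.File
	if err != nil {
		log.Fatal(err)
	}
	l2, err := net.FileListener(f) // recover a Listener from the file
	if err != nil {
		log.Fatal(err)
	}
	// l, f, and l2 are independent references to the same socket;
	// closing one does not close the others.
	l2.Close()
	f.Close()
	l.Close()
}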
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "os"
+ "reflect"
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+type listenerFile interface {
+ Listener
+ File() (f *os.File, err os.Error)
+}
+
+type packetConnFile interface {
+ PacketConn
+ File() (f *os.File, err os.Error)
+}
+
+type connFile interface {
+ Conn
+ File() (f *os.File, err os.Error)
+}
+
+func testFileListener(t *testing.T, net, laddr string) {
+ if net == "tcp" {
+ laddr += ":0" // any available port
+ }
+ l, err := Listen(net, laddr)
+ if err != nil {
+ t.Fatalf("Listen failed: %v", err)
+ }
+ defer l.Close()
+ lf := l.(listenerFile)
+ f, err := lf.File()
+ if err != nil {
+ t.Fatalf("File failed: %v", err)
+ }
+ c, err := FileListener(f)
+ if err != nil {
+ t.Fatalf("FileListener failed: %v", err)
+ }
+ if !reflect.DeepEqual(l.Addr(), c.Addr()) {
+ t.Fatalf("Addrs not equal: %#v != %#v", l.Addr(), c.Addr())
+ }
+ if err := c.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+}
+
+func TestFileListener(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ testFileListener(t, "tcp", "127.0.0.1")
+ testFileListener(t, "tcp", "127.0.0.1")
+ if kernelSupportsIPv6() {
+ testFileListener(t, "tcp", "[::ffff:127.0.0.1]")
+ testFileListener(t, "tcp", "127.0.0.1")
+ testFileListener(t, "tcp", "[::ffff:127.0.0.1]")
+ }
+ if syscall.OS == "linux" {
+ testFileListener(t, "unix", "@gotest/net")
+ testFileListener(t, "unixpacket", "@gotest/net")
+ }
+}
+
+func testFilePacketConn(t *testing.T, pcf packetConnFile) {
+ f, err := pcf.File()
+ if err != nil {
+ t.Fatalf("File failed: %v", err)
+ }
+ c, err := FilePacketConn(f)
+ if err != nil {
+ t.Fatalf("FilePacketConn failed: %v", err)
+ }
+ if !reflect.DeepEqual(pcf.LocalAddr(), c.LocalAddr()) {
+ t.Fatalf("LocalAddrs not equal: %#v != %#v", pcf.LocalAddr(), c.LocalAddr())
+ }
+ if err := c.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+}
+
+func testFilePacketConnListen(t *testing.T, net, laddr string) {
+ l, err := ListenPacket(net, laddr)
+ if err != nil {
+ t.Fatalf("Listen failed: %v", err)
+ }
+ testFilePacketConn(t, l.(packetConnFile))
+ if err := l.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+}
+
+func testFilePacketConnDial(t *testing.T, net, raddr string) {
+ c, err := Dial(net, raddr)
+ if err != nil {
+ t.Fatalf("Dial failed: %v", err)
+ }
+ testFilePacketConn(t, c.(packetConnFile))
+ if err := c.Close(); err != nil {
+ t.Fatalf("Close failed: %v", err)
+ }
+}
+
+func TestFilePacketConn(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ return
+ }
+ testFilePacketConnListen(t, "udp", "127.0.0.1:0")
+ testFilePacketConnDial(t, "udp", "127.0.0.1:12345")
+ if kernelSupportsIPv6() {
+ testFilePacketConnListen(t, "udp", "[::1]:0")
+ testFilePacketConnDial(t, "udp", "[::ffff:127.0.0.1]:12345")
+ }
+ if syscall.OS == "linux" {
+ testFilePacketConnListen(t, "unixgram", "@gotest1/net")
+ }
+}
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "os"
+ "syscall"
+)
+
+func FileConn(f *os.File) (c Conn, err os.Error) {
+ // TODO: Implement this
+ return nil, os.NewSyscallError("FileConn", syscall.EWINDOWS)
+}
+
+func FileListener(f *os.File) (l Listener, err os.Error) {
+ // TODO: Implement this
+ return nil, os.NewSyscallError("FileListener", syscall.EWINDOWS)
+}
+
+func FilePacketConn(f *os.File) (c PacketConn, err os.Error) {
+ // TODO: Implement this
+ return nil, os.NewSyscallError("FilePacketConn", syscall.EWINDOWS)
+}
ips []IP
}
-
var hosttests = []hostTest{
{"odin", []IP{
IPv4(127, 0, 0, 2),
return p
}
-// A SyntaxError represents a malformed text string and the type of string that was expected.
-type SyntaxError struct {
+// A ParseError represents a malformed text string and the type of string that was expected.
+type ParseError struct {
Type string
Text string
}
-func (e *SyntaxError) String() string {
+func (e *ParseError) String() string {
return "invalid " + e.Type + ": " + e.Text
}
}
// ParseCIDR parses s as a CIDR notation IP address and mask,
-// like "192.168.100.1/24" or "2001:DB8::/48".
+// like "192.168.100.0/24", "2001:DB8::/48", as defined in
+// RFC 4632 and RFC 4291.
func ParseCIDR(s string) (ip IP, mask IPMask, err os.Error) {
i := byteIndex(s, '/')
if i < 0 {
- return nil, nil, &SyntaxError{"CIDR address", s}
+ return nil, nil, &ParseError{"CIDR address", s}
}
ipstr, maskstr := s[:i], s[i+1:]
- ip = ParseIP(ipstr)
+ iplen := 4
+ ip = parseIPv4(ipstr)
+ if ip == nil {
+ iplen = 16
+ ip = parseIPv6(ipstr)
+ }
nn, i, ok := dtoi(maskstr, 0)
- if ip == nil || !ok || i != len(maskstr) || nn < 0 || nn > 8*len(ip) {
- return nil, nil, &SyntaxError{"CIDR address", s}
+ if ip == nil || !ok || i != len(maskstr) || nn < 0 || nn > 8*iplen {
+ return nil, nil, &ParseError{"CIDR address", s}
}
n := uint(nn)
- if len(ip) == 4 {
+ if iplen == 4 {
v4mask := ^uint32(0xffffffff >> n)
- mask = IPMask(IPv4(byte(v4mask>>24), byte(v4mask>>16), byte(v4mask>>8), byte(v4mask)))
- return ip, mask, nil
- }
- mask = make(IPMask, 16)
- for i := 0; i < 16; i++ {
- if n >= 8 {
- mask[i] = 0xff
- n -= 8
- continue
+ mask = IPv4Mask(byte(v4mask>>24), byte(v4mask>>16), byte(v4mask>>8), byte(v4mask))
+ } else {
+ mask = make(IPMask, 16)
+ for i := 0; i < 16; i++ {
+ if n >= 8 {
+ mask[i] = 0xff
+ n -= 8
+ continue
+ }
+ mask[i] = ^byte(0xff >> n)
+ n = 0
+ }
+ }
+ // address must not have any bits not in mask
+ for i := range ip {
+ if ip[i]&^mask[i] != 0 {
+ return nil, nil, &ParseError{"CIDR address", s}
}
- mask[i] = ^byte(0xff >> n)
- n = 0
}
return ip, mask, nil
}
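Usage sketch (not part of the change); note that with the host-bits check above, the address portion must be the network address itself.

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	ip, mask, err := net.ParseCIDR("192.168.100.0/24")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ip) // 192.168.100.0
	_ = mask        // the /24 mask for that network

	// Host bits set outside the mask are rejected:
	if _, _, err := net.ParseCIDR("192.168.100.1/24"); err != nil {
		fmt.Println(err) // invalid CIDR address: 192.168.100.1/24
	}
}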
package net
import (
+ "bytes"
+ "reflect"
"testing"
+ "os"
)
-func isEqual(a, b IP) bool {
+func isEqual(a, b []byte) bool {
if a == nil && b == nil {
return true
}
- if a == nil || b == nil || len(a) != len(b) {
+ if a == nil || b == nil {
return false
}
- for i := 0; i < len(a); i++ {
- if a[i] != b[i] {
- return false
- }
- }
- return true
+ return bytes.Equal(a, b)
}
-type parseIPTest struct {
+var parseiptests = []struct {
in string
out IP
-}
-
-var parseiptests = []parseIPTest{
+}{
{"127.0.1.2", IPv4(127, 0, 1, 2)},
{"127.0.0.1", IPv4(127, 0, 0, 1)},
{"127.0.0.256", nil},
}
func TestParseIP(t *testing.T) {
- for i := 0; i < len(parseiptests); i++ {
- tt := parseiptests[i]
+ for _, tt := range parseiptests {
if out := ParseIP(tt.in); !isEqual(out, tt.out) {
t.Errorf("ParseIP(%#q) = %v, want %v", tt.in, out, tt.out)
}
}
}
-type ipStringTest struct {
+var ipstringtests = []struct {
in IP
out string
-}
-
-var ipstringtests = []ipStringTest{
+}{
// cf. RFC 5952 (A Recommendation for IPv6 Address Text Representation)
{IP{0x20, 0x1, 0xd, 0xb8, 0, 0, 0, 0,
0, 0, 0x1, 0x23, 0, 0x12, 0, 0x1},
}
func TestIPString(t *testing.T) {
- for i := 0; i < len(ipstringtests); i++ {
- tt := ipstringtests[i]
+ for _, tt := range ipstringtests {
if out := tt.in.String(); out != tt.out {
t.Errorf("IP.String(%v) = %#q, want %#q", tt.in, out, tt.out)
}
}
}
+
+var parsecidrtests = []struct {
+ in string
+ ip IP
+ mask IPMask
+ err os.Error
+}{
+ {"135.104.0.0/32", IPv4(135, 104, 0, 0), IPv4Mask(255, 255, 255, 255), nil},
+ {"0.0.0.0/24", IPv4(0, 0, 0, 0), IPv4Mask(255, 255, 255, 0), nil},
+ {"135.104.0.0/24", IPv4(135, 104, 0, 0), IPv4Mask(255, 255, 255, 0), nil},
+ {"135.104.0.1/32", IPv4(135, 104, 0, 1), IPv4Mask(255, 255, 255, 255), nil},
+ {"135.104.0.1/24", nil, nil, &ParseError{"CIDR address", "135.104.0.1/24"}},
+ {"::1/128", ParseIP("::1"), IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")), nil},
+ {"abcd:2345::/127", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe")), nil},
+ {"abcd:2345::/65", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff:ffff:ffff:8000::")), nil},
+ {"abcd:2345::/64", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff:ffff:ffff::")), nil},
+ {"abcd:2345::/63", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff:ffff:fffe::")), nil},
+ {"abcd:2345::/33", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff:8000::")), nil},
+ {"abcd:2345::/32", ParseIP("abcd:2345::"), IPMask(ParseIP("ffff:ffff::")), nil},
+ {"abcd:2344::/31", ParseIP("abcd:2344::"), IPMask(ParseIP("ffff:fffe::")), nil},
+ {"abcd:2300::/24", ParseIP("abcd:2300::"), IPMask(ParseIP("ffff:ff00::")), nil},
+ {"abcd:2345::/24", nil, nil, &ParseError{"CIDR address", "abcd:2345::/24"}},
+ {"2001:DB8::/48", ParseIP("2001:DB8::"), IPMask(ParseIP("ffff:ffff:ffff::")), nil},
+}
+
+func TestParseCIDR(t *testing.T) {
+ for _, tt := range parsecidrtests {
+ if ip, mask, err := ParseCIDR(tt.in); !isEqual(ip, tt.ip) || !isEqual(mask, tt.mask) || !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("ParseCIDR(%q) = %v, %v, %v; want %v, %v, %v", tt.in, ip, mask, err, tt.ip, tt.mask, tt.err)
+ }
+ }
+}
+
+var splitjointests = []struct {
+ Host string
+ Port string
+ Join string
+}{
+ {"www.google.com", "80", "www.google.com:80"},
+ {"127.0.0.1", "1234", "127.0.0.1:1234"},
+ {"::1", "80", "[::1]:80"},
+}
+
+func TestSplitHostPort(t *testing.T) {
+ for _, tt := range splitjointests {
+ if host, port, err := SplitHostPort(tt.Join); host != tt.Host || port != tt.Port || err != nil {
+ t.Errorf("SplitHostPort(%q) = %q, %q, %v; want %q, %q, nil", tt.Join, host, port, err, tt.Host, tt.Port)
+ }
+ }
+}
+
+func TestJoinHostPort(t *testing.T) {
+ for _, tt := range splitjointests {
+ if join := JoinHostPort(tt.Host, tt.Port); join != tt.Join {
+ t.Errorf("JoinHostPort(%q, %q) = %q; want %q", tt.Host, tt.Port, join, tt.Join)
+ }
+ }
+}
addr = ParseIP(host)
if addr == nil {
// Not an IP address. Try as a DNS name.
- _, addrs, err1 := LookupHost(host)
+ addrs, err1 := LookupHost(host)
if err1 != nil {
err = err1
goto Error
return nil, InvalidAddrError("unexpected socket family")
}
-// Split "host:port" into "host" and "port".
-// Host cannot contain colons unless it is bracketed.
-func splitHostPort(hostport string) (host, port string, err os.Error) {
+// SplitHostPort splits a network address of the form
+// "host:port" or "[host]:port" into host and port.
+// The latter form must be used when host contains a colon.
+func SplitHostPort(hostport string) (host, port string, err os.Error) {
// The port starts after the last colon.
i := last(hostport, ':')
if i < 0 {
return
}
-// Join "host" and "port" into "host:port".
-// If host contains colons, will join into "[host]:port".
-func joinHostPort(host, port string) string {
+// JoinHostPort combines host and port into a network address
+// of the form "host:port" or, if host contains a colon, "[host]:port".
+func JoinHostPort(host, port string) string {
// If host has colons, have to bracket it.
if byteIndex(host, ':') >= 0 {
return "[" + host + "]:" + port
// Convert "host:port" into IP address and port.
func hostPortToIP(net, hostport string) (ip IP, iport int, err os.Error) {
- host, port, err := splitHostPort(hostport)
+ host, port, err := SplitHostPort(hostport)
if err != nil {
goto Error
}
addr = ParseIP(host)
if addr == nil {
// Not an IP address. Try as a DNS name.
- _, addrs, err1 := LookupHost(host)
+ addrs, err1 := LookupHost(host)
if err1 != nil {
err = err1
goto Error
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "os"
+)
+
+// LookupHost looks up the given host using the local resolver.
+// It returns an array of that host's addresses.
+func LookupHost(host string) (addrs []string, err os.Error) {
+ addrs, err, ok := cgoLookupHost(host)
+ if !ok {
+ addrs, err = goLookupHost(host)
+ }
+ return
+}
+
+// LookupIP looks up host using the local resolver.
+// It returns an array of that host's IPv4 and IPv6 addresses.
+func LookupIP(host string) (addrs []IP, err os.Error) {
+ addrs, err, ok := cgoLookupIP(host)
+ if !ok {
+ addrs, err = goLookupIP(host)
+ }
+ return
+}
+
+// LookupPort looks up the port for the given network and service.
+func LookupPort(network, service string) (port int, err os.Error) {
+ port, err, ok := cgoLookupPort(network, service)
+ if !ok {
+ port, err = goLookupPort(network, service)
+ }
+ return
+}
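Usage sketch for the service-lookup front end (not part of the change).

package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	port, err := net.LookupPort("tcp", "http")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(port) // typically 80, from the local services database
}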
type DialErrorTest struct {
Net string
- Laddr string
Raddr string
Pattern string
}
var dialErrorTests = []DialErrorTest{
{
- "datakit", "", "mh/astro/r70",
+ "datakit", "mh/astro/r70",
"dial datakit mh/astro/r70: unknown network datakit",
},
{
- "tcp", "", "127.0.0.1:☺",
+ "tcp", "127.0.0.1:☺",
"dial tcp 127.0.0.1:☺: unknown port tcp/☺",
},
{
- "tcp", "", "no-such-name.google.com.:80",
+ "tcp", "no-such-name.google.com.:80",
"dial tcp no-such-name.google.com.:80: lookup no-such-name.google.com.( on .*)?: no (.*)",
},
{
- "tcp", "", "no-such-name.no-such-top-level-domain.:80",
+ "tcp", "no-such-name.no-such-top-level-domain.:80",
"dial tcp no-such-name.no-such-top-level-domain.:80: lookup no-such-name.no-such-top-level-domain.( on .*)?: no (.*)",
},
{
- "tcp", "", "no-such-name:80",
+ "tcp", "no-such-name:80",
`dial tcp no-such-name:80: lookup no-such-name\.(.*\.)?( on .*)?: no (.*)`,
},
{
- "tcp", "", "mh/astro/r70:http",
+ "tcp", "mh/astro/r70:http",
"dial tcp mh/astro/r70:http: lookup mh/astro/r70: invalid domain name",
},
{
- "unix", "", "/etc/file-not-found",
+ "unix", "/etc/file-not-found",
"dial unix /etc/file-not-found: [nN]o such file or directory",
},
{
- "unix", "", "/etc/",
- "dial unix /etc/: ([pP]ermission denied|[sS]ocket operation on non-socket|[cC]onnection refused)",
+ "unix", "/etc/",
+ "dial unix /etc/: ([pP]ermission denied|socket operation on non-socket|connection refused)",
},
{
- "unixpacket", "", "/etc/file-not-found",
+ "unixpacket", "/etc/file-not-found",
"dial unixpacket /etc/file-not-found: no such file or directory",
},
{
- "unixpacket", "", "/etc/",
+ "unixpacket", "/etc/",
"dial unixpacket /etc/: (permission denied|socket operation on non-socket|connection refused)",
},
}
return
}
for i, tt := range dialErrorTests {
- c, e := Dial(tt.Net, tt.Laddr, tt.Raddr)
+ c, e := Dial(tt.Net, tt.Raddr)
if c != nil {
c.Close()
}
file.close()
}
-// LookupPort looks up the port for the given network and service.
-func LookupPort(network, service string) (port int, err os.Error) {
+// goLookupPort is the native Go implementation of LookupPort.
+func goLookupPort(network, service string) (port int, err os.Error) {
onceReadServices.Do(readServices)
switch network {
}
func connect(t *testing.T, network, addr string, isEmpty bool) {
- var laddr string
+ var fd Conn
+ var err os.Error
if network == "unixgram" {
- laddr = addr + ".local"
+ fd, err = DialUnix(network, &UnixAddr{addr + ".local", network}, &UnixAddr{addr, network})
+ } else {
+ fd, err = Dial(network, addr)
}
- fd, err := Dial(network, laddr, addr)
if err != nil {
- t.Fatalf("net.Dial(%q, %q, %q) = _, %v", network, laddr, addr, err)
+ t.Fatalf("net.Dial(%q, %q) = _, %v", network, addr, err)
}
fd.SetReadTimeout(1e9) // 1s
}
}
+ if fd, err = newFD(s, f, p, net); err != nil {
+ closesocket(s)
+ return nil, err
+ }
+
if ra != nil {
- e = syscall.Connect(s, ra)
- for e == syscall.EINTR {
- e = syscall.Connect(s, ra)
- }
- if e != 0 {
+ if err = fd.connect(ra); err != nil {
+ fd.sysfd = -1
closesocket(s)
- return nil, os.Errno(e)
+ return nil, err
}
}
sa, _ = syscall.Getpeername(s)
raddr := toAddr(sa)
- fd, err = newFD(s, f, p, net, laddr, raddr)
- if err != nil {
- closesocket(s)
- return nil, err
- }
-
+ fd.setAddr(laddr, raddr)
return fd, nil
}
func sockaddrToString(sa syscall.Sockaddr) (name string, err os.Error) {
switch a := sa.(type) {
case *syscall.SockaddrInet4:
- return joinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
+ return JoinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
case *syscall.SockaddrInet6:
- return joinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
+ return JoinHostPort(IP(a.Addr[0:]).String(), itoa(a.Port)), nil
case *syscall.SockaddrUnix:
return a.Name, nil
}
if a == nil {
return "<nil>"
}
- return joinHostPort(a.IP.String(), itoa(a.Port))
+ return JoinHostPort(a.IP.String(), itoa(a.Port))
}
func (a *TCPAddr) family() int {
// Closing c does not affect f, and closing f does not affect c.
func (c *TCPConn) File() (f *os.File, err os.Error) { return c.fd.dup() }
-// DialTCP is like Dial but can only connect to TCP networks
-// and returns a TCPConn structure.
+// DialTCP connects to the remote address raddr on the network net,
+// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
+// as the local address for the connection.
func DialTCP(net string, laddr, raddr *TCPAddr) (c *TCPConn, err os.Error) {
if raddr == nil {
return nil, &OpError{"dial", "tcp", nil, errMissingAddress}
// Dial connects to the given address on the given network using net.Dial
// and then returns a new Conn for the connection.
func Dial(network, addr string) (*Conn, os.Error) {
- c, err := net.Dial(network, "", addr)
+ c, err := net.Dial(network, addr)
if err != nil {
return nil, err
}
)
func testTimeout(t *testing.T, network, addr string, readFrom bool) {
- fd, err := Dial(network, "", addr)
+ fd, err := Dial(network, addr)
if err != nil {
t.Errorf("dial %s %s failed: %v", network, addr, err)
return
if a == nil {
return "<nil>"
}
- return joinHostPort(a.IP.String(), itoa(a.Port))
+ return JoinHostPort(a.IP.String(), itoa(a.Port))
}
func (a *UDPAddr) family() int {
// Import imports a set of channels from the given network and address.
func Import(network, remoteaddr string) (*Importer, os.Error) {
- conn, err := net.Dial(network, "", remoteaddr)
+ conn, err := net.Dial(network, remoteaddr)
if err != nil {
return nil, err
}
func testFlow(sendDone chan bool, ch <-chan int, N int, t *testing.T) {
go func() {
- time.Sleep(1e9)
+ time.Sleep(0.5e9)
sendDone <- false
}()
}
return nil
}
+
+// basename removes trailing slashes and the leading directory name from the path name.
+func basename(name string) string {
+ i := len(name) - 1
+ // Remove trailing slashes
+ for ; i > 0 && name[i] == '/'; i-- {
+ name = name[:i]
+ }
+ // Remove leading directory name
+ for i--; i >= 0; i-- {
+ if name[i] == '/' {
+ name = name[i+1:]
+ break
+ }
+ }
+
+ return name
+}
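A standalone sketch that restates the basename logic above with a few concrete inputs, to make the two loops (strip trailing slashes, then cut at the last remaining slash) easy to follow; it is a copy for illustration, not a second implementation in the tree:

package main

import "fmt"

func basename(name string) string {
    i := len(name) - 1
    // Remove trailing slashes first.
    for ; i > 0 && name[i] == '/'; i-- {
        name = name[:i]
    }
    // Then drop everything up to and including the last '/'.
    for i--; i >= 0; i-- {
        if name[i] == '/' {
            name = name[i+1:]
            break
        }
    }
    return name
}

func main() {
    fmt.Println(basename("/usr/local/bin/prof")) // prof
    fmt.Println(basename("/usr/local/bin/"))     // bin
    fmt.Println(basename("prof"))                // prof
}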
// at the specified network address and path.
func DialHTTPPath(network, address, path string) (*Client, os.Error) {
var err os.Error
- conn, err := net.Dial(network, "", address)
+ conn, err := net.Dial(network, address)
if err != nil {
return nil, err
}
// Dial connects to an RPC server at the specified network address.
func Dial(network, address string) (*Client, os.Error) {
- conn, err := net.Dial(network, "", address)
+ conn, err := net.Dial(network, address)
if err != nil {
return nil, err
}
// Dial connects to a JSON-RPC server at the specified network address.
func Dial(network, address string) (*rpc.Client, os.Error) {
- conn, err := net.Dial(network, "", address)
+ conn, err := net.Dial(network, address)
if err != nil {
return nil, err
}
)
func TestCPUProfile(t *testing.T) {
- if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
+ switch runtime.GOOS {
+ case "darwin":
+ // see Apple Bug Report #9177434 (copied into change description)
+ return
+ case "plan9":
+ // unimplemented
+ return
+ case "windows":
+ // unimplemented
return
}
// Dial returns a new Client connected to an SMTP server at addr.
func Dial(addr string) (*Client, os.Error) {
- conn, err := net.Dial("tcp", "", addr)
+ conn, err := net.Dial("tcp", addr)
if err != nil {
return nil, err
}
}
func TestSortLarge_Random(t *testing.T) {
- data := make([]int, 1000000)
+ n := 1000000
+ if testing.Short() {
+ n /= 100
+ }
+ data := make([]int, n)
for i := 0; i < len(data); i++ {
data[i] = rand.Intn(100)
}
func TestBentleyMcIlroy(t *testing.T) {
sizes := []int{100, 1023, 1024, 1025}
+ if testing.Short() {
+ sizes = []int{100, 127, 128, 129}
+ }
dists := []string{"sawtooth", "rand", "stagger", "plateau", "shuffle"}
modes := []string{"copy", "reverse", "reverse1", "reverse2", "sort", "dither"}
var tmp1, tmp2 [1025]int
}
b := make([]byte, n)
- bp := 0
- for i := 0; i < len(a); i++ {
- s := a[i]
- for j := 0; j < len(s); j++ {
- b[bp] = s[j]
- bp++
- }
- if i+1 < len(a) {
- s = sep
- for j := 0; j < len(s); j++ {
- b[bp] = s[j]
- bp++
- }
- }
+ bp := copy(b, a[0])
+ for _, s := range a[1:] {
+ bp += copy(b[bp:], sep)
+ bp += copy(b[bp:], s)
}
return string(b)
}
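The rewritten Join leans on the fact that copy reports how many bytes it wrote; a self-contained sketch of the same pattern, with its own length calculation since the original's is outside this hunk:

package main

import "fmt"

func join(a []string, sep string) string {
    if len(a) == 0 {
        return ""
    }
    n := len(sep) * (len(a) - 1)
    for _, s := range a {
        n += len(s)
    }
    b := make([]byte, n)
    bp := copy(b, a[0]) // bp always indexes the next free byte of b
    for _, s := range a[1:] {
        bp += copy(b[bp:], sep)
        bp += copy(b[bp:], s)
    }
    return string(b)
}

func main() {
    fmt.Println(join([]string{"a", "b", "c"}, ", ")) // a, b, c
}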
// fine. It could also shrink but that falls out naturally.
maxbytes := len(s) // length of b
nbytes := 0 // number of bytes encoded in b
- b := make([]byte, maxbytes)
- for _, c := range s {
+ // The output buffer b is initialized on demand, the first
+ // time a character differs.
+ var b []byte
+
+ for i, c := range s {
rune := mapping(c)
+ if b == nil {
+ if rune == c {
+ continue
+ }
+ b = make([]byte, maxbytes)
+ nbytes = copy(b, s[:i])
+ }
if rune >= 0 {
wid := 1
if rune >= utf8.RuneSelf {
nbytes += utf8.EncodeRune(b[nbytes:maxbytes], rune)
}
}
+ if b == nil {
+ return s
+ }
return string(b[0:nbytes])
}
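A simplified, byte-only sketch of the allocate-on-first-difference idea used by Map above; it ignores UTF-8 and rune dropping, which the real code handles, but shows why an identity mapping returns the input string without copying:

package main

import "fmt"

func mapBytes(mapping func(byte) byte, s string) string {
    var b []byte // stays nil until some byte actually changes
    for i := 0; i < len(s); i++ {
        c := mapping(s[i])
        if b == nil {
            if c == s[i] {
                continue // output still identical to input
            }
            b = make([]byte, len(s))
            copy(b, s[:i]) // copy the unchanged prefix once
        }
        b[i] = c
    }
    if b == nil {
        return s // nothing changed: no allocation, no copy
    }
    return string(b)
}

func main() {
    upper := func(c byte) byte {
        if 'a' <= c && c <= 'z' {
            return c - 'a' + 'A'
        }
        return c
    }
    fmt.Println(mapBytes(upper, "Hello")) // HELLO
    identity := func(c byte) byte { return c }
    fmt.Println(mapBytes(identity, "Hello")) // Hello, returned as-is
}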
import (
"os"
+ "reflect"
"strconv"
. "strings"
"testing"
"unicode"
+ "unsafe"
"utf8"
)
if m != expect {
t.Errorf("drop: expected %q got %q", expect, m)
}
+
+ // 6. Identity
+ identity := func(rune int) int {
+ return rune
+ }
+ orig := "Input string that we expect not to be copied."
+ m = Map(identity, orig)
+ if (*reflect.StringHeader)(unsafe.Pointer(&orig)).Data !=
+ (*reflect.StringHeader)(unsafe.Pointer(&m)).Data {
+ t.Error("unexpected copy during identity map")
+ }
}
func TestToUpper(t *testing.T) { runStringTests(t, ToUpper, "ToUpper", upperTests) }
func TestToLower(t *testing.T) { runStringTests(t, ToLower, "ToLower", lowerTests) }
+func BenchmarkMapNoChanges(b *testing.B) {
+ identity := func(rune int) int {
+ return rune
+ }
+ for i := 0; i < b.N; i++ {
+ Map(identity, "Some string that won't be modified.")
+ }
+}
+
func TestSpecialCase(t *testing.T) {
lower := "abcçdefgğhıijklmnoöprsştuüvyz"
upper := "ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZ"
func TestCaseConsistency(t *testing.T) {
// Make a string of all the runes.
- a := make([]int, unicode.MaxRune+1)
+ numRunes := unicode.MaxRune + 1
+ if testing.Short() {
+ numRunes = 1000
+ }
+ a := make([]int, numRunes)
for i := range a {
a[i] = i
}
lower := ToLower(s)
// Consistency checks
- if n := utf8.RuneCountInString(upper); n != unicode.MaxRune+1 {
+ if n := utf8.RuneCountInString(upper); n != numRunes {
t.Error("rune count wrong in upper:", n)
}
- if n := utf8.RuneCountInString(lower); n != unicode.MaxRune+1 {
+ if n := utf8.RuneCountInString(lower); n != numRunes {
t.Error("rune count wrong in lower:", n)
}
if !equal("ToUpper(upper)", ToUpper(upper), upper, t) {
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package atomic
+package atomic_test
import (
"runtime"
+ . "sync/atomic"
"testing"
"unsafe"
)
magic64 = 0xdeddeadbeefbeef
)
+// Do the 64-bit functions panic? If so, don't bother testing.
+var test64err = func() (err interface{}) {
+ defer func() {
+ err = recover()
+ }()
+ var x int64
+ AddInt64(&x, 1)
+ return nil
+}()
+
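The test64err variable uses a run-once probe guarded by recover; a standalone sketch of that pattern, where mightPanic is a made-up stand-in for the 64-bit atomic call:

package main

import "fmt"

// mightPanic is a placeholder for an operation that may be unsupported
// on the current machine (here it always panics, to exercise the probe).
func mightPanic() {
    panic("not supported here")
}

// probeErr is nil if the probe ran to completion, otherwise it holds the
// recovered panic value, just as test64err does above.
var probeErr = func() (err interface{}) {
    defer func() {
        err = recover()
    }()
    mightPanic()
    return nil
}()

func main() {
    if probeErr != nil {
        fmt.Println("skipping:", probeErr)
        return
    }
    fmt.Println("probe succeeded")
}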
func TestAddInt32(t *testing.T) {
var x struct {
before int32
}
func TestAddInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
var x struct {
before int64
i int64
}
func TestAddUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
var x struct {
before uint64
i uint64
}
func TestCompareAndSwapInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
var x struct {
before int64
i int64
}
func TestCompareAndSwapUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
var x struct {
before uint64
i uint64
}
func TestHammer32(t *testing.T) {
- const (
- n = 100000
- p = 4
- )
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
for _, tt := range hammer32 {
for i := 0; i < p; i++ {
<-c
}
- if val != n*p {
+ if val != uint32(n)*p {
t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
}
}
}
func TestHammer64(t *testing.T) {
- const (
- n = 100000
- p = 4
- )
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ const p = 4
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
for _, tt := range hammer64 {
for i := 0; i < p; i++ {
<-c
}
- if val != n*p {
+ if val != uint64(n)*p {
t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
}
}
// AddUintptr atomically adds delta to *val and returns the new value.
func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
+
+// Helper for ARM. The linker will discard it on other systems.
+func panic64() {
+ panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
+}
}
func TestRWMutex(t *testing.T) {
- HammerRWMutex(1, 1, 1000)
- HammerRWMutex(1, 3, 1000)
- HammerRWMutex(1, 10, 1000)
- HammerRWMutex(4, 1, 1000)
- HammerRWMutex(4, 3, 1000)
- HammerRWMutex(4, 10, 1000)
- HammerRWMutex(10, 1, 1000)
- HammerRWMutex(10, 3, 1000)
- HammerRWMutex(10, 10, 1000)
- HammerRWMutex(10, 5, 10000)
+ n := 1000
+ if testing.Short() {
+ n = 5
+ }
+ HammerRWMutex(1, 1, n)
+ HammerRWMutex(1, 3, n)
+ HammerRWMutex(1, 10, n)
+ HammerRWMutex(4, 1, n)
+ HammerRWMutex(4, 3, n)
+ HammerRWMutex(4, 10, n)
+ HammerRWMutex(10, 1, n)
+ HammerRWMutex(10, 3, n)
+ HammerRWMutex(10, 10, n)
+ HammerRWMutex(10, 5, n)
}
func TestRLocker(t *testing.T) {
conn, err = unixSyslog()
} else {
var c net.Conn
- c, err = net.Dial(network, "", raddr)
+ c, err = net.Dial(network, raddr)
conn = netConn{c}
}
return &Writer{priority, prefix, conn}, err
for _, network := range logTypes {
for _, path := range logPaths {
raddr = path
- conn, err := net.Dial(network, "", raddr)
+ conn, err := net.Dial(network, raddr)
if err != nil {
continue
} else {
)
var (
+ // The short flag requests that tests run more quickly, but its functionality
+ // is provided by test writers themselves. The testing package is just its
+ // home. The all.bash installation script sets it to make installation more
+ // efficient, but by default the flag is off so a plain "gotest" will do a
+ // full test of the package.
+ short = flag.Bool("test.short", false, "run smaller test suite to save time")
+
// Report as tests are run; default is silent for success.
chatty = flag.Bool("test.v", false, "verbose: print additional output")
match = flag.String("test.run", "", "regular expression to select tests to run")
cpuProfile = flag.String("test.cpuprofile", "", "write a cpu profile to the named file during execution")
)
+// Short reports whether the -test.short flag is set.
+func Short() bool {
+ return *short
+}
+
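A hypothetical test (names made up) showing the call pattern the new Short helper is meant for: keep the full workload for a normal run and shrink it when -test.short is set, as the sort, strings, and sync tests in this patch do:

package example

import "testing"

func TestSomethingExpensive(t *testing.T) {
    n := 1000000
    if testing.Short() {
        n = 1000 // all.bash passes -test.short, so keep installation fast
    }
    for i := 0; i < n; i++ {
        if i < 0 {
            t.Fatalf("impossible value: %d", i)
        }
    }
}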
// Insert final newline if needed and tabs after internal newlines.
func tabify(s string) string {
go tRunner(t, &tests[i])
<-t.ch
ns += time.Nanoseconds()
- tstr := fmt.Sprintf("(%.1f seconds)", float64(ns)/1e9)
+ tstr := fmt.Sprintf("(%.2f seconds)", float64(ns)/1e9)
if t.failed {
println("--- FAIL:", tests[i].Name, tstr)
print(t.errors)
package time_test
import (
+ "fmt"
"os"
"syscall"
"testing"
}
}
+func TestAfterQueuing(t *testing.T) {
+ // This test flakes out on some systems,
+ // so we'll try it a few times before declaring it a failure.
+ const attempts = 3
+ err := os.NewError("!=nil")
+ for i := 0; i < attempts && err != nil; i++ {
+ if err = testAfterQueuing(t); err != nil {
+ t.Logf("attempt %v failed: %v", i, err)
+ }
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
// For gccgo omit 0 for now because it can take too long to start the
-// thread.
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, /*0*/}
type afterResult struct {
result <- afterResult{slot, <-ac}
}
-func TestAfterQueuing(t *testing.T) {
+func testAfterQueuing(t *testing.T) os.Error {
const (
Delta = 100 * 1e6
)
for _, slot := range slots {
r := <-result
if r.slot != slot {
- t.Fatalf("after queue got slot %d, expected %d", r.slot, slot)
+ return fmt.Errorf("after queue got slot %d, expected %d", r.slot, slot)
}
ns := r.t - t0
target := int64(slot * Delta)
slop := int64(Delta) / 4
if ns < target-slop || ns > target+slop {
- t.Fatalf("after queue slot %d arrived at %g, expected [%g,%g]", slot, float64(ns), float64(target-slop), float64(target+slop))
+ return fmt.Errorf("after queue slot %d arrived at %g, expected [%g,%g]", slot, float64(ns), float64(target-slop), float64(target+slop))
}
}
+ return nil
}
}
}
-const randCount = 100000
+func randCount() int {
+ if testing.Short() {
+ return 100
+ }
+ return 100000
+}
func TestRandomAccess(t *testing.T) {
for _, s := range testStrings {
t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
break
}
- for j := 0; j < randCount; j++ {
+ for j := 0; j < randCount(); j++ {
i := rand.Intn(len(runes))
expect := runes[i]
got := str.At(i)
t.Errorf("%s: expected %d runes; got %d", s, len(runes), str.RuneCount())
break
}
- for k := 0; k < randCount; k++ {
+ for k := 0; k < randCount(); k++ {
i := rand.Intn(len(runes))
j := rand.Intn(len(runes) + 1)
if i > j { // include empty strings
switch parsedUrl.Scheme {
case "ws":
- client, err = net.Dial("tcp", "", parsedUrl.Host)
+ client, err = net.Dial("tcp", parsedUrl.Host)
case "wss":
- client, err = tls.Dial("tcp", "", parsedUrl.Host, nil)
+ client, err = tls.Dial("tcp", parsedUrl.Host, nil)
default:
err = ErrBadScheme
once.Do(startServer)
// websocket.Dial()
- client, err := net.Dial("tcp", "", serverAddr)
+ client, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatal("dialing", err)
}
once.Do(startServer)
// websocket.Dial()
- client, err := net.Dial("tcp", "", serverAddr)
+ client, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatal("dialing", err)
}
func TestWithQuery(t *testing.T) {
once.Do(startServer)
- client, err := net.Dial("tcp", "", serverAddr)
+ client, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatal("dialing", err)
}
func TestWithProtocol(t *testing.T) {
once.Do(startServer)
- client, err := net.Dial("tcp", "", serverAddr)
+ client, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatal("dialing", err)
}
once.Do(startServer)
// websocket.Dial()
- client, err := net.Dial("tcp", "", serverAddr)
+ client, err := net.Dial("tcp", serverAddr)
if err != nil {
t.Fatal("dialing", err)
}
return;
}
+func GetsockoptInt(fd, level, opt int) (value, errno int) {
+ var n int32
+ vallen := Socklen_t(4)
+ errno = libc_getsockopt(fd, level, opt, (*byte)(unsafe.Pointer(&n)), &vallen)
+ return int(n), errno
+}
+
func setsockopt(fd, level, opt int, valueptr uintptr, length Socklen_t) (errno int) {
r := libc_setsockopt(fd, level, opt, (*byte)(unsafe.Pointer(valueptr)),
length);
if r < 0 { errno = GetErrno() }
return;
}
-
-// FIXME: No getsockopt.
func Select(nfds int, r *FdSet_t, w *FdSet_t, e *FdSet_t, timeout *Timeval) (n int, errno int) {
n = libc_select(nfds, (*byte)(unsafe.Pointer(r)),
- (*byte)(unsafe.Pointer(e)),
+ (*byte)(unsafe.Pointer(w)),
(*byte)(unsafe.Pointer(e)), timeout);
if n < 0 { errno = GetErrno() }
return;
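A sketch of the kind of caller the new GetsockoptInt enables, such as reading SO_ERROR after a non-blocking connect; the SOL_SOCKET and SO_ERROR constants (and a valid descriptor) are assumed to be provided by this syscall package:

package main

import (
    "fmt"
    "syscall"
)

// connectErrno returns the pending error, if any, on a socket descriptor.
func connectErrno(fd int) int {
    e, errno := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_ERROR)
    if errno != 0 {
        return errno // getsockopt itself failed
    }
    return e // 0 means the earlier connect completed successfully
}

func main() {
    fmt.Println(connectErrno(0)) // 0 is only a placeholder descriptor
}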
xno)
${GC} -g -c _testmain.go
${GL} *.o ${GOLIBS}
- ./a.out "$@"
+ ./a.out -test.short "$@"
;;
xyes)
rm -rf ../testsuite/*.o
exit 1
}
-set result [libgo_load "./a.exe" "" ""]
+set result [libgo_load "./a.exe" "-test.short" ""]
set status [lindex $result 0]
$status go