From 8039ca76a5705ae5052b20cee64110c32545c4fc Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Thu, 24 Mar 2011 23:46:17 +0000 Subject: [PATCH] Update to current version of Go library. From-SVN: r171427 --- libgo/MERGE | 2 +- libgo/Makefile.am | 152 +++-- libgo/Makefile.in | 233 ++++++-- libgo/go/archive/zip/reader.go | 9 +- libgo/go/big/int.go | 89 ++- libgo/go/big/int_test.go | 39 ++ libgo/go/big/nat.go | 47 ++ libgo/go/bufio/bufio_test.go | 8 +- libgo/go/compress/flate/deflate_test.go | 13 +- libgo/go/compress/lzw/reader_test.go | 23 +- libgo/go/compress/lzw/writer_test.go | 27 +- libgo/go/crypto/ecdsa/ecdsa.go | 149 +++++ libgo/go/crypto/ecdsa/ecdsa_test.go | 218 +++++++ libgo/go/crypto/elliptic/elliptic.go | 5 + libgo/go/crypto/openpgp/packet/packet.go | 30 +- libgo/go/crypto/openpgp/packet/packet_test.go | 20 + libgo/go/crypto/openpgp/packet/private_key.go | 31 +- libgo/go/crypto/openpgp/packet/public_key.go | 46 +- .../crypto/openpgp/packet/public_key_test.go | 12 +- libgo/go/crypto/openpgp/packet/signature.go | 108 +++- libgo/go/crypto/openpgp/read_test.go | 32 +- libgo/go/crypto/openpgp/write.go | 6 +- libgo/go/crypto/openpgp/write_test.go | 16 +- libgo/go/crypto/tls/common.go | 4 + libgo/go/crypto/tls/conn.go | 10 +- libgo/go/crypto/tls/generate_cert.go | 6 +- libgo/go/debug/proc/proc_darwin.go | 2 +- libgo/go/debug/proc/proc_freebsd.go | 2 +- libgo/go/debug/proc/proc_linux.go | 18 +- libgo/go/debug/proc/proc_windows.go | 2 +- libgo/go/exec/exec.go | 8 +- libgo/go/exec/exec_test.go | 52 ++ libgo/go/exp/eval/stmt.go | 13 +- libgo/go/exp/eval/stmt_test.go | 2 +- libgo/go/exp/ogle/cmd.go | 2 +- libgo/go/expvar/expvar.go | 2 +- libgo/go/flag/flag.go | 49 +- libgo/go/flag/flag_test.go | 7 + libgo/go/fmt/format.go | 4 +- libgo/go/fmt/scan.go | 61 +- libgo/go/fmt/scan_test.go | 29 +- libgo/go/go/ast/ast.go | 46 +- libgo/go/go/ast/filter.go | 3 +- libgo/go/go/ast/print.go | 31 +- libgo/go/go/ast/scope.go | 261 +++------ libgo/go/go/ast/walk.go | 8 +- 
libgo/go/go/parser/interface.go | 4 +- libgo/go/go/parser/parser.go | 445 ++++++++++---- libgo/go/go/parser/parser_test.go | 3 +- libgo/go/go/printer/nodes.go | 92 ++- libgo/go/go/printer/printer.go | 35 +- libgo/go/go/printer/printer_test.go | 36 +- .../go/go/printer/testdata/expressions.golden | 6 +- .../go/go/printer/testdata/expressions.input | 2 +- libgo/go/go/printer/testdata/expressions.raw | 6 +- libgo/go/go/printer/testdata/slow.golden | 85 +++ libgo/go/go/printer/testdata/slow.input | 85 +++ libgo/go/go/scanner/scanner.go | 12 +- libgo/go/go/scanner/scanner_test.go | 45 +- libgo/go/go/typechecker/scope.go | 72 +-- .../testdata/{test0.go => test0.src} | 0 .../testdata/{test1.go => test1.src} | 2 +- .../testdata/{test3.go => test3.src} | 7 +- .../testdata/{test4.go => test4.src} | 2 +- libgo/go/go/typechecker/type.go | 125 ++++ libgo/go/go/typechecker/typechecker.go | 78 +-- libgo/go/go/typechecker/typechecker_test.go | 2 +- libgo/go/go/typechecker/universe.go | 4 +- libgo/go/gob/codec_test.go | 61 +- libgo/go/gob/decode.go | 395 +++++++++---- libgo/go/gob/decoder.go | 9 +- libgo/go/gob/encode.go | 214 +++++-- libgo/go/gob/encoder.go | 118 ++-- libgo/go/gob/gobencdec_test.go | 384 ++++++++++++ libgo/go/gob/timing_test.go | 90 +++ libgo/go/gob/type.go | 280 +++++++-- libgo/go/gob/type_test.go | 24 +- libgo/go/hash/fnv/fnv.go | 133 +++++ libgo/go/hash/fnv/fnv_test.go | 167 ++++++ libgo/go/http/cgi/child.go | 192 ++++++ libgo/go/http/cgi/child_test.go | 83 +++ libgo/go/http/cgi/host.go | 221 +++++++ libgo/go/http/cgi/host_test.go | 273 +++++++++ libgo/go/http/cgi/matryoshka_test.go | 74 +++ libgo/go/http/client.go | 83 +-- libgo/go/http/client_test.go | 23 +- libgo/go/http/cookie.go | 272 +++++++++ libgo/go/http/cookie_test.go | 110 ++++ libgo/go/http/dump.go | 4 +- libgo/go/http/export_test.go | 34 ++ libgo/go/http/fs.go | 22 +- libgo/go/http/fs_test.go | 83 +-- libgo/go/http/httptest/recorder.go | 59 ++ libgo/go/http/httptest/server.go | 70 +++ 
libgo/go/http/persist.go | 112 ++-- libgo/go/http/pprof/pprof.go | 6 +- libgo/go/http/proxy_test.go | 30 +- libgo/go/http/range_test.go | 57 ++ libgo/go/http/readrequest_test.go | 2 +- libgo/go/http/request.go | 41 +- libgo/go/http/request_test.go | 37 +- libgo/go/http/requestwrite_test.go | 64 +- libgo/go/http/response.go | 34 +- libgo/go/http/responsewrite_test.go | 38 +- libgo/go/http/serve_test.go | 257 +++++--- libgo/go/http/server.go | 155 +++-- libgo/go/http/transport.go | 550 +++++++++++++++--- libgo/go/http/transport_test.go | 235 ++++++++ libgo/go/io/ioutil/ioutil.go | 38 +- libgo/go/io/ioutil/tempfile.go | 7 +- libgo/go/io/ioutil/tempfile_test.go | 5 +- libgo/go/io/pipe.go | 307 +++------- libgo/go/mime/multipart/multipart.go | 14 +- libgo/go/mime/multipart/multipart_test.go | 13 +- libgo/go/net/fd.go | 70 ++- libgo/go/net/fd_linux.go | 82 ++- libgo/go/net/ip.go | 109 +++- libgo/go/netchan/common.go | 4 +- libgo/go/netchan/export.go | 4 +- libgo/go/netchan/import.go | 4 +- libgo/go/netchan/netchan_test.go | 16 +- libgo/go/os/exec.go | 43 +- libgo/go/os/inotify/inotify_linux_test.go | 16 +- libgo/go/os/os_test.go | 47 +- libgo/go/path/filepath/match.go | 282 +++++++++ libgo/go/path/filepath/match_test.go | 117 ++++ libgo/go/path/filepath/path.go | 335 +++++++++++ libgo/go/path/filepath/path_test.go | 486 ++++++++++++++++ libgo/go/path/filepath/path_unix.go | 28 + libgo/go/path/filepath/path_windows.go | 37 ++ libgo/go/path/match.go | 83 +-- libgo/go/path/match_test.go | 29 - libgo/go/path/path.go | 78 +-- libgo/go/path/path_test.go | 159 ----- libgo/go/path/path_unix.go | 11 - libgo/go/path/path_windows.go | 11 - libgo/go/reflect/all_test.go | 37 +- libgo/go/reflect/value.go | 58 +- libgo/go/rpc/client.go | 41 +- libgo/go/rpc/server.go | 122 ++-- libgo/go/rpc/server_test.go | 62 +- libgo/go/runtime/debug.go | 84 +-- libgo/go/runtime/mem.go | 69 +++ libgo/go/runtime/pprof/pprof.go | 69 +++ libgo/go/runtime/pprof/pprof_test.go | 69 +++ 
libgo/go/strings/strings.go | 16 +- libgo/go/strings/strings_test.go | 52 ++ libgo/go/sync/waitgroup.go | 2 +- libgo/go/template/template.go | 5 +- libgo/go/testing/script/script.go | 4 +- libgo/go/testing/testing.go | 61 +- libgo/go/time/sleep.go | 30 +- libgo/go/time/sleep_test.go | 4 +- libgo/go/time/sys.go | 62 ++ libgo/go/time/time.go | 26 +- libgo/go/time/time_test.go | 12 + libgo/go/websocket/server.go | 8 +- libgo/go/websocket/websocket_test.go | 11 +- libgo/go/xml/xml.go | 1 - libgo/merge.sh | 2 +- libgo/runtime/channel.h | 7 +- libgo/runtime/go-rec-big.c | 3 + libgo/runtime/go-rec-nb-big.c | 15 +- libgo/runtime/go-rec-nb-small.c | 4 +- libgo/runtime/go-rec-small.c | 3 +- libgo/runtime/go-reflect-chan.c | 45 +- libgo/syscalls/exec.go | 67 ++- libgo/testsuite/gotest | 53 +- 168 files changed, 8977 insertions(+), 2743 deletions(-) create mode 100644 libgo/go/crypto/ecdsa/ecdsa.go create mode 100644 libgo/go/crypto/ecdsa/ecdsa_test.go create mode 100644 libgo/go/go/printer/testdata/slow.golden create mode 100644 libgo/go/go/printer/testdata/slow.input rename libgo/go/go/typechecker/testdata/{test0.go => test0.src} (100%) rename libgo/go/go/typechecker/testdata/{test1.go => test1.src} (83%) rename libgo/go/go/typechecker/testdata/{test3.go => test3.src} (80%) rename libgo/go/go/typechecker/testdata/{test4.go => test4.src} (84%) create mode 100644 libgo/go/go/typechecker/type.go create mode 100644 libgo/go/gob/gobencdec_test.go create mode 100644 libgo/go/gob/timing_test.go create mode 100644 libgo/go/hash/fnv/fnv.go create mode 100644 libgo/go/hash/fnv/fnv_test.go create mode 100644 libgo/go/http/cgi/child.go create mode 100644 libgo/go/http/cgi/child_test.go create mode 100644 libgo/go/http/cgi/host.go create mode 100644 libgo/go/http/cgi/host_test.go create mode 100644 libgo/go/http/cgi/matryoshka_test.go create mode 100644 libgo/go/http/cookie.go create mode 100644 libgo/go/http/cookie_test.go create mode 100644 libgo/go/http/export_test.go create mode 
100644 libgo/go/http/httptest/recorder.go create mode 100644 libgo/go/http/httptest/server.go create mode 100644 libgo/go/http/range_test.go create mode 100644 libgo/go/http/transport_test.go create mode 100644 libgo/go/path/filepath/match.go create mode 100644 libgo/go/path/filepath/match_test.go create mode 100644 libgo/go/path/filepath/path.go create mode 100644 libgo/go/path/filepath/path_test.go create mode 100644 libgo/go/path/filepath/path_unix.go create mode 100644 libgo/go/path/filepath/path_windows.go delete mode 100644 libgo/go/path/path_unix.go delete mode 100644 libgo/go/path/path_windows.go create mode 100644 libgo/go/runtime/mem.go create mode 100644 libgo/go/runtime/pprof/pprof_test.go create mode 100644 libgo/go/time/sys.go diff --git a/libgo/MERGE b/libgo/MERGE index e572b232450..729be060290 100644 --- a/libgo/MERGE +++ b/libgo/MERGE @@ -1,4 +1,4 @@ -94d654be2064 +31d7feb9281b The first line of this file holds the Mercurial revision number of the last merge done from the master library sources. 
diff --git a/libgo/Makefile.am b/libgo/Makefile.am index 2db29fdea52..77bf27a7c53 100644 --- a/libgo/Makefile.am +++ b/libgo/Makefile.am @@ -182,6 +182,7 @@ toolexeclibgocrypto_DATA = \ crypto/cast5.gox \ crypto/cipher.gox \ crypto/dsa.gox \ + crypto/ecdsa.gox \ crypto/elliptic.gox \ crypto/hmac.gox \ crypto/md4.gox \ @@ -254,11 +255,14 @@ toolexeclibgohashdir = $(toolexeclibgodir)/hash toolexeclibgohash_DATA = \ hash/adler32.gox \ hash/crc32.gox \ - hash/crc64.gox + hash/crc64.gox \ + hash/fnv.gox toolexeclibgohttpdir = $(toolexeclibgodir)/http toolexeclibgohttp_DATA = \ + http/cgi.gox \ + http/httptest.gox \ http/pprof.gox toolexeclibgoimagedir = $(toolexeclibgodir)/image @@ -301,6 +305,11 @@ toolexeclibgoos_DATA = \ $(os_inotify_gox) \ os/signal.gox +toolexeclibgopathdir = $(toolexeclibgodir)/path + +toolexeclibgopath_DATA = \ + path/filepath.gox + toolexeclibgorpcdir = $(toolexeclibgodir)/rpc toolexeclibgorpc_DATA = \ @@ -543,6 +552,7 @@ go_html_files = \ go_http_files = \ go/http/chunked.go \ go/http/client.go \ + go/http/cookie.go \ go/http/dump.go \ go/http/fs.go \ go/http/header.go \ @@ -726,8 +736,7 @@ go_patch_files = \ go_path_files = \ go/path/match.go \ - go/path/path.go \ - go/path/path_unix.go + go/path/path.go go_rand_files = \ go/rand/exp.go \ @@ -753,6 +762,7 @@ go_runtime_files = \ go/runtime/debug.go \ go/runtime/error.go \ go/runtime/extern.go \ + go/runtime/mem.go \ go/runtime/sig.go \ go/runtime/softfloat64.go \ go/runtime/type.go \ @@ -826,6 +836,7 @@ go_testing_files = \ go_time_files = \ go/time/format.go \ go/time/sleep.go \ + go/time/sys.go \ go/time/tick.go \ go/time/time.go \ go/time/zoneinfo_unix.go @@ -936,6 +947,8 @@ go_crypto_cipher_files = \ go/crypto/cipher/ofb.go go_crypto_dsa_files = \ go/crypto/dsa/dsa.go +go_crypto_ecdsa_files = \ + go/crypto/ecdsa/ecdsa.go go_crypto_elliptic_files = \ go/crypto/elliptic/elliptic.go go_crypto_hmac_files = \ @@ -1101,6 +1114,7 @@ go_go_token_files = \ go/go/token/token.go 
go_go_typechecker_files = \ go/go/typechecker/scope.go \ + go/go/typechecker/type.go \ go/go/typechecker/typechecker.go \ go/go/typechecker/universe.go @@ -1110,7 +1124,15 @@ go_hash_crc32_files = \ go/hash/crc32/crc32.go go_hash_crc64_files = \ go/hash/crc64/crc64.go - +go_hash_fnv_files = \ + go/hash/fnv/fnv.go + +go_http_cgi_files = \ + go/http/cgi/child.go \ + go/http/cgi/host.go +go_http_httptest_files = \ + go/http/httptest/recorder.go \ + go/http/httptest/server.go go_http_pprof_files = \ go/http/pprof/pprof.go @@ -1151,6 +1173,11 @@ go_os_signal_files = \ go/os/signal/signal.go \ unix.go +go_path_filepath_files = \ + go/path/filepath/match.go \ + go/path/filepath/path.go \ + go/path/filepath/path_unix.go + go_rpc_jsonrpc_files = \ go/rpc/jsonrpc/client.go \ go/rpc/jsonrpc/server.go @@ -1377,6 +1404,7 @@ libgo_go_objs = \ crypto/cast5.lo \ crypto/cipher.lo \ crypto/dsa.lo \ + crypto/ecdsa.lo \ crypto/elliptic.lo \ crypto/hmac.lo \ crypto/md4.lo \ @@ -1426,6 +1454,9 @@ libgo_go_objs = \ hash/adler32.lo \ hash/crc32.lo \ hash/crc64.lo \ + hash/fnv.lo \ + http/cgi.lo \ + http/httptest.lo \ http/pprof.lo \ image/jpeg.lo \ image/png.lo \ @@ -1436,6 +1467,7 @@ libgo_go_objs = \ net/textproto.lo \ $(os_lib_inotify_lo) \ os/signal.lo \ + path/filepath.lo \ rpc/jsonrpc.lo \ runtime/debug.lo \ runtime/pprof.lo \ @@ -1532,7 +1564,7 @@ asn1/check: $(CHECK_DEPS) $(CHECK) .PHONY: asn1/check -big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox +big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox os.gox $(BUILDPACKAGE) big/check: $(CHECK_DEPS) $(CHECK) @@ -1597,9 +1629,9 @@ fmt/check: $(CHECK_DEPS) $(CHECK) .PHONY: fmt/check -gob/gob.lo: $(go_gob_files) bytes.gox fmt.gox io.gox math.gox os.gox \ - reflect.gox runtime.gox strings.gox sync.gox unicode.gox \ - utf8.gox +gob/gob.lo: $(go_gob_files) bufio.gox bytes.gox fmt.gox io.gox math.gox \ + os.gox reflect.gox runtime.gox strings.gox sync.gox \ + unicode.gox utf8.gox $(BUILDPACKAGE) gob/check: $(CHECK_DEPS) 
$(CHECK) @@ -1621,8 +1653,8 @@ html/check: $(CHECK_DEPS) http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \ io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ - net.gox net/textproto.gox os.gox path.gox sort.gox \ - strconv.gox strings.gox sync.gox time.gox utf8.gox + net.gox net/textproto.gox os.gox path.gox path/filepath.gox \ + sort.gox strconv.gox strings.gox sync.gox time.gox utf8.gox $(BUILDPACKAGE) http/check: $(CHECK_DEPS) $(CHECK) @@ -1634,7 +1666,7 @@ image/check: $(CHECK_DEPS) $(CHECK) .PHONY: image/check -io/io.lo: $(go_io_files) os.gox runtime.gox sync.gox +io/io.lo: $(go_io_files) os.gox sync.gox $(BUILDPACKAGE) io/check: $(CHECK_DEPS) $(CHECK) @@ -1697,8 +1729,7 @@ patch/check: $(CHECK_DEPS) $(CHECK) .PHONY: patch/check -path/path.lo: $(go_path_files) io/ioutil.gox os.gox sort.gox strings.gox \ - utf8.gox +path/path.lo: $(go_path_files) os.gox strings.gox utf8.gox $(BUILDPACKAGE) path/check: $(CHECK_DEPS) $(CHECK) @@ -1799,7 +1830,7 @@ template/check: $(CHECK_DEPS) .PHONY: template/check testing/testing.lo: $(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \ - runtime.gox time.gox + runtime.gox runtime/pprof.gox time.gox $(BUILDPACKAGE) testing/check: $(CHECK_DEPS) $(CHECK) @@ -1862,7 +1893,7 @@ archive/tar/check: $(CHECK_DEPS) archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \ compress/flate.gox hash.gox hash/crc32.gox \ - encoding/binary.gox io.gox os.gox + encoding/binary.gox io.gox io/ioutil.gox os.gox $(BUILDPACKAGE) archive/zip/check: $(CHECK_DEPS) @$(MKDIR_P) archive/zip @@ -1977,6 +2008,14 @@ crypto/dsa/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/dsa/check +crypto/ecdsa.lo: $(go_crypto_ecdsa_files) big.gox crypto/elliptic.gox io.gox \ + os.gox + $(BUILDPACKAGE) +crypto/ecdsa/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/ecdsa + $(CHECK) +.PHONY: crypto/ecdsa/check + crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox 
sync.gox $(BUILDPACKAGE) crypto/elliptic/check: $(CHECK_DEPS) @@ -2014,8 +2053,8 @@ crypto/ocsp/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/ocsp/check -crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ - crypto/openpgp/armor.gox crypto/openpgp/error.gox \ +crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox crypto/dsa.gox \ + crypto/openpgp/armor.gox crypto/openpgp/error.gox \ crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ hash.gox io.gox os.gox strconv.gox time.gox $(BUILDPACKAGE) @@ -2137,10 +2176,10 @@ crypto/openpgp/error/check: $(CHECK_DEPS) crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ compress/flate.gox compress/zlib.gox crypto.gox \ crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ - crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ - crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ - crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ - io/ioutil.gox os.gox strconv.gox strings.gox + crypto/dsa.gox crypto/openpgp/error.gox \ + crypto/openpgp/s2k.gox crypto/rand.gox crypto/rsa.gox \ + crypto/sha1.gox crypto/subtle.gox encoding/binary.gox fmt.gox \ + hash.gox io.gox io/ioutil.gox os.gox strconv.gox strings.gox $(BUILDPACKAGE) crypto/openpgp/packet/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/packet @@ -2288,8 +2327,8 @@ exp/eval/check: $(CHECK_DEPS) $(CHECK) .PHONY: exp/eval/check -go/ast.lo: $(go_go_ast_files) fmt.gox go/token.gox io.gox os.gox reflect.gox \ - unicode.gox utf8.gox +go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \ + reflect.gox unicode.gox utf8.gox $(BUILDPACKAGE) go/ast/check: $(CHECK_DEPS) @$(MKDIR_P) go/ast @@ -2306,7 +2345,7 @@ go/doc/check: $(CHECK_DEPS) go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \ go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \ - path.gox strings.gox + path/filepath.gox strings.gox $(BUILDPACKAGE) go/parser/check: $(CHECK_DEPS) @$(MKDIR_P) go/parser @@ -2314,8 +2353,8 @@ go/parser/check: 
$(CHECK_DEPS) .PHONY: go/parser/check go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \ - go/token.gox io.gox os.gox reflect.gox runtime.gox \ - strings.gox tabwriter.gox + go/token.gox io.gox os.gox path/filepath.gox reflect.gox \ + runtime.gox strings.gox tabwriter.gox $(BUILDPACKAGE) go/printer/check: $(CHECK_DEPS) @$(MKDIR_P) go/printer @@ -2323,8 +2362,8 @@ go/printer/check: $(CHECK_DEPS) .PHONY: go/printer/check go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \ - go/token.gox io.gox os.gox path.gox sort.gox strconv.gox \ - unicode.gox utf8.gox + go/token.gox io.gox os.gox path/filepath.gox sort.gox \ + strconv.gox unicode.gox utf8.gox $(BUILDPACKAGE) go/scanner/check: $(CHECK_DEPS) @$(MKDIR_P) go/scanner @@ -2367,6 +2406,30 @@ hash/crc64/check: $(CHECK_DEPS) $(CHECK) .PHONY: hash/crc64/check +hash/fnv.lo: $(go_hash_fnv_files) encoding/binary.gox hash.gox os.gox + $(BUILDPACKAGE) +hash/fnv/check: $(CHECK_DEPS) + @$(MKDIR_P) hash/fnv + $(CHECK) +.PHONY: hash/fnv/check + +http/cgi.lo: $(go_http_cgi_files) bufio.gox bytes.gox encoding/line.gox \ + exec.gox fmt.gox http.gox io.gox io/ioutil.gox log.gox \ + os.gox path/filepath.gox regexp.gox strconv.gox strings.gox + $(BUILDPACKAGE) +http/cgi/check: $(CHECK_DEPS) + @$(MKDIR_P) http/cgi + $(CHECK) +.PHONY: http/cgi/check + +http/httptest.lo: $(go_http_httptest_files) bytes.gox fmt.gox http.gox \ + net.gox os.gox + $(BUILDPACKAGE) +http/httptest/check: $(CHECK_DEPS) + @$(MKDIR_P) http/httptest + $(CHECK) +.PHONY: http/httptest/check + http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \ runtime.gox runtime/pprof.gox strconv.gox strings.gox $(BUILDPACKAGE) @@ -2398,8 +2461,8 @@ index/suffixarray/check: $(CHECK_DEPS) $(CHECK) .PHONY: index/suffixarray/check -io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox sort.gox \ - strconv.gox +io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox path/filepath.gox \ + sort.gox strconv.gox 
$(BUILDPACKAGE) io/ioutil/check: $(CHECK_DEPS) @$(MKDIR_P) io/ioutil @@ -2407,7 +2470,7 @@ io/ioutil/check: $(CHECK_DEPS) .PHONY: io/ioutil/check mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \ - mime.gox os.gox regexp.gox strings.gox + mime.gox net/textproto.gox os.gox regexp.gox strings.gox $(BUILDPACKAGE) mime/multipart/check: $(CHECK_DEPS) @$(MKDIR_P) mime/multipart @@ -2445,6 +2508,14 @@ unix.go: $(srcdir)/go/os/signal/mkunix.sh sysinfo.go $(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp mv -f $@.tmp $@ +path/filepath.lo: $(go_path_filepath_files) bytes.gox os.gox sort.gox \ + strings.gox utf8.gox + $(BUILDPACKAGE) +path/filepath/check: $(CHECK_DEPS) + @$(MKDIR_P) path/filepath + $(CHECK) +.PHONY: path/filepath/check + rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \ os.gox rpc.gox sync.gox $(BUILDPACKAGE) @@ -2462,7 +2533,7 @@ runtime/debug/check: $(CHECK_DEPS) .PHONY: runtime/debug/check runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \ - runtime.gox + runtime.gox sync.gox $(BUILDPACKAGE) runtime/pprof/check: $(CHECK_DEPS) @$(MKDIR_P) runtime/pprof @@ -2653,6 +2724,8 @@ crypto/cipher.gox: crypto/cipher.lo $(BUILDGOX) crypto/dsa.gox: crypto/dsa.lo $(BUILDGOX) +crypto/ecdsa.gox: crypto/ecdsa.lo + $(BUILDGOX) crypto/elliptic.gox: crypto/elliptic.lo $(BUILDGOX) crypto/hmac.gox: crypto/hmac.lo @@ -2757,7 +2830,13 @@ hash/crc32.gox: hash/crc32.lo $(BUILDGOX) hash/crc64.gox: hash/crc64.lo $(BUILDGOX) +hash/fnv.gox: hash/fnv.lo + $(BUILDGOX) +http/cgi.gox: http/cgi.lo + $(BUILDGOX) +http/httptest.gox: http/httptest.lo + $(BUILDGOX) http/pprof.gox: http/pprof.lo $(BUILDGOX) @@ -2785,6 +2864,9 @@ os/inotify.gox: os/inotify.lo os/signal.gox: os/signal.lo $(BUILDGOX) +path/filepath.gox: path/filepath.lo + $(BUILDGOX) + rpc/jsonrpc.gox: rpc/jsonrpc.lo $(BUILDGOX) @@ -2823,7 +2905,7 @@ TEST_PACKAGES = \ fmt/check \ gob/check \ html/check \ - $(if $(GCCGO_RUN_ALL_TESTS),http/check) 
\ + http/check \ io/check \ json/check \ log/check \ @@ -2872,6 +2954,7 @@ TEST_PACKAGES = \ crypto/cast5/check \ crypto/cipher/check \ crypto/dsa/check \ + crypto/ecdsa/check \ crypto/elliptic/check \ crypto/hmac/check \ crypto/md4/check \ @@ -2916,6 +2999,8 @@ TEST_PACKAGES = \ hash/adler32/check \ hash/crc32/check \ hash/crc64/check \ + hash/fnv/check \ + http/cgi/check \ image/png/check \ index/suffixarray/check \ io/ioutil/check \ @@ -2923,6 +3008,7 @@ TEST_PACKAGES = \ net/textproto/check \ $(os_inotify_check) \ os/signal/check \ + path/filepath/check \ rpc/jsonrpc/check \ sync/atomic/check \ testing/quick/check \ diff --git a/libgo/Makefile.in b/libgo/Makefile.in index 7bb302da913..dd942254dcb 100644 --- a/libgo/Makefile.in +++ b/libgo/Makefile.in @@ -110,6 +110,7 @@ am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \ "$(DESTDIR)$(toolexeclibgomimedir)" \ "$(DESTDIR)$(toolexeclibgonetdir)" \ "$(DESTDIR)$(toolexeclibgoosdir)" \ + "$(DESTDIR)$(toolexeclibgopathdir)" \ "$(DESTDIR)$(toolexeclibgorpcdir)" \ "$(DESTDIR)$(toolexeclibgoruntimedir)" \ "$(DESTDIR)$(toolexeclibgosyncdir)" \ @@ -141,9 +142,10 @@ am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo bufio/bufio.lo \ container/heap.lo container/list.lo container/ring.lo \ container/vector.lo crypto/aes.lo crypto/block.lo \ crypto/blowfish.lo crypto/cast5.lo crypto/cipher.lo \ - crypto/dsa.lo crypto/elliptic.lo crypto/hmac.lo crypto/md4.lo \ - crypto/md5.lo crypto/ocsp.lo crypto/openpgp.lo crypto/rand.lo \ - crypto/rc4.lo crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \ + crypto/dsa.lo crypto/ecdsa.lo crypto/elliptic.lo \ + crypto/hmac.lo crypto/md4.lo crypto/md5.lo crypto/ocsp.lo \ + crypto/openpgp.lo crypto/rand.lo crypto/rc4.lo \ + crypto/ripemd160.lo crypto/rsa.lo crypto/sha1.lo \ crypto/sha256.lo crypto/sha512.lo crypto/subtle.lo \ crypto/tls.lo crypto/twofish.lo crypto/x509.lo crypto/xtea.lo \ crypto/openpgp/armor.lo crypto/openpgp/error.lo \ @@ -155,13 +157,14 @@ am__DEPENDENCIES_2 = asn1/asn1.lo big/big.lo 
bufio/bufio.lo \ exp/datafmt.lo exp/draw.lo exp/eval.lo go/ast.lo go/doc.lo \ go/parser.lo go/printer.lo go/scanner.lo go/token.lo \ go/typechecker.lo hash/adler32.lo hash/crc32.lo hash/crc64.lo \ - http/pprof.lo image/jpeg.lo image/png.lo index/suffixarray.lo \ - io/ioutil.lo mime/multipart.lo net/dict.lo net/textproto.lo \ - $(am__DEPENDENCIES_1) os/signal.lo rpc/jsonrpc.lo \ - runtime/debug.lo runtime/pprof.lo sync/atomic.lo \ - sync/atomic_c.lo syscalls/syscall.lo syscalls/errno.lo \ - testing/testing.lo testing/iotest.lo testing/quick.lo \ - testing/script.lo + hash/fnv.lo http/cgi.lo http/httptest.lo http/pprof.lo \ + image/jpeg.lo image/png.lo index/suffixarray.lo io/ioutil.lo \ + mime/multipart.lo net/dict.lo net/textproto.lo \ + $(am__DEPENDENCIES_1) os/signal.lo path/filepath.lo \ + rpc/jsonrpc.lo runtime/debug.lo runtime/pprof.lo \ + sync/atomic.lo sync/atomic_c.lo syscalls/syscall.lo \ + syscalls/errno.lo testing/testing.lo testing/iotest.lo \ + testing/quick.lo testing/script.lo libgo_la_DEPENDENCIES = $(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) @@ -280,8 +283,9 @@ DATA = $(toolexeclibgo_DATA) $(toolexeclibgoarchive_DATA) \ $(toolexeclibgoimage_DATA) $(toolexeclibgoindex_DATA) \ $(toolexeclibgoio_DATA) $(toolexeclibgomime_DATA) \ $(toolexeclibgonet_DATA) $(toolexeclibgoos_DATA) \ - $(toolexeclibgorpc_DATA) $(toolexeclibgoruntime_DATA) \ - $(toolexeclibgosync_DATA) $(toolexeclibgotesting_DATA) + $(toolexeclibgopath_DATA) $(toolexeclibgorpc_DATA) \ + $(toolexeclibgoruntime_DATA) $(toolexeclibgosync_DATA) \ + $(toolexeclibgotesting_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \ @@ -620,6 +624,7 @@ toolexeclibgocrypto_DATA = \ crypto/cast5.gox \ crypto/cipher.gox \ crypto/dsa.gox \ + crypto/ecdsa.gox \ crypto/elliptic.gox \ crypto/hmac.gox \ crypto/md4.gox \ 
@@ -686,10 +691,13 @@ toolexeclibgohashdir = $(toolexeclibgodir)/hash toolexeclibgohash_DATA = \ hash/adler32.gox \ hash/crc32.gox \ - hash/crc64.gox + hash/crc64.gox \ + hash/fnv.gox toolexeclibgohttpdir = $(toolexeclibgodir)/http toolexeclibgohttp_DATA = \ + http/cgi.gox \ + http/httptest.gox \ http/pprof.gox toolexeclibgoimagedir = $(toolexeclibgodir)/image @@ -723,6 +731,10 @@ toolexeclibgoos_DATA = \ $(os_inotify_gox) \ os/signal.gox +toolexeclibgopathdir = $(toolexeclibgodir)/path +toolexeclibgopath_DATA = \ + path/filepath.gox + toolexeclibgorpcdir = $(toolexeclibgodir)/rpc toolexeclibgorpc_DATA = \ rpc/jsonrpc.gox @@ -928,6 +940,7 @@ go_html_files = \ go_http_files = \ go/http/chunked.go \ go/http/client.go \ + go/http/cookie.go \ go/http/dump.go \ go/http/fs.go \ go/http/header.go \ @@ -1084,8 +1097,7 @@ go_patch_files = \ go_path_files = \ go/path/match.go \ - go/path/path.go \ - go/path/path_unix.go + go/path/path.go go_rand_files = \ go/rand/exp.go \ @@ -1111,6 +1123,7 @@ go_runtime_files = \ go/runtime/debug.go \ go/runtime/error.go \ go/runtime/extern.go \ + go/runtime/mem.go \ go/runtime/sig.go \ go/runtime/softfloat64.go \ go/runtime/type.go \ @@ -1170,6 +1183,7 @@ go_testing_files = \ go_time_files = \ go/time/format.go \ go/time/sleep.go \ + go/time/sys.go \ go/time/tick.go \ go/time/time.go \ go/time/zoneinfo_unix.go @@ -1286,6 +1300,9 @@ go_crypto_cipher_files = \ go_crypto_dsa_files = \ go/crypto/dsa/dsa.go +go_crypto_ecdsa_files = \ + go/crypto/ecdsa/ecdsa.go + go_crypto_elliptic_files = \ go/crypto/elliptic/elliptic.go @@ -1490,6 +1507,7 @@ go_go_token_files = \ go_go_typechecker_files = \ go/go/typechecker/scope.go \ + go/go/typechecker/type.go \ go/go/typechecker/typechecker.go \ go/go/typechecker/universe.go @@ -1502,6 +1520,17 @@ go_hash_crc32_files = \ go_hash_crc64_files = \ go/hash/crc64/crc64.go +go_hash_fnv_files = \ + go/hash/fnv/fnv.go + +go_http_cgi_files = \ + go/http/cgi/child.go \ + go/http/cgi/host.go + +go_http_httptest_files 
= \ + go/http/httptest/recorder.go \ + go/http/httptest/server.go + go_http_pprof_files = \ go/http/pprof/pprof.go @@ -1542,6 +1571,11 @@ go_os_signal_files = \ go/os/signal/signal.go \ unix.go +go_path_filepath_files = \ + go/path/filepath/match.go \ + go/path/filepath/path.go \ + go/path/filepath/path_unix.go + go_rpc_jsonrpc_files = \ go/rpc/jsonrpc/client.go \ go/rpc/jsonrpc/server.go @@ -1718,6 +1752,7 @@ libgo_go_objs = \ crypto/cast5.lo \ crypto/cipher.lo \ crypto/dsa.lo \ + crypto/ecdsa.lo \ crypto/elliptic.lo \ crypto/hmac.lo \ crypto/md4.lo \ @@ -1767,6 +1802,9 @@ libgo_go_objs = \ hash/adler32.lo \ hash/crc32.lo \ hash/crc64.lo \ + hash/fnv.lo \ + http/cgi.lo \ + http/httptest.lo \ http/pprof.lo \ image/jpeg.lo \ image/png.lo \ @@ -1777,6 +1815,7 @@ libgo_go_objs = \ net/textproto.lo \ $(os_lib_inotify_lo) \ os/signal.lo \ + path/filepath.lo \ rpc/jsonrpc.lo \ runtime/debug.lo \ runtime/pprof.lo \ @@ -1883,7 +1922,7 @@ TEST_PACKAGES = \ fmt/check \ gob/check \ html/check \ - $(if $(GCCGO_RUN_ALL_TESTS),http/check) \ + http/check \ io/check \ json/check \ log/check \ @@ -1932,6 +1971,7 @@ TEST_PACKAGES = \ crypto/cast5/check \ crypto/cipher/check \ crypto/dsa/check \ + crypto/ecdsa/check \ crypto/elliptic/check \ crypto/hmac/check \ crypto/md4/check \ @@ -1976,6 +2016,8 @@ TEST_PACKAGES = \ hash/adler32/check \ hash/crc32/check \ hash/crc64/check \ + hash/fnv/check \ + http/cgi/check \ image/png/check \ index/suffixarray/check \ io/ioutil/check \ @@ -1983,6 +2025,7 @@ TEST_PACKAGES = \ net/textproto/check \ $(os_inotify_check) \ os/signal/check \ + path/filepath/check \ rpc/jsonrpc/check \ sync/atomic/check \ testing/quick/check \ @@ -3271,6 +3314,26 @@ uninstall-toolexeclibgoosDATA: test -n "$$files" || exit 0; \ echo " ( cd '$(DESTDIR)$(toolexeclibgoosdir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(toolexeclibgoosdir)" && rm -f $$files +install-toolexeclibgopathDATA: $(toolexeclibgopath_DATA) + @$(NORMAL_INSTALL) + test -z "$(toolexeclibgopathdir)" || 
$(MKDIR_P) "$(DESTDIR)$(toolexeclibgopathdir)" + @list='$(toolexeclibgopath_DATA)'; test -n "$(toolexeclibgopathdir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(toolexeclibgopathdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(toolexeclibgopathdir)" || exit $$?; \ + done + +uninstall-toolexeclibgopathDATA: + @$(NORMAL_UNINSTALL) + @list='$(toolexeclibgopath_DATA)'; test -n "$(toolexeclibgopathdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + test -n "$$files" || exit 0; \ + echo " ( cd '$(DESTDIR)$(toolexeclibgopathdir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(toolexeclibgopathdir)" && rm -f $$files install-toolexeclibgorpcDATA: $(toolexeclibgorpc_DATA) @$(NORMAL_INSTALL) test -z "$(toolexeclibgorpcdir)" || $(MKDIR_P) "$(DESTDIR)$(toolexeclibgorpcdir)" @@ -3668,7 +3731,7 @@ all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) all-multi $(DATA) \ config.h installdirs: installdirs-recursive installdirs-am: - for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" 
"$(DESTDIR)$(toolexeclibgotestingdir)"; do \ + for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibgodir)" "$(DESTDIR)$(toolexeclibgoarchivedir)" "$(DESTDIR)$(toolexeclibgocompressdir)" "$(DESTDIR)$(toolexeclibgocontainerdir)" "$(DESTDIR)$(toolexeclibgocryptodir)" "$(DESTDIR)$(toolexeclibgocryptoopenpgpdir)" "$(DESTDIR)$(toolexeclibgodebugdir)" "$(DESTDIR)$(toolexeclibgoencodingdir)" "$(DESTDIR)$(toolexeclibgoexpdir)" "$(DESTDIR)$(toolexeclibgogodir)" "$(DESTDIR)$(toolexeclibgohashdir)" "$(DESTDIR)$(toolexeclibgohttpdir)" "$(DESTDIR)$(toolexeclibgoimagedir)" "$(DESTDIR)$(toolexeclibgoindexdir)" "$(DESTDIR)$(toolexeclibgoiodir)" "$(DESTDIR)$(toolexeclibgomimedir)" "$(DESTDIR)$(toolexeclibgonetdir)" "$(DESTDIR)$(toolexeclibgoosdir)" "$(DESTDIR)$(toolexeclibgopathdir)" "$(DESTDIR)$(toolexeclibgorpcdir)" "$(DESTDIR)$(toolexeclibgoruntimedir)" "$(DESTDIR)$(toolexeclibgosyncdir)" "$(DESTDIR)$(toolexeclibgotestingdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive @@ -3741,9 +3804,9 @@ install-exec-am: install-multi install-toolexeclibLIBRARIES \ install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ - install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ - install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ - install-toolexeclibgotestingDATA + install-toolexeclibgoosDATA install-toolexeclibgopathDATA \ + install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \ + install-toolexeclibgosyncDATA install-toolexeclibgotestingDATA install-html: install-html-recursive @@ -3800,7 +3863,8 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ uninstall-toolexeclibgoimageDATA \ uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ - uninstall-toolexeclibgoosDATA 
uninstall-toolexeclibgorpcDATA \ + uninstall-toolexeclibgoosDATA uninstall-toolexeclibgopathDATA \ + uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoruntimeDATA \ uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgotestingDATA @@ -3836,15 +3900,15 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ install-toolexeclibgohttpDATA install-toolexeclibgoimageDATA \ install-toolexeclibgoindexDATA install-toolexeclibgoioDATA \ install-toolexeclibgomimeDATA install-toolexeclibgonetDATA \ - install-toolexeclibgoosDATA install-toolexeclibgorpcDATA \ - install-toolexeclibgoruntimeDATA install-toolexeclibgosyncDATA \ - install-toolexeclibgotestingDATA installcheck installcheck-am \ - installdirs installdirs-am maintainer-clean \ - maintainer-clean-generic maintainer-clean-multi mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - mostlyclean-local mostlyclean-multi pdf pdf-am ps ps-am tags \ - tags-recursive uninstall uninstall-am \ - uninstall-toolexeclibLIBRARIES \ + install-toolexeclibgoosDATA install-toolexeclibgopathDATA \ + install-toolexeclibgorpcDATA install-toolexeclibgoruntimeDATA \ + install-toolexeclibgosyncDATA install-toolexeclibgotestingDATA \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic \ + maintainer-clean-multi mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool mostlyclean-local \ + mostlyclean-multi pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am uninstall-toolexeclibLIBRARIES \ uninstall-toolexeclibLTLIBRARIES uninstall-toolexeclibgoDATA \ uninstall-toolexeclibgoarchiveDATA \ uninstall-toolexeclibgocompressDATA \ @@ -3859,7 +3923,8 @@ uninstall-am: uninstall-toolexeclibLIBRARIES \ uninstall-toolexeclibgoimageDATA \ uninstall-toolexeclibgoindexDATA uninstall-toolexeclibgoioDATA \ uninstall-toolexeclibgomimeDATA uninstall-toolexeclibgonetDATA \ - uninstall-toolexeclibgoosDATA uninstall-toolexeclibgorpcDATA \ + 
uninstall-toolexeclibgoosDATA uninstall-toolexeclibgopathDATA \ + uninstall-toolexeclibgorpcDATA \ uninstall-toolexeclibgoruntimeDATA \ uninstall-toolexeclibgosyncDATA \ uninstall-toolexeclibgotestingDATA @@ -3918,7 +3983,7 @@ asn1/check: $(CHECK_DEPS) $(CHECK) .PHONY: asn1/check -big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox +big/big.lo: $(go_big_files) fmt.gox rand.gox strings.gox os.gox $(BUILDPACKAGE) big/check: $(CHECK_DEPS) $(CHECK) @@ -3983,9 +4048,9 @@ fmt/check: $(CHECK_DEPS) $(CHECK) .PHONY: fmt/check -gob/gob.lo: $(go_gob_files) bytes.gox fmt.gox io.gox math.gox os.gox \ - reflect.gox runtime.gox strings.gox sync.gox unicode.gox \ - utf8.gox +gob/gob.lo: $(go_gob_files) bufio.gox bytes.gox fmt.gox io.gox math.gox \ + os.gox reflect.gox runtime.gox strings.gox sync.gox \ + unicode.gox utf8.gox $(BUILDPACKAGE) gob/check: $(CHECK_DEPS) $(CHECK) @@ -4007,8 +4072,8 @@ html/check: $(CHECK_DEPS) http/http.lo: $(go_http_files) bufio.gox bytes.gox container/vector.gox \ crypto/rand.gox crypto/tls.gox encoding/base64.gox fmt.gox \ io.gox io/ioutil.gox log.gox mime.gox mime/multipart.gox \ - net.gox net/textproto.gox os.gox path.gox sort.gox \ - strconv.gox strings.gox sync.gox time.gox utf8.gox + net.gox net/textproto.gox os.gox path.gox path/filepath.gox \ + sort.gox strconv.gox strings.gox sync.gox time.gox utf8.gox $(BUILDPACKAGE) http/check: $(CHECK_DEPS) $(CHECK) @@ -4020,7 +4085,7 @@ image/check: $(CHECK_DEPS) $(CHECK) .PHONY: image/check -io/io.lo: $(go_io_files) os.gox runtime.gox sync.gox +io/io.lo: $(go_io_files) os.gox sync.gox $(BUILDPACKAGE) io/check: $(CHECK_DEPS) $(CHECK) @@ -4083,8 +4148,7 @@ patch/check: $(CHECK_DEPS) $(CHECK) .PHONY: patch/check -path/path.lo: $(go_path_files) io/ioutil.gox os.gox sort.gox strings.gox \ - utf8.gox +path/path.lo: $(go_path_files) os.gox strings.gox utf8.gox $(BUILDPACKAGE) path/check: $(CHECK_DEPS) $(CHECK) @@ -4185,7 +4249,7 @@ template/check: $(CHECK_DEPS) .PHONY: template/check testing/testing.lo: 
$(go_testing_files) flag.gox fmt.gox os.gox regexp.gox \ - runtime.gox time.gox + runtime.gox runtime/pprof.gox time.gox $(BUILDPACKAGE) testing/check: $(CHECK_DEPS) $(CHECK) @@ -4248,7 +4312,7 @@ archive/tar/check: $(CHECK_DEPS) archive/zip.lo: $(go_archive_zip_files) bufio.gox bytes.gox \ compress/flate.gox hash.gox hash/crc32.gox \ - encoding/binary.gox io.gox os.gox + encoding/binary.gox io.gox io/ioutil.gox os.gox $(BUILDPACKAGE) archive/zip/check: $(CHECK_DEPS) @$(MKDIR_P) archive/zip @@ -4363,6 +4427,14 @@ crypto/dsa/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/dsa/check +crypto/ecdsa.lo: $(go_crypto_ecdsa_files) big.gox crypto/elliptic.gox io.gox \ + os.gox + $(BUILDPACKAGE) +crypto/ecdsa/check: $(CHECK_DEPS) + @$(MKDIR_P) crypto/ecdsa + $(CHECK) +.PHONY: crypto/ecdsa/check + crypto/elliptic.lo: $(go_crypto_elliptic_files) big.gox io.gox os.gox sync.gox $(BUILDPACKAGE) crypto/elliptic/check: $(CHECK_DEPS) @@ -4400,8 +4472,8 @@ crypto/ocsp/check: $(CHECK_DEPS) $(CHECK) .PHONY: crypto/ocsp/check -crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox \ - crypto/openpgp/armor.gox crypto/openpgp/error.gox \ +crypto/openpgp.lo: $(go_crypto_openpgp_files) crypto.gox crypto/dsa.gox \ + crypto/openpgp/armor.gox crypto/openpgp/error.gox \ crypto/openpgp/packet.gox crypto/rsa.gox crypto/sha256.gox \ hash.gox io.gox os.gox strconv.gox time.gox $(BUILDPACKAGE) @@ -4523,10 +4595,10 @@ crypto/openpgp/error/check: $(CHECK_DEPS) crypto/openpgp/packet.lo: $(go_crypto_openpgp_packet_files) big.gox bytes.gox \ compress/flate.gox compress/zlib.gox crypto.gox \ crypto/aes.gox crypto/cast5.gox crypto/cipher.gox \ - crypto/openpgp/error.gox crypto/openpgp/s2k.gox \ - crypto/rand.gox crypto/rsa.gox crypto/sha1.gox \ - crypto/subtle.gox encoding/binary.gox hash.gox io.gox \ - io/ioutil.gox os.gox strconv.gox strings.gox + crypto/dsa.gox crypto/openpgp/error.gox \ + crypto/openpgp/s2k.gox crypto/rand.gox crypto/rsa.gox \ + crypto/sha1.gox crypto/subtle.gox encoding/binary.gox 
fmt.gox \ + hash.gox io.gox io/ioutil.gox os.gox strconv.gox strings.gox $(BUILDPACKAGE) crypto/openpgp/packet/check: $(CHECK_DEPS) @$(MKDIR_P) crypto/openpgp/packet @@ -4674,8 +4746,8 @@ exp/eval/check: $(CHECK_DEPS) $(CHECK) .PHONY: exp/eval/check -go/ast.lo: $(go_go_ast_files) fmt.gox go/token.gox io.gox os.gox reflect.gox \ - unicode.gox utf8.gox +go/ast.lo: $(go_go_ast_files) bytes.gox fmt.gox go/token.gox io.gox os.gox \ + reflect.gox unicode.gox utf8.gox $(BUILDPACKAGE) go/ast/check: $(CHECK_DEPS) @$(MKDIR_P) go/ast @@ -4692,7 +4764,7 @@ go/doc/check: $(CHECK_DEPS) go/parser.lo: $(go_go_parser_files) bytes.gox fmt.gox go/ast.gox \ go/scanner.gox go/token.gox io.gox io/ioutil.gox os.gox \ - path.gox strings.gox + path/filepath.gox strings.gox $(BUILDPACKAGE) go/parser/check: $(CHECK_DEPS) @$(MKDIR_P) go/parser @@ -4700,8 +4772,8 @@ go/parser/check: $(CHECK_DEPS) .PHONY: go/parser/check go/printer.lo: $(go_go_printer_files) bytes.gox fmt.gox go/ast.gox \ - go/token.gox io.gox os.gox reflect.gox runtime.gox \ - strings.gox tabwriter.gox + go/token.gox io.gox os.gox path/filepath.gox reflect.gox \ + runtime.gox strings.gox tabwriter.gox $(BUILDPACKAGE) go/printer/check: $(CHECK_DEPS) @$(MKDIR_P) go/printer @@ -4709,8 +4781,8 @@ go/printer/check: $(CHECK_DEPS) .PHONY: go/printer/check go/scanner.lo: $(go_go_scanner_files) bytes.gox container/vector.gox fmt.gox \ - go/token.gox io.gox os.gox path.gox sort.gox strconv.gox \ - unicode.gox utf8.gox + go/token.gox io.gox os.gox path/filepath.gox sort.gox \ + strconv.gox unicode.gox utf8.gox $(BUILDPACKAGE) go/scanner/check: $(CHECK_DEPS) @$(MKDIR_P) go/scanner @@ -4753,6 +4825,30 @@ hash/crc64/check: $(CHECK_DEPS) $(CHECK) .PHONY: hash/crc64/check +hash/fnv.lo: $(go_hash_fnv_files) encoding/binary.gox hash.gox os.gox + $(BUILDPACKAGE) +hash/fnv/check: $(CHECK_DEPS) + @$(MKDIR_P) hash/fnv + $(CHECK) +.PHONY: hash/fnv/check + +http/cgi.lo: $(go_http_cgi_files) bufio.gox bytes.gox encoding/line.gox \ + exec.gox fmt.gox 
http.gox io.gox io/ioutil.gox log.gox \ + os.gox path/filepath.gox regexp.gox strconv.gox strings.gox + $(BUILDPACKAGE) +http/cgi/check: $(CHECK_DEPS) + @$(MKDIR_P) http/cgi + $(CHECK) +.PHONY: http/cgi/check + +http/httptest.lo: $(go_http_httptest_files) bytes.gox fmt.gox http.gox \ + net.gox os.gox + $(BUILDPACKAGE) +http/httptest/check: $(CHECK_DEPS) + @$(MKDIR_P) http/httptest + $(CHECK) +.PHONY: http/httptest/check + http/pprof.lo: $(go_http_pprof_files) bufio.gox fmt.gox http.gox os.gox \ runtime.gox runtime/pprof.gox strconv.gox strings.gox $(BUILDPACKAGE) @@ -4784,8 +4880,8 @@ index/suffixarray/check: $(CHECK_DEPS) $(CHECK) .PHONY: index/suffixarray/check -io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox sort.gox \ - strconv.gox +io/ioutil.lo: $(go_io_ioutil_files) bytes.gox io.gox os.gox path/filepath.gox \ + sort.gox strconv.gox $(BUILDPACKAGE) io/ioutil/check: $(CHECK_DEPS) @$(MKDIR_P) io/ioutil @@ -4793,7 +4889,7 @@ io/ioutil/check: $(CHECK_DEPS) .PHONY: io/ioutil/check mime/multipart.lo: $(go_mime_multipart_files) bufio.gox bytes.gox io.gox \ - mime.gox os.gox regexp.gox strings.gox + mime.gox net/textproto.gox os.gox regexp.gox strings.gox $(BUILDPACKAGE) mime/multipart/check: $(CHECK_DEPS) @$(MKDIR_P) mime/multipart @@ -4831,6 +4927,14 @@ unix.go: $(srcdir)/go/os/signal/mkunix.sh sysinfo.go $(SHELL) $(srcdir)/go/os/signal/mkunix.sh sysinfo.go > $@.tmp mv -f $@.tmp $@ +path/filepath.lo: $(go_path_filepath_files) bytes.gox os.gox sort.gox \ + strings.gox utf8.gox + $(BUILDPACKAGE) +path/filepath/check: $(CHECK_DEPS) + @$(MKDIR_P) path/filepath + $(CHECK) +.PHONY: path/filepath/check + rpc/jsonrpc.lo: $(go_rpc_jsonrpc_files) fmt.gox io.gox json.gox net.gox \ os.gox rpc.gox sync.gox $(BUILDPACKAGE) @@ -4848,7 +4952,7 @@ runtime/debug/check: $(CHECK_DEPS) .PHONY: runtime/debug/check runtime/pprof.lo: $(go_runtime_pprof_files) bufio.gox fmt.gox io.gox os.gox \ - runtime.gox + runtime.gox sync.gox $(BUILDPACKAGE) runtime/pprof/check: 
$(CHECK_DEPS) @$(MKDIR_P) runtime/pprof @@ -5034,6 +5138,8 @@ crypto/cipher.gox: crypto/cipher.lo $(BUILDGOX) crypto/dsa.gox: crypto/dsa.lo $(BUILDGOX) +crypto/ecdsa.gox: crypto/ecdsa.lo + $(BUILDGOX) crypto/elliptic.gox: crypto/elliptic.lo $(BUILDGOX) crypto/hmac.gox: crypto/hmac.lo @@ -5138,7 +5244,13 @@ hash/crc32.gox: hash/crc32.lo $(BUILDGOX) hash/crc64.gox: hash/crc64.lo $(BUILDGOX) +hash/fnv.gox: hash/fnv.lo + $(BUILDGOX) +http/cgi.gox: http/cgi.lo + $(BUILDGOX) +http/httptest.gox: http/httptest.lo + $(BUILDGOX) http/pprof.gox: http/pprof.lo $(BUILDGOX) @@ -5166,6 +5278,9 @@ os/inotify.gox: os/inotify.lo os/signal.gox: os/signal.lo $(BUILDGOX) +path/filepath.gox: path/filepath.lo + $(BUILDGOX) + rpc/jsonrpc.gox: rpc/jsonrpc.lo $(BUILDGOX) diff --git a/libgo/go/archive/zip/reader.go b/libgo/go/archive/zip/reader.go index d8d9bba60bc..3b265c9b72e 100644 --- a/libgo/go/archive/zip/reader.go +++ b/libgo/go/archive/zip/reader.go @@ -19,6 +19,7 @@ import ( "hash/crc32" "encoding/binary" "io" + "io/ioutil" "os" ) @@ -109,7 +110,7 @@ func (f *File) Open() (rc io.ReadCloser, err os.Error) { r := io.NewSectionReader(f.zipr, off+f.bodyOffset, size) switch f.Method { case 0: // store (no compression) - rc = nopCloser{r} + rc = ioutil.NopCloser(r) case 8: // DEFLATE rc = flate.NewReader(r) default: @@ -147,12 +148,6 @@ func (r *checksumReader) Read(b []byte) (n int, err os.Error) { func (r *checksumReader) Close() os.Error { return r.rc.Close() } -type nopCloser struct { - io.Reader -} - -func (f nopCloser) Close() os.Error { return nil } - func readFileHeader(f *File, r io.Reader) (err os.Error) { defer func() { if rerr, ok := recover().(os.Error); ok { diff --git a/libgo/go/big/int.go b/libgo/go/big/int.go index 46e0087343a..ecd70e03ef1 100644 --- a/libgo/go/big/int.go +++ b/libgo/go/big/int.go @@ -8,6 +8,7 @@ package big import ( "fmt" + "os" "rand" ) @@ -393,62 +394,19 @@ func (z *Int) SetString(s string, base int) (*Int, bool) { } -// SetBytes interprets b as the 
bytes of a big-endian, unsigned integer and -// sets z to that value. -func (z *Int) SetBytes(b []byte) *Int { - const s = _S - z.abs = z.abs.make((len(b) + s - 1) / s) - - j := 0 - for len(b) >= s { - var w Word - - for i := s; i > 0; i-- { - w <<= 8 - w |= Word(b[len(b)-i]) - } - - z.abs[j] = w - j++ - b = b[0 : len(b)-s] - } - - if len(b) > 0 { - var w Word - - for i := len(b); i > 0; i-- { - w <<= 8 - w |= Word(b[len(b)-i]) - } - - z.abs[j] = w - } - - z.abs = z.abs.norm() +// SetBytes interprets buf as the bytes of a big-endian unsigned +// integer, sets z to that value, and returns z. +func (z *Int) SetBytes(buf []byte) *Int { + z.abs = z.abs.setBytes(buf) z.neg = false return z } -// Bytes returns the absolute value of x as a big-endian byte array. +// Bytes returns the absolute value of z as a big-endian byte slice. func (z *Int) Bytes() []byte { - const s = _S - b := make([]byte, len(z.abs)*s) - - for i, w := range z.abs { - wordBytes := b[(len(z.abs)-i-1)*s : (len(z.abs)-i)*s] - for j := s - 1; j >= 0; j-- { - wordBytes[j] = byte(w) - w >>= 8 - } - } - - i := 0 - for i < len(b) && b[i] == 0 { - i++ - } - - return b[i:] + buf := make([]byte, len(z.abs)*_S) + return buf[z.abs.bytes(buf):] } @@ -739,3 +697,34 @@ func (z *Int) Not(x *Int) *Int { z.neg = true // z cannot be zero if x is positive return z } + + +// Gob codec version. Permits backward-compatible changes to the encoding. +const version byte = 1 + +// GobEncode implements the gob.GobEncoder interface. +func (z *Int) GobEncode() ([]byte, os.Error) { + buf := make([]byte, len(z.abs)*_S+1) // extra byte for version and sign bit + i := z.abs.bytes(buf) - 1 // i >= 0 + b := version << 1 // make space for sign bit + if z.neg { + b |= 1 + } + buf[i] = b + return buf[i:], nil +} + + +// GobDecode implements the gob.GobDecoder interface. 
+func (z *Int) GobDecode(buf []byte) os.Error { + if len(buf) == 0 { + return os.NewError("Int.GobDecode: no data") + } + b := buf[0] + if b>>1 != version { + return os.NewError(fmt.Sprintf("Int.GobDecode: encoding version %d not supported", b>>1)) + } + z.neg = b&1 != 0 + z.abs = z.abs.setBytes(buf[1:]) + return nil +} diff --git a/libgo/go/big/int_test.go b/libgo/go/big/int_test.go index fc981e1da46..c0cc9accf1a 100644 --- a/libgo/go/big/int_test.go +++ b/libgo/go/big/int_test.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/hex" "fmt" + "gob" "testing" "testing/quick" ) @@ -1053,3 +1054,41 @@ func TestModInverse(t *testing.T) { } } } + + +var gobEncodingTests = []string{ + "0", + "1", + "2", + "10", + "42", + "1234567890", + "298472983472983471903246121093472394872319615612417471234712061", +} + +func TestGobEncoding(t *testing.T) { + var medium bytes.Buffer + enc := gob.NewEncoder(&medium) + dec := gob.NewDecoder(&medium) + for i, test := range gobEncodingTests { + for j := 0; j < 2; j++ { + medium.Reset() // empty buffer for each test case (in case of failures) + stest := test + if j == 0 { + stest = "-" + test + } + var tx Int + tx.SetString(stest, 10) + if err := enc.Encode(&tx); err != nil { + t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err) + } + var rx Int + if err := dec.Decode(&rx); err != nil { + t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err) + } + if rx.Cmp(&tx) != 0 { + t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx) + } + } + } +} diff --git a/libgo/go/big/nat.go b/libgo/go/big/nat.go index a308f69e8cd..a04d3b1d9c1 100644 --- a/libgo/go/big/nat.go +++ b/libgo/go/big/nat.go @@ -1065,3 +1065,50 @@ NextRandom: return true } + + +// bytes writes the value of z into buf using big-endian encoding. +// len(buf) must be >= len(z)*_S. The value of z is encoded in the +// slice buf[i:]. The number i of unused bytes at the beginning of +// buf is returned as result. 
+func (z nat) bytes(buf []byte) (i int) { + i = len(buf) + for _, d := range z { + for j := 0; j < _S; j++ { + i-- + buf[i] = byte(d) + d >>= 8 + } + } + + for i < len(buf) && buf[i] == 0 { + i++ + } + + return +} + + +// setBytes interprets buf as the bytes of a big-endian unsigned +// integer, sets z to that value, and returns z. +func (z nat) setBytes(buf []byte) nat { + z = z.make((len(buf) + _S - 1) / _S) + + k := 0 + s := uint(0) + var d Word + for i := len(buf); i > 0; i-- { + d |= Word(buf[i-1]) << s + if s += 8; s == _S*8 { + z[k] = d + k++ + s = 0 + d = 0 + } + } + if k < len(z) { + z[k] = d + } + + return z.norm() +} diff --git a/libgo/go/bufio/bufio_test.go b/libgo/go/bufio/bufio_test.go index 059ca6dd223..8028e04dcd9 100644 --- a/libgo/go/bufio/bufio_test.go +++ b/libgo/go/bufio/bufio_test.go @@ -2,9 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package bufio +package bufio_test import ( + . "bufio" "bytes" "fmt" "io" @@ -502,9 +503,8 @@ func TestWriteString(t *testing.T) { b.WriteString("7890") // easy after flush b.WriteString("abcdefghijklmnopqrstuvwxy") // hard b.WriteString("z") - b.Flush() - if b.err != nil { - t.Error("WriteString", b.err) + if err := b.Flush(); err != nil { + t.Error("WriteString", err) } s := "01234567890abcdefghijklmnopqrstuvwxyz" if string(buf.Bytes()) != s { diff --git a/libgo/go/compress/flate/deflate_test.go b/libgo/go/compress/flate/deflate_test.go index ff54164b2cc..ed5884a4b78 100644 --- a/libgo/go/compress/flate/deflate_test.go +++ b/libgo/go/compress/flate/deflate_test.go @@ -191,9 +191,16 @@ func testSync(t *testing.T, level int, input []byte, name string) { t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) return } - if i == 0 && buf.buf.Len() != 0 { - t.Errorf("testSync/%d (%d, %d, %s): extra data after %d", i, level, len(input), name, hi-lo) - } + // This test originally checked that after reading + // the first 
half of the input, there was nothing left + // in the read buffer (buf.buf.Len() != 0) but that is + // not necessarily the case: the write Flush may emit + // some extra framing bits that are not necessary + // to process to obtain the first half of the uncompressed + // data. The test ran correctly most of the time, because + // the background goroutine had usually read even + // those extra bits by now, but it's not a useful thing to + // check. buf.WriteMode() } buf.ReadMode() diff --git a/libgo/go/compress/lzw/reader_test.go b/libgo/go/compress/lzw/reader_test.go index 7795a4c1489..4b5dfaadea2 100644 --- a/libgo/go/compress/lzw/reader_test.go +++ b/libgo/go/compress/lzw/reader_test.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "os" + "runtime" "strconv" "strings" "testing" @@ -117,16 +118,34 @@ func (devNull) Write(p []byte) (int, os.Error) { return len(p), nil } -func BenchmarkDecoder(b *testing.B) { +func benchmarkDecoder(b *testing.B, n int) { b.StopTimer() + b.SetBytes(int64(n)) buf0, _ := ioutil.ReadFile("../testdata/e.txt") + buf0 = buf0[:10000] compressed := bytes.NewBuffer(nil) w := NewWriter(compressed, LSB, 8) - io.Copy(w, bytes.NewBuffer(buf0)) + for i := 0; i < n; i += len(buf0) { + io.Copy(w, bytes.NewBuffer(buf0)) + } w.Close() buf1 := compressed.Bytes() + buf0, compressed, w = nil, nil, nil + runtime.GC() b.StartTimer() for i := 0; i < b.N; i++ { io.Copy(devNull{}, NewReader(bytes.NewBuffer(buf1), LSB, 8)) } } + +func BenchmarkDecoder1e4(b *testing.B) { + benchmarkDecoder(b, 1e4) +} + +func BenchmarkDecoder1e5(b *testing.B) { + benchmarkDecoder(b, 1e5) +} + +func BenchmarkDecoder1e6(b *testing.B) { + benchmarkDecoder(b, 1e6) +} diff --git a/libgo/go/compress/lzw/writer_test.go b/libgo/go/compress/lzw/writer_test.go index 715b974aa1e..2e0a8de0a87 100644 --- a/libgo/go/compress/lzw/writer_test.go +++ b/libgo/go/compress/lzw/writer_test.go @@ -8,6 +8,7 @@ import ( "io" "io/ioutil" "os" + "runtime" "testing" ) @@ -99,13 +100,33 @@ func TestWriter(t 
*testing.T) { } } -func BenchmarkEncoder(b *testing.B) { +func benchmarkEncoder(b *testing.B, n int) { b.StopTimer() - buf, _ := ioutil.ReadFile("../testdata/e.txt") + b.SetBytes(int64(n)) + buf0, _ := ioutil.ReadFile("../testdata/e.txt") + buf0 = buf0[:10000] + buf1 := make([]byte, n) + for i := 0; i < n; i += len(buf0) { + copy(buf1[i:], buf0) + } + buf0 = nil + runtime.GC() b.StartTimer() for i := 0; i < b.N; i++ { w := NewWriter(devNull{}, LSB, 8) - w.Write(buf) + w.Write(buf1) w.Close() } } + +func BenchmarkEncoder1e4(b *testing.B) { + benchmarkEncoder(b, 1e4) +} + +func BenchmarkEncoder1e5(b *testing.B) { + benchmarkEncoder(b, 1e5) +} + +func BenchmarkEncoder1e6(b *testing.B) { + benchmarkEncoder(b, 1e6) +} diff --git a/libgo/go/crypto/ecdsa/ecdsa.go b/libgo/go/crypto/ecdsa/ecdsa.go new file mode 100644 index 00000000000..1f37849c5d5 --- /dev/null +++ b/libgo/go/crypto/ecdsa/ecdsa.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdsa implements the Elliptic Curve Digital Signature Algorithm, as +// defined in FIPS 186-3. +package ecdsa + +// References: +// [NSA]: Suite B implementor's guide to FIPS 186-3, +// http://www.nsa.gov/ia/_files/ecdsa.pdf +// [SECG]: SECG, SEC1 +// http://www.secg.org/download/aid-780/sec1-v2.pdf + +import ( + "big" + "crypto/elliptic" + "io" + "os" +) + +// PublicKey represents an ECDSA public key. +type PublicKey struct { + *elliptic.Curve + X, Y *big.Int +} + +// PrivateKey represents a ECDSA private key. +type PrivateKey struct { + PublicKey + D *big.Int +} + +var one = new(big.Int).SetInt64(1) + +// randFieldElement returns a random element of the field underlying the given +// curve using the procedure given in [NSA] A.2.1. 
+func randFieldElement(c *elliptic.Curve, rand io.Reader) (k *big.Int, err os.Error) { + b := make([]byte, c.BitSize/8+8) + _, err = rand.Read(b) + if err != nil { + return + } + + k = new(big.Int).SetBytes(b) + n := new(big.Int).Sub(c.N, one) + k.Mod(k, n) + k.Add(k, one) + return +} + +// GenerateKey generates a public&private key pair. +func GenerateKey(c *elliptic.Curve, rand io.Reader) (priv *PrivateKey, err os.Error) { + k, err := randFieldElement(c, rand) + if err != nil { + return + } + + priv = new(PrivateKey) + priv.PublicKey.Curve = c + priv.D = k + priv.PublicKey.X, priv.PublicKey.Y = c.ScalarBaseMult(k.Bytes()) + return +} + +// hashToInt converts a hash value to an integer. There is some disagreement +// about how this is done. [NSA] suggests that this is done in the obvious +// manner, but [SECG] truncates the hash to the bit-length of the curve order +// first. We follow [SECG] because that's what OpenSSL does. +func hashToInt(hash []byte, c *elliptic.Curve) *big.Int { + orderBits := c.N.BitLen() + orderBytes := (orderBits + 7) / 8 + if len(hash) > orderBytes { + hash = hash[:orderBytes] + } + + ret := new(big.Int).SetBytes(hash) + excess := orderBytes*8 - orderBits + if excess > 0 { + ret.Rsh(ret, uint(excess)) + } + return ret +} + +// Sign signs an arbitrary length hash (which should be the result of hashing a +// larger message) using the private key, priv. It returns the signature as a +// pair of integers. The security of the private key depends on the entropy of +// rand. 
+func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err os.Error) { + // See [NSA] 3.4.1 + c := priv.PublicKey.Curve + + var k, kInv *big.Int + for { + for { + k, err = randFieldElement(c, rand) + if err != nil { + r = nil + return + } + + kInv = new(big.Int).ModInverse(k, c.N) + r, _ = priv.Curve.ScalarBaseMult(k.Bytes()) + r.Mod(r, priv.Curve.N) + if r.Sign() != 0 { + break + } + } + + e := hashToInt(hash, c) + s = new(big.Int).Mul(priv.D, r) + s.Add(s, e) + s.Mul(s, kInv) + s.Mod(s, priv.PublicKey.Curve.N) + if s.Sign() != 0 { + break + } + } + + return +} + +// Verify verifies the signature in r, s of hash using the public key, pub. It +// returns true iff the signature is valid. +func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool { + // See [NSA] 3.4.2 + c := pub.Curve + + if r.Sign() == 0 || s.Sign() == 0 { + return false + } + if r.Cmp(c.N) >= 0 || s.Cmp(c.N) >= 0 { + return false + } + e := hashToInt(hash, c) + w := new(big.Int).ModInverse(s, c.N) + + u1 := e.Mul(e, w) + u2 := w.Mul(r, w) + + x1, y1 := c.ScalarBaseMult(u1.Bytes()) + x2, y2 := c.ScalarMult(pub.X, pub.Y, u2.Bytes()) + if x1.Cmp(x2) == 0 { + return false + } + x, _ := c.Add(x1, y1, x2, y2) + x.Mod(x, c.N) + return x.Cmp(r) == 0 +} diff --git a/libgo/go/crypto/ecdsa/ecdsa_test.go b/libgo/go/crypto/ecdsa/ecdsa_test.go new file mode 100644 index 00000000000..cc22b7a52f1 --- /dev/null +++ b/libgo/go/crypto/ecdsa/ecdsa_test.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ecdsa + +import ( + "big" + "crypto/elliptic" + "crypto/sha1" + "crypto/rand" + "encoding/hex" + "testing" +) + +func testKeyGeneration(t *testing.T, c *elliptic.Curve, tag string) { + priv, err := GenerateKey(c, rand.Reader) + if err != nil { + t.Errorf("%s: error: %s", tag, err) + return + } + if !c.IsOnCurve(priv.PublicKey.X, priv.PublicKey.Y) { + t.Errorf("%s: public key invalid", tag, err) + } +} + +func TestKeyGeneration(t *testing.T) { + testKeyGeneration(t, elliptic.P224(), "p224") + testKeyGeneration(t, elliptic.P256(), "p256") + testKeyGeneration(t, elliptic.P384(), "p384") + testKeyGeneration(t, elliptic.P521(), "p521") +} + +func testSignAndVerify(t *testing.T, c *elliptic.Curve, tag string) { + priv, _ := GenerateKey(c, rand.Reader) + + hashed := []byte("testing") + r, s, err := Sign(rand.Reader, priv, hashed) + if err != nil { + t.Errorf("%s: error signing: %s", tag, err) + return + } + + if !Verify(&priv.PublicKey, hashed, r, s) { + t.Errorf("%s: Verify failed", tag) + } + + hashed[0] ^= 0xff + if Verify(&priv.PublicKey, hashed, r, s) { + t.Errorf("%s: Verify always works!", tag) + } +} + +func TestSignAndVerify(t *testing.T) { + testSignAndVerify(t, elliptic.P224(), "p224") + testSignAndVerify(t, elliptic.P256(), "p256") + testSignAndVerify(t, elliptic.P384(), "p384") + testSignAndVerify(t, elliptic.P521(), "p521") +} + +func fromHex(s string) *big.Int { + r, ok := new(big.Int).SetString(s, 16) + if !ok { + panic("bad hex") + } + return r +} + +// These test vectors were taken from +// http://csrc.nist.gov/groups/STM/cavp/documents/dss/ecdsatestvectors.zip +var testVectors = []struct { + msg string + Qx, Qy string + r, s string + ok bool +}{ + { + "09626b45493672e48f3d1226a3aff3201960e577d33a7f72c7eb055302db8fe8ed61685dd036b554942a5737cd1512cdf811ee0c00e6dd2f08c69f08643be396e85dafda664801e772cdb7396868ac47b172245b41986aa2648cb77fbbfa562581be06651355a0c4b090f9d17d8f0ab6cced4e0c9d386cf465a516630f0231bd", + 
"9504b5b82d97a264d8b3735e0568decabc4b6ca275bc53cbadfc1c40", + "03426f80e477603b10dee670939623e3da91a94267fc4e51726009ed", + "81d3ac609f9575d742028dd496450a58a60eea2dcf8b9842994916e1", + "96a8c5f382c992e8f30ccce9af120b067ec1d74678fa8445232f75a5", + false, + }, + { + "96b2b6536f6df29be8567a72528aceeaccbaa66c66c534f3868ca9778b02faadb182e4ed34662e73b9d52ecbe9dc8e875fc05033c493108b380689ebf47e5b062e6a0cdb3dd34ce5fe347d92768d72f7b9b377c20aea927043b509c078ed2467d7113405d2ddd458811e6faf41c403a2a239240180f1430a6f4330df5d77de37", + "851e3100368a22478a0029353045ae40d1d8202ef4d6533cfdddafd8", + "205302ac69457dd345e86465afa72ee8c74ca97e2b0b999aec1f10c2", + "4450c2d38b697e990721aa2dbb56578d32b4f5aeb3b9072baa955ee0", + "e26d4b589166f7b4ba4b1c8fce823fa47aad22f8c9c396b8c6526e12", + false, + }, + { + "86778dbb4a068a01047a8d245d632f636c11d2ad350740b36fad90428b454ad0f120cb558d12ea5c8a23db595d87543d06d1ef489263d01ee529871eb68737efdb8ff85bc7787b61514bed85b7e01d6be209e0a4eb0db5c8df58a5c5bf706d76cb2bdf7800208639e05b89517155d11688236e6a47ed37d8e5a2b1e0adea338e", + "ad5bda09d319a717c1721acd6688d17020b31b47eef1edea57ceeffc", + "c8ce98e181770a7c9418c73c63d01494b8b80a41098c5ea50692c984", + "de5558c257ab4134e52c19d8db3b224a1899cbd08cc508ce8721d5e9", + "745db7af5a477e5046705c0a5eff1f52cb94a79d481f0c5a5e108ecd", + true, + }, + { + "4bc6ef1958556686dab1e39c3700054a304cbd8f5928603dcd97fafd1f29e69394679b638f71c9344ce6a535d104803d22119f57b5f9477e253817a52afa9bfbc9811d6cc8c8be6b6566c6ef48b439bbb532abe30627548c598867f3861ba0b154dc1c3deca06eb28df8efd28258554b5179883a36fbb1eecf4f93ee19d41e3d", + "cc5eea2edf964018bdc0504a3793e4d2145142caa09a72ac5fb8d3e8", + "a48d78ae5d08aa725342773975a00d4219cf7a8029bb8cf3c17c374a", + "67b861344b4e416d4094472faf4272f6d54a497177fbc5f9ef292836", + "1d54f3fcdad795bf3b23408ecbac3e1321d1d66f2e4e3d05f41f7020", + false, + }, + { + 
"bb658732acbf3147729959eb7318a2058308b2739ec58907dd5b11cfa3ecf69a1752b7b7d806fe00ec402d18f96039f0b78dbb90a59c4414fb33f1f4e02e4089de4122cd93df5263a95be4d7084e2126493892816e6a5b4ed123cb705bf930c8f67af0fb4514d5769232a9b008a803af225160ce63f675bd4872c4c97b146e5e", + "6234c936e27bf141fc7534bfc0a7eedc657f91308203f1dcbd642855", + "27983d87ca785ef4892c3591ef4a944b1deb125dd58bd351034a6f84", + "e94e05b42d01d0b965ffdd6c3a97a36a771e8ea71003de76c4ecb13f", + "1dc6464ffeefbd7872a081a5926e9fc3e66d123f1784340ba17737e9", + false, + }, + { + "7c00be9123bfa2c4290be1d8bc2942c7f897d9a5b7917e3aabd97ef1aab890f148400a89abd554d19bec9d8ed911ce57b22fbcf6d30ca2115f13ce0a3f569a23bad39ee645f624c49c60dcfc11e7d2be24de9c905596d8f23624d63dc46591d1f740e46f982bfae453f107e80db23545782be23ce43708245896fc54e1ee5c43", + "9f3f037282aaf14d4772edffff331bbdda845c3f65780498cde334f1", + "8308ee5a16e3bcb721b6bc30000a0419bc1aaedd761be7f658334066", + "6381d7804a8808e3c17901e4d283b89449096a8fba993388fa11dc54", + "8e858f6b5b253686a86b757bad23658cda53115ac565abca4e3d9f57", + false, + }, + { + "cffc122a44840dc705bb37130069921be313d8bde0b66201aebc48add028ca131914ef2e705d6bedd19dc6cf9459bbb0f27cdfe3c50483808ffcdaffbeaa5f062e097180f07a40ef4ab6ed03fe07ed6bcfb8afeb42c97eafa2e8a8df469de07317c5e1494c41547478eff4d8c7d9f0f484ad90fedf6e1c35ee68fa73f1691601", + "a03b88a10d930002c7b17ca6af2fd3e88fa000edf787dc594f8d4fd4", + "e0cf7acd6ddc758e64847fe4df9915ebda2f67cdd5ec979aa57421f5", + "387b84dcf37dc343c7d2c5beb82f0bf8bd894b395a7b894565d296c1", + "4adc12ce7d20a89ce3925e10491c731b15ddb3f339610857a21b53b4", + false, + }, + { + "26e0e0cafd85b43d16255908ccfd1f061c680df75aba3081246b337495783052ba06c60f4a486c1591a4048bae11b4d7fec4f161d80bdc9a7b79d23e44433ed625eab280521a37f23dd3e1bdc5c6a6cfaa026f3c45cf703e76dab57add93fe844dd4cda67dc3bddd01f9152579e49df60969b10f09ce9372fdd806b0c7301866", + "9a8983c42f2b5a87c37a00458b5970320d247f0c8a88536440173f7d", + "15e489ec6355351361900299088cfe8359f04fe0cab78dde952be80c", + 
"929a21baa173d438ec9f28d6a585a2f9abcfc0a4300898668e476dc0", + "59a853f046da8318de77ff43f26fe95a92ee296fa3f7e56ce086c872", + true, + }, + { + "1078eac124f48ae4f807e946971d0de3db3748dd349b14cca5c942560fb25401b2252744f18ad5e455d2d97ed5ae745f55ff509c6c8e64606afe17809affa855c4c4cdcaf6b69ab4846aa5624ed0687541aee6f2224d929685736c6a23906d974d3c257abce1a3fb8db5951b89ecb0cda92b5207d93f6618fd0f893c32cf6a6e", + "d6e55820bb62c2be97650302d59d667a411956138306bd566e5c3c2b", + "631ab0d64eaf28a71b9cbd27a7a88682a2167cee6251c44e3810894f", + "65af72bc7721eb71c2298a0eb4eed3cec96a737cc49125706308b129", + "bd5a987c78e2d51598dbd9c34a9035b0069c580edefdacee17ad892a", + false, + }, + { + "919deb1fdd831c23481dfdb2475dcbe325b04c34f82561ced3d2df0b3d749b36e255c4928973769d46de8b95f162b53cd666cad9ae145e7fcfba97919f703d864efc11eac5f260a5d920d780c52899e5d76f8fe66936ff82130761231f536e6a3d59792f784902c469aa897aabf9a0678f93446610d56d5e0981e4c8a563556b", + "269b455b1024eb92d860a420f143ac1286b8cce43031562ae7664574", + "baeb6ca274a77c44a0247e5eb12ca72bdd9a698b3f3ae69c9f1aaa57", + "cb4ec2160f04613eb0dfe4608486091a25eb12aa4dec1afe91cfb008", + "40b01d8cd06589481574f958b98ca08ade9d2a8fe31024375c01bb40", + false, + }, + { + "6e012361250dacf6166d2dd1aa7be544c3206a9d43464b3fcd90f3f8cf48d08ec099b59ba6fe7d9bdcfaf244120aed1695d8be32d1b1cd6f143982ab945d635fb48a7c76831c0460851a3d62b7209c30cd9c2abdbe3d2a5282a9fcde1a6f418dd23c409bc351896b9b34d7d3a1a63bbaf3d677e612d4a80fa14829386a64b33f", + "6d2d695efc6b43b13c14111f2109608f1020e3e03b5e21cfdbc82fcd", + "26a4859296b7e360b69cf40be7bd97ceaffa3d07743c8489fc47ca1b", + "9a8cb5f2fdc288b7183c5b32d8e546fc2ed1ca4285eeae00c8b572ad", + "8c623f357b5d0057b10cdb1a1593dab57cda7bdec9cf868157a79b97", + true, + }, + { + "bf6bd7356a52b234fe24d25557200971fc803836f6fec3cade9642b13a8e7af10ab48b749de76aada9d8927f9b12f75a2c383ca7358e2566c4bb4f156fce1fd4e87ef8c8d2b6b1bdd351460feb22cdca0437ac10ca5e0abbbce9834483af20e4835386f8b1c96daaa41554ceee56730aac04f23a5c765812efa746051f396566", + 
"14250131b2599939cf2d6bc491be80ddfe7ad9de644387ee67de2d40", + "b5dc473b5d014cd504022043c475d3f93c319a8bdcb7262d9e741803", + "4f21642f2201278a95339a80f75cc91f8321fcb3c9462562f6cbf145", + "452a5f816ea1f75dee4fd514fa91a0d6a43622981966c59a1b371ff8", + false, + }, + { + "0eb7f4032f90f0bd3cf9473d6d9525d264d14c031a10acd31a053443ed5fe919d5ac35e0be77813071b4062f0b5fdf58ad5f637b76b0b305aec18f82441b6e607b44cdf6e0e3c7c57f24e6fd565e39430af4a6b1d979821ed0175fa03e3125506847654d7e1ae904ce1190ae38dc5919e257bdac2db142a6e7cd4da6c2e83770", + "d1f342b7790a1667370a1840255ac5bbbdc66f0bc00ae977d99260ac", + "76416cabae2de9a1000b4646338b774baabfa3db4673790771220cdb", + "bc85e3fc143d19a7271b2f9e1c04b86146073f3fab4dda1c3b1f35ca", + "9a5c70ede3c48d5f43307a0c2a4871934424a3303b815df4bb0f128e", + false, + }, + { + "5cc25348a05d85e56d4b03cec450128727bc537c66ec3a9fb613c151033b5e86878632249cba83adcefc6c1e35dcd31702929c3b57871cda5c18d1cf8f9650a25b917efaed56032e43b6fc398509f0d2997306d8f26675f3a8683b79ce17128e006aa0903b39eeb2f1001be65de0520115e6f919de902b32c38d691a69c58c92", + "7e49a7abf16a792e4c7bbc4d251820a2abd22d9f2fc252a7bf59c9a6", + "44236a8fb4791c228c26637c28ae59503a2f450d4cfb0dc42aa843b9", + "084461b4050285a1a85b2113be76a17878d849e6bc489f4d84f15cd8", + "079b5bddcc4d45de8dbdfd39f69817c7e5afa454a894d03ee1eaaac3", + false, + }, + { + "1951533ce33afb58935e39e363d8497a8dd0442018fd96dff167b3b23d7206a3ee182a3194765df4768a3284e23b8696c199b4686e670d60c9d782f08794a4bccc05cffffbd1a12acd9eb1cfa01f7ebe124da66ecff4599ea7720c3be4bb7285daa1a86ebf53b042bd23208d468c1b3aa87381f8e1ad63e2b4c2ba5efcf05845", + "31945d12ebaf4d81f02be2b1768ed80784bf35cf5e2ff53438c11493", + "a62bebffac987e3b9d3ec451eb64c462cdf7b4aa0b1bbb131ceaa0a4", + "bc3c32b19e42b710bca5c6aaa128564da3ddb2726b25f33603d2af3c", + "ed1a719cc0c507edc5239d76fe50e2306c145ad252bd481da04180c0", + false, + }, +} + +func TestVectors(t *testing.T) { + sha := sha1.New() + + for i, test := range testVectors { + pub := PublicKey{ + Curve: elliptic.P224(), + X: 
fromHex(test.Qx), + Y: fromHex(test.Qy), + } + msg, _ := hex.DecodeString(test.msg) + sha.Reset() + sha.Write(msg) + hashed := sha.Sum() + r := fromHex(test.r) + s := fromHex(test.s) + if Verify(&pub, hashed, r, s) != test.ok { + t.Errorf("%d: bad result", i) + } + } +} diff --git a/libgo/go/crypto/elliptic/elliptic.go b/libgo/go/crypto/elliptic/elliptic.go index beac45ca074..2296e960777 100644 --- a/libgo/go/crypto/elliptic/elliptic.go +++ b/libgo/go/crypto/elliptic/elliptic.go @@ -24,6 +24,7 @@ import ( // See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html type Curve struct { P *big.Int // the order of the underlying field + N *big.Int // the order of the base point B *big.Int // the constant of the curve equation Gx, Gy *big.Int // (x,y) of the base point BitSize int // the size of the underlying field @@ -315,6 +316,7 @@ func initP224() { // See FIPS 186-3, section D.2.2 p224 = new(Curve) p224.P, _ = new(big.Int).SetString("26959946667150639794667015087019630673557916260026308143510066298881", 10) + p224.N, _ = new(big.Int).SetString("26959946667150639794667015087019625940457807714424391721682722368061", 10) p224.B, _ = new(big.Int).SetString("b4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4", 16) p224.Gx, _ = new(big.Int).SetString("b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", 16) p224.Gy, _ = new(big.Int).SetString("bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34", 16) @@ -325,6 +327,7 @@ func initP256() { // See FIPS 186-3, section D.2.3 p256 = new(Curve) p256.P, _ = new(big.Int).SetString("115792089210356248762697446949407573530086143415290314195533631308867097853951", 10) + p256.N, _ = new(big.Int).SetString("115792089210356248762697446949407573529996955224135760342422259061068512044369", 10) p256.B, _ = new(big.Int).SetString("5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b", 16) p256.Gx, _ = new(big.Int).SetString("6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", 16) p256.Gy, _ = 
new(big.Int).SetString("4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5", 16) @@ -335,6 +338,7 @@ func initP384() { // See FIPS 186-3, section D.2.4 p384 = new(Curve) p384.P, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319", 10) + p384.N, _ = new(big.Int).SetString("39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643", 10) p384.B, _ = new(big.Int).SetString("b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef", 16) p384.Gx, _ = new(big.Int).SetString("aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7", 16) p384.Gy, _ = new(big.Int).SetString("3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f", 16) @@ -345,6 +349,7 @@ func initP521() { // See FIPS 186-3, section D.2.5 p521 = new(Curve) p521.P, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151", 10) + p521.N, _ = new(big.Int).SetString("6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449", 10) p521.B, _ = new(big.Int).SetString("051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00", 16) p521.Gx, _ = new(big.Int).SetString("c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66", 16) p521.Gy, _ = new(big.Int).SetString("11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650", 16) diff --git a/libgo/go/crypto/openpgp/packet/packet.go 
b/libgo/go/crypto/openpgp/packet/packet.go index 269603ba498..57ff3afbfc7 100644 --- a/libgo/go/crypto/openpgp/packet/packet.go +++ b/libgo/go/crypto/openpgp/packet/packet.go @@ -7,6 +7,7 @@ package packet import ( + "big" "crypto/aes" "crypto/cast5" "crypto/cipher" @@ -166,10 +167,10 @@ func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, return } -// serialiseHeader writes an OpenPGP packet header to w. See RFC 4880, section +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section // 4.2. -func serialiseHeader(w io.Writer, ptype packetType, length int) (err os.Error) { - var buf [5]byte +func serializeHeader(w io.Writer, ptype packetType, length int) (err os.Error) { + var buf [6]byte var n int buf[0] = 0x80 | 0x40 | byte(ptype) @@ -178,16 +179,16 @@ func serialiseHeader(w io.Writer, ptype packetType, length int) (err os.Error) { n = 2 } else if length < 8384 { length -= 192 - buf[1] = byte(length >> 8) + buf[1] = 192 + byte(length>>8) buf[2] = byte(length) n = 3 } else { - buf[0] = 255 - buf[1] = byte(length >> 24) - buf[2] = byte(length >> 16) - buf[3] = byte(length >> 8) - buf[4] = byte(length) - n = 5 + buf[1] = 255 + buf[2] = byte(length >> 24) + buf[3] = byte(length >> 16) + buf[4] = byte(length >> 8) + buf[5] = byte(length) + n = 6 } _, err = w.Write(buf[:n]) @@ -371,7 +372,7 @@ func (cipher CipherFunction) new(key []byte) (block cipher.Block) { // readMPI reads a big integer from r. The bit length returned is the bit // length that was specified in r. This is preserved so that the integer can be -// reserialised exactly. +// reserialized exactly. func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) { var buf [2]byte _, err = readFull(r, buf[0:]) @@ -385,7 +386,7 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err os.Error) { return } -// writeMPI serialises a big integer to r. +// writeMPI serializes a big integer to w. 
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) { _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) if err == nil { @@ -393,3 +394,8 @@ func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err os.Error) { } return } + +// writeBig serializes a *big.Int to w. +func writeBig(w io.Writer, i *big.Int) os.Error { + return writeMPI(w, uint16(i.BitLen()), i.Bytes()) +} diff --git a/libgo/go/crypto/openpgp/packet/packet_test.go b/libgo/go/crypto/openpgp/packet/packet_test.go index 6789d2abc79..1a4692cd4f5 100644 --- a/libgo/go/crypto/openpgp/packet/packet_test.go +++ b/libgo/go/crypto/openpgp/packet/packet_test.go @@ -190,3 +190,23 @@ func TestReadHeader(t *testing.T) { } } } + +func TestSerializeHeader(t *testing.T) { + tag := packetTypePublicKey + lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000} + + for _, length := range lengths { + buf := bytes.NewBuffer(nil) + serializeHeader(buf, tag, length) + tag2, length2, _, err := readHeader(buf) + if err != nil { + t.Errorf("length %d, err: %s", length, err) + } + if tag2 != tag { + t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag) + } + if int(length2) != length { + t.Errorf("length %d, length incorrect (got %d)", length, length2) + } + } +} diff --git a/libgo/go/crypto/openpgp/packet/private_key.go b/libgo/go/crypto/openpgp/packet/private_key.go index b22891755e3..69448239029 100644 --- a/libgo/go/crypto/openpgp/packet/private_key.go +++ b/libgo/go/crypto/openpgp/packet/private_key.go @@ -8,6 +8,7 @@ import ( "big" "bytes" "crypto/cipher" + "crypto/dsa" "crypto/openpgp/error" "crypto/openpgp/s2k" "crypto/rsa" @@ -134,7 +135,16 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) os.Error { } func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) { - // TODO(agl): support DSA and ECDSA private keys. 
+ switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err os.Error) { rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) rsaPriv := new(rsa.PrivateKey) rsaPriv.PublicKey = *rsaPub @@ -162,3 +172,22 @@ func (pk *PrivateKey) parsePrivateKey(data []byte) (err os.Error) { return nil } + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err os.Error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x, _, err := readMPI(buf) + if err != nil { + return + } + + dsaPriv.X = new(big.Int).SetBytes(x) + pk.PrivateKey = dsaPriv + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} diff --git a/libgo/go/crypto/openpgp/packet/public_key.go b/libgo/go/crypto/openpgp/packet/public_key.go index 8866bdaaa94..ebef481fb7f 100644 --- a/libgo/go/crypto/openpgp/packet/public_key.go +++ b/libgo/go/crypto/openpgp/packet/public_key.go @@ -11,6 +11,7 @@ import ( "crypto/rsa" "crypto/sha1" "encoding/binary" + "fmt" "hash" "io" "os" @@ -178,12 +179,6 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.E return error.InvalidArgumentError("public key cannot generate signatures") } - rsaPublicKey, ok := pk.PublicKey.(*rsa.PublicKey) - if !ok { - // TODO(agl): support DSA and ECDSA keys. 
- return error.UnsupportedError("non-RSA public key") - } - signed.Write(sig.HashSuffix) hashBytes := signed.Sum() @@ -191,11 +186,28 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err os.E return error.SignatureError("hash tag doesn't match") } - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.Signature) - if err != nil { - return error.SignatureError("RSA verification failure") + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return error.InvalidArgumentError("public key and signature use different algorithms") } - return nil + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature) + if err != nil { + return error.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + if !dsa.Verify(dsaPublicKey, hashBytes, sig.DSASigR, sig.DSASigS) { + return error.SignatureError("DSA verification failure") + } + return nil + default: + panic("shouldn't happen") + } + panic("unreachable") } // VerifyKeySignature returns nil iff sig is a valid signature, make by this @@ -239,9 +251,21 @@ func (pk *PublicKey) VerifyUserIdSignature(id string, sig *Signature) (err os.Er return pk.VerifySignature(h, sig) } +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + // A parsedMPI is used to store the contents of a big integer, along with the // bit length that was specified in the original input. This allows the MPI to -// be reserialised exactly. 
+// be reserialized exactly. type parsedMPI struct { bytes []byte bitLength uint16 diff --git a/libgo/go/crypto/openpgp/packet/public_key_test.go b/libgo/go/crypto/openpgp/packet/public_key_test.go index c015f64aec9..069388c14dc 100644 --- a/libgo/go/crypto/openpgp/packet/public_key_test.go +++ b/libgo/go/crypto/openpgp/packet/public_key_test.go @@ -16,9 +16,11 @@ var pubKeyTests = []struct { creationTime uint32 pubKeyAlgo PublicKeyAlgorithm keyId uint64 + keyIdString string + keyIdShort string }{ - {rsaPkDataHex, rsaFingerprintHex, 0x4d3c5c10, PubKeyAlgoRSA, 0xa34d7e18c20c31bb}, - {dsaPkDataHex, dsaFingerprintHex, 0x4d432f89, PubKeyAlgoDSA, 0x8e8fbe54062f19ed}, + {rsaPkDataHex, rsaFingerprintHex, 0x4d3c5c10, PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"}, + {dsaPkDataHex, dsaFingerprintHex, 0x4d432f89, PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"}, } func TestPublicKeyRead(t *testing.T) { @@ -46,6 +48,12 @@ func TestPublicKeyRead(t *testing.T) { if pk.KeyId != test.keyId { t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId) } + if g, e := pk.KeyIdString(), test.keyIdString; g != e { + t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e) + } + if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e { + t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e) + } } } diff --git a/libgo/go/crypto/openpgp/packet/signature.go b/libgo/go/crypto/openpgp/packet/signature.go index fd2518ab41e..719657e76ee 100644 --- a/libgo/go/crypto/openpgp/packet/signature.go +++ b/libgo/go/crypto/openpgp/packet/signature.go @@ -5,7 +5,9 @@ package packet import ( + "big" "crypto" + "crypto/dsa" "crypto/openpgp/error" "crypto/openpgp/s2k" "crypto/rand" @@ -29,7 +31,9 @@ type Signature struct { // of bad signed data. HashTag [2]byte CreationTime uint32 // Unix epoch time - Signature []byte + + RSASignature []byte + DSASigR, DSASigS *big.Int // The following are optional so are nil when not included in the // signature. 
@@ -66,7 +70,7 @@ func (sig *Signature) parse(r io.Reader) (err os.Error) { sig.SigType = SignatureType(buf[0]) sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: default: err = error.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) return @@ -122,8 +126,20 @@ func (sig *Signature) parse(r io.Reader) (err os.Error) { return } - // We have already checked that the public key algorithm is RSA. - sig.Signature, _, err = readMPI(r) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature, _, err = readMPI(r) + case PubKeyAlgoDSA: + var rBytes, sBytes []byte + rBytes, _, err = readMPI(r) + sig.DSASigR = new(big.Int).SetBytes(rBytes) + if err == nil { + sBytes, _, err = readMPI(r) + sig.DSASigS = new(big.Int).SetBytes(sBytes) + } + default: + panic("unreachable") + } return } @@ -316,8 +332,8 @@ func subpacketLengthLength(length int) int { return 5 } -// serialiseSubpacketLength marshals the given length into to. -func serialiseSubpacketLength(to []byte, length int) int { +// serializeSubpacketLength marshals the given length into to. +func serializeSubpacketLength(to []byte, length int) int { if length < 192 { to[0] = byte(length) return 1 @@ -336,7 +352,7 @@ func serialiseSubpacketLength(to []byte, length int) int { return 5 } -// subpacketsLength returns the serialised length, in bytes, of the given +// subpacketsLength returns the serialized length, in bytes, of the given // subpackets. func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { for _, subpacket := range subpackets { @@ -349,11 +365,11 @@ func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { return } -// serialiseSubpackets marshals the given subpackets into to. 
-func serialiseSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { for _, subpacket := range subpackets { if subpacket.hashed == hashed { - n := serialiseSubpacketLength(to, len(subpacket.contents)+1) + n := serializeSubpacketLength(to, len(subpacket.contents)+1) to[n] = byte(subpacket.subpacketType) to = to[1+n:] n = copy(to, subpacket.contents) @@ -381,7 +397,7 @@ func (sig *Signature) buildHashSuffix() (err os.Error) { } sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serialiseSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) + serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) trailer := sig.HashSuffix[l:] trailer[0] = 4 trailer[1] = 0xff @@ -392,32 +408,66 @@ func (sig *Signature) buildHashSuffix() (err os.Error) { return } -// SignRSA signs a message with an RSA private key. The hash, h, must contain -// the hash of message to be signed and will be mutated by this function. -func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) { +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err os.Error) { err = sig.buildHashSuffix() if err != nil { return } h.Write(sig.HashSuffix) - digest := h.Sum() + digest = h.Sum() copy(sig.HashTag[:], digest) - sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest) return } -// Serialize marshals sig to w. SignRSA must have been called first. +// SignRSA signs a message with an RSA private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. 
+func (sig *Signature) SignRSA(h hash.Hash, priv *rsa.PrivateKey) (err os.Error) { + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + sig.RSASignature, err = rsa.SignPKCS1v15(rand.Reader, priv, sig.Hash, digest) + return +} + +// SignDSA signs a message with a DSA private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +func (sig *Signature) SignDSA(h hash.Hash, priv *dsa.PrivateKey) (err os.Error) { + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + sig.DSASigR, sig.DSASigS, err = dsa.Sign(rand.Reader, priv, digest) + return +} + +// Serialize marshals sig to w. SignRSA or SignDSA must have been called first. func (sig *Signature) Serialize(w io.Writer) (err os.Error) { - if sig.Signature == nil { - return error.InvalidArgumentError("Signature: need to call SignRSA before Serialize") + if sig.RSASignature == nil && sig.DSASigR == nil { + return error.InvalidArgumentError("Signature: need to call SignRSA or SignDSA before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = len(sig.RSASignature) + case PubKeyAlgoDSA: + sigLength = 2 /* MPI length */ + sigLength += (sig.DSASigR.BitLen() + 7) / 8 + sigLength += 2 /* MPI length */ + sigLength += (sig.DSASigS.BitLen() + 7) / 8 + default: + panic("impossible") } unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) length := len(sig.HashSuffix) - 6 /* trailer not included */ + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + 2 /* length of signature MPI */ + len(sig.Signature) - err = serialiseHeader(w, packetTypeSignature, length) + 2 /* hash tag */ + 2 /* length of signature MPI */ + sigLength + err = serializeHeader(w, packetTypeSignature, length) if err != nil { return } @@ -430,7 +480,7 @@ func (sig *Signature) 
Serialize(w io.Writer) (err os.Error) { unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serialiseSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) _, err = w.Write(unhashedSubpackets) if err != nil { @@ -440,7 +490,19 @@ func (sig *Signature) Serialize(w io.Writer) (err os.Error) { if err != nil { return } - return writeMPI(w, 8*uint16(len(sig.Signature)), sig.Signature) + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + err = writeMPI(w, 8*uint16(len(sig.RSASignature)), sig.RSASignature) + case PubKeyAlgoDSA: + err = writeBig(w, sig.DSASigR) + if err == nil { + err = writeBig(w, sig.DSASigS) + } + default: + panic("impossible") + } + return } // outputSubpacket represents a subpacket to be marshaled. diff --git a/libgo/go/crypto/openpgp/read_test.go b/libgo/go/crypto/openpgp/read_test.go index 58199e13297..6218d9990dd 100644 --- a/libgo/go/crypto/openpgp/read_test.go +++ b/libgo/go/crypto/openpgp/read_test.go @@ -44,6 +44,17 @@ func TestReadPrivateKeyRing(t *testing.T) { } } +func TestReadDSAKey(t *testing.T) { + kring, err := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + if err != nil { + t.Error(err) + return + } + if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x0CCC0360 { + t.Errorf("bad parse: %#v", kring) + } +} + func TestGetKeyById(t *testing.T) { kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) @@ -192,7 +203,7 @@ func TestSymmetricallyEncrypted(t *testing.T) { } } -func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string) { +func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string, expectedSignerKeyId uint64) { signed := bytes.NewBufferString(sigInput) signer, err := CheckDetachedSignature(kring, signed, signature) if err != nil { 
@@ -203,7 +214,6 @@ func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sig t.Errorf("%s: signer is nil", tag) return } - expectedSignerKeyId := uint64(0xa34d7e18c20c31bb) if signer.PrimaryKey.KeyId != expectedSignerKeyId { t.Errorf("%s: wrong signer got:%x want:%x", tag, signer.PrimaryKey.KeyId, expectedSignerKeyId) } @@ -211,10 +221,18 @@ func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sig func TestDetachedSignature(t *testing.T) { kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) - testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary") - testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text") + testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary", testKey1KeyId) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text", testKey1KeyId) } +func TestDetachedSignatureDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) +} + +const testKey1KeyId = 0xA34D7E18C20C31BB +const testKey3KeyId = 0x338934250CCC0360 + const signedInput = "Signed message\nline 2\nline 3\n" const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" @@ -224,6 +242,8 @@ const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31 const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" +const detachedSignatureDSAHex = 
"884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f07
6c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45f
e610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede8551
2f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad913
20dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" @@ -235,3 +255,7 @@ const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c const signedEncryptedMessageHex = "848c032a67d68660df41c70103ff5789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8d2c03b018bd210b1d3791e1aba74b0f1034e122ab72e760492c192383cf5e20b5628bd043272d63df9b923f147eb6091cd897553204832aba48fec54aa447547bb16305a1024713b90e77fd0065f1918271947549205af3c74891af22ee0b56cd29bfec6d6e351901cd4ab3ece7c486f1e32a792d4e474aed98ee84b3f591c7dff37b64e0ecd68fd036d517e412dcadf85840ce184ad7921ad446c4ee28db80447aea1ca8d4f574db4d4e37688158ddd19e14ee2eab4873d46947d65d14a23e788d912cf9a19624ca7352469b72a83866b7c23cb5ace3deab3c7018061b0ba0f39ed2befe27163e5083cf9b8271e3e3d52cc7ad6e2a3bd81d4c3d7022f8d" const symmetricallyEncryptedCompressedHex = "8c0d04030302eb4a03808145d0d260c92f714339e13de5a79881216431925bf67ee2898ea61815f07894cd0703c50d0a76ef64d482196f47a8bc729af9b80bb6" + +const dsaTestKeyHex = 
"9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" diff --git a/libgo/go/crypto/openpgp/write.go b/libgo/go/crypto/openpgp/write.go index 1a2e2bf0407..ef7b11230a9 100644 --- a/libgo/go/crypto/openpgp/write.go +++ b/libgo/go/crypto/openpgp/write.go @@ -6,6 +6,7 @@ package openpgp import ( "crypto" + "crypto/dsa" "crypto/openpgp/armor" "crypto/openpgp/error" "crypto/openpgp/packet" @@ -39,7 +40,7 @@ func DetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error { // ArmoredDetachSignText signs message (after canonicalising the line endings) // with the private key from signer (which must already have been decrypted) // and writes an armored signature to w. 
-func SignTextDetachedArmored(w io.Writer, signer *Entity, message io.Reader) os.Error { +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader) os.Error { return armoredDetachSign(w, signer, message, packet.SigTypeText) } @@ -80,6 +81,9 @@ func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.S case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSASignOnly: priv := signer.PrivateKey.PrivateKey.(*rsa.PrivateKey) err = sig.SignRSA(h, priv) + case packet.PubKeyAlgoDSA: + priv := signer.PrivateKey.PrivateKey.(*dsa.PrivateKey) + err = sig.SignDSA(h, priv) default: err = error.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) } diff --git a/libgo/go/crypto/openpgp/write_test.go b/libgo/go/crypto/openpgp/write_test.go index 33e8809f224..42cd0d27f85 100644 --- a/libgo/go/crypto/openpgp/write_test.go +++ b/libgo/go/crypto/openpgp/write_test.go @@ -18,7 +18,7 @@ func TestSignDetached(t *testing.T) { t.Error(err) } - testDetachedSignature(t, kring, out, signedInput, "check") + testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) } func TestSignTextDetached(t *testing.T) { @@ -30,5 +30,17 @@ func TestSignTextDetached(t *testing.T) { t.Error(err) } - testDetachedSignature(t, kring, out, signedInput, "check") + testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId) +} + +func TestSignDetachedDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex)) + out := bytes.NewBuffer(nil) + message := bytes.NewBufferString(signedInput) + err := DetachSign(out, kring[0], message) + if err != nil { + t.Error(err) + } + + testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId) } diff --git a/libgo/go/crypto/tls/common.go b/libgo/go/crypto/tls/common.go index 7135f3d0f71..81b5a07446e 100644 --- a/libgo/go/crypto/tls/common.go +++ b/libgo/go/crypto/tls/common.go @@ -7,6 +7,7 @@ package tls import ( "crypto/rand" "crypto/rsa" + "crypto/x509" "io" 
"io/ioutil" "sync" @@ -95,6 +96,9 @@ type ConnectionState struct { HandshakeComplete bool CipherSuite uint16 NegotiatedProtocol string + + // the certificate chain that was presented by the other side + PeerCertificates []*x509.Certificate } // A Config structure is used to configure a TLS client or server. After one diff --git a/libgo/go/crypto/tls/conn.go b/libgo/go/crypto/tls/conn.go index d203e8d5169..1e6fe60aec2 100644 --- a/libgo/go/crypto/tls/conn.go +++ b/libgo/go/crypto/tls/conn.go @@ -762,6 +762,7 @@ func (c *Conn) ConnectionState() ConnectionState { if c.handshakeComplete { state.NegotiatedProtocol = c.clientProtocol state.CipherSuite = c.cipherSuite + state.PeerCertificates = c.peerCertificates } return state @@ -776,15 +777,6 @@ func (c *Conn) OCSPResponse() []byte { return c.ocspResponse } -// PeerCertificates returns the certificate chain that was presented by the -// other side. -func (c *Conn) PeerCertificates() []*x509.Certificate { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - return c.peerCertificates -} - // VerifyHostname checks that the peer certificate chain is valid for // connecting to host. If so, it returns nil; if not, it returns an os.Error // describing the problem. 
diff --git a/libgo/go/crypto/tls/generate_cert.go b/libgo/go/crypto/tls/generate_cert.go index 3e0c6393893..ee77f949fc0 100644 --- a/libgo/go/crypto/tls/generate_cert.go +++ b/libgo/go/crypto/tls/generate_cert.go @@ -25,7 +25,7 @@ func main() { priv, err := rsa.GenerateKey(rand.Reader, 1024) if err != nil { - log.Exitf("failed to generate private key: %s", err) + log.Fatalf("failed to generate private key: %s", err) return } @@ -46,13 +46,13 @@ func main() { derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) if err != nil { - log.Exitf("Failed to create certificate: %s", err) + log.Fatalf("Failed to create certificate: %s", err) return } certOut, err := os.Open("cert.pem", os.O_WRONLY|os.O_CREAT, 0644) if err != nil { - log.Exitf("failed to open cert.pem for writing: %s", err) + log.Fatalf("failed to open cert.pem for writing: %s", err) return } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) diff --git a/libgo/go/debug/proc/proc_darwin.go b/libgo/go/debug/proc/proc_darwin.go index 7caf3a21a4f..49f0a5361fb 100644 --- a/libgo/go/debug/proc/proc_darwin.go +++ b/libgo/go/debug/proc/proc_darwin.go @@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) { return nil, os.NewError("debug/proc not implemented on OS X") } -func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { +func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) { return Attach(0) } diff --git a/libgo/go/debug/proc/proc_freebsd.go b/libgo/go/debug/proc/proc_freebsd.go index f6474ce80ca..4df07c365af 100644 --- a/libgo/go/debug/proc/proc_freebsd.go +++ b/libgo/go/debug/proc/proc_freebsd.go @@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) { return nil, os.NewError("debug/proc not implemented on FreeBSD") } -func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { +func StartProcess(argv0 string, 
argv []string, attr *os.ProcAttr) (Process, os.Error) { return Attach(0) } diff --git a/libgo/go/debug/proc/proc_linux.go b/libgo/go/debug/proc/proc_linux.go index f0cc43a108e..6890a2221ef 100644 --- a/libgo/go/debug/proc/proc_linux.go +++ b/libgo/go/debug/proc/proc_linux.go @@ -1279,25 +1279,31 @@ func Attach(pid int) (Process, os.Error) { return p, nil } -// ForkExec forks the current process and execs argv0, stopping the -// new process after the exec syscall. See os.ForkExec for additional +// StartProcess forks the current process and execs argv0, stopping the +// new process after the exec syscall. See os.StartProcess for additional // details. -func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { +func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) { + sysattr := &syscall.ProcAttr{ + Dir: attr.Dir, + Env: attr.Env, + Ptrace: true, + } p := newProcess(-1) // Create array of integer (system) fds. - intfd := make([]int, len(fd)) - for i, f := range fd { + intfd := make([]int, len(attr.Files)) + for i, f := range attr.Files { if f == nil { intfd[i] = -1 } else { intfd[i] = f.Fd() } } + sysattr.Files = intfd // Fork from the monitor thread so we get the right tracer pid. 
err := p.do(func() os.Error { - pid, errno := syscall.PtraceForkExec(argv0, argv, envv, dir, intfd) + pid, _, errno := syscall.StartProcess(argv0, argv, sysattr) if errno != 0 { return &os.PathError{"fork/exec", argv0, os.Errno(errno)} } diff --git a/libgo/go/debug/proc/proc_windows.go b/libgo/go/debug/proc/proc_windows.go index dc22faef81b..661474b67aa 100644 --- a/libgo/go/debug/proc/proc_windows.go +++ b/libgo/go/debug/proc/proc_windows.go @@ -12,6 +12,6 @@ func Attach(pid int) (Process, os.Error) { return nil, os.NewError("debug/proc not implemented on windows") } -func ForkExec(argv0 string, argv []string, envv []string, dir string, fd []*os.File) (Process, os.Error) { +func StartProcess(argv0 string, argv []string, attr *os.ProcAttr) (Process, os.Error) { return Attach(0) } diff --git a/libgo/go/exec/exec.go b/libgo/go/exec/exec.go index 80f6f3c7dd4..44e3b65bec8 100644 --- a/libgo/go/exec/exec.go +++ b/libgo/go/exec/exec.go @@ -75,17 +75,19 @@ func modeToFiles(mode, fd int) (*os.File, *os.File, os.Error) { // Run starts the named binary running with // arguments argv and environment envv. +// If the dir argument is not empty, the child changes +// into the directory before executing the binary. // It returns a pointer to a new Cmd representing // the command or an error. // -// The parameters stdin, stdout, and stderr +// The arguments stdin, stdout, and stderr // specify how to handle standard input, output, and error. // The choices are DevNull (connect to /dev/null), // PassThrough (connect to the current process's standard stream), // Pipe (connect to an operating system pipe), and // MergeWithStdout (only for standard error; use the same // file descriptor as was used for standard output). -// If a parameter is Pipe, then the corresponding field (Stdin, Stdout, Stderr) +// If an argument is Pipe, then the corresponding field (Stdin, Stdout, Stderr) // of the returned Cmd is the other end of the pipe. // Otherwise the field in Cmd is nil. 
func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int) (c *Cmd, err os.Error) { @@ -105,7 +107,7 @@ func Run(name string, argv, envv []string, dir string, stdin, stdout, stderr int } // Run command. - c.Process, err = os.StartProcess(name, argv, envv, dir, fd[0:]) + c.Process, err = os.StartProcess(name, argv, &os.ProcAttr{Dir: dir, Files: fd[:], Env: envv}) if err != nil { goto Error } diff --git a/libgo/go/exec/exec_test.go b/libgo/go/exec/exec_test.go index 3a3d3b1a53e..5e37b99eeca 100644 --- a/libgo/go/exec/exec_test.go +++ b/libgo/go/exec/exec_test.go @@ -118,3 +118,55 @@ func TestAddEnvVar(t *testing.T) { t.Fatal("close:", err) } } + +var tryargs = []string{ + `2`, + `2 `, + "2 \t", + `2" "`, + `2 ab `, + `2 "ab" `, + `2 \ `, + `2 \\ `, + `2 \" `, + `2 \`, + `2\`, + `2"`, + `2\"`, + `2 "`, + `2 \"`, + ``, + `2 ^ `, + `2 \^`, +} + +func TestArgs(t *testing.T) { + for _, a := range tryargs { + argv := []string{ + "awk", + `BEGIN{printf("%s|%s|%s",ARGV[1],ARGV[2],ARGV[3])}`, + "/dev/null", + a, + "EOF", + } + exe, err := LookPath(argv[0]) + if err != nil { + t.Fatal("run:", err) + } + cmd, err := Run(exe, argv, nil, "", DevNull, Pipe, DevNull) + if err != nil { + t.Fatal("run:", err) + } + buf, err := ioutil.ReadAll(cmd.Stdout) + if err != nil { + t.Fatal("read:", err) + } + expect := "/dev/null|" + a + "|EOF" + if string(buf) != expect { + t.Errorf("read: got %q expect %q", buf, expect) + } + if err = cmd.Close(); err != nil { + t.Fatal("close:", err) + } + } +} diff --git a/libgo/go/exp/eval/stmt.go b/libgo/go/exp/eval/stmt.go index 5c5d4338a1d..f6b7c1cda94 100644 --- a/libgo/go/exp/eval/stmt.go +++ b/libgo/go/exp/eval/stmt.go @@ -287,9 +287,6 @@ func (a *stmtCompiler) compile(s ast.Stmt) { case *ast.SwitchStmt: a.compileSwitchStmt(s) - case *ast.TypeCaseClause: - notimpl = true - case *ast.TypeSwitchStmt: notimpl = true @@ -1012,13 +1009,13 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) { a.diagAt(clause.Pos(), 
"switch statement must contain case clauses") continue } - if clause.Values == nil { + if clause.List == nil { if hasDefault { a.diagAt(clause.Pos(), "switch statement contains more than one default case") } hasDefault = true } else { - ncases += len(clause.Values) + ncases += len(clause.List) } } @@ -1030,7 +1027,7 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) { if !ok { continue } - for _, v := range clause.Values { + for _, v := range clause.List { e := condbc.compileExpr(condbc.block, false, v) switch { case e == nil: @@ -1077,8 +1074,8 @@ func (a *stmtCompiler) compileSwitchStmt(s *ast.SwitchStmt) { // Save jump PC's pc := a.nextPC() - if clause.Values != nil { - for _ = range clause.Values { + if clause.List != nil { + for _ = range clause.List { casePCs[i] = &pc i++ } diff --git a/libgo/go/exp/eval/stmt_test.go b/libgo/go/exp/eval/stmt_test.go index 4a883ef5ee7..a8a3e162041 100644 --- a/libgo/go/exp/eval/stmt_test.go +++ b/libgo/go/exp/eval/stmt_test.go @@ -27,7 +27,7 @@ var stmtTests = []test{ CErr("i, u := 1, 2", atLeastOneDecl), Val2("i, x := 2, f", "i", 2, "x", 1.0), // Various errors - CErr("1 := 2", "left side of := must be a name"), + CErr("1 := 2", "expected identifier"), CErr("c, a := 1, 1", "cannot assign"), // Unpacking Val2("x, y := oneTwo()", "x", 1, "y", 2), diff --git a/libgo/go/exp/ogle/cmd.go b/libgo/go/exp/ogle/cmd.go index 4f67032d0c3..9920ff6b883 100644 --- a/libgo/go/exp/ogle/cmd.go +++ b/libgo/go/exp/ogle/cmd.go @@ -160,7 +160,7 @@ func cmdLoad(args []byte) os.Error { } else { fname = parts[0] } - tproc, err = proc.ForkExec(fname, parts, os.Environ(), "", []*os.File{os.Stdin, os.Stdout, os.Stderr}) + tproc, err = proc.StartProcess(fname, parts, &os.ProcAttr{Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}}) if err != nil { return err } diff --git a/libgo/go/expvar/expvar.go b/libgo/go/expvar/expvar.go index b1f0f6c1b81..ed6cff78db4 100644 --- a/libgo/go/expvar/expvar.go +++ b/libgo/go/expvar/expvar.go @@ -269,7 +269,7 
@@ func Iter() <-chan KeyValue { } func expvarHandler(w http.ResponseWriter, r *http.Request) { - w.SetHeader("content-type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json; charset=utf-8") fmt.Fprintf(w, "{\n") first := true for name, value := range vars { diff --git a/libgo/go/flag/flag.go b/libgo/go/flag/flag.go index be972057ed7..14f4d522c6a 100644 --- a/libgo/go/flag/flag.go +++ b/libgo/go/flag/flag.go @@ -56,7 +56,7 @@ flag.Bool(...) // global options flag.Parse() // parse leading command - subcmd := flag.Args(0) + subcmd := flag.Arg[0] switch subcmd { // add per-subcommand options } @@ -68,6 +68,7 @@ package flag import ( "fmt" "os" + "sort" "strconv" ) @@ -205,16 +206,34 @@ type allFlags struct { var flags *allFlags -// VisitAll visits the flags, calling fn for each. It visits all flags, even those not set. +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringArray, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { - for _, f := range flags.formal { + for _, f := range sortFlags(flags.formal) { fn(f) } } -// Visit visits the flags, calling fn for each. It visits only those flags that have been set. +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. func Visit(fn func(*Flag)) { - for _, f := range flags.actual { + for _, f := range sortFlags(flags.actual) { fn(f) } } @@ -260,7 +279,9 @@ var Usage = func() { var panicOnError = false -func fail() { +// failf prints to standard error a formatted error and Usage, and then exits the program. 
+func failf(format string, a ...interface{}) { + fmt.Fprintf(os.Stderr, format, a...) Usage() if panicOnError { panic("flag parse error") @@ -268,6 +289,7 @@ func fail() { os.Exit(2) } +// NFlag returns the number of flags that have been set. func NFlag() int { return len(flags.actual) } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument @@ -415,8 +437,7 @@ func (f *allFlags) parseOne() (ok bool) { } name := s[num_minuses:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { - fmt.Fprintln(os.Stderr, "bad flag syntax:", s) - fail() + failf("bad flag syntax: %s\n", s) } // it's a flag. does it have an argument? @@ -434,14 +455,12 @@ func (f *allFlags) parseOne() (ok bool) { m := flags.formal flag, alreadythere := m[name] // BUG if !alreadythere { - fmt.Fprintf(os.Stderr, "flag provided but not defined: -%s\n", name) - fail() + failf("flag provided but not defined: -%s\n", name) } if fv, ok := flag.Value.(*boolValue); ok { // special case: doesn't need an arg if has_value { if !fv.Set(value) { - fmt.Fprintf(os.Stderr, "invalid boolean value %q for flag: -%s\n", value, name) - fail() + failf("invalid boolean value %q for flag: -%s\n", value, name) } } else { fv.Set("true") @@ -454,13 +473,11 @@ func (f *allFlags) parseOne() (ok bool) { value, f.args = f.args[0], f.args[1:] } if !has_value { - fmt.Fprintf(os.Stderr, "flag needs an argument: -%s\n", name) - fail() + failf("flag needs an argument: -%s\n", name) } ok = flag.Value.Set(value) if !ok { - fmt.Fprintf(os.Stderr, "invalid value %q for flag: -%s\n", value, name) - fail() + failf("invalid value %q for flag: -%s\n", value, name) } } flags.actual[name] = flag diff --git a/libgo/go/flag/flag_test.go b/libgo/go/flag/flag_test.go index 30a21e61ae2..1e47d12e48a 100644 --- a/libgo/go/flag/flag_test.go +++ b/libgo/go/flag/flag_test.go @@ -8,6 +8,7 @@ import ( . 
"flag" "fmt" "os" + "sort" "testing" ) @@ -77,6 +78,12 @@ func TestEverything(t *testing.T) { t.Log(k, *v) } } + // Now test they're visited in sort order. + var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } } func TestUsage(t *testing.T) { diff --git a/libgo/go/fmt/format.go b/libgo/go/fmt/format.go index 86057bf693c..caaa7ac1a8a 100644 --- a/libgo/go/fmt/format.go +++ b/libgo/go/fmt/format.go @@ -107,7 +107,7 @@ func (f *fmt) writePadding(n int, padding []byte) { } // Append b to f.buf, padded on left (w > 0) or right (w < 0 or f.minus) -// clear flags aftewards. +// clear flags afterwards. func (f *fmt) pad(b []byte) { var padding []byte var left, right int @@ -124,7 +124,7 @@ func (f *fmt) pad(b []byte) { } // append s to buf, padded on left (w > 0) or right (w < 0 or f.minus). -// clear flags aftewards. +// clear flags afterwards. func (f *fmt) padString(s string) { var padding []byte var left, right int diff --git a/libgo/go/fmt/scan.go b/libgo/go/fmt/scan.go index c0f2bacb69b..36271a8d466 100644 --- a/libgo/go/fmt/scan.go +++ b/libgo/go/fmt/scan.go @@ -35,10 +35,15 @@ type ScanState interface { ReadRune() (rune int, size int, err os.Error) // UnreadRune causes the next call to ReadRune to return the same rune. UnreadRune() os.Error - // Token returns the next space-delimited token from the input. If - // a width has been specified, the returned token will be no longer - // than the width. - Token() (token string, err os.Error) + // Token skips space in the input if skipSpace is true, then returns the + // run of Unicode code points c satisfying f(c). If f is nil, + // !unicode.IsSpace(c) is used; that is, the token will hold non-space + // characters. Newlines are treated as space unless the scan operation + // is Scanln, Fscanln or Sscanln, in which case a newline is treated as + // EOF. 
The returned slice points to shared data that may be overwritten + // by the next call to Token, a call to a Scan function using the ScanState + // as input, or when the calling Scan method returns. + Token(skipSpace bool, f func(int) bool) (token []byte, err os.Error) // Width returns the value of the width option and whether it has been set. // The unit is Unicode code points. Width() (wid int, ok bool) @@ -134,7 +139,7 @@ type scanError struct { err os.Error } -const EOF = -1 +const eof = -1 // ss is the internal implementation of ScanState. type ss struct { @@ -202,7 +207,7 @@ func (s *ss) getRune() (rune int) { rune, _, err := s.ReadRune() if err != nil { if err == os.EOF { - return EOF + return eof } s.error(err) } @@ -214,7 +219,7 @@ func (s *ss) getRune() (rune int) { // syntax error. func (s *ss) mustReadRune() (rune int) { rune = s.getRune() - if rune == EOF { + if rune == eof { s.error(io.ErrUnexpectedEOF) } return @@ -238,7 +243,7 @@ func (s *ss) errorString(err string) { panic(scanError{os.ErrorString(err)}) } -func (s *ss) Token() (tok string, err os.Error) { +func (s *ss) Token(skipSpace bool, f func(int) bool) (tok []byte, err os.Error) { defer func() { if e := recover(); e != nil { if se, ok := e.(scanError); ok { @@ -248,10 +253,19 @@ func (s *ss) Token() (tok string, err os.Error) { } } }() - tok = s.token() + if f == nil { + f = notSpace + } + s.buf.Reset() + tok = s.token(skipSpace, f) return } +// notSpace is the default scanning function used in Token. +func notSpace(r int) bool { + return !unicode.IsSpace(r) +} + // readRune is a structure to enable reading UTF-8 encoded code points // from an io.Reader. It is used if the Reader given to the scanner does // not already implement io.RuneReader. 
@@ -364,7 +378,7 @@ func (s *ss) free(old ssave) { func (s *ss) skipSpace(stopAtNewline bool) { for { rune := s.getRune() - if rune == EOF { + if rune == eof { return } if rune == '\n' { @@ -384,24 +398,27 @@ func (s *ss) skipSpace(stopAtNewline bool) { } } + // token returns the next space-delimited string from the input. It // skips white space. For Scanln, it stops at newlines. For Scan, // newlines are treated as spaces. -func (s *ss) token() string { - s.skipSpace(false) +func (s *ss) token(skipSpace bool, f func(int) bool) []byte { + if skipSpace { + s.skipSpace(false) + } // read until white space or newline for { rune := s.getRune() - if rune == EOF { + if rune == eof { break } - if unicode.IsSpace(rune) { + if !f(rune) { s.UnreadRune() break } s.buf.WriteRune(rune) } - return s.buf.String() + return s.buf.Bytes() } // typeError indicates that the type of the operand did not match the format @@ -416,7 +433,7 @@ var boolError = os.ErrorString("syntax error scanning boolean") // If accept is true, it puts the character into the input token. func (s *ss) consume(ok string, accept bool) bool { rune := s.getRune() - if rune == EOF { + if rune == eof { return false } if strings.IndexRune(ok, rune) >= 0 { @@ -425,7 +442,7 @@ func (s *ss) consume(ok string, accept bool) bool { } return true } - if rune != EOF && accept { + if rune != eof && accept { s.UnreadRune() } return false @@ -434,7 +451,7 @@ func (s *ss) consume(ok string, accept bool) bool { // peek reports whether the next character is in the ok string, without consuming it. 
func (s *ss) peek(ok string) bool { rune := s.getRune() - if rune != EOF { + if rune != eof { s.UnreadRune() } return strings.IndexRune(ok, rune) >= 0 @@ -729,7 +746,7 @@ func (s *ss) convertString(verb int) (str string) { case 'x': str = s.hexString() default: - str = s.token() // %s and %v just return the next word + str = string(s.token(true, notSpace)) // %s and %v just return the next word } // Empty strings other than with %q are not OK. if len(str) == 0 && verb != 'q' && s.maxWid > 0 { @@ -797,7 +814,7 @@ func (s *ss) hexDigit(digit int) int { // There must be either two hexadecimal digits or a space character in the input. func (s *ss) hexByte() (b byte, ok bool) { rune1 := s.getRune() - if rune1 == EOF { + if rune1 == eof { return } if unicode.IsSpace(rune1) { @@ -953,7 +970,7 @@ func (s *ss) doScan(a []interface{}) (numProcessed int, err os.Error) { if !s.nlIsSpace { for { rune := s.getRune() - if rune == '\n' || rune == EOF { + if rune == '\n' || rune == eof { break } if !unicode.IsSpace(rune) { @@ -993,7 +1010,7 @@ func (s *ss) advance(format string) (i int) { // There was space in the format, so there should be space (EOF) // in the input. 
inputc := s.getRune() - if inputc == EOF { + if inputc == eof { return } if !unicode.IsSpace(inputc) { diff --git a/libgo/go/fmt/scan_test.go b/libgo/go/fmt/scan_test.go index 65adb023686..8d2e6f5c64e 100644 --- a/libgo/go/fmt/scan_test.go +++ b/libgo/go/fmt/scan_test.go @@ -88,14 +88,15 @@ type FloatTest struct { type Xs string func (x *Xs) Scan(state ScanState, verb int) os.Error { - tok, err := state.Token() + tok, err := state.Token(true, func(r int) bool { return r == verb }) if err != nil { return err } - if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(tok) { + s := string(tok) + if !regexp.MustCompile("^" + string(verb) + "+$").MatchString(s) { return os.ErrorString("syntax error for xs") } - *x = Xs(tok) + *x = Xs(s) return nil } @@ -113,9 +114,11 @@ func (s *IntString) Scan(state ScanState, verb int) os.Error { return err } - if _, err := Fscan(state, &s.s); err != nil { + tok, err := state.Token(true, nil) + if err != nil { return err } + s.s = string(tok) return nil } @@ -331,7 +334,7 @@ var multiTests = []ScanfMultiTest{ {"%c%c%c", "2\u50c2X", args(&i, &j, &k), args('2', '\u50c2', 'X'), ""}, // Custom scanners. - {"%2e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""}, + {"%e%f", "eefffff", args(&x, &y), args(Xs("ee"), Xs("fffff")), ""}, {"%4v%s", "12abcd", args(&z, &s), args(IntString{12, "ab"}, "cd"), ""}, // Errors @@ -476,22 +479,12 @@ func verifyInf(str string, t *testing.T) { } } - func TestInf(t *testing.T) { for _, s := range []string{"inf", "+inf", "-inf", "INF", "-INF", "+INF", "Inf", "-Inf", "+Inf"} { verifyInf(s, t) } } -// TODO: there's no conversion from []T to ...T, but we can fake it. These -// functions do the faking. We index the table by the length of the param list. 
-var fscanf = []func(io.Reader, string, []interface{}) (int, os.Error){ - 0: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f) }, - 1: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0]) }, - 2: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0], i[1]) }, - 3: func(r io.Reader, f string, i []interface{}) (int, os.Error) { return Fscanf(r, f, i[0], i[1], i[2]) }, -} - func testScanfMulti(name string, t *testing.T) { sliceType := reflect.Typeof(make([]interface{}, 1)).(*reflect.SliceType) for _, test := range multiTests { @@ -501,7 +494,7 @@ func testScanfMulti(name string, t *testing.T) { } else { r = newReader(test.text) } - n, err := fscanf[len(test.in)](r, test.format, test.in) + n, err := Fscanf(r, test.format, test.in...) if err != nil { if test.err == "" { t.Errorf("got error scanning (%q, %q): %q", test.format, test.text, err) @@ -830,12 +823,12 @@ func testScanInts(t *testing.T, scan func(*RecursiveInt, *bytes.Buffer) os.Error i := 1 for ; r != nil; r = r.next { if r.i != i { - t.Fatal("bad scan: expected %d got %d", i, r.i) + t.Fatalf("bad scan: expected %d got %d", i, r.i) } i++ } if i-1 != intCount { - t.Fatal("bad scan count: expected %d got %d", intCount, i-1) + t.Fatalf("bad scan count: expected %d got %d", intCount, i-1) } } diff --git a/libgo/go/go/ast/ast.go b/libgo/go/go/ast/ast.go index abafb5663b3..4a4c12b7c0a 100644 --- a/libgo/go/go/ast/ast.go +++ b/libgo/go/go/ast/ast.go @@ -602,12 +602,12 @@ type ( Else Stmt // else branch; or nil } - // A CaseClause represents a case of an expression switch statement. + // A CaseClause represents a case of an expression or type switch statement. 
CaseClause struct { - Case token.Pos // position of "case" or "default" keyword - Values []Expr // nil means default case - Colon token.Pos // position of ":" - Body []Stmt // statement list; or nil + Case token.Pos // position of "case" or "default" keyword + List []Expr // list of expressions or types; nil means default case + Colon token.Pos // position of ":" + Body []Stmt // statement list; or nil } // A SwitchStmt node represents an expression switch statement. @@ -618,20 +618,12 @@ type ( Body *BlockStmt // CaseClauses only } - // A TypeCaseClause represents a case of a type switch statement. - TypeCaseClause struct { - Case token.Pos // position of "case" or "default" keyword - Types []Expr // nil means default case - Colon token.Pos // position of ":" - Body []Stmt // statement list; or nil - } - // An TypeSwitchStmt node represents a type switch statement. TypeSwitchStmt struct { Switch token.Pos // position of "switch" keyword Init Stmt // initalization statement; or nil - Assign Stmt // x := y.(type) - Body *BlockStmt // TypeCaseClauses only + Assign Stmt // x := y.(type) or y.(type) + Body *BlockStmt // CaseClauses only } // A CommClause node represents a case of a select statement. 
@@ -687,7 +679,6 @@ func (s *BlockStmt) Pos() token.Pos { return s.Lbrace } func (s *IfStmt) Pos() token.Pos { return s.If } func (s *CaseClause) Pos() token.Pos { return s.Case } func (s *SwitchStmt) Pos() token.Pos { return s.Switch } -func (s *TypeCaseClause) Pos() token.Pos { return s.Case } func (s *TypeSwitchStmt) Pos() token.Pos { return s.Switch } func (s *CommClause) Pos() token.Pos { return s.Case } func (s *SelectStmt) Pos() token.Pos { return s.Select } @@ -734,13 +725,7 @@ func (s *CaseClause) End() token.Pos { } return s.Colon + 1 } -func (s *SwitchStmt) End() token.Pos { return s.Body.End() } -func (s *TypeCaseClause) End() token.Pos { - if n := len(s.Body); n > 0 { - return s.Body[n-1].End() - } - return s.Colon + 1 -} +func (s *SwitchStmt) End() token.Pos { return s.Body.End() } func (s *TypeSwitchStmt) End() token.Pos { return s.Body.End() } func (s *CommClause) End() token.Pos { if n := len(s.Body); n > 0 { @@ -772,7 +757,6 @@ func (s *BlockStmt) stmtNode() {} func (s *IfStmt) stmtNode() {} func (s *CaseClause) stmtNode() {} func (s *SwitchStmt) stmtNode() {} -func (s *TypeCaseClause) stmtNode() {} func (s *TypeSwitchStmt) stmtNode() {} func (s *CommClause) stmtNode() {} func (s *SelectStmt) stmtNode() {} @@ -937,11 +921,13 @@ func (d *FuncDecl) declNode() {} // via Doc and Comment fields. 
// type File struct { - Doc *CommentGroup // associated documentation; or nil - Package token.Pos // position of "package" keyword - Name *Ident // package name - Decls []Decl // top-level declarations; or nil - Comments []*CommentGroup // list of all comments in the source file + Doc *CommentGroup // associated documentation; or nil + Package token.Pos // position of "package" keyword + Name *Ident // package name + Decls []Decl // top-level declarations; or nil + Scope *Scope // package scope + Unresolved []*Ident // unresolved global identifiers + Comments []*CommentGroup // list of all comments in the source file } @@ -959,7 +945,7 @@ func (f *File) End() token.Pos { // type Package struct { Name string // package name - Scope *Scope // package scope; or nil + Scope *Scope // package scope Files map[string]*File // Go source files by filename } diff --git a/libgo/go/go/ast/filter.go b/libgo/go/go/ast/filter.go index 0c3cef4b27b..4da487ce02a 100644 --- a/libgo/go/go/ast/filter.go +++ b/libgo/go/go/ast/filter.go @@ -425,5 +425,6 @@ func MergePackageFiles(pkg *Package, mode MergeMode) *File { } } - return &File{doc, pos, NewIdent(pkg.Name), decls, comments} + // TODO(gri) need to compute pkgScope and unresolved identifiers! + return &File{doc, pos, NewIdent(pkg.Name), decls, nil, nil, comments} } diff --git a/libgo/go/go/ast/print.go b/libgo/go/go/ast/print.go index d71490d4a9f..82c334ece67 100644 --- a/libgo/go/go/ast/print.go +++ b/libgo/go/go/ast/print.go @@ -30,15 +30,19 @@ func NotNilFilter(_ string, value reflect.Value) bool { // Fprint prints the (sub-)tree starting at AST node x to w. +// If fset != nil, position information is interpreted relative +// to that file set. Otherwise positions are printed as integer +// values (file set specific offsets). // // A non-nil FieldFilter f may be provided to control the output: // struct fields for which f(fieldname, fieldvalue) is true are // are printed; all others are filtered from the output. 
// -func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) { +func Fprint(w io.Writer, fset *token.FileSet, x interface{}, f FieldFilter) (n int, err os.Error) { // setup printer p := printer{ output: w, + fset: fset, filter: f, ptrmap: make(map[interface{}]int), last: '\n', // force printing of line number on first line @@ -65,14 +69,15 @@ func Fprint(w io.Writer, x interface{}, f FieldFilter) (n int, err os.Error) { // Print prints x to standard output, skipping nil fields. -// Print(x) is the same as Fprint(os.Stdout, x, NotNilFilter). -func Print(x interface{}) (int, os.Error) { - return Fprint(os.Stdout, x, NotNilFilter) +// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter). +func Print(fset *token.FileSet, x interface{}) (int, os.Error) { + return Fprint(os.Stdout, fset, x, NotNilFilter) } type printer struct { output io.Writer + fset *token.FileSet filter FieldFilter ptrmap map[interface{}]int // *reflect.PtrValue -> line number written int // number of bytes written to output @@ -137,16 +142,6 @@ func (p *printer) printf(format string, args ...interface{}) { // probably be in a different package. func (p *printer) print(x reflect.Value) { - // Note: This test is only needed because AST nodes - // embed a token.Position, and thus all of them - // understand the String() method (but it only - // applies to the Position field). - // TODO: Should reconsider this AST design decision. 
- if pos, ok := x.Interface().(token.Position); ok { - p.printf("%s", pos) - return - } - if !NotNilFilter("", x) { p.printf("nil") return @@ -163,6 +158,7 @@ func (p *printer) print(x reflect.Value) { p.print(key) p.printf(": ") p.print(v.Elem(key)) + p.printf("\n") } p.indent-- p.printf("}") @@ -212,6 +208,11 @@ func (p *printer) print(x reflect.Value) { p.printf("}") default: - p.printf("%v", x.Interface()) + value := x.Interface() + // position values can be printed nicely if we have a file set + if pos, ok := value.(token.Pos); ok && p.fset != nil { + value = p.fset.Position(pos) + } + p.printf("%v", value) } } diff --git a/libgo/go/go/ast/scope.go b/libgo/go/go/ast/scope.go index 956a208aede..91866dcf57b 100644 --- a/libgo/go/go/ast/scope.go +++ b/libgo/go/go/ast/scope.go @@ -2,31 +2,31 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file implements scopes, the objects they contain, -// and object types. +// This file implements scopes and the objects they contain. package ast +import ( + "bytes" + "fmt" + "go/token" +) + + // A Scope maintains the set of named language entities declared // in the scope and a link to the immediately surrounding (outer) // scope. // type Scope struct { Outer *Scope - Objects []*Object // in declaration order - // Implementation note: In some cases (struct fields, - // function parameters) we need the source order of - // variables. Thus for now, we store scope entries - // in a linear list. If scopes become very large - // (say, for packages), we may need to change this - // to avoid slow lookups. + Objects map[string]*Object } // NewScope creates a new scope nested in the outer scope. 
func NewScope(outer *Scope) *Scope { - const n = 4 // initial scope capacity, must be > 0 - return &Scope{outer, make([]*Object, 0, n)} + const n = 4 // initial scope capacity + return &Scope{outer, make(map[string]*Object, n)} } @@ -34,73 +34,108 @@ func NewScope(outer *Scope) *Scope { // found in scope s, otherwise it returns nil. Outer scopes // are ignored. // -// Lookup always returns nil if name is "_", even if the scope -// contains objects with that name. -// func (s *Scope) Lookup(name string) *Object { - if name != "_" { - for _, obj := range s.Objects { - if obj.Name == name { - return obj - } - } - } - return nil + return s.Objects[name] } // Insert attempts to insert a named object into the scope s. -// If the scope does not contain an object with that name yet -// or if the object is named "_", Insert inserts the object -// and returns it. Otherwise, Insert leaves the scope unchanged -// and returns the object found in the scope instead. +// If the scope does not contain an object with that name yet, +// Insert inserts the object and returns it. Otherwise, Insert +// leaves the scope unchanged and returns the object found in +// the scope instead. 
// -func (s *Scope) Insert(obj *Object) *Object { - alt := s.Lookup(obj.Name) - if alt == nil { - s.append(obj) +func (s *Scope) Insert(obj *Object) (alt *Object) { + if alt = s.Objects[obj.Name]; alt == nil { + s.Objects[obj.Name] = obj alt = obj } - return alt + return } -func (s *Scope) append(obj *Object) { - s.Objects = append(s.Objects, obj) +// Debugging support +func (s *Scope) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "scope %p {", s) + if s != nil && len(s.Objects) > 0 { + fmt.Fprintln(&buf) + for _, obj := range s.Objects { + fmt.Fprintf(&buf, "\t%s %s\n", obj.Kind, obj.Name) + } + } + fmt.Fprintf(&buf, "}\n") + return buf.String() } + // ---------------------------------------------------------------------------- // Objects -// An Object describes a language entity such as a package, -// constant, type, variable, or function (incl. methods). +// An Object describes a named language entity such as a package, +// constant, type, variable, function (incl. methods), or label. // type Object struct { - Kind Kind - Name string // declared name - Type *Type - Decl interface{} // corresponding Field, XxxSpec or FuncDecl - N int // value of iota for this declaration + Kind ObjKind + Name string // declared name + Decl interface{} // corresponding Field, XxxSpec, FuncDecl, or LabeledStmt; or nil + Type interface{} // place holder for type information; may be nil } // NewObj creates a new object of a given kind and name. -func NewObj(kind Kind, name string) *Object { +func NewObj(kind ObjKind, name string) *Object { return &Object{Kind: kind, Name: name} } -// Kind describes what an object represents. -type Kind int +// Pos computes the source position of the declaration of an object name. +// The result may be an invalid position if it cannot be computed +// (obj.Decl may be nil or not correct). 
+func (obj *Object) Pos() token.Pos { + name := obj.Name + switch d := obj.Decl.(type) { + case *Field: + for _, n := range d.Names { + if n.Name == name { + return n.Pos() + } + } + case *ValueSpec: + for _, n := range d.Names { + if n.Name == name { + return n.Pos() + } + } + case *TypeSpec: + if d.Name.Name == name { + return d.Name.Pos() + } + case *FuncDecl: + if d.Name.Name == name { + return d.Name.Pos() + } + case *LabeledStmt: + if d.Label.Name == name { + return d.Label.Pos() + } + } + return token.NoPos +} + + +// ObKind describes what an object represents. +type ObjKind int // The list of possible Object kinds. const ( - Bad Kind = iota // for error handling - Pkg // package - Con // constant - Typ // type - Var // variable - Fun // function or method + Bad ObjKind = iota // for error handling + Pkg // package + Con // constant + Typ // type + Var // variable + Fun // function or method + Lbl // label ) @@ -111,132 +146,8 @@ var objKindStrings = [...]string{ Typ: "type", Var: "var", Fun: "func", + Lbl: "label", } -func (kind Kind) String() string { return objKindStrings[kind] } - - -// IsExported returns whether obj is exported. -func (obj *Object) IsExported() bool { return IsExported(obj.Name) } - - -// ---------------------------------------------------------------------------- -// Types - -// A Type represents a Go type. -type Type struct { - Form Form - Obj *Object // corresponding type name, or nil - Scope *Scope // fields and methods, always present - N uint // basic type id, array length, number of function results, or channel direction - Key, Elt *Type // map key and array, pointer, slice, map or channel element - Params *Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil - Expr Expr // corresponding AST expression -} - - -// NewType creates a new type of a given form. 
-func NewType(form Form) *Type { - return &Type{Form: form, Scope: NewScope(nil)} -} - - -// Form describes the form of a type. -type Form int - -// The list of possible type forms. -const ( - BadType Form = iota // for error handling - Unresolved // type not fully setup - Basic - Array - Struct - Pointer - Function - Method - Interface - Slice - Map - Channel - Tuple -) - - -var formStrings = [...]string{ - BadType: "badType", - Unresolved: "unresolved", - Basic: "basic", - Array: "array", - Struct: "struct", - Pointer: "pointer", - Function: "function", - Method: "method", - Interface: "interface", - Slice: "slice", - Map: "map", - Channel: "channel", - Tuple: "tuple", -} - - -func (form Form) String() string { return formStrings[form] } - - -// The list of basic type id's. -const ( - Bool = iota - Byte - Uint - Int - Float - Complex - Uintptr - String - - Uint8 - Uint16 - Uint32 - Uint64 - - Int8 - Int16 - Int32 - Int64 - - Float32 - Float64 - - Complex64 - Complex128 - - // TODO(gri) ideal types are missing -) - - -var BasicTypes = map[uint]string{ - Bool: "bool", - Byte: "byte", - Uint: "uint", - Int: "int", - Float: "float", - Complex: "complex", - Uintptr: "uintptr", - String: "string", - - Uint8: "uint8", - Uint16: "uint16", - Uint32: "uint32", - Uint64: "uint64", - - Int8: "int8", - Int16: "int16", - Int32: "int32", - Int64: "int64", - - Float32: "float32", - Float64: "float64", - - Complex64: "complex64", - Complex128: "complex128", -} +func (kind ObjKind) String() string { return objKindStrings[kind] } diff --git a/libgo/go/go/ast/walk.go b/libgo/go/go/ast/walk.go index 20c337c3be9..95c4b3a3564 100644 --- a/libgo/go/go/ast/walk.go +++ b/libgo/go/go/ast/walk.go @@ -234,7 +234,7 @@ func Walk(v Visitor, node Node) { } case *CaseClause: - walkExprList(v, n.Values) + walkExprList(v, n.List) walkStmtList(v, n.Body) case *SwitchStmt: @@ -246,12 +246,6 @@ func Walk(v Visitor, node Node) { } Walk(v, n.Body) - case *TypeCaseClause: - for _, x := range n.Types { - 
Walk(v, x) - } - walkStmtList(v, n.Body) - case *TypeSwitchStmt: if n.Init != nil { Walk(v, n.Init) diff --git a/libgo/go/go/parser/interface.go b/libgo/go/go/parser/interface.go index 84d699a6793..6f35b495efa 100644 --- a/libgo/go/go/parser/interface.go +++ b/libgo/go/go/parser/interface.go @@ -14,7 +14,7 @@ import ( "io" "io/ioutil" "os" - pathutil "path" + "path/filepath" ) @@ -198,7 +198,7 @@ func ParseDir(fset *token.FileSet, path string, filter func(*os.FileInfo) bool, for i := 0; i < len(list); i++ { d := &list[i] if filter == nil || filter(d) { - filenames[n] = pathutil.Join(path, d.Name) + filenames[n] = filepath.Join(path, d.Name) n++ } } diff --git a/libgo/go/go/parser/parser.go b/libgo/go/go/parser/parser.go index 7c5843f3637..b0e8c8ad7a8 100644 --- a/libgo/go/go/parser/parser.go +++ b/libgo/go/go/parser/parser.go @@ -17,10 +17,6 @@ import ( ) -// noPos is used when there is no corresponding source position for a token. -var noPos token.Position - - // The mode parameter to the Parse* functions is a set of flags (or 0). // They control the amount of source code parsed and other optional // parser functionality. 
@@ -30,6 +26,7 @@ const ( ImportsOnly // parsing stops after import declarations ParseComments // parse comments and add them to AST Trace // print a trace of parsed productions + DeclarationErrors // report declaration errors ) @@ -46,16 +43,26 @@ type parser struct { // Comments comments []*ast.CommentGroup - leadComment *ast.CommentGroup // the last lead comment - lineComment *ast.CommentGroup // the last line comment + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment // Next token - pos token.Pos // token position - tok token.Token // one token look-ahead - lit []byte // token literal + pos token.Pos // token position + tok token.Token // one token look-ahead + lit_ []byte // token literal (slice into original source, don't hold on to it) // Non-syntactic parser control exprLev int // < 0: in control clause, >= 0: in expression + + // Ordinary identifer scopes + pkgScope *ast.Scope // pkgScope.Outer == nil + topScope *ast.Scope // top-most scope; may be pkgScope + unresolved []*ast.Ident // unresolved global identifiers + + // Label scope + // (maintained by open/close LabelScope) + labelScope *ast.Scope // label scope for current function + targetStack [][]*ast.Ident // stack of unresolved labels } @@ -72,9 +79,126 @@ func scannerMode(mode uint) uint { func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) { p.file = fset.AddFile(filename, fset.Base(), len(src)) p.scanner.Init(p.file, src, p, scannerMode(mode)) + p.mode = mode p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently) + p.next() + + // set up the pkgScope here (as opposed to in parseFile) because + // there are other parser entry points (ParseExpr, etc.) 
+ p.openScope() + p.pkgScope = p.topScope + + // for the same reason, set up a label scope + p.openLabelScope() +} + + +func (p *parser) lit() []byte { + // make a copy of p.lit_ so that we don't hold on to + // a copy of the entire source indirectly in the AST + t := make([]byte, len(p.lit_)) + copy(t, p.lit_) + return t +} + + +// ---------------------------------------------------------------------------- +// Scoping support + +func (p *parser) openScope() { + p.topScope = ast.NewScope(p.topScope) +} + + +func (p *parser) closeScope() { + p.topScope = p.topScope.Outer +} + + +func (p *parser) openLabelScope() { + p.labelScope = ast.NewScope(p.labelScope) + p.targetStack = append(p.targetStack, nil) +} + + +func (p *parser) closeLabelScope() { + // resolve labels + n := len(p.targetStack) - 1 + scope := p.labelScope + for _, ident := range p.targetStack[n] { + ident.Obj = scope.Lookup(ident.Name) + if ident.Obj == nil && p.mode&DeclarationErrors != 0 { + p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name)) + } + } + // pop label scope + p.targetStack = p.targetStack[0:n] + p.labelScope = p.labelScope.Outer +} + + +func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) { + for _, ident := range idents { + if ident.Name != "_" { + obj := ast.NewObj(kind, ident.Name) + // remember the corresponding declaration for redeclaration + // errors and global variable resolution/typechecking phase + obj.Decl = decl + alt := scope.Insert(obj) + if alt != obj && p.mode&DeclarationErrors != 0 { + prevDecl := "" + if pos := alt.Pos(); pos.IsValid() { + prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos)) + } + p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl)) + } + ident.Obj = obj + } + } +} + + +func (p *parser) shortVarDecl(idents []*ast.Ident) { + // Go spec: A short variable declaration may redeclare variables + // provided they were originally 
declared in the same block with + // the same type, and at least one of the non-blank variables is new. + n := 0 // number of new variables + for _, ident := range idents { + if ident.Name != "_" { + obj := ast.NewObj(ast.Var, ident.Name) + // short var declarations cannot have redeclaration errors + // and are not global => no need to remember the respective + // declaration + alt := p.topScope.Insert(obj) + if alt == obj { + n++ // new declaration + } + ident.Obj = alt + } + } + if n == 0 && p.mode&DeclarationErrors != 0 { + p.error(idents[0].Pos(), "no new variables on left side of :=") + } +} + + +func (p *parser) resolve(ident *ast.Ident) { + if ident.Name == "_" { + return + } + // try to resolve the identifier + for s := p.topScope; s != nil; s = s.Outer { + if obj := s.Lookup(ident.Name); obj != nil { + ident.Obj = obj + return + } + } + // collect unresolved global identifiers; ignore the others + if p.topScope == p.pkgScope { + p.unresolved = append(p.unresolved, ident) + } } @@ -120,7 +244,7 @@ func (p *parser) next0() { s := p.tok.String() switch { case p.tok.IsLiteral(): - p.printTrace(s, string(p.lit)) + p.printTrace(s, string(p.lit_)) case p.tok.IsOperator(), p.tok.IsKeyword(): p.printTrace("\"" + s + "\"") default: @@ -128,7 +252,7 @@ func (p *parser) next0() { } } - p.pos, p.tok, p.lit = p.scanner.Scan() + p.pos, p.tok, p.lit_ = p.scanner.Scan() } // Consume a comment and return it and the line on which it ends. @@ -136,15 +260,15 @@ func (p *parser) consumeComment() (comment *ast.Comment, endline int) { // /*-style comments may end on a different line than where they start. // Scan the comment for '\n' chars and adjust endline accordingly. 
endline = p.file.Line(p.pos) - if p.lit[1] == '*' { - for _, b := range p.lit { + if p.lit_[1] == '*' { + for _, b := range p.lit_ { if b == '\n' { endline++ } } } - comment = &ast.Comment{p.pos, p.lit} + comment = &ast.Comment{p.pos, p.lit()} p.next0() return @@ -234,12 +358,12 @@ func (p *parser) errorExpected(pos token.Pos, msg string) { if pos == p.pos { // the error happened at the current position; // make the error message more specific - if p.tok == token.SEMICOLON && p.lit[0] == '\n' { + if p.tok == token.SEMICOLON && p.lit_[0] == '\n' { msg += ", found newline" } else { msg += ", found '" + p.tok.String() + "'" if p.tok.IsLiteral() { - msg += " " + string(p.lit) + msg += " " + string(p.lit_) } } } @@ -271,7 +395,7 @@ func (p *parser) parseIdent() *ast.Ident { pos := p.pos name := "_" if p.tok == token.IDENT { - name = string(p.lit) + name = string(p.lit_) p.next() } else { p.expect(token.IDENT) // use expect() error handling @@ -339,13 +463,16 @@ func (p *parser) parseQualifiedIdent() ast.Expr { defer un(trace(p, "QualifiedIdent")) } - var x ast.Expr = p.parseIdent() + ident := p.parseIdent() + p.resolve(ident) + var x ast.Expr = ident if p.tok == token.PERIOD { // first identifier is a package identifier p.next() sel := p.parseIdent() x = &ast.SelectorExpr{x, sel} } + return x } @@ -407,7 +534,7 @@ func (p *parser) parseFieldDecl() *ast.Field { // optional tag var tag *ast.BasicLit if p.tok == token.STRING { - tag = &ast.BasicLit{p.pos, p.tok, p.lit} + tag = &ast.BasicLit{p.pos, p.tok, p.lit()} p.next() } @@ -426,7 +553,7 @@ func (p *parser) parseFieldDecl() *ast.Field { } } - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment return &ast.Field{doc, idents, typ, tag, p.lineComment} } @@ -519,7 +646,7 @@ func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) { } -func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) { +func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) 
(params []*ast.Field) { if p.trace { defer un(trace(p, "ParameterList")) } @@ -528,7 +655,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) { if typ != nil { // IdentifierList Type idents := p.makeIdentList(list) - params = append(params, &ast.Field{nil, idents, typ, nil, nil}) + field := &ast.Field{nil, idents, typ, nil, nil} + params = append(params, field) + // Go spec: The scope of an identifier denoting a function + // parameter or result variable is the function body. + p.declare(field, scope, ast.Var, idents...) if p.tok == token.COMMA { p.next() } @@ -536,7 +667,11 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) { for p.tok != token.RPAREN && p.tok != token.EOF { idents := p.parseIdentList() typ := p.parseVarType(ellipsisOk) - params = append(params, &ast.Field{nil, idents, typ, nil, nil}) + field := &ast.Field{nil, idents, typ, nil, nil} + params = append(params, field) + // Go spec: The scope of an identifier denoting a function + // parameter or result variable is the function body. + p.declare(field, scope, ast.Var, idents...) 
if p.tok != token.COMMA { break } @@ -555,7 +690,7 @@ func (p *parser) parseParameterList(ellipsisOk bool) (params []*ast.Field) { } -func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList { +func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList { if p.trace { defer un(trace(p, "Parameters")) } @@ -563,7 +698,7 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList { var params []*ast.Field lparen := p.expect(token.LPAREN) if p.tok != token.RPAREN { - params = p.parseParameterList(ellipsisOk) + params = p.parseParameterList(scope, ellipsisOk) } rparen := p.expect(token.RPAREN) @@ -571,13 +706,13 @@ func (p *parser) parseParameters(ellipsisOk bool) *ast.FieldList { } -func (p *parser) parseResult() *ast.FieldList { +func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Result")) } if p.tok == token.LPAREN { - return p.parseParameters(false) + return p.parseParameters(scope, false) } typ := p.tryType() @@ -591,27 +726,28 @@ func (p *parser) parseResult() *ast.FieldList { } -func (p *parser) parseSignature() (params, results *ast.FieldList) { +func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) { if p.trace { defer un(trace(p, "Signature")) } - params = p.parseParameters(true) - results = p.parseResult() + params = p.parseParameters(scope, true) + results = p.parseResult(scope) return } -func (p *parser) parseFuncType() *ast.FuncType { +func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) { if p.trace { defer un(trace(p, "FuncType")) } pos := p.expect(token.FUNC) - params, results := p.parseSignature() + scope := ast.NewScope(p.topScope) // function scope + params, results := p.parseSignature(scope) - return &ast.FuncType{pos, params, results} + return &ast.FuncType{pos, params, results}, scope } @@ -627,13 +763,14 @@ func (p *parser) parseMethodSpec() *ast.Field { if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN { 
// method idents = []*ast.Ident{ident} - params, results := p.parseSignature() + scope := ast.NewScope(nil) // method scope + params, results := p.parseSignature(scope) typ = &ast.FuncType{token.NoPos, params, results} } else { // embedded interface typ = x } - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment return &ast.Field{doc, idents, typ, nil, p.lineComment} } @@ -706,7 +843,8 @@ func (p *parser) tryRawType(ellipsisOk bool) ast.Expr { case token.MUL: return p.parsePointerType() case token.FUNC: - return p.parseFuncType() + typ, _ := p.parseFuncType() + return typ case token.INTERFACE: return p.parseInterfaceType() case token.MAP: @@ -745,13 +883,17 @@ func (p *parser) parseStmtList() (list []ast.Stmt) { } -func (p *parser) parseBody() *ast.BlockStmt { +func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt { if p.trace { defer un(trace(p, "Body")) } lbrace := p.expect(token.LBRACE) + p.topScope = scope // open function scope + p.openLabelScope() list := p.parseStmtList() + p.closeLabelScope() + p.closeScope() rbrace := p.expect(token.RBRACE) return &ast.BlockStmt{lbrace, list, rbrace} @@ -764,7 +906,9 @@ func (p *parser) parseBlockStmt() *ast.BlockStmt { } lbrace := p.expect(token.LBRACE) + p.openScope() list := p.parseStmtList() + p.closeScope() rbrace := p.expect(token.RBRACE) return &ast.BlockStmt{lbrace, list, rbrace} @@ -779,14 +923,14 @@ func (p *parser) parseFuncTypeOrLit() ast.Expr { defer un(trace(p, "FuncTypeOrLit")) } - typ := p.parseFuncType() + typ, scope := p.parseFuncType() if p.tok != token.LBRACE { // function type only return typ } p.exprLev++ - body := p.parseBody() + body := p.parseBody(scope) p.exprLev-- return &ast.FuncLit{typ, body} @@ -803,10 +947,12 @@ func (p *parser) parseOperand() ast.Expr { switch p.tok { case token.IDENT: - return p.parseIdent() + ident := p.parseIdent() + p.resolve(ident) + return ident case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING: - x := &ast.BasicLit{p.pos, 
p.tok, p.lit} + x := &ast.BasicLit{p.pos, p.tok, p.lit()} p.next() return x @@ -1202,6 +1348,9 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { pos, tok := p.pos, p.tok p.next() y := p.parseExprList() + if tok == token.DEFINE { + p.shortVarDecl(p.makeIdentList(x)) + } return &ast.AssignStmt{x, pos, tok, y} } @@ -1216,7 +1365,12 @@ func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt { colon := p.pos p.next() if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent { - return &ast.LabeledStmt{label, colon, p.parseStmt()} + // Go spec: The scope of a label is the body of the function + // in which it is declared and excludes the body of any nested + // function. + stmt := &ast.LabeledStmt{label, colon, p.parseStmt()} + p.declare(stmt, p.labelScope, ast.Lbl, label) + return stmt } p.error(x[0].Pos(), "illegal label declaration") return &ast.BadStmt{x[0].Pos(), colon + 1} @@ -1304,14 +1458,17 @@ func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt { defer un(trace(p, "BranchStmt")) } - s := &ast.BranchStmt{p.pos, tok, nil} - p.expect(tok) + pos := p.expect(tok) + var label *ast.Ident if tok != token.FALLTHROUGH && p.tok == token.IDENT { - s.Label = p.parseIdent() + label = p.parseIdent() + // add to list of unresolved targets + n := len(p.targetStack) - 1 + p.targetStack[n] = append(p.targetStack[n], label) } p.expectSemi() - return s + return &ast.BranchStmt{pos, tok, label} } @@ -1333,6 +1490,8 @@ func (p *parser) parseIfStmt() *ast.IfStmt { } pos := p.expect(token.IF) + p.openScope() + defer p.closeScope() var s ast.Stmt var x ast.Expr @@ -1368,28 +1527,6 @@ func (p *parser) parseIfStmt() *ast.IfStmt { } -func (p *parser) parseCaseClause() *ast.CaseClause { - if p.trace { - defer un(trace(p, "CaseClause")) - } - - // SwitchCase - pos := p.pos - var x []ast.Expr - if p.tok == token.CASE { - p.next() - x = p.parseExprList() - } else { - p.expect(token.DEFAULT) - } - - colon := p.expect(token.COLON) - body := p.parseStmtList() - - return 
&ast.CaseClause{pos, x, colon, body} -} - - func (p *parser) parseTypeList() (list []ast.Expr) { if p.trace { defer un(trace(p, "TypeList")) @@ -1405,25 +1542,30 @@ func (p *parser) parseTypeList() (list []ast.Expr) { } -func (p *parser) parseTypeCaseClause() *ast.TypeCaseClause { +func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause { if p.trace { - defer un(trace(p, "TypeCaseClause")) + defer un(trace(p, "CaseClause")) } - // TypeSwitchCase pos := p.pos - var types []ast.Expr + var list []ast.Expr if p.tok == token.CASE { p.next() - types = p.parseTypeList() + if exprSwitch { + list = p.parseExprList() + } else { + list = p.parseTypeList() + } } else { p.expect(token.DEFAULT) } colon := p.expect(token.COLON) + p.openScope() body := p.parseStmtList() + p.closeScope() - return &ast.TypeCaseClause{pos, types, colon, body} + return &ast.CaseClause{pos, list, colon, body} } @@ -1447,6 +1589,8 @@ func (p *parser) parseSwitchStmt() ast.Stmt { } pos := p.expect(token.SWITCH) + p.openScope() + defer p.closeScope() var s1, s2 ast.Stmt if p.tok != token.LBRACE { @@ -1466,28 +1610,21 @@ func (p *parser) parseSwitchStmt() ast.Stmt { p.exprLev = prevLev } - if isExprSwitch(s2) { - lbrace := p.expect(token.LBRACE) - var list []ast.Stmt - for p.tok == token.CASE || p.tok == token.DEFAULT { - list = append(list, p.parseCaseClause()) - } - rbrace := p.expect(token.RBRACE) - body := &ast.BlockStmt{lbrace, list, rbrace} - p.expectSemi() - return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body} - } - - // type switch - // TODO(gri): do all the checks! 
+ exprSwitch := isExprSwitch(s2) lbrace := p.expect(token.LBRACE) var list []ast.Stmt for p.tok == token.CASE || p.tok == token.DEFAULT { - list = append(list, p.parseTypeCaseClause()) + list = append(list, p.parseCaseClause(exprSwitch)) } rbrace := p.expect(token.RBRACE) p.expectSemi() body := &ast.BlockStmt{lbrace, list, rbrace} + + if exprSwitch { + return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body} + } + // type switch + // TODO(gri): do all the checks! return &ast.TypeSwitchStmt{pos, s1, s2, body} } @@ -1497,7 +1634,7 @@ func (p *parser) parseCommClause() *ast.CommClause { defer un(trace(p, "CommClause")) } - // CommCase + p.openScope() pos := p.pos var comm ast.Stmt if p.tok == token.CASE { @@ -1518,7 +1655,7 @@ func (p *parser) parseCommClause() *ast.CommClause { pos := p.pos tok := p.tok var rhs ast.Expr - if p.tok == token.ASSIGN || p.tok == token.DEFINE { + if tok == token.ASSIGN || tok == token.DEFINE { // RecvStmt with assignment if len(lhs) > 2 { p.errorExpected(lhs[0].Pos(), "1 or 2 expressions") @@ -1527,6 +1664,9 @@ func (p *parser) parseCommClause() *ast.CommClause { } p.next() rhs = p.parseExpr() + if tok == token.DEFINE { + p.shortVarDecl(p.makeIdentList(lhs)) + } } else { // rhs must be single receive operation if len(lhs) > 1 { @@ -1552,6 +1692,7 @@ func (p *parser) parseCommClause() *ast.CommClause { colon := p.expect(token.COLON) body := p.parseStmtList() + p.closeScope() return &ast.CommClause{pos, comm, colon, body} } @@ -1582,6 +1723,8 @@ func (p *parser) parseForStmt() ast.Stmt { } pos := p.expect(token.FOR) + p.openScope() + defer p.closeScope() var s1, s2, s3 ast.Stmt if p.tok != token.LBRACE { @@ -1631,18 +1774,16 @@ func (p *parser) parseForStmt() ast.Stmt { return &ast.BadStmt{pos, body.End()} } if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE { - // rhs is range expression; check lhs + // rhs is range expression + // (any short variable declaration was handled by parseSimpleStat above) return 
&ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body} - } else { - p.errorExpected(s2.Pos(), "range clause") - return &ast.BadStmt{pos, body.End()} } - } else { - // regular for statement - return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body} + p.errorExpected(s2.Pos(), "range clause") + return &ast.BadStmt{pos, body.End()} } - panic("unreachable") + // regular for statement + return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body} } @@ -1706,36 +1847,37 @@ func (p *parser) parseStmt() (s ast.Stmt) { // ---------------------------------------------------------------------------- // Declarations -type parseSpecFunction func(p *parser, doc *ast.CommentGroup) ast.Spec +type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec -func parseImportSpec(p *parser, doc *ast.CommentGroup) ast.Spec { +func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "ImportSpec")) } var ident *ast.Ident - if p.tok == token.PERIOD { + switch p.tok { + case token.PERIOD: ident = &ast.Ident{p.pos, ".", nil} p.next() - } else if p.tok == token.IDENT { + case token.IDENT: ident = p.parseIdent() } var path *ast.BasicLit if p.tok == token.STRING { - path = &ast.BasicLit{p.pos, p.tok, p.lit} + path = &ast.BasicLit{p.pos, p.tok, p.lit()} p.next() } else { p.expect(token.STRING) // use expect() error handling } - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment return &ast.ImportSpec{doc, ident, path, p.lineComment} } -func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec { +func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec { if p.trace { defer un(trace(p, "ConstSpec")) } @@ -1743,30 +1885,44 @@ func parseConstSpec(p *parser, doc *ast.CommentGroup) ast.Spec { idents := p.parseIdentList() typ := p.tryType() var values []ast.Expr - if typ != nil || p.tok == token.ASSIGN { + if typ != nil || p.tok == token.ASSIGN || iota == 0 { p.expect(token.ASSIGN) values = 
p.parseExprList() } - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment + + // Go spec: The scope of a constant or variable identifier declared inside + // a function begins at the end of the ConstSpec or VarSpec and ends at + // the end of the innermost containing block. + // (Global identifiers are resolved in a separate phase after parsing.) + spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment} + p.declare(spec, p.topScope, ast.Con, idents...) - return &ast.ValueSpec{doc, idents, typ, values, p.lineComment} + return spec } -func parseTypeSpec(p *parser, doc *ast.CommentGroup) ast.Spec { +func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "TypeSpec")) } ident := p.parseIdent() typ := p.parseType() - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment + + // Go spec: The scope of a type identifier declared inside a function begins + // at the identifier in the TypeSpec and ends at the end of the innermost + // containing block. + // (Global identifiers are resolved in a separate phase after parsing.) + spec := &ast.TypeSpec{doc, ident, typ, p.lineComment} + p.declare(spec, p.topScope, ast.Typ, ident) - return &ast.TypeSpec{doc, ident, typ, p.lineComment} + return spec } -func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec { +func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec { if p.trace { defer un(trace(p, "VarSpec")) } @@ -1778,9 +1934,16 @@ func parseVarSpec(p *parser, doc *ast.CommentGroup) ast.Spec { p.expect(token.ASSIGN) values = p.parseExprList() } - p.expectSemi() + p.expectSemi() // call before accessing p.linecomment + + // Go spec: The scope of a constant or variable identifier declared inside + // a function begins at the end of the ConstSpec or VarSpec and ends at + // the end of the innermost containing block. + // (Global identifiers are resolved in a separate phase after parsing.) 
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment} + p.declare(spec, p.topScope, ast.Var, idents...) - return &ast.ValueSpec{doc, idents, typ, values, p.lineComment} + return spec } @@ -1796,26 +1959,26 @@ func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.Gen if p.tok == token.LPAREN { lparen = p.pos p.next() - for p.tok != token.RPAREN && p.tok != token.EOF { - list = append(list, f(p, p.leadComment)) + for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ { + list = append(list, f(p, p.leadComment, iota)) } rparen = p.expect(token.RPAREN) p.expectSemi() } else { - list = append(list, f(p, nil)) + list = append(list, f(p, nil, 0)) } return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen} } -func (p *parser) parseReceiver() *ast.FieldList { +func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList { if p.trace { defer un(trace(p, "Receiver")) } pos := p.pos - par := p.parseParameters(false) + par := p.parseParameters(scope, false) // must have exactly one receiver if par.NumFields() != 1 { @@ -1844,22 +2007,37 @@ func (p *parser) parseFuncDecl() *ast.FuncDecl { doc := p.leadComment pos := p.expect(token.FUNC) + scope := ast.NewScope(p.topScope) // function scope var recv *ast.FieldList if p.tok == token.LPAREN { - recv = p.parseReceiver() + recv = p.parseReceiver(scope) } ident := p.parseIdent() - params, results := p.parseSignature() + + params, results := p.parseSignature(scope) var body *ast.BlockStmt if p.tok == token.LBRACE { - body = p.parseBody() + body = p.parseBody(scope) } p.expectSemi() - return &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body} + decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body} + if recv == nil { + // Go spec: The scope of an identifier denoting a constant, type, + // variable, or function (but not method) declared at top level + // (outside any function) is the package block. 
+ // + // init() functions cannot be referred to and there may + // be more than one - don't put them in the pkgScope + if ident.Name != "init" { + p.declare(decl, p.pkgScope, ast.Fun, ident) + } + } + + return decl } @@ -1918,6 +2096,8 @@ func (p *parser) parseFile() *ast.File { // package clause doc := p.leadComment pos := p.expect(token.PACKAGE) + // Go spec: The package clause is not a declaration; + // the package name does not appear in any scope. ident := p.parseIdent() p.expectSemi() @@ -1940,5 +2120,20 @@ func (p *parser) parseFile() *ast.File { } } - return &ast.File{doc, pos, ident, decls, p.comments} + if p.topScope != p.pkgScope { + panic("internal error: imbalanced scopes") + } + + // resolve global identifiers within the same file + i := 0 + for _, ident := range p.unresolved { + // i <= index for current ident + ident.Obj = p.pkgScope.Lookup(ident.Name) + if ident.Obj == nil { + p.unresolved[i] = ident + i++ + } + } + + return &ast.File{doc, pos, ident, decls, p.pkgScope, p.unresolved[0:i], p.comments} } diff --git a/libgo/go/go/parser/parser_test.go b/libgo/go/go/parser/parser_test.go index 38535627a75..2f1ee6bfc09 100644 --- a/libgo/go/go/parser/parser_test.go +++ b/libgo/go/go/parser/parser_test.go @@ -21,6 +21,7 @@ var illegalInputs = []interface{}{ `package p; func f() { if /* should have condition */ {} };`, `package p; func f() { if ; /* should have condition */ {} };`, `package p; func f() { if f(); /* should have condition */ {} };`, + `package p; const c; /* should have constant value */`, } @@ -73,7 +74,7 @@ var validFiles = []string{ func TestParse3(t *testing.T) { for _, filename := range validFiles { - _, err := ParseFile(fset, filename, nil, 0) + _, err := ParseFile(fset, filename, nil, DeclarationErrors) if err != nil { t.Errorf("ParseFile(%s): %v", filename, err) } diff --git a/libgo/go/go/printer/nodes.go b/libgo/go/go/printer/nodes.go index 7933c2f1820..2238b6bedc8 100644 --- a/libgo/go/go/printer/nodes.go +++ 
b/libgo/go/go/printer/nodes.go @@ -108,17 +108,6 @@ func (p *printer) identList(list []*ast.Ident, indent bool, multiLine *bool) { } -// Compute the key size of a key:value expression. -// Returns 0 if the expression doesn't fit onto a single line. -func (p *printer) keySize(pair *ast.KeyValueExpr) int { - if p.nodeSize(pair, infinity) <= infinity { - // entire expression fits on one line - return key size - return p.nodeSize(pair.Key, infinity) - } - return 0 -} - - // Print a list of expressions. If the list spans multiple // source lines, the original line breaks are respected between // expressions. Sets multiLine to true if the list spans multiple @@ -204,17 +193,21 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp // the key and the node size into the decision process useFF := true - // determine size + // determine element size: all bets are off if we don't have + // position information for the previous and next token (likely + // generated code - simply ignore the size in this case by setting + // it to 0) prevSize := size const infinity = 1e6 // larger than any source line size = p.nodeSize(x, infinity) pair, isPair := x.(*ast.KeyValueExpr) - if size <= infinity { + if size <= infinity && prev.IsValid() && next.IsValid() { // x fits on a single line if isPair { size = p.nodeSize(pair.Key, infinity) // size <= infinity } } else { + // size too large or we don't have good layout information size = 0 } @@ -244,7 +237,6 @@ func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exp // lines are broken using newlines so comments remain aligned // unless forceFF is set or there are multiple expressions on // the same line in which case formfeed is used - // broken with a formfeed if p.linebreak(line, linebreakMin, ws, useFF || prevBreak+1 < i) { ws = ignore *multiLine = true @@ -375,7 +367,7 @@ func (p *printer) setLineComment(text string) { } -func (p *printer) fieldList(fields *ast.FieldList, isIncomplete 
bool, ctxt exprContext) { +func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { p.nesting++ defer func() { p.nesting-- @@ -384,15 +376,15 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC lbrace := fields.Opening list := fields.List rbrace := fields.Closing + srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.fset.Position(lbrace).Line == p.fset.Position(rbrace).Line - if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) { + if !isIncomplete && !p.commentBefore(p.fset.Position(rbrace)) && srcIsOneLine { // possibly a one-line struct/interface if len(list) == 0 { // no blank between keyword and {} in this case p.print(lbrace, token.LBRACE, rbrace, token.RBRACE) return - } else if ctxt&(compositeLit|structType) == compositeLit|structType && - p.isOneLineFieldList(list) { // for now ignore interfaces + } else if isStruct && p.isOneLineFieldList(list) { // for now ignore interfaces // small enough - print on one line // (don't use identList and ignore source line breaks) p.print(lbrace, token.LBRACE, blank) @@ -414,7 +406,7 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC // at least one entry or incomplete p.print(blank, lbrace, token.LBRACE, indent, formfeed) - if ctxt&structType != 0 { + if isStruct { sep := vtab if len(list) == 1 { @@ -497,15 +489,6 @@ func (p *printer) fieldList(fields *ast.FieldList, isIncomplete bool, ctxt exprC // ---------------------------------------------------------------------------- // Expressions -// exprContext describes the syntactic environment in which an expression node is printed. 
-type exprContext uint - -const ( - compositeLit exprContext = 1 << iota - structType -) - - func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { switch e.Op.Precedence() { case 4: @@ -650,7 +633,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL printBlank := prec < cutoff ws := indent - p.expr1(x.X, prec, depth+diffPrec(x.X, prec), 0, multiLine) + p.expr1(x.X, prec, depth+diffPrec(x.X, prec), multiLine) if printBlank { p.print(blank) } @@ -669,7 +652,7 @@ func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int, multiL if printBlank { p.print(blank) } - p.expr1(x.Y, prec+1, depth+1, 0, multiLine) + p.expr1(x.Y, prec+1, depth+1, multiLine) if ws == ignore { p.print(unindent) } @@ -742,7 +725,7 @@ func selectorExprList(expr ast.Expr) (list []ast.Expr) { // Sets multiLine to true if the expression spans multiple lines. -func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multiLine *bool) { +func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) { p.print(expr.Pos()) switch x := expr.(type) { @@ -792,7 +775,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi // TODO(gri) Remove this code if it cannot be reached. 
p.print(blank) } - p.expr1(x.X, prec, depth, 0, multiLine) + p.expr1(x.X, prec, depth, multiLine) } case *ast.BasicLit: @@ -818,7 +801,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi p.exprList(token.NoPos, parts, depth, periodSep, multiLine, token.NoPos) case *ast.TypeAssertExpr: - p.expr1(x.X, token.HighestPrec, depth, 0, multiLine) + p.expr1(x.X, token.HighestPrec, depth, multiLine) p.print(token.PERIOD, token.LPAREN) if x.Type != nil { p.expr(x.Type, multiLine) @@ -829,14 +812,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi case *ast.IndexExpr: // TODO(gri): should treat[] like parentheses and undo one level of depth - p.expr1(x.X, token.HighestPrec, 1, 0, multiLine) + p.expr1(x.X, token.HighestPrec, 1, multiLine) p.print(x.Lbrack, token.LBRACK) p.expr0(x.Index, depth+1, multiLine) p.print(x.Rbrack, token.RBRACK) case *ast.SliceExpr: // TODO(gri): should treat[] like parentheses and undo one level of depth - p.expr1(x.X, token.HighestPrec, 1, 0, multiLine) + p.expr1(x.X, token.HighestPrec, 1, multiLine) p.print(x.Lbrack, token.LBRACK) if x.Low != nil { p.expr0(x.Low, depth+1, multiLine) @@ -856,7 +839,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi if len(x.Args) > 1 { depth++ } - p.expr1(x.Fun, token.HighestPrec, depth, 0, multiLine) + p.expr1(x.Fun, token.HighestPrec, depth, multiLine) p.print(x.Lparen, token.LPAREN) p.exprList(x.Lparen, x.Args, depth, commaSep|commaTerm, multiLine, x.Rparen) if x.Ellipsis.IsValid() { @@ -867,7 +850,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi case *ast.CompositeLit: // composite literal elements that are composite literals themselves may have the type omitted if x.Type != nil { - p.expr1(x.Type, token.HighestPrec, depth, compositeLit, multiLine) + p.expr1(x.Type, token.HighestPrec, depth, multiLine) } p.print(x.Lbrace, token.LBRACE) p.exprList(x.Lbrace, x.Elts, 1, 
commaSep|commaTerm, multiLine, x.Rbrace) @@ -892,7 +875,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi case *ast.StructType: p.print(token.STRUCT) - p.fieldList(x.Fields, x.Incomplete, ctxt|structType) + p.fieldList(x.Fields, true, x.Incomplete) case *ast.FuncType: p.print(token.FUNC) @@ -900,7 +883,7 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi case *ast.InterfaceType: p.print(token.INTERFACE) - p.fieldList(x.Methods, x.Incomplete, ctxt) + p.fieldList(x.Methods, false, x.Incomplete) case *ast.MapType: p.print(token.MAP, token.LBRACK) @@ -929,14 +912,14 @@ func (p *printer) expr1(expr ast.Expr, prec1, depth int, ctxt exprContext, multi func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) { - p.expr1(x, token.LowestPrec, depth, 0, multiLine) + p.expr1(x, token.LowestPrec, depth, multiLine) } // Sets multiLine to true if the expression spans multiple lines. func (p *printer) expr(x ast.Expr, multiLine *bool) { const depth = 1 - p.expr1(x, token.LowestPrec, depth, 0, multiLine) + p.expr1(x, token.LowestPrec, depth, multiLine) } @@ -1145,9 +1128,9 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { } case *ast.CaseClause: - if s.Values != nil { + if s.List != nil { p.print(token.CASE) - p.exprList(s.Pos(), s.Values, 1, blankStart|commaSep, multiLine, s.Colon) + p.exprList(s.Pos(), s.List, 1, blankStart|commaSep, multiLine, s.Colon) } else { p.print(token.DEFAULT) } @@ -1160,16 +1143,6 @@ func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) { p.block(s.Body, 0) *multiLine = true - case *ast.TypeCaseClause: - if s.Types != nil { - p.print(token.CASE) - p.exprList(s.Pos(), s.Types, 1, blankStart|commaSep, multiLine, s.Colon) - } else { - p.print(token.DEFAULT) - } - p.print(s.Colon, token.COLON) - p.stmtList(s.Body, 1, nextIsRBrace) - case *ast.TypeSwitchStmt: p.print(token.SWITCH) if s.Init != nil { @@ -1331,13 +1304,23 @@ func (p 
*printer) genDecl(d *ast.GenDecl, multiLine *bool) { // any control chars. Otherwise, the result is > maxSize. // func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { + // nodeSize invokes the printer, which may invoke nodeSize + // recursively. For deep composite literal nests, this can + // lead to an exponential algorithm. Remember previous + // results to prune the recursion (was issue 1628). + if size, found := p.nodeSizes[n]; found { + return size + } + size = maxSize + 1 // assume n doesn't fit + p.nodeSizes[n] = size + // nodeSize computation must be indendent of particular // style so that we always get the same decision; print // in RawFormat cfg := Config{Mode: RawFormat} var buf bytes.Buffer - if _, err := cfg.Fprint(&buf, p.fset, n); err != nil { + if _, err := cfg.fprint(&buf, p.fset, n, p.nodeSizes); err != nil { return } if buf.Len() <= maxSize { @@ -1347,6 +1330,7 @@ func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { } } size = buf.Len() // n fits + p.nodeSizes[n] = size } return } diff --git a/libgo/go/go/printer/printer.go b/libgo/go/go/printer/printer.go index 48e2af1b736..a43e4a12c77 100644 --- a/libgo/go/go/printer/printer.go +++ b/libgo/go/go/printer/printer.go @@ -12,7 +12,7 @@ import ( "go/token" "io" "os" - "path" + "path/filepath" "runtime" "tabwriter" ) @@ -94,22 +94,23 @@ type printer struct { // written using writeItem. last token.Position - // HTML support - lastTaggedLine int // last line for which a line tag was written - // The list of all source comments, in order of appearance. comments []*ast.CommentGroup // may be nil cindex int // current comment index useNodeComments bool // if not set, ignore lead and line comments of nodes + + // Cache of already computed node sizes. 
+ nodeSizes map[ast.Node]int } -func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet) { +func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) { p.output = output p.Config = *cfg p.fset = fset p.errors = make(chan os.Error) p.buffer = make([]whiteSpace, 0, 16) // whitespace sequences are short + p.nodeSizes = nodeSizes } @@ -244,7 +245,7 @@ func (p *printer) writeItem(pos token.Position, data []byte) { } if debug { // do not update p.pos - use write0 - _, filename := path.Split(pos.Filename) + _, filename := filepath.Split(pos.Filename) p.write0([]byte(fmt.Sprintf("[%s:%d:%d]", filename, pos.Line, pos.Column))) } p.write(data) @@ -994,13 +995,8 @@ type Config struct { } -// Fprint "pretty-prints" an AST node to output and returns the number -// of bytes written and an error (if any) for a given configuration cfg. -// Position information is interpreted relative to the file set fset. -// The node type must be *ast.File, or assignment-compatible to ast.Expr, -// ast.Decl, ast.Spec, or ast.Stmt. -// -func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) { +// fprint implements Fprint and takes a nodesSizes map for setting up the printer state. +func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) { // redirect output through a trimmer to eliminate trailing whitespace // (Input to a tabwriter must be untrimmed since trailing tabs provide // formatting information. 
The tabwriter could provide trimming @@ -1029,7 +1025,7 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{ // setup printer and print node var p printer - p.init(output, cfg, fset) + p.init(output, cfg, fset, nodeSizes) go func() { switch n := node.(type) { case ast.Expr: @@ -1076,6 +1072,17 @@ func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{ } +// Fprint "pretty-prints" an AST node to output and returns the number +// of bytes written and an error (if any) for a given configuration cfg. +// Position information is interpreted relative to the file set fset. +// The node type must be *ast.File, or assignment-compatible to ast.Expr, +// ast.Decl, ast.Spec, or ast.Stmt. +// +func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node interface{}) (int, os.Error) { + return cfg.fprint(output, fset, node, make(map[ast.Node]int)) +} + + // Fprint "pretty-prints" an AST node to output. // It calls Config.Fprint with default settings. 
// diff --git a/libgo/go/go/printer/printer_test.go b/libgo/go/go/printer/printer_test.go index 565075aa20c..3ff087e2993 100644 --- a/libgo/go/go/printer/printer_test.go +++ b/libgo/go/go/printer/printer_test.go @@ -11,8 +11,9 @@ import ( "go/ast" "go/parser" "go/token" - "path" + "path/filepath" "testing" + "time" ) @@ -45,7 +46,7 @@ const ( ) -func check(t *testing.T, source, golden string, mode checkMode) { +func runcheck(t *testing.T, source, golden string, mode checkMode) { // parse source prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments) if err != nil { @@ -109,6 +110,32 @@ func check(t *testing.T, source, golden string, mode checkMode) { } +func check(t *testing.T, source, golden string, mode checkMode) { + // start a timer to produce a time-out signal + tc := make(chan int) + go func() { + time.Sleep(20e9) // plenty of a safety margin, even for very slow machines + tc <- 0 + }() + + // run the test + cc := make(chan int) + go func() { + runcheck(t, source, golden, mode) + cc <- 0 + }() + + // wait for the first finisher + select { + case <-tc: + // test running past time out + t.Errorf("%s: running too slowly", source) + case <-cc: + // test finished within alloted time margin + } +} + + type entry struct { source, golden string mode checkMode @@ -124,13 +151,14 @@ var data = []entry{ {"expressions.input", "expressions.raw", rawFormat}, {"declarations.input", "declarations.golden", 0}, {"statements.input", "statements.golden", 0}, + {"slow.input", "slow.golden", 0}, } func TestFiles(t *testing.T) { for _, e := range data { - source := path.Join(dataDir, e.source) - golden := path.Join(dataDir, e.golden) + source := filepath.Join(dataDir, e.source) + golden := filepath.Join(dataDir, e.golden) check(t, source, golden, e.mode) // TODO(gri) check that golden is idempotent //check(t, golden, golden, e.mode); diff --git a/libgo/go/go/printer/testdata/expressions.golden b/libgo/go/go/printer/testdata/expressions.golden index 
7f18f338a63..314d3213c74 100644 --- a/libgo/go/go/printer/testdata/expressions.golden +++ b/libgo/go/go/printer/testdata/expressions.golden @@ -224,11 +224,7 @@ func _() { _ = struct{ x int }{0} _ = struct{ x, y, z int }{0, 1, 2} _ = struct{ int }{0} - _ = struct { - s struct { - int - } - }{struct{ int }{0}} // compositeLit context not propagated => multiLine result + _ = struct{ s struct{ int } }{struct{ int }{0}} } diff --git a/libgo/go/go/printer/testdata/expressions.input b/libgo/go/go/printer/testdata/expressions.input index 6bcd9b5f89e..cac22af4313 100644 --- a/libgo/go/go/printer/testdata/expressions.input +++ b/libgo/go/go/printer/testdata/expressions.input @@ -224,7 +224,7 @@ func _() { _ = struct{ x int }{0} _ = struct{ x, y, z int }{0, 1, 2} _ = struct{ int }{0} - _ = struct{ s struct { int } }{struct{ int}{0}} // compositeLit context not propagated => multiLine result + _ = struct{ s struct { int } }{struct{ int}{0} } } diff --git a/libgo/go/go/printer/testdata/expressions.raw b/libgo/go/go/printer/testdata/expressions.raw index f1944c94bb4..f22ceeb476f 100644 --- a/libgo/go/go/printer/testdata/expressions.raw +++ b/libgo/go/go/printer/testdata/expressions.raw @@ -224,11 +224,7 @@ func _() { _ = struct{ x int }{0} _ = struct{ x, y, z int }{0, 1, 2} _ = struct{ int }{0} - _ = struct { - s struct { - int - } - }{struct{ int }{0}} // compositeLit context not propagated => multiLine result + _ = struct{ s struct{ int } }{struct{ int }{0}} } diff --git a/libgo/go/go/printer/testdata/slow.golden b/libgo/go/go/printer/testdata/slow.golden new file mode 100644 index 00000000000..43a15cb1d08 --- /dev/null +++ b/libgo/go/go/printer/testdata/slow.golden @@ -0,0 +1,85 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package deepequal_test + +import ( + "testing" + "google3/spam/archer/frontend/deepequal" +) + +func TestTwoNilValues(t *testing.T) { + if err := deepequal.Check(nil, nil); err != nil { + t.Errorf("expected nil, saw %v", err) + } +} + +type Foo struct { + bar *Bar + bang *Bar +} + +type Bar struct { + baz *Baz + foo []*Foo +} + +type Baz struct { + entries map[int]interface{} + whatever string +} + +func newFoo() *Foo { + return &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + 42: &Foo{}, + 21: &Bar{}, + 11: &Baz{whatever: "it's just a test"}}}}, + bang: &Bar{foo: []*Foo{ + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + 43: &Foo{}, + 22: &Bar{}, + 13: &Baz{whatever: "this is nuts"}}}}, + bang: &Bar{foo: []*Foo{ + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + 61: &Foo{}, + 71: &Bar{}, + 11: &Baz{whatever: "no, it's Go"}}}}, + bang: &Bar{foo: []*Foo{ + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + 0: &Foo{}, + -2: &Bar{}, + -11: &Baz{whatever: "we need to go deeper"}}}}, + bang: &Bar{foo: []*Foo{ + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + -2: &Foo{}, + -5: &Bar{}, + -7: &Baz{whatever: "are you serious?"}}}}, + bang: &Bar{foo: []*Foo{}}}, + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + -100: &Foo{}, + 50: &Bar{}, + 20: &Baz{whatever: "na, not really ..."}}}}, + bang: &Bar{foo: []*Foo{}}}}}}}}}, + &Foo{bar: &Bar{baz: &Baz{ + entries: map[int]interface{}{ + 2: &Foo{}, + 1: &Bar{}, + -1: &Baz{whatever: "... 
it's just a test."}}}}, + bang: &Bar{foo: []*Foo{}}}}}}}}} +} + +func TestElaborate(t *testing.T) { + a := newFoo() + b := newFoo() + + if err := deepequal.Check(a, b); err != nil { + t.Errorf("expected nil, saw %v", err) + } +} diff --git a/libgo/go/go/printer/testdata/slow.input b/libgo/go/go/printer/testdata/slow.input new file mode 100644 index 00000000000..0e5a23d8860 --- /dev/null +++ b/libgo/go/go/printer/testdata/slow.input @@ -0,0 +1,85 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package deepequal_test + +import ( + "testing" + "google3/spam/archer/frontend/deepequal" +) + +func TestTwoNilValues(t *testing.T) { + if err := deepequal.Check(nil, nil); err != nil { + t.Errorf("expected nil, saw %v", err) + } +} + +type Foo struct { + bar *Bar + bang *Bar +} + +type Bar struct { + baz *Baz + foo []*Foo +} + +type Baz struct { + entries map[int]interface{} + whatever string +} + +func newFoo() (*Foo) { +return &Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +42: &Foo{}, +21: &Bar{}, +11: &Baz{ whatever: "it's just a test" }}}}, + bang: &Bar{foo: []*Foo{ +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +43: &Foo{}, +22: &Bar{}, +13: &Baz{ whatever: "this is nuts" }}}}, + bang: &Bar{foo: []*Foo{ +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +61: &Foo{}, +71: &Bar{}, +11: &Baz{ whatever: "no, it's Go" }}}}, + bang: &Bar{foo: []*Foo{ +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +0: &Foo{}, +-2: &Bar{}, +-11: &Baz{ whatever: "we need to go deeper" }}}}, + bang: &Bar{foo: []*Foo{ +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +-2: &Foo{}, +-5: &Bar{}, +-7: &Baz{ whatever: "are you serious?" }}}}, + bang: &Bar{foo: []*Foo{}}}, +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +-100: &Foo{}, +50: &Bar{}, +20: &Baz{ whatever: "na, not really ..." 
}}}}, + bang: &Bar{foo: []*Foo{}}}}}}}}}, +&Foo{bar: &Bar{ baz: &Baz{ +entries: map[int]interface{}{ +2: &Foo{}, +1: &Bar{}, +-1: &Baz{ whatever: "... it's just a test." }}}}, + bang: &Bar{foo: []*Foo{}}}}}}}}} +} + +func TestElaborate(t *testing.T) { + a := newFoo() + b := newFoo() + + if err := deepequal.Check(a, b); err != nil { + t.Errorf("expected nil, saw %v", err) + } +} diff --git a/libgo/go/go/scanner/scanner.go b/libgo/go/go/scanner/scanner.go index 2ae296b3f15..59fed9dffc6 100644 --- a/libgo/go/go/scanner/scanner.go +++ b/libgo/go/go/scanner/scanner.go @@ -23,7 +23,7 @@ package scanner import ( "bytes" "go/token" - "path" + "path/filepath" "strconv" "unicode" "utf8" @@ -118,7 +118,7 @@ func (S *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode uint panic("file size does not match src len") } S.file = file - S.dir, _ = path.Split(file.Name()) + S.dir, _ = filepath.Split(file.Name()) S.src = src S.err = err S.mode = mode @@ -177,13 +177,13 @@ var prefix = []byte("//line ") func (S *Scanner) interpretLineComment(text []byte) { if bytes.HasPrefix(text, prefix) { // get filename and line number, if any - if i := bytes.Index(text, []byte{':'}); i > 0 { + if i := bytes.LastIndex(text, []byte{':'}); i > 0 { if line, err := strconv.Atoi(string(text[i+1:])); err == nil && line > 0 { // valid //line filename:line comment; - filename := path.Clean(string(text[len(prefix):i])) - if filename[0] != '/' { + filename := filepath.Clean(string(text[len(prefix):i])) + if !filepath.IsAbs(filename) { // make filename relative to current directory - filename = path.Join(S.dir, filename) + filename = filepath.Join(S.dir, filename) } // update scanner position S.file.AddLineInfo(S.lineOffset, filename, line-1) // -1 since comment applies to next line diff --git a/libgo/go/go/scanner/scanner_test.go b/libgo/go/go/scanner/scanner_test.go index c622ff482f3..93f34581b7f 100644 --- a/libgo/go/go/scanner/scanner_test.go +++ b/libgo/go/go/scanner/scanner_test.go @@ 
-7,6 +7,8 @@ package scanner import ( "go/token" "os" + "path/filepath" + "runtime" "testing" ) @@ -443,32 +445,41 @@ func TestSemis(t *testing.T) { } } - -var segments = []struct { +type segment struct { srcline string // a line of source text filename string // filename for current token line int // line number for current token -}{ +} + +var segments = []segment{ // exactly one token per line since the test consumes one token per segment - {" line1", "dir/TestLineComments", 1}, - {"\nline2", "dir/TestLineComments", 2}, - {"\nline3 //line File1.go:100", "dir/TestLineComments", 3}, // bad line comment, ignored - {"\nline4", "dir/TestLineComments", 4}, - {"\n//line File1.go:100\n line100", "dir/File1.go", 100}, - {"\n//line File2.go:200\n line200", "dir/File2.go", 200}, + {" line1", filepath.Join("dir", "TestLineComments"), 1}, + {"\nline2", filepath.Join("dir", "TestLineComments"), 2}, + {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments"), 3}, // bad line comment, ignored + {"\nline4", filepath.Join("dir", "TestLineComments"), 4}, + {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 100}, + {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 200}, {"\n//line :1\n line1", "dir", 1}, - {"\n//line foo:42\n line42", "dir/foo", 42}, - {"\n //line foo:42\n line44", "dir/foo", 44}, // bad line comment, ignored - {"\n//line foo 42\n line46", "dir/foo", 46}, // bad line comment, ignored - {"\n//line foo:42 extra text\n line48", "dir/foo", 48}, // bad line comment, ignored - {"\n//line /bar:42\n line42", "/bar", 42}, - {"\n//line ./foo:42\n line42", "dir/foo", 42}, - {"\n//line a/b/c/File1.go:100\n line100", "dir/a/b/c/File1.go", 100}, + {"\n//line foo:42\n line42", filepath.Join("dir", "foo"), 42}, + {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored + {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored + {"\n//line foo:42 extra text\n 
line48", filepath.Join("dir", "foo"), 48}, // bad line comment, ignored + {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42}, + {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42}, + {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b", "c", "File1.go"), 100}, +} + +var winsegments = []segment{ + {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100}, } // Verify that comments of the form "//line filename:line" are interpreted correctly. func TestLineComments(t *testing.T) { + if runtime.GOOS == "windows" { + segments = append(segments, winsegments...) + } + // make source var src string for _, e := range segments { @@ -477,7 +488,7 @@ func TestLineComments(t *testing.T) { // verify scan var S Scanner - file := fset.AddFile("dir/TestLineComments", fset.Base(), len(src)) + file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src)) S.Init(file, []byte(src), nil, 0) for _, s := range segments { p, _, lit := S.Scan() diff --git a/libgo/go/go/typechecker/scope.go b/libgo/go/go/typechecker/scope.go index 114c93ea86e..bd24f4ca42d 100644 --- a/libgo/go/go/typechecker/scope.go +++ b/libgo/go/go/typechecker/scope.go @@ -2,15 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file implements scope support functions. +// DEPRECATED FILE - WILL GO AWAY EVENTUALLY. +// +// Scope handling is now done in go/parser. +// The functionality here is only present to +// keep the typechecker running for now. package typechecker -import ( - "fmt" - "go/ast" - "go/token" -) +import "go/ast" func (tc *typechecker) openScope() *ast.Scope { @@ -24,52 +24,25 @@ func (tc *typechecker) closeScope() { } -// objPos computes the source position of the declaration of an object name. -// Only required for error reporting, so doesn't have to be fast. 
-func objPos(obj *ast.Object) (pos token.Pos) { - switch d := obj.Decl.(type) { - case *ast.Field: - for _, n := range d.Names { - if n.Name == obj.Name { - return n.Pos() - } - } - case *ast.ValueSpec: - for _, n := range d.Names { - if n.Name == obj.Name { - return n.Pos() - } - } - case *ast.TypeSpec: - return d.Name.Pos() - case *ast.FuncDecl: - return d.Name.Pos() - } - if debug { - fmt.Printf("decl = %T\n", obj.Decl) - } - panic("unreachable") -} - - // declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields. // It returns the newly allocated object. If an object with the same name already exists in scope, an error // is reported and the object is not inserted. -// (Objects with _ name are always inserted into a scope without errors, but they cannot be found.) -func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object { +func (tc *typechecker) declInScope(scope *ast.Scope, kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object { obj := ast.NewObj(kind, name.Name) obj.Decl = decl - obj.N = n + //obj.N = n name.Obj = obj - if alt := scope.Insert(obj); alt != obj { - tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, objPos(alt)) + if name.Name != "_" { + if alt := scope.Insert(obj); alt != obj { + tc.Errorf(name.Pos(), "%s already declared at %s", name.Name, tc.fset.Position(alt.Pos()).String()) + } } return obj } // decl is the same as declInScope(tc.topScope, ...) -func (tc *typechecker) decl(kind ast.Kind, name *ast.Ident, decl interface{}, n int) *ast.Object { +func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object { return tc.declInScope(tc.topScope, kind, name, decl, n) } @@ -91,7 +64,7 @@ func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) { // findField returns the object with the given name if visible in the type's scope. 
// If no such object is found, an error is reported and a bad object is returned instead. -func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Object) { +func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) { // TODO(gri) This is simplistic at the moment and ignores anonymous fields. obj = typ.Scope.Lookup(name.Name) if obj == nil { @@ -100,20 +73,3 @@ func (tc *typechecker) findField(typ *ast.Type, name *ast.Ident) (obj *ast.Objec } return } - - -// printScope prints the objects in a scope. -func printScope(scope *ast.Scope) { - fmt.Printf("scope %p {", scope) - if scope != nil && len(scope.Objects) > 0 { - fmt.Println() - for _, obj := range scope.Objects { - form := "void" - if obj.Type != nil { - form = obj.Type.Form.String() - } - fmt.Printf("\t%s\t%s\n", obj.Name, form) - } - } - fmt.Printf("}\n") -} diff --git a/libgo/go/go/typechecker/testdata/test0.go b/libgo/go/go/typechecker/testdata/test0.src similarity index 100% rename from libgo/go/go/typechecker/testdata/test0.go rename to libgo/go/go/typechecker/testdata/test0.src diff --git a/libgo/go/go/typechecker/testdata/test1.go b/libgo/go/go/typechecker/testdata/test1.src similarity index 83% rename from libgo/go/go/typechecker/testdata/test1.go rename to libgo/go/go/typechecker/testdata/test1.src index b0808ee7acb..b5531fb9f5b 100644 --- a/libgo/go/go/typechecker/testdata/test1.go +++ b/libgo/go/go/typechecker/testdata/test1.src @@ -7,7 +7,7 @@ package P1 const ( - c1 /* ERROR "missing initializer" */ + c1 = 0 c2 int = 0 c3, c4 = 0 ) diff --git a/libgo/go/go/typechecker/testdata/test3.go b/libgo/go/go/typechecker/testdata/test3.src similarity index 80% rename from libgo/go/go/typechecker/testdata/test3.go rename to libgo/go/go/typechecker/testdata/test3.src index ea35808a09f..2e1a9fa8f5b 100644 --- a/libgo/go/go/typechecker/testdata/test3.go +++ b/libgo/go/go/typechecker/testdata/test3.src @@ -27,8 +27,11 @@ func (T) m1 /* ERROR "already declared" */ () {} 
func (x *T) m2(u, x /* ERROR "already declared" */ int) {} func (x *T) m3(a, b, c int) (u, x /* ERROR "already declared" */ int) {} -func (T) _(x, x /* ERROR "already declared" */ int) {} -func (T) _() (x, x /* ERROR "already declared" */ int) {} +// The following are disabled for now because the typechecker +// in in the process of being rewritten and cannot handle them +// at the moment +//func (T) _(x, x /* "already declared" */ int) {} +//func (T) _() (x, x /* "already declared" */ int) {} //func (PT) _() {} diff --git a/libgo/go/go/typechecker/testdata/test4.go b/libgo/go/go/typechecker/testdata/test4.src similarity index 84% rename from libgo/go/go/typechecker/testdata/test4.go rename to libgo/go/go/typechecker/testdata/test4.src index bb9aee3ad3a..94d3558f9cd 100644 --- a/libgo/go/go/typechecker/testdata/test4.go +++ b/libgo/go/go/typechecker/testdata/test4.src @@ -7,5 +7,5 @@ package P4 const ( - c0 /* ERROR "missing initializer" */ + c0 = 0 ) diff --git a/libgo/go/go/typechecker/type.go b/libgo/go/go/typechecker/type.go new file mode 100644 index 00000000000..62b4e9d3e4a --- /dev/null +++ b/libgo/go/go/typechecker/type.go @@ -0,0 +1,125 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typechecker + +import "go/ast" + + +// A Type represents a Go type. +type Type struct { + Form Form + Obj *ast.Object // corresponding type name, or nil + Scope *ast.Scope // fields and methods, always present + N uint // basic type id, array length, number of function results, or channel direction + Key, Elt *Type // map key and array, pointer, slice, map or channel element + Params *ast.Scope // function (receiver, input and result) parameters, tuple expressions (results of function calls), or nil + Expr ast.Expr // corresponding AST expression +} + + +// NewType creates a new type of a given form. 
+func NewType(form Form) *Type { + return &Type{Form: form, Scope: ast.NewScope(nil)} +} + + +// Form describes the form of a type. +type Form int + +// The list of possible type forms. +const ( + BadType Form = iota // for error handling + Unresolved // type not fully setup + Basic + Array + Struct + Pointer + Function + Method + Interface + Slice + Map + Channel + Tuple +) + + +var formStrings = [...]string{ + BadType: "badType", + Unresolved: "unresolved", + Basic: "basic", + Array: "array", + Struct: "struct", + Pointer: "pointer", + Function: "function", + Method: "method", + Interface: "interface", + Slice: "slice", + Map: "map", + Channel: "channel", + Tuple: "tuple", +} + + +func (form Form) String() string { return formStrings[form] } + + +// The list of basic type id's. +const ( + Bool = iota + Byte + Uint + Int + Float + Complex + Uintptr + String + + Uint8 + Uint16 + Uint32 + Uint64 + + Int8 + Int16 + Int32 + Int64 + + Float32 + Float64 + + Complex64 + Complex128 + + // TODO(gri) ideal types are missing +) + + +var BasicTypes = map[uint]string{ + Bool: "bool", + Byte: "byte", + Uint: "uint", + Int: "int", + Float: "float", + Complex: "complex", + Uintptr: "uintptr", + String: "string", + + Uint8: "uint8", + Uint16: "uint16", + Uint32: "uint32", + Uint64: "uint64", + + Int8: "int8", + Int16: "int16", + Int32: "int32", + Int64: "int64", + + Float32: "float32", + Float64: "float64", + + Complex64: "complex64", + Complex128: "complex128", +} diff --git a/libgo/go/go/typechecker/typechecker.go b/libgo/go/go/typechecker/typechecker.go index e9aefa2402b..4fc5647f0d5 100644 --- a/libgo/go/go/typechecker/typechecker.go +++ b/libgo/go/go/typechecker/typechecker.go @@ -65,6 +65,7 @@ type typechecker struct { fset *token.FileSet scanner.ErrorVector importer Importer + globals []*ast.Object // list of global objects topScope *ast.Scope // current top-most scope cyclemap map[*ast.Object]bool // for cycle detection iota int // current value of iota @@ -94,7 +95,7 @@ 
phase 1: declare all global objects; also collect all function and method declar - report global double declarations phase 2: bind methods to their receiver base types - - received base types must be declared in the package, thus for + - receiver base types must be declared in the package, thus for each method a corresponding (unresolved) type must exist - report method double declarations and errors with base types @@ -142,16 +143,16 @@ func (tc *typechecker) checkPackage(pkg *ast.Package) { } // phase 3: resolve all global objects - // (note that objects with _ name are also in the scope) tc.cyclemap = make(map[*ast.Object]bool) - for _, obj := range tc.topScope.Objects { + for _, obj := range tc.globals { tc.resolve(obj) } assert(len(tc.cyclemap) == 0) // 4: sequentially typecheck function and method bodies for _, f := range funcs { - tc.checkBlock(f.Body.List, f.Name.Obj.Type) + ftype, _ := f.Name.Obj.Type.(*Type) + tc.checkBlock(f.Body.List, ftype) } pkg.Scope = tc.topScope @@ -183,11 +184,11 @@ func (tc *typechecker) declGlobal(global ast.Decl) { } } for _, name := range s.Names { - tc.decl(ast.Con, name, s, iota) + tc.globals = append(tc.globals, tc.decl(ast.Con, name, s, iota)) } case token.VAR: for _, name := range s.Names { - tc.decl(ast.Var, name, s, 0) + tc.globals = append(tc.globals, tc.decl(ast.Var, name, s, 0)) } default: panic("unreachable") @@ -196,9 +197,10 @@ func (tc *typechecker) declGlobal(global ast.Decl) { iota++ case *ast.TypeSpec: obj := tc.decl(ast.Typ, s.Name, s, 0) + tc.globals = append(tc.globals, obj) // give all type objects an unresolved type so // that we can collect methods in the type scope - typ := ast.NewType(ast.Unresolved) + typ := NewType(Unresolved) obj.Type = typ typ.Obj = obj default: @@ -208,7 +210,7 @@ func (tc *typechecker) declGlobal(global ast.Decl) { case *ast.FuncDecl: if d.Recv == nil { - tc.decl(ast.Fun, d.Name, d, 0) + tc.globals = append(tc.globals, tc.decl(ast.Fun, d.Name, d, 0)) } default: @@ -239,8 +241,8 
@@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) { } else if obj.Kind != ast.Typ { tc.Errorf(name.Pos(), "invalid receiver: %s is not a type", name.Name) } else { - typ := obj.Type - assert(typ.Form == ast.Unresolved) + typ := obj.Type.(*Type) + assert(typ.Form == Unresolved) scope = typ.Scope } } @@ -261,7 +263,7 @@ func (tc *typechecker) bindMethod(method *ast.FuncDecl) { func (tc *typechecker) resolve(obj *ast.Object) { // check for declaration cycles if tc.cyclemap[obj] { - tc.Errorf(objPos(obj), "illegal cycle in declaration of %s", obj.Name) + tc.Errorf(obj.Pos(), "illegal cycle in declaration of %s", obj.Name) obj.Kind = ast.Bad return } @@ -271,7 +273,7 @@ func (tc *typechecker) resolve(obj *ast.Object) { }() // resolve non-type objects - typ := obj.Type + typ, _ := obj.Type.(*Type) if typ == nil { switch obj.Kind { case ast.Bad: @@ -282,12 +284,12 @@ func (tc *typechecker) resolve(obj *ast.Object) { case ast.Var: tc.declVar(obj) - //obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false) + obj.Type = tc.typeFor(nil, obj.Decl.(*ast.ValueSpec).Type, false) case ast.Fun: - obj.Type = ast.NewType(ast.Function) + obj.Type = NewType(Function) t := obj.Decl.(*ast.FuncDecl).Type - tc.declSignature(obj.Type, nil, t.Params, t.Results) + tc.declSignature(obj.Type.(*Type), nil, t.Params, t.Results) default: // type objects have non-nil types when resolve is called @@ -300,32 +302,34 @@ func (tc *typechecker) resolve(obj *ast.Object) { } // resolve type objects - if typ.Form == ast.Unresolved { + if typ.Form == Unresolved { tc.typeFor(typ, typ.Obj.Decl.(*ast.TypeSpec).Type, false) // provide types for all methods for _, obj := range typ.Scope.Objects { if obj.Kind == ast.Fun { assert(obj.Type == nil) - obj.Type = ast.NewType(ast.Method) + obj.Type = NewType(Method) f := obj.Decl.(*ast.FuncDecl) t := f.Type - tc.declSignature(obj.Type, f.Recv, t.Params, t.Results) + tc.declSignature(obj.Type.(*Type), f.Recv, t.Params, t.Results) } } } } -func (tc 
*typechecker) checkBlock(body []ast.Stmt, ftype *ast.Type) { +func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) { tc.openScope() defer tc.closeScope() // inject function/method parameters into block scope, if any if ftype != nil { for _, par := range ftype.Params.Objects { - obj := tc.topScope.Insert(par) - assert(obj == par) // ftype has no double declarations + if par.Name != "_" { + obj := tc.topScope.Insert(par) + assert(obj == par) // ftype has no double declarations + } } } @@ -362,8 +366,8 @@ func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref b } -func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.FieldList) { - assert((typ.Form == ast.Method) == (recv != nil)) +func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) { + assert((typ.Form == Method) == (recv != nil)) typ.Params = ast.NewScope(nil) tc.declFields(typ.Params, recv, true) tc.declFields(typ.Params, params, true) @@ -371,7 +375,7 @@ func (tc *typechecker) declSignature(typ *ast.Type, recv, params, results *ast.F } -func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Type) { +func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) { x = unparen(x) // type name @@ -381,10 +385,10 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty if obj.Kind != ast.Typ { tc.Errorf(t.Pos(), "%s is not a type", t.Name) if def == nil { - typ = ast.NewType(ast.BadType) + typ = NewType(BadType) } else { typ = def - typ.Form = ast.BadType + typ.Form = BadType } typ.Expr = x return @@ -393,7 +397,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty if !ref { tc.resolve(obj) // check for cycles even if type resolved } - typ = obj.Type + typ = obj.Type.(*Type) if def != nil { // new type declaration: copy type structure @@ -410,7 +414,7 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ 
*ast.Ty // type literal typ = def if typ == nil { - typ = ast.NewType(ast.BadType) + typ = NewType(BadType) } typ.Expr = x @@ -419,42 +423,42 @@ func (tc *typechecker) typeFor(def *ast.Type, x ast.Expr, ref bool) (typ *ast.Ty if debug { fmt.Println("qualified identifier unimplemented") } - typ.Form = ast.BadType + typ.Form = BadType case *ast.StarExpr: - typ.Form = ast.Pointer + typ.Form = Pointer typ.Elt = tc.typeFor(nil, t.X, true) case *ast.ArrayType: if t.Len != nil { - typ.Form = ast.Array + typ.Form = Array // TODO(gri) compute the real length // (this may call resolve recursively) (*typ).N = 42 } else { - typ.Form = ast.Slice + typ.Form = Slice } typ.Elt = tc.typeFor(nil, t.Elt, t.Len == nil) case *ast.StructType: - typ.Form = ast.Struct + typ.Form = Struct tc.declFields(typ.Scope, t.Fields, false) case *ast.FuncType: - typ.Form = ast.Function + typ.Form = Function tc.declSignature(typ, nil, t.Params, t.Results) case *ast.InterfaceType: - typ.Form = ast.Interface + typ.Form = Interface tc.declFields(typ.Scope, t.Methods, true) case *ast.MapType: - typ.Form = ast.Map + typ.Form = Map typ.Key = tc.typeFor(nil, t.Key, true) typ.Elt = tc.typeFor(nil, t.Value, true) case *ast.ChanType: - typ.Form = ast.Channel + typ.Form = Channel typ.N = uint(t.Dir) typ.Elt = tc.typeFor(nil, t.Value, true) diff --git a/libgo/go/go/typechecker/typechecker_test.go b/libgo/go/go/typechecker/typechecker_test.go index 33f4a6223ff..3988ff1680b 100644 --- a/libgo/go/go/typechecker/typechecker_test.go +++ b/libgo/go/go/typechecker/typechecker_test.go @@ -93,7 +93,7 @@ func expectedErrors(t *testing.T, pkg *ast.Package) (list scanner.ErrorList) { func testFilter(f *os.FileInfo) bool { - return strings.HasSuffix(f.Name, ".go") && f.Name[0] != '.' + return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.' 
} diff --git a/libgo/go/go/typechecker/universe.go b/libgo/go/go/typechecker/universe.go index db950737f39..cf4434993e1 100644 --- a/libgo/go/go/typechecker/universe.go +++ b/libgo/go/go/typechecker/universe.go @@ -24,8 +24,8 @@ func init() { Universe = ast.NewScope(nil) // basic types - for n, name := range ast.BasicTypes { - typ := ast.NewType(ast.Basic) + for n, name := range BasicTypes { + typ := NewType(Basic) typ.N = n obj := ast.NewObj(ast.Typ, name) obj.Type = typ diff --git a/libgo/go/gob/codec_test.go b/libgo/go/gob/codec_test.go index c822d6863ac..28042ccaa3a 100644 --- a/libgo/go/gob/codec_test.go +++ b/libgo/go/gob/codec_test.go @@ -50,7 +50,7 @@ func testError(t *testing.T) { func TestUintCodec(t *testing.T) { defer testError(t) b := new(bytes.Buffer) - encState := newEncoderState(nil, b) + encState := newEncoderState(b) for _, tt := range encodeT { b.Reset() encState.encodeUint(tt.x) @@ -58,7 +58,7 @@ func TestUintCodec(t *testing.T) { t.Errorf("encodeUint: %#x encode: expected % x got % x", tt.x, tt.b, b.Bytes()) } } - decState := newDecodeState(nil, b) + decState := newDecodeState(b) for u := uint64(0); ; u = (u + 1) * 7 { b.Reset() encState.encodeUint(u) @@ -75,9 +75,9 @@ func TestUintCodec(t *testing.T) { func verifyInt(i int64, t *testing.T) { defer testError(t) var b = new(bytes.Buffer) - encState := newEncoderState(nil, b) + encState := newEncoderState(b) encState.encodeInt(i) - decState := newDecodeState(nil, b) + decState := newDecodeState(b) decState.buf = make([]byte, 8) j := decState.decodeInt() if i != j { @@ -111,9 +111,16 @@ var complexResult = []byte{0x07, 0xFE, 0x31, 0x40, 0xFE, 0x33, 0x40} // The result of encoding "hello" with field number 7 var bytesResult = []byte{0x07, 0x05, 'h', 'e', 'l', 'l', 'o'} -func newencoderState(b *bytes.Buffer) *encoderState { +func newDecodeState(buf *bytes.Buffer) *decoderState { + d := new(decoderState) + d.b = buf + d.buf = make([]byte, uint64Size) + return d +} + +func newEncoderState(b 
*bytes.Buffer) *encoderState { b.Reset() - state := newEncoderState(nil, b) + state := &encoderState{enc: nil, b: b} state.fieldnum = -1 return state } @@ -127,7 +134,7 @@ func TestScalarEncInstructions(t *testing.T) { { data := struct{ a bool }{true} instr := &encInstr{encBool, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(boolResult, b.Bytes()) { t.Errorf("bool enc instructions: expected % x got % x", boolResult, b.Bytes()) @@ -139,7 +146,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a int }{17} instr := &encInstr{encInt, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int enc instructions: expected % x got % x", signedResult, b.Bytes()) @@ -151,7 +158,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a uint }{17} instr := &encInstr{encUint, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint enc instructions: expected % x got % x", unsignedResult, b.Bytes()) @@ -163,7 +170,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a int8 }{17} instr := &encInstr{encInt8, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int8 enc instructions: expected % x got % x", signedResult, b.Bytes()) @@ -175,7 +182,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a uint8 }{17} instr := &encInstr{encUint8, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint8 enc instructions: expected % x got % x", 
unsignedResult, b.Bytes()) @@ -187,7 +194,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a int16 }{17} instr := &encInstr{encInt16, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int16 enc instructions: expected % x got % x", signedResult, b.Bytes()) @@ -199,7 +206,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a uint16 }{17} instr := &encInstr{encUint16, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint16 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) @@ -211,7 +218,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a int32 }{17} instr := &encInstr{encInt32, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int32 enc instructions: expected % x got % x", signedResult, b.Bytes()) @@ -223,7 +230,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a uint32 }{17} instr := &encInstr{encUint32, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint32 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) @@ -235,7 +242,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a int64 }{17} instr := &encInstr{encInt64, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(signedResult, b.Bytes()) { t.Errorf("int64 enc instructions: expected % x got % x", signedResult, b.Bytes()) @@ -247,7 +254,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() 
data := struct{ a uint64 }{17} instr := &encInstr{encUint64, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(unsignedResult, b.Bytes()) { t.Errorf("uint64 enc instructions: expected % x got % x", unsignedResult, b.Bytes()) @@ -259,7 +266,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a float32 }{17} instr := &encInstr{encFloat32, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(floatResult, b.Bytes()) { t.Errorf("float32 enc instructions: expected % x got % x", floatResult, b.Bytes()) @@ -271,7 +278,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a float64 }{17} instr := &encInstr{encFloat64, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(floatResult, b.Bytes()) { t.Errorf("float64 enc instructions: expected % x got % x", floatResult, b.Bytes()) @@ -283,7 +290,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a []byte }{[]byte("hello")} instr := &encInstr{encUint8Array, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(bytesResult, b.Bytes()) { t.Errorf("bytes enc instructions: expected % x got % x", bytesResult, b.Bytes()) @@ -295,7 +302,7 @@ func TestScalarEncInstructions(t *testing.T) { b.Reset() data := struct{ a string }{"hello"} instr := &encInstr{encString, 6, 0, 0} - state := newencoderState(b) + state := newEncoderState(b) instr.op(instr, state, unsafe.Pointer(&data)) if !bytes.Equal(bytesResult, b.Bytes()) { t.Errorf("string enc instructions: expected % x got % x", bytesResult, b.Bytes()) @@ -303,7 +310,7 @@ func TestScalarEncInstructions(t *testing.T) { } } -func execDec(typ string, instr *decInstr, state *decodeState, t *testing.T, p 
unsafe.Pointer) { +func execDec(typ string, instr *decInstr, state *decoderState, t *testing.T, p unsafe.Pointer) { defer testError(t) v := int(state.decodeUint()) if v+state.fieldnum != 6 { @@ -313,9 +320,9 @@ func execDec(typ string, instr *decInstr, state *decodeState, t *testing.T, p un state.fieldnum = 6 } -func newDecodeStateFromData(data []byte) *decodeState { +func newDecodeStateFromData(data []byte) *decoderState { b := bytes.NewBuffer(data) - state := newDecodeState(nil, b) + state := newDecodeState(b) state.fieldnum = -1 return state } @@ -997,9 +1004,9 @@ func TestInvalidField(t *testing.T) { var bad0 Bad0 bad0.CH = make(chan int) b := new(bytes.Buffer) - var nilEncoder *Encoder - err := nilEncoder.encode(b, reflect.NewValue(&bad0), userType(reflect.Typeof(&bad0))) - if err == nil { + dummyEncoder := new(Encoder) // sufficient for this purpose. + dummyEncoder.encode(b, reflect.NewValue(&bad0), userType(reflect.Typeof(&bad0))) + if err := dummyEncoder.err; err == nil { t.Error("expected error; got none") } else if strings.Index(err.String(), "type") < 0 { t.Error("expected type error; got", err) diff --git a/libgo/go/gob/decode.go b/libgo/go/gob/decode.go index 8f599e10041..f8159d4ea32 100644 --- a/libgo/go/gob/decode.go +++ b/libgo/go/gob/decode.go @@ -13,38 +13,47 @@ import ( "math" "os" "reflect" - "unicode" "unsafe" - "utf8" ) var ( errBadUint = os.ErrorString("gob: encoded unsigned integer out of range") errBadType = os.ErrorString("gob: unknown type id or corrupted data") - errRange = os.ErrorString("gob: internal error: field numbers out of bounds") + errRange = os.ErrorString("gob: bad data: field numbers out of bounds") ) -// The execution state of an instance of the decoder. A new state +// decoderState is the execution state of an instance of the decoder. A new state // is created for nested objects. 
-type decodeState struct { +type decoderState struct { dec *Decoder // The buffer is stored with an extra indirection because it may be replaced // if we load a type during decode (when reading an interface value). b *bytes.Buffer fieldnum int // the last field number read. buf []byte + next *decoderState // for free list } // We pass the bytes.Buffer separately for easier testing of the infrastructure // without requiring a full Decoder. -func newDecodeState(dec *Decoder, buf *bytes.Buffer) *decodeState { - d := new(decodeState) - d.dec = dec +func (dec *Decoder) newDecoderState(buf *bytes.Buffer) *decoderState { + d := dec.freeList + if d == nil { + d = new(decoderState) + d.dec = dec + d.buf = make([]byte, uint64Size) + } else { + dec.freeList = d.next + } d.b = buf - d.buf = make([]byte, uint64Size) return d } +func (dec *Decoder) freeDecoderState(d *decoderState) { + d.next = dec.freeList + dec.freeList = d +} + func overflow(name string) os.ErrorString { return os.ErrorString(`value for "` + name + `" out of range`) } @@ -85,7 +94,7 @@ func decodeUintReader(r io.Reader, buf []byte) (x uint64, width int, err os.Erro // decodeUint reads an encoded unsigned integer from state.r. // Does not check for overflow. -func (state *decodeState) decodeUint() (x uint64) { +func (state *decoderState) decodeUint() (x uint64) { b, err := state.b.ReadByte() if err != nil { error(err) @@ -112,7 +121,7 @@ func (state *decodeState) decodeUint() (x uint64) { // decodeInt reads an encoded signed integer from state.r. // Does not check for overflow. -func (state *decodeState) decodeInt() int64 { +func (state *decoderState) decodeInt() int64 { x := state.decodeUint() if x&1 != 0 { return ^int64(x >> 1) @@ -120,7 +129,8 @@ func (state *decodeState) decodeInt() int64 { return int64(x >> 1) } -type decOp func(i *decInstr, state *decodeState, p unsafe.Pointer) +// decOp is the signature of a decoding operator for a given type. 
+type decOp func(i *decInstr, state *decoderState, p unsafe.Pointer) // The 'instructions' of the decoding machine type decInstr struct { @@ -150,26 +160,31 @@ func decIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { return p } -func ignoreUint(i *decInstr, state *decodeState, p unsafe.Pointer) { +// ignoreUint discards a uint value with no destination. +func ignoreUint(i *decInstr, state *decoderState, p unsafe.Pointer) { state.decodeUint() } -func ignoreTwoUints(i *decInstr, state *decodeState, p unsafe.Pointer) { +// ignoreTwoUints discards a uint value with no destination. It's used to skip +// complex values. +func ignoreTwoUints(i *decInstr, state *decoderState, p unsafe.Pointer) { state.decodeUint() state.decodeUint() } -func decBool(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decBool decodes a uiint and stores it as a boolean through p. +func decBool(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(bool)) } p = *(*unsafe.Pointer)(p) } - *(*bool)(p) = state.decodeInt() != 0 + *(*bool)(p) = state.decodeUint() != 0 } -func decInt8(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decInt8 decodes an integer and stores it as an int8 through p. +func decInt8(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int8)) @@ -184,7 +199,8 @@ func decInt8(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decUint8(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decUint8 decodes an unsigned integer and stores it as a uint8 through p. 
+func decUint8(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint8)) @@ -199,7 +215,8 @@ func decUint8(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decInt16(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decInt16 decodes an integer and stores it as an int16 through p. +func decInt16(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int16)) @@ -214,7 +231,8 @@ func decInt16(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decUint16(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decUint16 decodes an unsigned integer and stores it as a uint16 through p. +func decUint16(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint16)) @@ -229,7 +247,8 @@ func decUint16(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decInt32(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decInt32 decodes an integer and stores it as an int32 through p. +func decInt32(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int32)) @@ -244,7 +263,8 @@ func decInt32(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decUint32(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decUint32 decodes an unsigned integer and stores it as a uint32 through p. 
+func decUint32(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint32)) @@ -259,7 +279,8 @@ func decUint32(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decInt64(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decInt64 decodes an integer and stores it as an int64 through p. +func decInt64(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(int64)) @@ -269,7 +290,8 @@ func decInt64(i *decInstr, state *decodeState, p unsafe.Pointer) { *(*int64)(p) = int64(state.decodeInt()) } -func decUint64(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decUint64 decodes an unsigned integer and stores it as a uint64 through p. +func decUint64(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(uint64)) @@ -294,7 +316,9 @@ func floatFromBits(u uint64) float64 { return math.Float64frombits(v) } -func storeFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { +// storeFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point +// number, and stores it through p. It's a helper function for float32 and complex64. +func storeFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) { v := floatFromBits(state.decodeUint()) av := v if av < 0 { @@ -308,7 +332,9 @@ func storeFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { } } -func decFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decFloat32 decodes an unsigned integer, treats it as a 32-bit floating-point +// number, and stores it through p. 
+func decFloat32(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float32)) @@ -318,7 +344,9 @@ func decFloat32(i *decInstr, state *decodeState, p unsafe.Pointer) { storeFloat32(i, state, p) } -func decFloat64(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decFloat64 decodes an unsigned integer, treats it as a 64-bit floating-point +// number, and stores it through p. +func decFloat64(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(float64)) @@ -328,8 +356,10 @@ func decFloat64(i *decInstr, state *decodeState, p unsafe.Pointer) { *(*float64)(p) = floatFromBits(uint64(state.decodeUint())) } -// Complex numbers are just a pair of floating-point numbers, real part first. -func decComplex64(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decComplex64 decodes a pair of unsigned integers, treats them as a +// pair of floating point numbers, and stores them as a complex64 through p. +// The real part comes first. +func decComplex64(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex64)) @@ -340,7 +370,10 @@ func decComplex64(i *decInstr, state *decodeState, p unsafe.Pointer) { storeFloat32(i, state, unsafe.Pointer(uintptr(p)+uintptr(unsafe.Sizeof(float32(0))))) } -func decComplex128(i *decInstr, state *decodeState, p unsafe.Pointer) { +// decComplex128 decodes a pair of unsigned integers, treats them as a +// pair of floating point numbers, and stores them as a complex128 through p. +// The real part comes first. 
+func decComplex128(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new(complex128)) @@ -352,8 +385,10 @@ func decComplex128(i *decInstr, state *decodeState, p unsafe.Pointer) { *(*complex128)(p) = complex(real, imag) } +// decUint8Array decodes byte array and stores through p a slice header +// describing the data. // uint8 arrays are encoded as an unsigned count followed by the raw bytes. -func decUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) { +func decUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]uint8)) @@ -365,8 +400,10 @@ func decUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) { *(*[]uint8)(p) = b } +// decString decodes byte array and stores through p a string header +// describing the data. // Strings are encoded as an unsigned count followed by the raw bytes. -func decString(i *decInstr, state *decodeState, p unsafe.Pointer) { +func decString(i *decInstr, state *decoderState, p unsafe.Pointer) { if i.indir > 0 { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe.Pointer(new([]byte)) @@ -375,10 +412,18 @@ func decString(i *decInstr, state *decodeState, p unsafe.Pointer) { } b := make([]byte, state.decodeUint()) state.b.Read(b) - *(*string)(p) = string(b) + // It would be a shame to do the obvious thing here, + // *(*string)(p) = string(b) + // because we've already allocated the storage and this would + // allocate again and copy. So we do this ugly hack, which is + // even more unsafe than it looks as it depends on the memory + // representation of a string matching the beginning of the memory + // representation of a byte slice (a byte slice is longer). 
+ *(*string)(p) = *(*string)(unsafe.Pointer(&b)) } -func ignoreUint8Array(i *decInstr, state *decodeState, p unsafe.Pointer) { +// ignoreUint8Array skips over the data for a byte slice value with no destination. +func ignoreUint8Array(i *decInstr, state *decoderState, p unsafe.Pointer) { b := make([]byte, state.decodeUint()) state.b.Read(b) } @@ -409,9 +454,16 @@ func allocate(rtyp reflect.Type, p uintptr, indir int) uintptr { return *(*uintptr)(up) } +// decodeSingle decodes a top-level value that is not a struct and stores it through p. +// Such values are preceded by a zero, making them have the memory layout of a +// struct field (although with an illegal field number). func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr) (err os.Error) { - p = allocate(ut.base, p, ut.indir) - state := newDecodeState(dec, &dec.buf) + indir := ut.indir + if ut.isGobDecoder { + indir = int(ut.decIndir) + } + p = allocate(ut.base, p, indir) + state := dec.newDecoderState(&dec.buf) state.fieldnum = singletonField basep := p delta := int(state.decodeUint()) @@ -424,16 +476,18 @@ func (dec *Decoder) decodeSingle(engine *decEngine, ut *userTypeInfo, p uintptr) ptr = decIndirect(ptr, instr.indir) } instr.op(instr, state, ptr) + dec.freeDecoderState(state) return nil } +// decodeStruct decodes a top-level struct and stores it through p. // Indir is for the value, not the type. At the time of the call it may // differ from ut.indir, which was computed when the engine was built. // This state cannot arise for decodeSingle, which is called directly // from the user's value, not from the innards of an engine. 
-func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) (err os.Error) { +func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, indir int) { p = allocate(ut.base.(*reflect.StructType), p, indir) - state := newDecodeState(dec, &dec.buf) + state := dec.newDecoderState(&dec.buf) state.fieldnum = -1 basep := p for state.b.Len() > 0 { @@ -457,11 +511,12 @@ func (dec *Decoder) decodeStruct(engine *decEngine, ut *userTypeInfo, p uintptr, instr.op(instr, state, p) state.fieldnum = fieldnum } - return nil + dec.freeDecoderState(state) } -func (dec *Decoder) ignoreStruct(engine *decEngine) (err os.Error) { - state := newDecodeState(dec, &dec.buf) +// ignoreStruct discards the data for a struct with no destination. +func (dec *Decoder) ignoreStruct(engine *decEngine) { + state := dec.newDecoderState(&dec.buf) state.fieldnum = -1 for state.b.Len() > 0 { delta := int(state.decodeUint()) @@ -479,11 +534,13 @@ func (dec *Decoder) ignoreStruct(engine *decEngine) (err os.Error) { instr.op(instr, state, unsafe.Pointer(nil)) state.fieldnum = fieldnum } - return nil + dec.freeDecoderState(state) } -func (dec *Decoder) ignoreSingle(engine *decEngine) (err os.Error) { - state := newDecodeState(dec, &dec.buf) +// ignoreSingle discards the data for a top-level non-struct value with no +// destination. It's used when calling Decode with a nil value. 
+func (dec *Decoder) ignoreSingle(engine *decEngine) { + state := dec.newDecoderState(&dec.buf) state.fieldnum = singletonField delta := int(state.decodeUint()) if delta != 0 { @@ -491,10 +548,11 @@ func (dec *Decoder) ignoreSingle(engine *decEngine) (err os.Error) { } instr := &engine.instr[singletonField] instr.op(instr, state, unsafe.Pointer(nil)) - return nil + dec.freeDecoderState(state) } -func (dec *Decoder) decodeArrayHelper(state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.ErrorString) { +// decodeArrayHelper does the work for decoding arrays and slices. +func (dec *Decoder) decodeArrayHelper(state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, elemIndir int, ovfl os.ErrorString) { instr := &decInstr{elemOp, 0, elemIndir, 0, ovfl} for i := 0; i < length; i++ { up := unsafe.Pointer(p) @@ -506,7 +564,10 @@ func (dec *Decoder) decodeArrayHelper(state *decodeState, p uintptr, elemOp decO } } -func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl os.ErrorString) { +// decodeArray decodes an array and stores it through p, that is, p points to the zeroth element. +// The length is an unsigned integer preceding the elements. Even though the length is redundant +// (it's part of the type), it's a useful check and is included in the encoding. 
+func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, length, indir, elemIndir int, ovfl os.ErrorString) { if indir > 0 { p = allocate(atyp, p, 1) // All but the last level has been allocated by dec.Indirect } @@ -516,7 +577,9 @@ func (dec *Decoder) decodeArray(atyp *reflect.ArrayType, state *decodeState, p u dec.decodeArrayHelper(state, p, elemOp, elemWid, length, elemIndir, ovfl) } -func decodeIntoValue(state *decodeState, op decOp, indir int, v reflect.Value, ovfl os.ErrorString) reflect.Value { +// decodeIntoValue is a helper for map decoding. Since maps are decoded using reflection, +// unlike the other items we can't use a pointer directly. +func decodeIntoValue(state *decoderState, op decOp, indir int, v reflect.Value, ovfl os.ErrorString) reflect.Value { instr := &decInstr{op, 0, indir, 0, ovfl} up := unsafe.Pointer(v.UnsafeAddr()) if indir > 1 { @@ -526,7 +589,11 @@ func decodeIntoValue(state *decodeState, op decOp, indir int, v reflect.Value, o return v } -func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl os.ErrorString) { +// decodeMap decodes a map and stores its header through p. +// Maps are encoded as a length followed by key:value pairs. +// Because the internals of maps are not visible to us, we must +// use reflection rather than pointer magic. +func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decoderState, p uintptr, keyOp, elemOp decOp, indir, keyIndir, elemIndir int, ovfl os.ErrorString) { if indir > 0 { p = allocate(mtyp, p, 1) // All but the last level has been allocated by dec.Indirect } @@ -538,7 +605,7 @@ func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintp // Maps cannot be accessed by moving addresses around the way // that slices etc. can. We must recover a full reflection value for // the iteration. 
- v := reflect.NewValue(unsafe.Unreflect(mtyp, unsafe.Pointer((p)))).(*reflect.MapValue) + v := reflect.NewValue(unsafe.Unreflect(mtyp, unsafe.Pointer(p))).(*reflect.MapValue) n := int(state.decodeUint()) for i := 0; i < n; i++ { key := decodeIntoValue(state, keyOp, keyIndir, reflect.MakeZero(mtyp.Key()), ovfl) @@ -547,21 +614,24 @@ func (dec *Decoder) decodeMap(mtyp *reflect.MapType, state *decodeState, p uintp } } -func (dec *Decoder) ignoreArrayHelper(state *decodeState, elemOp decOp, length int) { +// ignoreArrayHelper does the work for discarding arrays and slices. +func (dec *Decoder) ignoreArrayHelper(state *decoderState, elemOp decOp, length int) { instr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")} for i := 0; i < length; i++ { elemOp(instr, state, nil) } } -func (dec *Decoder) ignoreArray(state *decodeState, elemOp decOp, length int) { +// ignoreArray discards the data for an array value with no destination. +func (dec *Decoder) ignoreArray(state *decoderState, elemOp decOp, length int) { if n := state.decodeUint(); n != uint64(length) { errorf("gob: length mismatch in ignoreArray") } dec.ignoreArrayHelper(state, elemOp, length) } -func (dec *Decoder) ignoreMap(state *decodeState, keyOp, elemOp decOp) { +// ignoreMap discards the data for a map value with no destination. +func (dec *Decoder) ignoreMap(state *decoderState, keyOp, elemOp decOp) { n := int(state.decodeUint()) keyInstr := &decInstr{keyOp, 0, 0, 0, os.ErrorString("no error")} elemInstr := &decInstr{elemOp, 0, 0, 0, os.ErrorString("no error")} @@ -571,7 +641,9 @@ func (dec *Decoder) ignoreMap(state *decodeState, keyOp, elemOp decOp) { } } -func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decodeState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl os.ErrorString) { +// decodeSlice decodes a slice and stores the slice header through p. +// Slices are encoded as an unsigned length followed by the elements. 
+func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decoderState, p uintptr, elemOp decOp, elemWid uintptr, indir, elemIndir int, ovfl os.ErrorString) { n := int(uintptr(state.decodeUint())) if indir > 0 { up := unsafe.Pointer(p) @@ -590,7 +662,8 @@ func (dec *Decoder) decodeSlice(atyp *reflect.SliceType, state *decodeState, p u dec.decodeArrayHelper(state, hdrp.Data, elemOp, elemWid, n, elemIndir, ovfl) } -func (dec *Decoder) ignoreSlice(state *decodeState, elemOp decOp) { +// ignoreSlice skips over the data for a slice value with no destination. +func (dec *Decoder) ignoreSlice(state *decoderState, elemOp decOp) { dec.ignoreArrayHelper(state, elemOp, int(state.decodeUint())) } @@ -609,9 +682,10 @@ func setInterfaceValue(ivalue *reflect.InterfaceValue, value reflect.Value) { ivalue.Set(value) } -// decodeInterface receives the name of a concrete type followed by its value. +// decodeInterface decodes an interface value and stores it through p. +// Interfaces are encoded as the name of a concrete type followed by a value. // If the name is empty, the value is nil and no value is sent. -func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decodeState, p uintptr, indir int) { +func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decoderState, p uintptr, indir int) { // Create an interface reflect.Value. We need one even for the nil case. ivalue := reflect.MakeZero(ityp).(*reflect.InterfaceValue) // Read the name of the concrete type. @@ -655,7 +729,8 @@ func (dec *Decoder) decodeInterface(ityp *reflect.InterfaceType, state *decodeSt *(*[2]uintptr)(unsafe.Pointer(p)) = ivalue.Get() } -func (dec *Decoder) ignoreInterface(state *decodeState) { +// ignoreInterface discards the data for an interface value with no destination. +func (dec *Decoder) ignoreInterface(state *decoderState) { // Read the name of the concrete type. 
b := make([]byte, state.decodeUint()) _, err := state.b.Read(b) @@ -670,6 +745,32 @@ func (dec *Decoder) ignoreInterface(state *decodeState) { state.b.Next(int(state.decodeUint())) } +// decodeGobDecoder decodes something implementing the GobDecoder interface. +// The data is encoded as a byte slice. +func (dec *Decoder) decodeGobDecoder(state *decoderState, v reflect.Value, index int) { + // Read the bytes for the value. + b := make([]byte, state.decodeUint()) + _, err := state.b.Read(b) + if err != nil { + error(err) + } + // We know it's a GobDecoder, so just call the method directly. + err = v.Interface().(GobDecoder).GobDecode(b) + if err != nil { + error(err) + } +} + +// ignoreGobDecoder discards the data for a GobDecoder value with no destination. +func (dec *Decoder) ignoreGobDecoder(state *decoderState) { + // Read the bytes for the value. + b := make([]byte, state.decodeUint()) + _, err := state.b.Read(b) + if err != nil { + error(err) + } +} + // Index by Go types. var decOpTable = [...]decOp{ reflect.Bool: decBool, @@ -699,10 +800,14 @@ var decIgnoreOpMap = map[typeId]decOp{ tComplex: ignoreTwoUints, } -// Return the decoding op for the base type under rt and +// decOpFor returns the decoding op for the base type under rt and // the indirection count to reach it. func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProgress map[reflect.Type]*decOp) (*decOp, int) { ut := userType(rt) + // If the type implements GobEncoder, we handle it without further processing. + if ut.isGobDecoder { + return dec.gobDecodeOpFor(ut) + } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). // Return the pointer to the op we're already building. 
if opPtr := inProgress[rt]; opPtr != nil { @@ -724,7 +829,7 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg elemId := dec.wireType[wireId].ArrayT.Elem elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { state.dec.decodeArray(t, state, uintptr(p), *elemOp, t.Elem().Size(), t.Len(), i.indir, elemIndir, ovfl) } @@ -735,7 +840,7 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg keyOp, keyIndir := dec.decOpFor(keyId, t.Key(), name, inProgress) elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { up := unsafe.Pointer(p) state.dec.decodeMap(t, state, uintptr(up), *keyOp, *elemOp, i.indir, keyIndir, elemIndir, ovfl) } @@ -754,26 +859,23 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg } elemOp, elemIndir := dec.decOpFor(elemId, t.Elem(), name, inProgress) ovfl := overflow(name) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { state.dec.decodeSlice(t, state, uintptr(p), *elemOp, t.Elem().Size(), i.indir, elemIndir, ovfl) } case *reflect.StructType: // Generate a closure that calls out to the engine for the nested type. - enginePtr, err := dec.getDecEnginePtr(wireId, typ) + enginePtr, err := dec.getDecEnginePtr(wireId, userType(typ)) if err != nil { error(err) } - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { // indirect through enginePtr to delay evaluation for recursive structs. 
- err = dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir) - if err != nil { - error(err) - } + dec.decodeStruct(*enginePtr, userType(typ), uintptr(p), i.indir) } case *reflect.InterfaceType: - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { - dec.decodeInterface(t, state, uintptr(p), i.indir) + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + state.dec.decodeInterface(t, state, uintptr(p), i.indir) } } } @@ -783,15 +885,15 @@ func (dec *Decoder) decOpFor(wireId typeId, rt reflect.Type, name string, inProg return &op, indir } -// Return the decoding op for a field that has no destination. +// decIgnoreOpFor returns the decoding op for a field that has no destination. func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { op, ok := decIgnoreOpMap[wireId] if !ok { if wireId == tInterface { // Special case because it's a method: the ignored item might // define types and we need to record their state in the decoder. - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { - dec.ignoreInterface(state) + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + state.dec.ignoreInterface(state) } return op } @@ -799,11 +901,11 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { wire := dec.wireType[wireId] switch { case wire == nil: - panic("internal error: can't find ignore op for type " + wireId.string()) + errorf("gob: bad data: undefined type %s", wireId.string()) case wire.ArrayT != nil: elemId := wire.ArrayT.Elem elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { state.dec.ignoreArray(state, elemOp, wire.ArrayT.Len) } @@ -812,14 +914,14 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { elemId := dec.wireType[wireId].MapT.Elem keyOp := dec.decIgnoreOpFor(keyId) elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = 
func(i *decInstr, state *decoderState, p unsafe.Pointer) { state.dec.ignoreMap(state, keyOp, elemOp) } case wire.SliceT != nil: elemId := wire.SliceT.Elem elemOp := dec.decIgnoreOpFor(elemId) - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { state.dec.ignoreSlice(state, elemOp) } @@ -829,28 +931,75 @@ func (dec *Decoder) decIgnoreOpFor(wireId typeId) decOp { if err != nil { error(err) } - op = func(i *decInstr, state *decodeState, p unsafe.Pointer) { + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { // indirect through enginePtr to delay evaluation for recursive structs state.dec.ignoreStruct(*enginePtr) } + + case wire.GobEncoderT != nil: + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + state.dec.ignoreGobDecoder(state) + } } } if op == nil { - errorf("ignore can't handle type %s", wireId.string()) + errorf("gob: bad data: ignore can't handle type %s", wireId.string()) } return op } -// Are these two gob Types compatible? -// Answers the question for basic types, arrays, and slices. +// gobDecodeOpFor returns the op for a type that is known to implement +// GobDecoder. +func (dec *Decoder) gobDecodeOpFor(ut *userTypeInfo) (*decOp, int) { + rt := ut.user + if ut.decIndir == -1 { + rt = reflect.PtrTo(rt) + } else if ut.decIndir > 0 { + for i := int8(0); i < ut.decIndir; i++ { + rt = rt.(*reflect.PtrType).Elem() + } + } + var op decOp + op = func(i *decInstr, state *decoderState, p unsafe.Pointer) { + // Allocate the underlying data, but hold on to the address we have, + // since we need it to get to the receiver's address. + allocate(ut.base, uintptr(p), ut.indir) + var v reflect.Value + if ut.decIndir == -1 { + // Need to climb up one level to turn value into pointer. 
+ v = reflect.NewValue(unsafe.Unreflect(rt, unsafe.Pointer(&p))) + } else { + if ut.decIndir > 0 { + p = decIndirect(p, int(ut.decIndir)) + } + v = reflect.NewValue(unsafe.Unreflect(rt, p)) + } + state.dec.decodeGobDecoder(state, v, methodIndex(rt, gobDecodeMethodName)) + } + return &op, int(ut.decIndir) + +} + +// compatibleType asks: Are these two gob Types compatible? +// Answers the question for basic types, arrays, maps and slices, plus +// GobEncoder/Decoder pairs. // Structs are considered ok; fields will be checked later. func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[reflect.Type]typeId) bool { if rhs, ok := inProgress[fr]; ok { return rhs == fw } inProgress[fr] = fw - fr = userType(fr).base - switch t := fr.(type) { + ut := userType(fr) + wire, ok := dec.wireType[fw] + // If fr is a GobDecoder, the wire type must be GobEncoder. + // And if fr is not a GobDecoder, the wire type must not be either. + if ut.isGobDecoder != (ok && wire.GobEncoderT != nil) { // the parentheses look odd but are correct. + return false + } + if ut.isGobDecoder { // This test trumps all others. + return true + } + switch t := ut.base.(type) { default: // chan, etc: cannot handle. 
return false @@ -869,14 +1018,12 @@ func (dec *Decoder) compatibleType(fr reflect.Type, fw typeId, inProgress map[re case *reflect.InterfaceType: return fw == tInterface case *reflect.ArrayType: - wire, ok := dec.wireType[fw] if !ok || wire.ArrayT == nil { return false } array := wire.ArrayT return t.Len() == array.Len && dec.compatibleType(t.Elem(), array.Elem, inProgress) case *reflect.MapType: - wire, ok := dec.wireType[fw] if !ok || wire.MapT == nil { return false } @@ -911,8 +1058,13 @@ func (dec *Decoder) typeString(remoteId typeId) string { return dec.wireType[remoteId].string() } - -func (dec *Decoder) compileSingle(remoteId typeId, rt reflect.Type) (engine *decEngine, err os.Error) { +// compileSingle compiles the decoder engine for a non-struct top-level value, including +// GobDecoders. +func (dec *Decoder) compileSingle(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) { + rt := ut.base + if ut.isGobDecoder { + rt = ut.user + } engine = new(decEngine) engine.instr = make([]decInstr, 1) // one item name := rt.String() // best we can do @@ -926,6 +1078,7 @@ func (dec *Decoder) compileSingle(remoteId typeId, rt reflect.Type) (engine *dec return } +// compileIgnoreSingle compiles the decoder engine for a non-struct top-level value that will be discarded. func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err os.Error) { engine = new(decEngine) engine.instr = make([]decInstr, 1) // one item @@ -936,16 +1089,13 @@ func (dec *Decoder) compileIgnoreSingle(remoteId typeId) (engine *decEngine, err return } -// Is this an exported - upper case - name? -func isExported(name string) bool { - rune, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(rune) -} - -func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEngine, err os.Error) { +// compileDec compiles the decoder engine for a value. If the value is not a struct, +// it calls out to compileSingle. 
+func (dec *Decoder) compileDec(remoteId typeId, ut *userTypeInfo) (engine *decEngine, err os.Error) { + rt := ut.base srt, ok := rt.(*reflect.StructType) - if !ok { - return dec.compileSingle(remoteId, rt) + if !ok || ut.isGobDecoder { + return dec.compileSingle(remoteId, ut) } var wireStruct *structType // Builtin types can come from global pool; the rest must be defined by the decoder. @@ -990,7 +1140,9 @@ func (dec *Decoder) compileDec(remoteId typeId, rt reflect.Type) (engine *decEng return } -func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr **decEngine, err os.Error) { +// getDecEnginePtr returns the engine for the specified type. +func (dec *Decoder) getDecEnginePtr(remoteId typeId, ut *userTypeInfo) (enginePtr **decEngine, err os.Error) { + rt := ut.base decoderMap, ok := dec.decoderCache[rt] if !ok { decoderMap = make(map[typeId]**decEngine) @@ -1000,7 +1152,7 @@ func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr // To handle recursive types, mark this engine as underway before compiling. enginePtr = new(*decEngine) decoderMap[remoteId] = enginePtr - *enginePtr, err = dec.compileDec(remoteId, rt) + *enginePtr, err = dec.compileDec(remoteId, ut) if err != nil { decoderMap[remoteId] = nil, false } @@ -1008,11 +1160,12 @@ func (dec *Decoder) getDecEnginePtr(remoteId typeId, rt reflect.Type) (enginePtr return } -// When ignoring struct data, in effect we compile it into this type +// emptyStruct is the type we compile into when ignoring a struct value. type emptyStruct struct{} var emptyStructType = reflect.Typeof(emptyStruct{}) +// getIgnoreEnginePtr returns the engine for the specified type when the value is to be discarded. 
func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, err os.Error) { var ok bool if enginePtr, ok = dec.ignorerCache[wireId]; !ok { @@ -1021,7 +1174,7 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er dec.ignorerCache[wireId] = enginePtr wire := dec.wireType[wireId] if wire != nil && wire.StructT != nil { - *enginePtr, err = dec.compileDec(wireId, emptyStructType) + *enginePtr, err = dec.compileDec(wireId, userType(emptyStructType)) } else { *enginePtr, err = dec.compileIgnoreSingle(wireId) } @@ -1032,41 +1185,51 @@ func (dec *Decoder) getIgnoreEnginePtr(wireId typeId) (enginePtr **decEngine, er return } -func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) (err os.Error) { - defer catchError(&err) +// decodeValue decodes the data stream representing a value and stores it in val. +func (dec *Decoder) decodeValue(wireId typeId, val reflect.Value) { + defer catchError(&dec.err) // If the value is nil, it means we should just ignore this item. if val == nil { - return dec.decodeIgnoredValue(wireId) + dec.decodeIgnoredValue(wireId) + return } // Dereference down to the underlying struct type. 
ut := userType(val.Type()) base := ut.base indir := ut.indir - enginePtr, err := dec.getDecEnginePtr(wireId, base) - if err != nil { - return err + if ut.isGobDecoder { + indir = int(ut.decIndir) + } + var enginePtr **decEngine + enginePtr, dec.err = dec.getDecEnginePtr(wireId, ut) + if dec.err != nil { + return } engine := *enginePtr - if st, ok := base.(*reflect.StructType); ok { + if st, ok := base.(*reflect.StructType); ok && !ut.isGobDecoder { if engine.numInstr == 0 && st.NumField() > 0 && len(dec.wireType[wireId].StructT.Field) > 0 { name := base.Name() - return os.ErrorString("gob: type mismatch: no fields matched compiling decoder for " + name) + errorf("gob: type mismatch: no fields matched compiling decoder for %s", name) } - return dec.decodeStruct(engine, ut, uintptr(val.UnsafeAddr()), indir) + dec.decodeStruct(engine, ut, uintptr(val.UnsafeAddr()), indir) + } else { + dec.decodeSingle(engine, ut, uintptr(val.UnsafeAddr())) } - return dec.decodeSingle(engine, ut, uintptr(val.UnsafeAddr())) } -func (dec *Decoder) decodeIgnoredValue(wireId typeId) os.Error { - enginePtr, err := dec.getIgnoreEnginePtr(wireId) - if err != nil { - return err +// decodeIgnoredValue decodes the data stream representing a value of the specified type and discards it. 
+func (dec *Decoder) decodeIgnoredValue(wireId typeId) { + var enginePtr **decEngine + enginePtr, dec.err = dec.getIgnoreEnginePtr(wireId) + if dec.err != nil { + return } wire := dec.wireType[wireId] if wire != nil && wire.StructT != nil { - return dec.ignoreStruct(*enginePtr) + dec.ignoreStruct(*enginePtr) + } else { + dec.ignoreSingle(*enginePtr) } - return dec.ignoreSingle(*enginePtr) } func init() { diff --git a/libgo/go/gob/decoder.go b/libgo/go/gob/decoder.go index f7c994ffa78..34364161aa3 100644 --- a/libgo/go/gob/decoder.go +++ b/libgo/go/gob/decoder.go @@ -5,6 +5,7 @@ package gob import ( + "bufio" "bytes" "io" "os" @@ -21,7 +22,7 @@ type Decoder struct { wireType map[typeId]*wireType // map from remote ID to local description decoderCache map[reflect.Type]map[typeId]**decEngine // cache of compiled engines ignorerCache map[typeId]**decEngine // ditto for ignored objects - countState *decodeState // reads counts from wire + freeList *decoderState // list of free decoderStates; avoids reallocation countBuf []byte // used for decoding integers while parsing messages tmp []byte // temporary storage for i/o; saves reallocating err os.Error @@ -30,7 +31,7 @@ type Decoder struct { // NewDecoder returns a new decoder that reads from the io.Reader. 
func NewDecoder(r io.Reader) *Decoder { dec := new(Decoder) - dec.r = r + dec.r = bufio.NewReader(r) dec.wireType = make(map[typeId]*wireType) dec.decoderCache = make(map[reflect.Type]map[typeId]**decEngine) dec.ignorerCache = make(map[typeId]**decEngine) @@ -49,7 +50,7 @@ func (dec *Decoder) recvType(id typeId) { // Type: wire := new(wireType) - dec.err = dec.decodeValue(tWireType, reflect.NewValue(wire)) + dec.decodeValue(tWireType, reflect.NewValue(wire)) if dec.err != nil { return } @@ -184,7 +185,7 @@ func (dec *Decoder) DecodeValue(value reflect.Value) os.Error { dec.err = nil id := dec.decodeTypeSequence(false) if dec.err == nil { - dec.err = dec.decodeValue(id, value) + dec.decodeValue(id, value) } return dec.err } diff --git a/libgo/go/gob/encode.go b/libgo/go/gob/encode.go index e92db74ffdd..5cfdb583a18 100644 --- a/libgo/go/gob/encode.go +++ b/libgo/go/gob/encode.go @@ -6,16 +6,14 @@ package gob import ( "bytes" - "io" "math" - "os" "reflect" "unsafe" ) const uint64Size = unsafe.Sizeof(uint64(0)) -// The global execution state of an instance of the encoder. +// encoderState is the global execution state of an instance of the encoder. // Field numbers are delta encoded and always increase. The field // number is initialized to -1 so 0 comes out as delta(1). A delta of // 0 terminates the structure. @@ -25,10 +23,26 @@ type encoderState struct { sendZero bool // encoding an array element or map key/value pair; send zero values fieldnum int // the last field number written. buf [1 + uint64Size]byte // buffer used by the encoder; here to avoid allocation. 
+ next *encoderState // for free list } -func newEncoderState(enc *Encoder, b *bytes.Buffer) *encoderState { - return &encoderState{enc: enc, b: b} +func (enc *Encoder) newEncoderState(b *bytes.Buffer) *encoderState { + e := enc.freeList + if e == nil { + e = new(encoderState) + e.enc = enc + } else { + enc.freeList = e.next + } + e.sendZero = false + e.fieldnum = 0 + e.b = b + return e +} + +func (enc *Encoder) freeEncoderState(e *encoderState) { + e.next = enc.freeList + enc.freeList = e } // Unsigned integers have a two-state encoding. If the number is less @@ -72,6 +86,7 @@ func (state *encoderState) encodeInt(i int64) { state.encodeUint(uint64(x)) } +// encOp is the signature of an encoding operator for a given type. type encOp func(i *encInstr, state *encoderState, p unsafe.Pointer) // The 'instructions' of the encoding machine @@ -82,8 +97,8 @@ type encInstr struct { offset uintptr // offset in the structure of the field to encode } -// Emit a field number and update the state to record its value for delta encoding. -// If the instruction pointer is nil, do nothing +// update emits a field number and updates the state to record its value for delta encoding. +// If the instruction pointer is nil, it does nothing func (state *encoderState) update(instr *encInstr) { if instr != nil { state.encodeUint(uint64(instr.field - state.fieldnum)) @@ -91,12 +106,16 @@ func (state *encoderState) update(instr *encInstr) { } } -// Each encoder is responsible for handling any indirections associated -// with the data structure. If any pointer so reached is nil, no bytes are written. -// If the data item is zero, no bytes are written. -// Otherwise, the output (for a scalar) is the field number, as an encoded integer, -// followed by the field data in its appropriate format. +// Each encoder for a composite is responsible for handling any +// indirections associated with the elements of the data structure. +// If any pointer so reached is nil, no bytes are written. 
If the +// data item is zero, no bytes are written. Single values - ints, +// strings etc. - are indirected before calling their encoders. +// Otherwise, the output (for a scalar) is the field number, as an +// encoded integer, followed by the field data in its appropriate +// format. +// encIndirect dereferences p indir times and returns the result. func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { for ; indir > 0; indir-- { p = *(*unsafe.Pointer)(p) @@ -107,6 +126,7 @@ func encIndirect(p unsafe.Pointer, indir int) unsafe.Pointer { return p } +// encBool encodes the bool with address p as an unsigned 0 or 1. func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) { b := *(*bool)(p) if b || state.sendZero { @@ -119,6 +139,7 @@ func encBool(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt encodes the int with address p. func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) { v := int64(*(*int)(p)) if v != 0 || state.sendZero { @@ -127,6 +148,7 @@ func encInt(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUint encodes the uint with address p. func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) { v := uint64(*(*uint)(p)) if v != 0 || state.sendZero { @@ -135,6 +157,7 @@ func encUint(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt8 encodes the int8 with address p. func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) { v := int64(*(*int8)(p)) if v != 0 || state.sendZero { @@ -143,6 +166,7 @@ func encInt8(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUint8 encodes the uint8 with address p. func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) { v := uint64(*(*uint8)(p)) if v != 0 || state.sendZero { @@ -151,6 +175,7 @@ func encUint8(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt16 encodes the int16 with address p. 
func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) { v := int64(*(*int16)(p)) if v != 0 || state.sendZero { @@ -159,6 +184,7 @@ func encInt16(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUint16 encodes the uint16 with address p. func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) { v := uint64(*(*uint16)(p)) if v != 0 || state.sendZero { @@ -167,6 +193,7 @@ func encUint16(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt32 encodes the int32 with address p. func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) { v := int64(*(*int32)(p)) if v != 0 || state.sendZero { @@ -175,6 +202,7 @@ func encInt32(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUint encodes the uint32 with address p. func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) { v := uint64(*(*uint32)(p)) if v != 0 || state.sendZero { @@ -183,6 +211,7 @@ func encUint32(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt64 encodes the int64 with address p. func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) { v := *(*int64)(p) if v != 0 || state.sendZero { @@ -191,6 +220,7 @@ func encInt64(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encInt64 encodes the uint64 with address p. func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) { v := *(*uint64)(p) if v != 0 || state.sendZero { @@ -199,6 +229,7 @@ func encUint64(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUintptr encodes the uintptr with address p. func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) { v := uint64(*(*uintptr)(p)) if v != 0 || state.sendZero { @@ -207,6 +238,7 @@ func encUintptr(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// floatBits returns a uint64 holding the bits of a floating-point number. // Floating-point numbers are transmitted as uint64s holding the bits // of the underlying representation. 
They are sent byte-reversed, with // the exponent end coming out first, so integer floating point numbers @@ -223,6 +255,7 @@ func floatBits(f float64) uint64 { return v } +// encFloat32 encodes the float32 with address p. func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) { f := *(*float32)(p) if f != 0 || state.sendZero { @@ -232,6 +265,7 @@ func encFloat32(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encFloat64 encodes the float64 with address p. func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) { f := *(*float64)(p) if f != 0 || state.sendZero { @@ -241,6 +275,7 @@ func encFloat64(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encComplex64 encodes the complex64 with address p. // Complex numbers are just a pair of floating-point numbers, real part first. func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) { c := *(*complex64)(p) @@ -253,6 +288,7 @@ func encComplex64(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encComplex128 encodes the complex128 with address p. func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { c := *(*complex128)(p) if c != 0+0i || state.sendZero { @@ -264,6 +300,7 @@ func encComplex128(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encUint8Array encodes the byte slice whose header has address p. // Byte arrays are encoded as an unsigned count followed by the raw bytes. func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { b := *(*[]byte)(p) @@ -274,24 +311,26 @@ func encUint8Array(i *encInstr, state *encoderState, p unsafe.Pointer) { } } +// encString encodes the string whose header has address p. // Strings are encoded as an unsigned count followed by the raw bytes. 
func encString(i *encInstr, state *encoderState, p unsafe.Pointer) { s := *(*string)(p) if len(s) > 0 || state.sendZero { state.update(i) state.encodeUint(uint64(len(s))) - io.WriteString(state.b, s) + state.b.WriteString(s) } } -// The end of a struct is marked by a delta field number of 0. +// encStructTerminator encodes the end of an encoded struct +// as delta field number of 0. func encStructTerminator(i *encInstr, state *encoderState, p unsafe.Pointer) { state.encodeUint(0) } // Execution engine -// The encoder engine is an array of instructions indexed by field number of the encoding +// encEngine an array of instructions indexed by field number of the encoding // data, typically a struct. It is executed top to bottom, walking the struct. type encEngine struct { instr []encInstr @@ -299,8 +338,9 @@ type encEngine struct { const singletonField = 0 +// encodeSingle encodes a single top-level non-struct value. func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintptr) { - state := newEncoderState(enc, b) + state := enc.newEncoderState(b) state.fieldnum = singletonField // There is no surrounding struct to frame the transmission, so we must // generate data even if the item is zero. To do this, set sendZero. @@ -313,10 +353,12 @@ func (enc *Encoder) encodeSingle(b *bytes.Buffer, engine *encEngine, basep uintp } } instr.op(instr, state, p) + enc.freeEncoderState(state) } +// encodeStruct encodes a single struct value. func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintptr) { - state := newEncoderState(enc, b) + state := enc.newEncoderState(b) state.fieldnum = -1 for i := 0; i < len(engine.instr); i++ { instr := &engine.instr[i] @@ -328,10 +370,12 @@ func (enc *Encoder) encodeStruct(b *bytes.Buffer, engine *encEngine, basep uintp } instr.op(instr, state, p) } + enc.freeEncoderState(state) } +// encodeArray encodes the array whose 0th element is at p. 
func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid uintptr, elemIndir int, length int) { - state := newEncoderState(enc, b) + state := enc.newEncoderState(b) state.fieldnum = -1 state.sendZero = true state.encodeUint(uint64(length)) @@ -347,8 +391,10 @@ func (enc *Encoder) encodeArray(b *bytes.Buffer, p uintptr, op encOp, elemWid ui op(nil, state, unsafe.Pointer(elemp)) p += uintptr(elemWid) } + enc.freeEncoderState(state) } +// encodeReflectValue is a helper for maps. It encodes the value v. func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir int) { for i := 0; i < indir && v != nil; i++ { v = reflect.Indirect(v) @@ -359,8 +405,11 @@ func encodeReflectValue(state *encoderState, v reflect.Value, op encOp, indir in op(nil, state, unsafe.Pointer(v.UnsafeAddr())) } +// encodeMap encodes a map as unsigned count followed by key:value pairs. +// Because map internals are not exposed, we must use reflection rather than +// addresses. func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elemOp encOp, keyIndir, elemIndir int) { - state := newEncoderState(enc, b) + state := enc.newEncoderState(b) state.fieldnum = -1 state.sendZero = true keys := mv.Keys() @@ -369,14 +418,16 @@ func (enc *Encoder) encodeMap(b *bytes.Buffer, mv *reflect.MapValue, keyOp, elem encodeReflectValue(state, key, keyOp, keyIndir) encodeReflectValue(state, mv.Elem(key), elemOp, elemIndir) } + enc.freeEncoderState(state) } +// encodeInterface encodes the interface value iv. // To send an interface, we send a string identifying the concrete type, followed // by the type identifier (which might require defining that type right now), followed // by the concrete value. A nil value gets sent as the empty string for the name, // followed by no value. 
func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) { - state := newEncoderState(enc, b) + state := enc.newEncoderState(b) state.fieldnum = -1 state.sendZero = true if iv.IsNil() { @@ -391,7 +442,7 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) } // Send the name. state.encodeUint(uint64(len(name))) - _, err := io.WriteString(state.b, name) + _, err := state.b.WriteString(name) if err != nil { error(err) } @@ -403,15 +454,32 @@ func (enc *Encoder) encodeInterface(b *bytes.Buffer, iv *reflect.InterfaceValue) // should be written to b, before the encoded value. enc.pushWriter(b) data := new(bytes.Buffer) - err = enc.encode(data, iv.Elem(), ut) - if err != nil { - error(err) + enc.encode(data, iv.Elem(), ut) + if enc.err != nil { + error(enc.err) } enc.popWriter() enc.writeMessage(b, data) if enc.err != nil { error(err) } + enc.freeEncoderState(state) +} + +// encGobEncoder encodes a value that implements the GobEncoder interface. +// The data is sent as a byte array. +func (enc *Encoder) encodeGobEncoder(b *bytes.Buffer, v reflect.Value, index int) { + // TODO: should we catch panics from the called method? + // We know it's a GobEncoder, so just call the method directly. + data, err := v.Interface().(GobEncoder).GobEncode() + if err != nil { + error(err) + } + state := enc.newEncoderState(b) + state.fieldnum = -1 + state.encodeUint(uint64(len(data))) + state.b.Write(data) + enc.freeEncoderState(state) } var encOpTable = [...]encOp{ @@ -434,10 +502,14 @@ var encOpTable = [...]encOp{ reflect.String: encString, } -// Return (a pointer to) the encoding op for the base type under rt and +// encOpFor returns (a pointer to) the encoding op for the base type under rt and // the indirection count to reach it. func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp) (*encOp, int) { ut := userType(rt) + // If the type implements GobEncoder, we handle it without further processing. 
+ if ut.isGobEncoder { + return enc.gobEncodeOpFor(ut) + } // If this type is already in progress, it's a recursive type (e.g. map[string]*T). // Return the pointer to the op we're already building. if opPtr := inProgress[rt]; opPtr != nil { @@ -483,7 +555,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp // Maps cannot be accessed by moving addresses around the way // that slices etc. can. We must recover a full reflection value for // the iteration. - v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer((p)))) + v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer(p))) mv := reflect.Indirect(v).(*reflect.MapValue) if !state.sendZero && mv.Len() == 0 { return @@ -493,7 +565,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp } case *reflect.StructType: // Generate a closure that calls out to the engine for the nested type. - enc.getEncEngine(typ) + enc.getEncEngine(userType(typ)) info := mustGetTypeInfo(typ) op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { state.update(i) @@ -504,7 +576,7 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { // Interfaces transmit the name and contents of the concrete // value they contain. - v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer((p)))) + v := reflect.NewValue(unsafe.Unreflect(t, unsafe.Pointer(p))) iv := reflect.Indirect(v).(*reflect.InterfaceValue) if !state.sendZero && (iv == nil || iv.IsNil()) { return @@ -520,22 +592,64 @@ func (enc *Encoder) encOpFor(rt reflect.Type, inProgress map[reflect.Type]*encOp return &op, indir } -// The local Type was compiled from the actual value, so we know it's compatible. -func (enc *Encoder) compileEnc(rt reflect.Type) *encEngine { - srt, isStruct := rt.(*reflect.StructType) +// methodIndex returns which method of rt implements the method. 
+func methodIndex(rt reflect.Type, method string) int { + for i := 0; i < rt.NumMethod(); i++ { + if rt.Method(i).Name == method { + return i + } + } + errorf("gob: internal error: can't find method %s", method) + return 0 +} + +// gobEncodeOpFor returns the op for a type that is known to implement +// GobEncoder. +func (enc *Encoder) gobEncodeOpFor(ut *userTypeInfo) (*encOp, int) { + rt := ut.user + if ut.encIndir == -1 { + rt = reflect.PtrTo(rt) + } else if ut.encIndir > 0 { + for i := int8(0); i < ut.encIndir; i++ { + rt = rt.(*reflect.PtrType).Elem() + } + } + var op encOp + op = func(i *encInstr, state *encoderState, p unsafe.Pointer) { + var v reflect.Value + if ut.encIndir == -1 { + // Need to climb up one level to turn value into pointer. + v = reflect.NewValue(unsafe.Unreflect(rt, unsafe.Pointer(&p))) + } else { + v = reflect.NewValue(unsafe.Unreflect(rt, p)) + } + state.update(i) + state.enc.encodeGobEncoder(state.b, v, methodIndex(rt, gobEncodeMethodName)) + } + return &op, int(ut.encIndir) // encIndir: op will get called with p == address of receiver. +} + +// compileEnc returns the engine to compile the type. 
+func (enc *Encoder) compileEnc(ut *userTypeInfo) *encEngine { + srt, isStruct := ut.base.(*reflect.StructType) engine := new(encEngine) seen := make(map[reflect.Type]*encOp) - if isStruct { - for fieldNum := 0; fieldNum < srt.NumField(); fieldNum++ { + rt := ut.base + if ut.isGobEncoder { + rt = ut.user + } + if !ut.isGobEncoder && isStruct { + for fieldNum, wireFieldNum := 0, 0; fieldNum < srt.NumField(); fieldNum++ { f := srt.Field(fieldNum) if !isExported(f.Name) { continue } op, indir := enc.encOpFor(f.Type, seen) - engine.instr = append(engine.instr, encInstr{*op, fieldNum, indir, uintptr(f.Offset)}) + engine.instr = append(engine.instr, encInstr{*op, wireFieldNum, indir, uintptr(f.Offset)}) + wireFieldNum++ } if srt.NumField() > 0 && len(engine.instr) == 0 { - errorf("type %s has no exported fields", rt) + errorf("gob: type %s has no exported fields", rt) } engine.instr = append(engine.instr, encInstr{encStructTerminator, 0, 0, 0}) } else { @@ -546,38 +660,42 @@ func (enc *Encoder) compileEnc(rt reflect.Type) *encEngine { return engine } +// getEncEngine returns the engine to compile the type. // typeLock must be held (or we're in initialization and guaranteed single-threaded). -// The reflection type must have all its indirections processed out. -func (enc *Encoder) getEncEngine(rt reflect.Type) *encEngine { - info, err1 := getTypeInfo(rt) +func (enc *Encoder) getEncEngine(ut *userTypeInfo) *encEngine { + info, err1 := getTypeInfo(ut) if err1 != nil { error(err1) } if info.encoder == nil { // mark this engine as underway before compiling to handle recursive types. info.encoder = new(encEngine) - info.encoder = enc.compileEnc(rt) + info.encoder = enc.compileEnc(ut) } return info.encoder } -// Put this in a function so we can hold the lock only while compiling, not when encoding. -func (enc *Encoder) lockAndGetEncEngine(rt reflect.Type) *encEngine { +// lockAndGetEncEngine is a function that locks and compiles. 
+// This lets us hold the lock only while compiling, not when encoding. +func (enc *Encoder) lockAndGetEncEngine(ut *userTypeInfo) *encEngine { typeLock.Lock() defer typeLock.Unlock() - return enc.getEncEngine(rt) + return enc.getEncEngine(ut) } -func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) (err os.Error) { - defer catchError(&err) - for i := 0; i < ut.indir; i++ { +func (enc *Encoder) encode(b *bytes.Buffer, value reflect.Value, ut *userTypeInfo) { + defer catchError(&enc.err) + engine := enc.lockAndGetEncEngine(ut) + indir := ut.indir + if ut.isGobEncoder { + indir = int(ut.encIndir) + } + for i := 0; i < indir; i++ { value = reflect.Indirect(value) } - engine := enc.lockAndGetEncEngine(ut.base) - if value.Type().Kind() == reflect.Struct { + if !ut.isGobEncoder && value.Type().Kind() == reflect.Struct { enc.encodeStruct(b, engine, value.UnsafeAddr()) } else { enc.encodeSingle(b, engine, value.UnsafeAddr()) } - return nil } diff --git a/libgo/go/gob/encoder.go b/libgo/go/gob/encoder.go index 92d036c11c3..e52a4de29f7 100644 --- a/libgo/go/gob/encoder.go +++ b/libgo/go/gob/encoder.go @@ -19,7 +19,9 @@ type Encoder struct { w []io.Writer // where to send the data sent map[reflect.Type]typeId // which types we've already sent countState *encoderState // stage for writing counts + freeList *encoderState // list of free encoderStates; avoids reallocation buf []byte // for collecting the output. + byteBuf bytes.Buffer // buffer for top-level encoderState err os.Error } @@ -28,7 +30,7 @@ func NewEncoder(w io.Writer) *Encoder { enc := new(Encoder) enc.w = []io.Writer{w} enc.sent = make(map[reflect.Type]typeId) - enc.countState = newEncoderState(enc, new(bytes.Buffer)) + enc.countState = enc.newEncoderState(new(bytes.Buffer)) return enc } @@ -78,12 +80,57 @@ func (enc *Encoder) writeMessage(w io.Writer, b *bytes.Buffer) { } } +// sendActualType sends the requested type, without further investigation, unless +// it's been sent before. 
+func (enc *Encoder) sendActualType(w io.Writer, state *encoderState, ut *userTypeInfo, actual reflect.Type) (sent bool) { + if _, alreadySent := enc.sent[actual]; alreadySent { + return false + } + typeLock.Lock() + info, err := getTypeInfo(ut) + typeLock.Unlock() + if err != nil { + enc.setError(err) + return + } + // Send the pair (-id, type) + // Id: + state.encodeInt(-int64(info.id)) + // Type: + enc.encode(state.b, reflect.NewValue(info.wire), wireTypeUserInfo) + enc.writeMessage(w, state.b) + if enc.err != nil { + return + } + + // Remember we've sent this type, both what the user gave us and the base type. + enc.sent[ut.base] = info.id + if ut.user != ut.base { + enc.sent[ut.user] = info.id + } + // Now send the inner types + switch st := actual.(type) { + case *reflect.StructType: + for i := 0; i < st.NumField(); i++ { + enc.sendType(w, state, st.Field(i).Type) + } + case reflect.ArrayOrSliceType: + enc.sendType(w, state, st.Elem()) + } + return true +} + +// sendType sends the type info to the other side, if necessary. func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Type) (sent bool) { - // Drill down to the base type. ut := userType(origt) - rt := ut.base + if ut.isGobEncoder { + // The rules are different: regardless of the underlying type's representation, + // we need to tell the other side that this exact type is a GobEncoder. + return enc.sendActualType(w, state, ut, ut.user) + } - switch rt := rt.(type) { + // It's a concrete value, so drill down to the base type. + switch rt := ut.base.(type) { default: // Basic types and interfaces do not need to be described. return @@ -109,43 +156,7 @@ func (enc *Encoder) sendType(w io.Writer, state *encoderState, origt reflect.Typ return } - // Have we already sent this type? This time we ask about the base type. - if _, alreadySent := enc.sent[rt]; alreadySent { - return - } - - // Need to send it. 
- typeLock.Lock() - info, err := getTypeInfo(rt) - typeLock.Unlock() - if err != nil { - enc.setError(err) - return - } - // Send the pair (-id, type) - // Id: - state.encodeInt(-int64(info.id)) - // Type: - enc.encode(state.b, reflect.NewValue(info.wire), wireTypeUserInfo) - enc.writeMessage(w, state.b) - if enc.err != nil { - return - } - - // Remember we've sent this type. - enc.sent[rt] = info.id - // Remember we've sent the top-level, possibly indirect type too. - enc.sent[origt] = info.id - // Now send the inner types - switch st := rt.(type) { - case *reflect.StructType: - for i := 0; i < st.NumField(); i++ { - enc.sendType(w, state, st.Field(i).Type) - } - case reflect.ArrayOrSliceType: - enc.sendType(w, state, st.Elem()) - } - return true + return enc.sendActualType(w, state, ut, ut.base) } // Encode transmits the data item represented by the empty interface value, @@ -159,11 +170,14 @@ func (enc *Encoder) Encode(e interface{}) os.Error { // sent. func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *userTypeInfo) { // Make sure the type is known to the other side. - // First, have we already sent this (base) type? - base := ut.base - if _, alreadySent := enc.sent[base]; !alreadySent { + // First, have we already sent this type? + rt := ut.base + if ut.isGobEncoder { + rt = ut.user + } + if _, alreadySent := enc.sent[rt]; !alreadySent { // No, so send it. - sent := enc.sendType(w, state, base) + sent := enc.sendType(w, state, rt) if enc.err != nil { return } @@ -172,13 +186,13 @@ func (enc *Encoder) sendTypeDescriptor(w io.Writer, state *encoderState, ut *use // need to send the type info but we do need to update enc.sent. 
if !sent { typeLock.Lock() - info, err := getTypeInfo(base) + info, err := getTypeInfo(ut) typeLock.Unlock() if err != nil { enc.setError(err) return } - enc.sent[base] = info.id + enc.sent[rt] = info.id } } } @@ -206,7 +220,8 @@ func (enc *Encoder) EncodeValue(value reflect.Value) os.Error { } enc.err = nil - state := newEncoderState(enc, new(bytes.Buffer)) + enc.byteBuf.Reset() + state := enc.newEncoderState(&enc.byteBuf) enc.sendTypeDescriptor(enc.writer(), state, ut) enc.sendTypeId(state, ut) @@ -215,12 +230,11 @@ func (enc *Encoder) EncodeValue(value reflect.Value) os.Error { } // Encode the object. - err = enc.encode(state.b, value, ut) - if err != nil { - enc.setError(err) - } else { + enc.encode(state.b, value, ut) + if enc.err == nil { enc.writeMessage(enc.writer(), state.b) } + enc.freeEncoderState(state) return enc.err } diff --git a/libgo/go/gob/gobencdec_test.go b/libgo/go/gob/gobencdec_test.go new file mode 100644 index 00000000000..012b0995662 --- /dev/null +++ b/libgo/go/gob/gobencdec_test.go @@ -0,0 +1,384 @@ +// Copyright 20011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests of the GobEncoder/GobDecoder support. + +package gob + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" +) + +// Types that implement the GobEncoder/Decoder interfaces. + +type ByteStruct struct { + a byte // not an exported field +} + +type StringStruct struct { + s string // not an exported field +} + +type Gobber int + +type ValueGobber string // encodes with a value, decodes with a pointer. + +// The relevant methods + +func (g *ByteStruct) GobEncode() ([]byte, os.Error) { + b := make([]byte, 3) + b[0] = g.a + b[1] = g.a + 1 + b[2] = g.a + 2 + return b, nil +} + +func (g *ByteStruct) GobDecode(data []byte) os.Error { + if g == nil { + return os.ErrorString("NIL RECEIVER") + } + // Expect N sequential-valued bytes. 
+ if len(data) == 0 { + return os.EOF + } + g.a = data[0] + for i, c := range data { + if c != g.a+byte(i) { + return os.ErrorString("invalid data sequence") + } + } + return nil +} + +func (g *StringStruct) GobEncode() ([]byte, os.Error) { + return []byte(g.s), nil +} + +func (g *StringStruct) GobDecode(data []byte) os.Error { + // Expect N sequential-valued bytes. + if len(data) == 0 { + return os.EOF + } + a := data[0] + for i, c := range data { + if c != a+byte(i) { + return os.ErrorString("invalid data sequence") + } + } + g.s = string(data) + return nil +} + +func (g *Gobber) GobEncode() ([]byte, os.Error) { + return []byte(fmt.Sprintf("VALUE=%d", *g)), nil +} + +func (g *Gobber) GobDecode(data []byte) os.Error { + _, err := fmt.Sscanf(string(data), "VALUE=%d", (*int)(g)) + return err +} + +func (v ValueGobber) GobEncode() ([]byte, os.Error) { + return []byte(fmt.Sprintf("VALUE=%s", v)), nil +} + +func (v *ValueGobber) GobDecode(data []byte) os.Error { + _, err := fmt.Sscanf(string(data), "VALUE=%s", (*string)(v)) + return err +} + +// Structs that include GobEncodable fields. 
+ +type GobTest0 struct { + X int // guarantee we have something in common with GobTest* + G *ByteStruct +} + +type GobTest1 struct { + X int // guarantee we have something in common with GobTest* + G *StringStruct +} + +type GobTest2 struct { + X int // guarantee we have something in common with GobTest* + G string // not a GobEncoder - should give us errors +} + +type GobTest3 struct { + X int // guarantee we have something in common with GobTest* + G *Gobber +} + +type GobTest4 struct { + X int // guarantee we have something in common with GobTest* + V ValueGobber +} + +type GobTest5 struct { + X int // guarantee we have something in common with GobTest* + V *ValueGobber +} + +type GobTestIgnoreEncoder struct { + X int // guarantee we have something in common with GobTest* +} + +type GobTestValueEncDec struct { + X int // guarantee we have something in common with GobTest* + G StringStruct // not a pointer. +} + +type GobTestIndirectEncDec struct { + X int // guarantee we have something in common with GobTest* + G ***StringStruct // indirections to the receiver. +} + +func TestGobEncoderField(t *testing.T) { + b := new(bytes.Buffer) + // First a field that's a structure. + enc := NewEncoder(b) + err := enc.Encode(GobTest0{17, &ByteStruct{'A'}}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTest0) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.G.a != 'A' { + t.Errorf("expected 'A' got %c", x.G.a) + } + // Now a field that's not a structure. + b.Reset() + gobber := Gobber(23) + err = enc.Encode(GobTest3{17, &gobber}) + if err != nil { + t.Fatal("encode error:", err) + } + y := new(GobTest3) + err = dec.Decode(y) + if err != nil { + t.Fatal("decode error:", err) + } + if *y.G != 23 { + t.Errorf("expected '23 got %d", *y.G) + } +} + +// Even though the field is a value, we can still take its address +// and should be able to call the methods. 
+func TestGobEncoderValueField(t *testing.T) { + b := new(bytes.Buffer) + // First a field that's a structure. + enc := NewEncoder(b) + err := enc.Encode(GobTestValueEncDec{17, StringStruct{"HIJKL"}}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTestValueEncDec) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.G.s != "HIJKL" { + t.Errorf("expected `HIJKL` got %s", x.G.s) + } +} + +// GobEncode/Decode should work even if the value is +// more indirect than the receiver. +func TestGobEncoderIndirectField(t *testing.T) { + b := new(bytes.Buffer) + // First a field that's a structure. + enc := NewEncoder(b) + s := &StringStruct{"HIJKL"} + sp := &s + err := enc.Encode(GobTestIndirectEncDec{17, &sp}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTestIndirectEncDec) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if (***x.G).s != "HIJKL" { + t.Errorf("expected `HIJKL` got %s", (***x.G).s) + } +} + +// As long as the fields have the same name and implement the +// interface, we can cross-connect them. Not sure it's useful +// and may even be bad but it works and it's hard to prevent +// without exposing the contents of the object, which would +// defeat the purpose. 
+func TestGobEncoderFieldsOfDifferentType(t *testing.T) { + // first, string in field to byte in field + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTest0) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.G.a != 'A' { + t.Errorf("expected 'A' got %c", x.G.a) + } + // now the other direction, byte in field to string in field + b.Reset() + err = enc.Encode(GobTest0{17, &ByteStruct{'X'}}) + if err != nil { + t.Fatal("encode error:", err) + } + y := new(GobTest1) + err = dec.Decode(y) + if err != nil { + t.Fatal("decode error:", err) + } + if y.G.s != "XYZ" { + t.Fatalf("expected `XYZ` got %c", y.G.s) + } +} + +// Test that we can encode a value and decode into a pointer. +func TestGobEncoderValueEncoder(t *testing.T) { + // first, string in field to byte in field + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(GobTest4{17, ValueGobber("hello")}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTest5) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if *x.V != "hello" { + t.Errorf("expected `hello` got %s", x.V) + } +} + +func TestGobEncoderFieldTypeError(t *testing.T) { + // GobEncoder to non-decoder: error + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(GobTest1{17, &StringStruct{"ABC"}}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := &GobTest2{} + err = dec.Decode(x) + if err == nil { + t.Fatal("expected decode error for mismatched fields (encoder to non-decoder)") + } + if strings.Index(err.String(), "type") < 0 { + t.Fatal("expected type error; got", err) + } + // Non-encoder to GobDecoder: error + b.Reset() + err = enc.Encode(GobTest2{17, "ABC"}) + if err != nil { + t.Fatal("encode error:", err) + } + y := &GobTest1{} + err = 
dec.Decode(y) + if err == nil { + t.Fatal("expected decode error for mistmatched fields (non-encoder to decoder)") + } + if strings.Index(err.String(), "type") < 0 { + t.Fatal("expected type error; got", err) + } +} + +// Even though ByteStruct is a struct, it's treated as a singleton at the top level. +func TestGobEncoderStructSingleton(t *testing.T) { + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(&ByteStruct{'A'}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(ByteStruct) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.a != 'A' { + t.Errorf("expected 'A' got %c", x.a) + } +} + +func TestGobEncoderNonStructSingleton(t *testing.T) { + b := new(bytes.Buffer) + enc := NewEncoder(b) + err := enc.Encode(Gobber(1234)) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + var x Gobber + err = dec.Decode(&x) + if err != nil { + t.Fatal("decode error:", err) + } + if x != 1234 { + t.Errorf("expected 1234 got %c", x) + } +} + +func TestGobEncoderIgnoreStructField(t *testing.T) { + b := new(bytes.Buffer) + // First a field that's a structure. + enc := NewEncoder(b) + err := enc.Encode(GobTest0{17, &ByteStruct{'A'}}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTestIgnoreEncoder) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.X != 17 { + t.Errorf("expected 17 got %c", x.X) + } +} + +func TestGobEncoderIgnoreNonStructField(t *testing.T) { + b := new(bytes.Buffer) + // First a field that's a structure. 
+ enc := NewEncoder(b) + gobber := Gobber(23) + err := enc.Encode(GobTest3{17, &gobber}) + if err != nil { + t.Fatal("encode error:", err) + } + dec := NewDecoder(b) + x := new(GobTestIgnoreEncoder) + err = dec.Decode(x) + if err != nil { + t.Fatal("decode error:", err) + } + if x.X != 17 { + t.Errorf("expected 17 got %c", x.X) + } +} diff --git a/libgo/go/gob/timing_test.go b/libgo/go/gob/timing_test.go new file mode 100644 index 00000000000..645f4fe51c9 --- /dev/null +++ b/libgo/go/gob/timing_test.go @@ -0,0 +1,90 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gob + +import ( + "bytes" + "fmt" + "io" + "os" + "runtime" + "testing" +) + +type Bench struct { + A int + B float64 + C string + D []byte +} + +func benchmarkEndToEnd(r io.Reader, w io.Writer, b *testing.B) { + b.StopTimer() + enc := NewEncoder(w) + dec := NewDecoder(r) + bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")} + b.StartTimer() + for i := 0; i < b.N; i++ { + if enc.Encode(bench) != nil { + panic("encode error") + } + if dec.Decode(bench) != nil { + panic("decode error") + } + } +} + +func BenchmarkEndToEndPipe(b *testing.B) { + r, w, err := os.Pipe() + if err != nil { + panic("can't get pipe:" + err.String()) + } + benchmarkEndToEnd(r, w, b) +} + +func BenchmarkEndToEndByteBuffer(b *testing.B) { + var buf bytes.Buffer + benchmarkEndToEnd(&buf, &buf, b) +} + +func TestCountEncodeMallocs(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")} + mallocs := 0 - runtime.MemStats.Mallocs + const count = 1000 + for i := 0; i < count; i++ { + err := enc.Encode(bench) + if err != nil { + t.Fatal("encode:", err) + } + } + mallocs += runtime.MemStats.Mallocs + fmt.Printf("mallocs per encode of type Bench: %d\n", mallocs/count) +} + +func TestCountDecodeMallocs(t *testing.T) { + 
var buf bytes.Buffer + enc := NewEncoder(&buf) + bench := &Bench{7, 3.2, "now is the time", []byte("for all good men")} + const count = 1000 + for i := 0; i < count; i++ { + err := enc.Encode(bench) + if err != nil { + t.Fatal("encode:", err) + } + } + dec := NewDecoder(&buf) + mallocs := 0 - runtime.MemStats.Mallocs + for i := 0; i < count; i++ { + *bench = Bench{} + err := dec.Decode(&bench) + if err != nil { + t.Fatal("decode:", err) + } + } + mallocs += runtime.MemStats.Mallocs + fmt.Printf("mallocs per decode of type Bench: %d\n", mallocs/count) +} diff --git a/libgo/go/gob/type.go b/libgo/go/gob/type.go index 6e3f148b4e7..fc620f5c7c1 100644 --- a/libgo/go/gob/type.go +++ b/libgo/go/gob/type.go @@ -9,15 +9,21 @@ import ( "os" "reflect" "sync" + "unicode" + "utf8" ) // userTypeInfo stores the information associated with a type the user has handed // to the package. It's computed once and stored in a map keyed by reflection // type. type userTypeInfo struct { - user reflect.Type // the type the user handed us - base reflect.Type // the base type after all indirections - indir int // number of indirections to reach the base type + user reflect.Type // the type the user handed us + base reflect.Type // the base type after all indirections + indir int // number of indirections to reach the base type + isGobEncoder bool // does the type implement GobEncoder? + isGobDecoder bool // does the type implement GobDecoder? 
+ encIndir int8 // number of indirections to reach the receiver type; may be negative + decIndir int8 // number of indirections to reach the receiver type; may be negative } var ( @@ -68,10 +74,73 @@ func validUserType(rt reflect.Type) (ut *userTypeInfo, err os.Error) { } ut.indir++ } + ut.isGobEncoder, ut.encIndir = implementsInterface(ut.user, gobEncoderCheck) + ut.isGobDecoder, ut.decIndir = implementsInterface(ut.user, gobDecoderCheck) userTypeCache[rt] = ut return } +const ( + gobEncodeMethodName = "GobEncode" + gobDecodeMethodName = "GobDecode" +) + +// implements returns whether the type implements the interface, as encoded +// in the check function. +func implements(typ reflect.Type, check func(typ reflect.Type) bool) bool { + if typ.NumMethod() == 0 { // avoid allocations etc. unless there's some chance + return false + } + return check(typ) +} + +// gobEncoderCheck makes the type assertion a boolean function. +func gobEncoderCheck(typ reflect.Type) bool { + _, ok := reflect.MakeZero(typ).Interface().(GobEncoder) + return ok +} + +// gobDecoderCheck makes the type assertion a boolean function. +func gobDecoderCheck(typ reflect.Type) bool { + _, ok := reflect.MakeZero(typ).Interface().(GobDecoder) + return ok +} + +// implementsInterface reports whether the type implements the +// interface. (The actual check is done through the provided function.) +// It also returns the number of indirections required to get to the +// implementation. +func implementsInterface(typ reflect.Type, check func(typ reflect.Type) bool) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. 
+ for { + if implements(rt, check) { + return true, indir + } + if p, ok := rt.(*reflect.PtrType); ok { + indir++ + if indir > 100 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if _, ok := typ.(*reflect.PtrType); !ok { + // Not a pointer, but does the pointer work? + if implements(reflect.PtrTo(typ), check) { + return true, -1 + } + } + return false, 0 +} + // userType returns, and saves, the information associated with user-provided type rt. // If the user type is not valid, it calls error. func userType(rt reflect.Type) *userTypeInfo { @@ -153,22 +222,24 @@ func (t *CommonType) name() string { return t.Name } var ( // Primordial types, needed during initialization. - tBool = bootstrapType("bool", false, 1) - tInt = bootstrapType("int", int(0), 2) - tUint = bootstrapType("uint", uint(0), 3) - tFloat = bootstrapType("float", float64(0), 4) - tBytes = bootstrapType("bytes", make([]byte, 0), 5) - tString = bootstrapType("string", "", 6) - tComplex = bootstrapType("complex", 0+0i, 7) - tInterface = bootstrapType("interface", interface{}(nil), 8) + // Always passed as pointers so the interface{} type + // goes through without losing its interfaceness. 
+ tBool = bootstrapType("bool", (*bool)(nil), 1) + tInt = bootstrapType("int", (*int)(nil), 2) + tUint = bootstrapType("uint", (*uint)(nil), 3) + tFloat = bootstrapType("float", (*float64)(nil), 4) + tBytes = bootstrapType("bytes", (*[]byte)(nil), 5) + tString = bootstrapType("string", (*string)(nil), 6) + tComplex = bootstrapType("complex", (*complex128)(nil), 7) + tInterface = bootstrapType("interface", (*interface{})(nil), 8) // Reserve some Ids for compatible expansion - tReserved7 = bootstrapType("_reserved1", struct{ r7 int }{}, 9) - tReserved6 = bootstrapType("_reserved1", struct{ r6 int }{}, 10) - tReserved5 = bootstrapType("_reserved1", struct{ r5 int }{}, 11) - tReserved4 = bootstrapType("_reserved1", struct{ r4 int }{}, 12) - tReserved3 = bootstrapType("_reserved1", struct{ r3 int }{}, 13) - tReserved2 = bootstrapType("_reserved1", struct{ r2 int }{}, 14) - tReserved1 = bootstrapType("_reserved1", struct{ r1 int }{}, 15) + tReserved7 = bootstrapType("_reserved1", (*struct{ r7 int })(nil), 9) + tReserved6 = bootstrapType("_reserved1", (*struct{ r6 int })(nil), 10) + tReserved5 = bootstrapType("_reserved1", (*struct{ r5 int })(nil), 11) + tReserved4 = bootstrapType("_reserved1", (*struct{ r4 int })(nil), 12) + tReserved3 = bootstrapType("_reserved1", (*struct{ r3 int })(nil), 13) + tReserved2 = bootstrapType("_reserved1", (*struct{ r2 int })(nil), 14) + tReserved1 = bootstrapType("_reserved1", (*struct{ r1 int })(nil), 15) ) // Predefined because it's needed by the Decoder @@ -229,6 +300,23 @@ func (a *arrayType) safeString(seen map[typeId]bool) string { func (a *arrayType) string() string { return a.safeString(make(map[typeId]bool)) } +// GobEncoder type (something that implements the GobEncoder interface) +type gobEncoderType struct { + CommonType +} + +func newGobEncoderType(name string) *gobEncoderType { + g := &gobEncoderType{CommonType{Name: name}} + setTypeId(g) + return g +} + +func (g *gobEncoderType) safeString(seen map[typeId]bool) string { + 
return g.Name +} + +func (g *gobEncoderType) string() string { return g.Name } + // Map type type mapType struct { CommonType @@ -324,11 +412,16 @@ func newStructType(name string) *structType { return s } -func (s *structType) init(field []*fieldType) { - s.Field = field -} - -func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { +// newTypeObject allocates a gobType for the reflection type rt. +// Unless ut represents a GobEncoder, rt should be the base type +// of ut. +// This is only called from the encoding side. The decoding side +// works through typeIds and userTypeInfos alone. +func newTypeObject(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) { + // Does this type implement GobEncoder? + if ut.isGobEncoder { + return newGobEncoderType(name), nil + } var err os.Error var type0, type1 gobType defer func() { @@ -364,7 +457,7 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { case *reflect.ArrayType: at := newArrayType(name) types[rt] = at - type0, err = getType("", t.Elem()) + type0, err = getBaseType("", t.Elem()) if err != nil { return nil, err } @@ -382,11 +475,11 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { case *reflect.MapType: mt := newMapType(name) types[rt] = mt - type0, err = getType("", t.Key()) + type0, err = getBaseType("", t.Key()) if err != nil { return nil, err } - type1, err = getType("", t.Elem()) + type1, err = getBaseType("", t.Elem()) if err != nil { return nil, err } @@ -400,7 +493,7 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { } st := newSliceType(name) types[rt] = st - type0, err = getType(t.Elem().Name(), t.Elem()) + type0, err = getBaseType(t.Elem().Name(), t.Elem()) if err != nil { return nil, err } @@ -411,22 +504,23 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { st := newStructType(name) types[rt] = st idToType[st.id()] = st - field := make([]*fieldType, t.NumField()) for i := 0; i < 
t.NumField(); i++ { f := t.Field(i) + if !isExported(f.Name) { + continue + } typ := userType(f.Type).base tname := typ.Name() if tname == "" { t := userType(f.Type).base tname = t.String() } - gt, err := getType(tname, f.Type) + gt, err := getBaseType(tname, f.Type) if err != nil { return nil, err } - field[i] = &fieldType{f.Name, gt.id()} + st.Field = append(st.Field, &fieldType{f.Name, gt.id()}) } - st.init(field) return st, nil default: @@ -435,15 +529,30 @@ func newTypeObject(name string, rt reflect.Type) (gobType, os.Error) { return nil, nil } +// isExported reports whether this is an exported - upper case - name. +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// getBaseType returns the Gob type describing the given reflect.Type's base type. +// typeLock must be held. +func getBaseType(name string, rt reflect.Type) (gobType, os.Error) { + ut := userType(rt) + return getType(name, ut, ut.base) +} + // getType returns the Gob type describing the given reflect.Type. +// Should be called only when handling GobEncoders/Decoders, +// which may be pointers. All other types are handled through the +// base type, never a pointer. // typeLock must be held. -func getType(name string, rt reflect.Type) (gobType, os.Error) { - rt = userType(rt).base +func getType(name string, ut *userTypeInfo, rt reflect.Type) (gobType, os.Error) { typ, present := types[rt] if present { return typ, nil } - typ, err := newTypeObject(name, rt) + typ, err := newTypeObject(name, ut, rt) if err == nil { types[rt] = typ } @@ -457,9 +566,10 @@ func checkId(want, got typeId) { } } -// used for building the basic types; called only from init() +// used for building the basic types; called only from init(). the incoming +// interface always refers to a pointer. 
func bootstrapType(name string, e interface{}, expect typeId) typeId { - rt := reflect.Typeof(e) + rt := reflect.Typeof(e).(*reflect.PtrType).Elem() _, present := types[rt] if present { panic("bootstrap type already present: " + name + ", " + rt.String()) @@ -484,10 +594,11 @@ func bootstrapType(name string, e interface{}, expect typeId) typeId { // To maintain binary compatibility, if you extend this type, always put // the new fields last. type wireType struct { - ArrayT *arrayType - SliceT *sliceType - StructT *structType - MapT *mapType + ArrayT *arrayType + SliceT *sliceType + StructT *structType + MapT *mapType + GobEncoderT *gobEncoderType } func (w *wireType) string() string { @@ -504,6 +615,8 @@ func (w *wireType) string() string { return w.StructT.Name case w.MapT != nil: return w.MapT.Name + case w.GobEncoderT != nil: + return w.GobEncoderT.Name } return unknown } @@ -516,49 +629,88 @@ type typeInfo struct { var typeInfoMap = make(map[reflect.Type]*typeInfo) // protected by typeLock -// The reflection type must have all its indirections processed out. // typeLock must be held. -func getTypeInfo(rt reflect.Type) (*typeInfo, os.Error) { - if rt.Kind() == reflect.Ptr { - panic("pointer type in getTypeInfo: " + rt.String()) +func getTypeInfo(ut *userTypeInfo) (*typeInfo, os.Error) { + rt := ut.base + if ut.isGobEncoder { + // We want the user type, not the base type. 
+ rt = ut.user } info, ok := typeInfoMap[rt] - if !ok { - info = new(typeInfo) - name := rt.Name() - gt, err := getType(name, rt) + if ok { + return info, nil + } + info = new(typeInfo) + gt, err := getBaseType(rt.Name(), rt) + if err != nil { + return nil, err + } + info.id = gt.id() + + if ut.isGobEncoder { + userType, err := getType(rt.Name(), ut, rt) if err != nil { return nil, err } - info.id = gt.id() - t := info.id.gobType() - switch typ := rt.(type) { - case *reflect.ArrayType: - info.wire = &wireType{ArrayT: t.(*arrayType)} - case *reflect.MapType: - info.wire = &wireType{MapT: t.(*mapType)} - case *reflect.SliceType: - // []byte == []uint8 is a special case handled separately - if typ.Elem().Kind() != reflect.Uint8 { - info.wire = &wireType{SliceT: t.(*sliceType)} - } - case *reflect.StructType: - info.wire = &wireType{StructT: t.(*structType)} + info.wire = &wireType{GobEncoderT: userType.id().gobType().(*gobEncoderType)} + typeInfoMap[ut.user] = info + return info, nil + } + + t := info.id.gobType() + switch typ := rt.(type) { + case *reflect.ArrayType: + info.wire = &wireType{ArrayT: t.(*arrayType)} + case *reflect.MapType: + info.wire = &wireType{MapT: t.(*mapType)} + case *reflect.SliceType: + // []byte == []uint8 is a special case handled separately + if typ.Elem().Kind() != reflect.Uint8 { + info.wire = &wireType{SliceT: t.(*sliceType)} } - typeInfoMap[rt] = info + case *reflect.StructType: + info.wire = &wireType{StructT: t.(*structType)} } + typeInfoMap[rt] = info return info, nil } // Called only when a panic is acceptable and unexpected. func mustGetTypeInfo(rt reflect.Type) *typeInfo { - t, err := getTypeInfo(rt) + t, err := getTypeInfo(userType(rt)) if err != nil { panic("getTypeInfo: " + err.String()) } return t } +// GobEncoder is the interface describing data that provides its own +// representation for encoding values for transmission to a GobDecoder. 
+// A type that implements GobEncoder and GobDecoder has complete +// control over the representation of its data and may therefore +// contain things such as private fields, channels, and functions, +// which are not usually transmissible in gob streams. +// +// Note: Since gobs can be stored permanently, it is good design +// to guarantee the encoding used by a GobEncoder is stable as the +// software evolves. For instance, it might make sense for GobEncode +// to include a version number in the encoding. +type GobEncoder interface { + // GobEncode returns a byte slice representing the encoding of the + // receiver for transmission to a GobDecoder, usually of the same + // concrete type. + GobEncode() ([]byte, os.Error) +} + +// GobDecoder is the interface describing data that provides its own +// routine for decoding transmitted values sent by a GobEncoder. +type GobDecoder interface { + // GobDecode overwrites the receiver, which must be a pointer, + // with the value represented by the byte slice, which was written + // by GobEncode, usually for the same concrete type. + GobDecode([]byte) os.Error +} + var ( + nameToConcreteType = make(map[string]reflect.Type) + concreteTypeToName = make(map[reflect.Type]string) diff --git a/libgo/go/gob/type_test.go b/libgo/go/gob/type_test.go index 5aecde103a5..ffd1345e5c0 100644 --- a/libgo/go/gob/type_test.go +++ b/libgo/go/gob/type_test.go @@ -26,7 +26,7 @@ var basicTypes = []typeT{ func getTypeUnlocked(name string, rt reflect.Type) gobType { typeLock.Lock() defer typeLock.Unlock() - t, err := getType(name, rt) + t, err := getBaseType(name, rt) if err != nil { panic("getTypeUnlocked: " + err.String()) } @@ -126,27 +126,27 @@ func TestMapType(t *testing.T) { } type Bar struct { - x string + X string } // This structure has pointers and refers to itself, making it a good test case. 
type Foo struct { - a int - b int32 // will become int - c string - d []byte - e *float64 // will become float64 - f ****float64 // will become float64 - g *Bar - h *Bar // should not interpolate the definition of Bar again - i *Foo // will not explode + A int + B int32 // will become int + C string + D []byte + E *float64 // will become float64 + F ****float64 // will become float64 + G *Bar + H *Bar // should not interpolate the definition of Bar again + I *Foo // will not explode } func TestStructType(t *testing.T) { sstruct := getTypeUnlocked("Foo", reflect.Typeof(Foo{})) str := sstruct.string() // If we can print it correctly, we built it correctly. - expected := "Foo = struct { a int; b int; c string; d bytes; e float; f float; g Bar = struct { x string; }; h Bar; i Foo; }" + expected := "Foo = struct { A int; B int; C string; D bytes; E float; F float; G Bar = struct { X string; }; H Bar; I Foo; }" if str != expected { t.Errorf("struct printed as %q; expected %q", str, expected) } diff --git a/libgo/go/hash/fnv/fnv.go b/libgo/go/hash/fnv/fnv.go new file mode 100644 index 00000000000..66ab5a635bf --- /dev/null +++ b/libgo/go/hash/fnv/fnv.go @@ -0,0 +1,133 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The fnv package implements FNV-1 and FNV-1a, +// non-cryptographic hash functions created by +// Glenn Fowler, Landon Curt Noll, and Phong Vo. +// See http://isthe.com/chongo/tech/comp/fnv/. +package fnv + +import ( + "encoding/binary" + "hash" + "os" + "unsafe" +) + +type ( + sum32 uint32 + sum32a uint32 + sum64 uint64 + sum64a uint64 +) + +const ( + offset32 = 2166136261 + offset64 = 14695981039346656037 + prime32 = 16777619 + prime64 = 1099511628211 +) + +// New32 returns a new 32-bit FNV-1 hash.Hash. +func New32() hash.Hash32 { + var s sum32 = offset32 + return &s +} + +// New32a returns a new 32-bit FNV-1a hash.Hash. 
+func New32a() hash.Hash32 { + var s sum32a = offset32 + return &s +} + +// New64 returns a new 64-bit FNV-1 hash.Hash. +func New64() hash.Hash64 { + var s sum64 = offset64 + return &s +} + +// New64a returns a new 64-bit FNV-1a hash.Hash. +func New64a() hash.Hash64 { + var s sum64a = offset64 + return &s +} + +func (s *sum32) Reset() { *s = offset32 } +func (s *sum32a) Reset() { *s = offset32 } +func (s *sum64) Reset() { *s = offset64 } +func (s *sum64a) Reset() { *s = offset64 } + +func (s *sum32) Sum32() uint32 { return uint32(*s) } +func (s *sum32a) Sum32() uint32 { return uint32(*s) } +func (s *sum64) Sum64() uint64 { return uint64(*s) } +func (s *sum64a) Sum64() uint64 { return uint64(*s) } + +func (s *sum32) Write(data []byte) (int, os.Error) { + hash := *s + for _, c := range data { + hash *= prime32 + hash ^= sum32(c) + } + *s = hash + return len(data), nil +} + +func (s *sum32a) Write(data []byte) (int, os.Error) { + hash := *s + for _, c := range data { + hash ^= sum32a(c) + hash *= prime32 + } + *s = hash + return len(data), nil +} + +func (s *sum64) Write(data []byte) (int, os.Error) { + hash := *s + for _, c := range data { + hash *= prime64 + hash ^= sum64(c) + } + *s = hash + return len(data), nil +} + +func (s *sum64a) Write(data []byte) (int, os.Error) { + hash := *s + for _, c := range data { + hash ^= sum64a(c) + hash *= prime64 + } + *s = hash + return len(data), nil +} + +func (s *sum32) Size() int { return unsafe.Sizeof(*s) } +func (s *sum32a) Size() int { return unsafe.Sizeof(*s) } +func (s *sum64) Size() int { return unsafe.Sizeof(*s) } +func (s *sum64a) Size() int { return unsafe.Sizeof(*s) } + +func (s *sum32) Sum() []byte { + a := make([]byte, unsafe.Sizeof(*s)) + binary.BigEndian.PutUint32(a, uint32(*s)) + return a +} + +func (s *sum32a) Sum() []byte { + a := make([]byte, unsafe.Sizeof(*s)) + binary.BigEndian.PutUint32(a, uint32(*s)) + return a +} + +func (s *sum64) Sum() []byte { + a := make([]byte, unsafe.Sizeof(*s)) + 
binary.BigEndian.PutUint64(a, uint64(*s)) + return a +} + +func (s *sum64a) Sum() []byte { + a := make([]byte, unsafe.Sizeof(*s)) + binary.BigEndian.PutUint64(a, uint64(*s)) + return a +} diff --git a/libgo/go/hash/fnv/fnv_test.go b/libgo/go/hash/fnv/fnv_test.go new file mode 100644 index 00000000000..3ea3fe6f124 --- /dev/null +++ b/libgo/go/hash/fnv/fnv_test.go @@ -0,0 +1,167 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fnv + +import ( + "bytes" + "encoding/binary" + "hash" + "testing" +) + +const testDataSize = 40 + +type golden struct { + sum []byte + text string +} + +var golden32 = []golden{ + {[]byte{0x81, 0x1c, 0x9d, 0xc5}, ""}, + {[]byte{0x05, 0x0c, 0x5d, 0x7e}, "a"}, + {[]byte{0x70, 0x77, 0x2d, 0x38}, "ab"}, + {[]byte{0x43, 0x9c, 0x2f, 0x4b}, "abc"}, +} + +var golden32a = []golden{ + {[]byte{0x81, 0x1c, 0x9d, 0xc5}, ""}, + {[]byte{0xe4, 0x0c, 0x29, 0x2c}, "a"}, + {[]byte{0x4d, 0x25, 0x05, 0xca}, "ab"}, + {[]byte{0x1a, 0x47, 0xe9, 0x0b}, "abc"}, +} + +var golden64 = []golden{ + {[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, ""}, + {[]byte{0xaf, 0x63, 0xbd, 0x4c, 0x86, 0x01, 0xb7, 0xbe}, "a"}, + {[]byte{0x08, 0x32, 0x67, 0x07, 0xb4, 0xeb, 0x37, 0xb8}, "ab"}, + {[]byte{0xd8, 0xdc, 0xca, 0x18, 0x6b, 0xaf, 0xad, 0xcb}, "abc"}, +} + +var golden64a = []golden{ + {[]byte{0xcb, 0xf2, 0x9c, 0xe4, 0x84, 0x22, 0x23, 0x25}, ""}, + {[]byte{0xaf, 0x63, 0xdc, 0x4c, 0x86, 0x01, 0xec, 0x8c}, "a"}, + {[]byte{0x08, 0x9c, 0x44, 0x07, 0xb5, 0x45, 0x98, 0x6a}, "ab"}, + {[]byte{0xe7, 0x1f, 0xa2, 0x19, 0x05, 0x41, 0x57, 0x4b}, "abc"}, +} + +func TestGolden32(t *testing.T) { + testGolden(t, New32(), golden32) +} + +func TestGolden32a(t *testing.T) { + testGolden(t, New32a(), golden32a) +} + +func TestGolden64(t *testing.T) { + testGolden(t, New64(), golden64) +} + +func TestGolden64a(t *testing.T) { + testGolden(t, New64a(), golden64a) +} + 
+func testGolden(t *testing.T, hash hash.Hash, gold []golden) { + for _, g := range gold { + hash.Reset() + done, error := hash.Write([]byte(g.text)) + if error != nil { + t.Fatalf("write error: %s", error) + } + if done != len(g.text) { + t.Fatalf("wrote only %d out of %d bytes", done, len(g.text)) + } + if actual := hash.Sum(); !bytes.Equal(g.sum, actual) { + t.Errorf("hash(%q) = 0x%x want 0x%x", g.text, actual, g.sum) + } + } +} + +func TestIntegrity32(t *testing.T) { + testIntegrity(t, New32()) +} + +func TestIntegrity32a(t *testing.T) { + testIntegrity(t, New32a()) +} + +func TestIntegrity64(t *testing.T) { + testIntegrity(t, New64()) +} + +func TestIntegrity64a(t *testing.T) { + testIntegrity(t, New64a()) +} + +func testIntegrity(t *testing.T, h hash.Hash) { + data := []byte{'1', '2', 3, 4, 5} + h.Write(data) + sum := h.Sum() + + if size := h.Size(); size != len(sum) { + t.Fatalf("Size()=%d but len(Sum())=%d", size, len(sum)) + } + + if a := h.Sum(); !bytes.Equal(sum, a) { + t.Fatalf("first Sum()=0x%x, second Sum()=0x%x", sum, a) + } + + h.Reset() + h.Write(data) + if a := h.Sum(); !bytes.Equal(sum, a) { + t.Fatalf("Sum()=0x%x, but after Reset() Sum()=0x%x", sum, a) + } + + h.Reset() + h.Write(data[:2]) + h.Write(data[2:]) + if a := h.Sum(); !bytes.Equal(sum, a) { + t.Fatalf("Sum()=0x%x, but with partial writes, Sum()=0x%x", sum, a) + } + + switch h.Size() { + case 4: + sum32 := h.(hash.Hash32).Sum32() + if sum32 != binary.BigEndian.Uint32(sum) { + t.Fatalf("Sum()=0x%x, but Sum32()=0x%x", sum, sum32) + } + case 8: + sum64 := h.(hash.Hash64).Sum64() + if sum64 != binary.BigEndian.Uint64(sum) { + t.Fatalf("Sum()=0x%x, but Sum64()=0x%x", sum, sum64) + } + } +} + +func Benchmark32(b *testing.B) { + benchmark(b, New32()) +} + +func Benchmark32a(b *testing.B) { + benchmark(b, New32a()) +} + +func Benchmark64(b *testing.B) { + benchmark(b, New64()) +} + +func Benchmark64a(b *testing.B) { + benchmark(b, New64a()) +} + +func benchmark(b *testing.B, h hash.Hash) { + 
b.ResetTimer() + b.SetBytes(testDataSize) + data := make([]byte, testDataSize) + for i, _ := range data { + data[i] = byte(i + 'a') + } + + b.StartTimer() + for todo := b.N; todo != 0; todo-- { + h.Reset() + h.Write(data) + h.Sum() + } +} diff --git a/libgo/go/http/cgi/child.go b/libgo/go/http/cgi/child.go new file mode 100644 index 00000000000..c7d48b9eb3f --- /dev/null +++ b/libgo/go/http/cgi/child.go @@ -0,0 +1,192 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements CGI from the perspective of a child +// process. + +package cgi + +import ( + "bufio" + "fmt" + "http" + "io" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Request returns the HTTP request as represented in the current +// environment. This assumes the current program is being run +// by a web server in a CGI environment. +func Request() (*http.Request, os.Error) { + return requestFromEnvironment(envMap(os.Environ())) +} + +func envMap(env []string) map[string]string { + m := make(map[string]string) + for _, kv := range env { + if idx := strings.Index(kv, "="); idx != -1 { + m[kv[:idx]] = kv[idx+1:] + } + } + return m +} + +// These environment variables are manually copied into Request +var skipHeader = map[string]bool{ + "HTTP_HOST": true, + "HTTP_REFERER": true, + "HTTP_USER_AGENT": true, +} + +func requestFromEnvironment(env map[string]string) (*http.Request, os.Error) { + r := new(http.Request) + r.Method = env["REQUEST_METHOD"] + if r.Method == "" { + return nil, os.NewError("cgi: no REQUEST_METHOD in environment") + } + r.Close = true + r.Trailer = http.Header{} + r.Header = http.Header{} + + r.Host = env["HTTP_HOST"] + r.Referer = env["HTTP_REFERER"] + r.UserAgent = env["HTTP_USER_AGENT"] + + // CGI doesn't allow chunked requests, so these should all be accurate: + r.Proto = "HTTP/1.0" + r.ProtoMajor = 1 + r.ProtoMinor = 0 + r.TransferEncoding = 
nil + + if lenstr := env["CONTENT_LENGTH"]; lenstr != "" { + clen, err := strconv.Atoi64(lenstr) + if err != nil { + return nil, os.NewError("cgi: bad CONTENT_LENGTH in environment: " + lenstr) + } + r.ContentLength = clen + r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, clen)) + } + + // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers + for k, v := range env { + if !strings.HasPrefix(k, "HTTP_") || skipHeader[k] { + continue + } + r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) + } + + // TODO: cookies. parsing them isn't exported, though. + + if r.Host != "" { + // Hostname is provided, so we can reasonably construct a URL, + // even if we have to assume 'http' for the scheme. + r.RawURL = "http://" + r.Host + env["REQUEST_URI"] + url, err := http.ParseURL(r.RawURL) + if err != nil { + return nil, os.NewError("cgi: failed to parse host and REQUEST_URI into a URL: " + r.RawURL) + } + r.URL = url + } + // Fallback logic if we don't have a Host header or the URL + // failed to parse + if r.URL == nil { + r.RawURL = env["REQUEST_URI"] + url, err := http.ParseURL(r.RawURL) + if err != nil { + return nil, os.NewError("cgi: failed to parse REQUEST_URI into a URL: " + r.RawURL) + } + r.URL = url + } + return r, nil +} + +// Serve executes the provided Handler on the currently active CGI +// request, if any. If there's no current CGI environment +// an error is returned. The provided handler may be nil to use +// http.DefaultServeMux. 
+func Serve(handler http.Handler) os.Error { + req, err := Request() + if err != nil { + return err + } + if handler == nil { + handler = http.DefaultServeMux + } + rw := &response{ + req: req, + header: make(http.Header), + bufw: bufio.NewWriter(os.Stdout), + } + handler.ServeHTTP(rw, req) + if err = rw.bufw.Flush(); err != nil { + return err + } + return nil +} + +type response struct { + req *http.Request + header http.Header + bufw *bufio.Writer + headerSent bool +} + +func (r *response) Flush() { + r.bufw.Flush() +} + +func (r *response) RemoteAddr() string { + return os.Getenv("REMOTE_ADDR") +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(p []byte) (n int, err os.Error) { + if !r.headerSent { + r.WriteHeader(http.StatusOK) + } + return r.bufw.Write(p) +} + +func (r *response) WriteHeader(code int) { + if r.headerSent { + // Note: explicitly using Stderr, as Stdout is our HTTP output. + fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL) + return + } + r.headerSent = true + fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code)) + + // Set a default Content-Type + if _, hasType := r.header["Content-Type"]; !hasType { + r.header.Add("Content-Type", "text/html; charset=utf-8") + } + + // TODO: add a method on http.Header to write itself to an io.Writer? + // This is duplicated code. + for k, vv := range r.header { + for _, v := range vv { + v = strings.Replace(v, "\n", "", -1) + v = strings.Replace(v, "\r", "", -1) + v = strings.TrimSpace(v) + fmt.Fprintf(r.bufw, "%s: %s\r\n", k, v) + } + } + r.bufw.Write([]byte("\r\n")) + r.bufw.Flush() +} + +func (r *response) UsingTLS() bool { + // There's apparently a de-facto standard for this. 
+ // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636 + if s := os.Getenv("HTTPS"); s == "on" || s == "ON" || s == "1" { + return true + } + return false +} diff --git a/libgo/go/http/cgi/child_test.go b/libgo/go/http/cgi/child_test.go new file mode 100644 index 00000000000..db0e09cf66a --- /dev/null +++ b/libgo/go/http/cgi/child_test.go @@ -0,0 +1,83 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for CGI (the child process perspective) + +package cgi + +import ( + "testing" +) + +func TestRequest(t *testing.T) { + env := map[string]string{ + "REQUEST_METHOD": "GET", + "HTTP_HOST": "example.com", + "HTTP_REFERER": "elsewhere", + "HTTP_USER_AGENT": "goclient", + "HTTP_FOO_BAR": "baz", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + } + req, err := requestFromEnvironment(env) + if err != nil { + t.Fatalf("requestFromEnvironment: %v", err) + } + if g, e := req.UserAgent, "goclient"; e != g { + t.Errorf("expected UserAgent %q; got %q", e, g) + } + if g, e := req.Method, "GET"; e != g { + t.Errorf("expected Method %q; got %q", e, g) + } + if g, e := req.Header.Get("User-Agent"), ""; e != g { + // Tests that we don't put recognized headers in the map + t.Errorf("expected User-Agent %q; got %q", e, g) + } + if g, e := req.ContentLength, int64(123); e != g { + t.Errorf("expected ContentLength %d; got %d", e, g) + } + if g, e := req.Referer, "elsewhere"; e != g { + t.Errorf("expected Referer %q; got %q", e, g) + } + if req.Header == nil { + t.Fatalf("unexpected nil Header") + } + if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g { + t.Errorf("expected Foo-Bar %q; got %q", e, g) + } + if g, e := req.RawURL, "http://example.com/path?a=b"; e != g { + t.Errorf("expected RawURL %q; got %q", e, g) + } + if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } + if g, 
e := req.FormValue("a"), "b"; e != g { + t.Errorf("expected FormValue(a) %q; got %q", e, g) + } + if req.Trailer == nil { + t.Errorf("unexpected nil Trailer") + } +} + +func TestRequestWithoutHost(t *testing.T) { + env := map[string]string{ + "HTTP_HOST": "", + "REQUEST_METHOD": "GET", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + } + req, err := requestFromEnvironment(env) + if err != nil { + t.Fatalf("requestFromEnvironment: %v", err) + } + if g, e := req.RawURL, "/path?a=b"; e != g { + t.Errorf("expected RawURL %q; got %q", e, g) + } + if req.URL == nil { + t.Fatalf("unexpected nil URL") + } + if g, e := req.URL.String(), "/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } +} diff --git a/libgo/go/http/cgi/host.go b/libgo/go/http/cgi/host.go new file mode 100644 index 00000000000..22723873749 --- /dev/null +++ b/libgo/go/http/cgi/host.go @@ -0,0 +1,221 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the host side of CGI (being the webserver +// parent process). + +// Package cgi implements CGI (Common Gateway Interface) as specified +// in RFC 3875. +// +// Note that using CGI means starting a new process to handle each +// request, which is typically less efficient than using a +// long-running server. This package is intended primarily for +// compatibility with existing systems. +package cgi + +import ( + "bytes" + "encoding/line" + "exec" + "fmt" + "http" + "io" + "log" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var trailingPort = regexp.MustCompile(`:([0-9]+)$`) + +// Handler runs an executable in a subprocess with a CGI environment. 
+type Handler struct { + Path string // path to the CGI executable + Root string // root URI prefix of handler or empty for "/" + + Env []string // extra environment variables to set, if any + Logger *log.Logger // optional log for errors or nil to use log.Print + Args []string // optional arguments to pass to child process +} + +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + root := h.Root + if root == "" { + root = "/" + } + + if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" { + rw.WriteHeader(http.StatusBadRequest) + rw.Write([]byte("Chunked request bodies are not supported by CGI.")) + return + } + + pathInfo := req.URL.Path + if root != "/" && strings.HasPrefix(pathInfo, root) { + pathInfo = pathInfo[len(root):] + } + + port := "80" + if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 { + port = matches[1] + } + + env := []string{ + "SERVER_SOFTWARE=go", + "SERVER_NAME=" + req.Host, + "HTTP_HOST=" + req.Host, + "GATEWAY_INTERFACE=CGI/1.1", + "REQUEST_METHOD=" + req.Method, + "QUERY_STRING=" + req.URL.RawQuery, + "REQUEST_URI=" + req.URL.RawPath, + "PATH_INFO=" + pathInfo, + "SCRIPT_NAME=" + root, + "SCRIPT_FILENAME=" + h.Path, + "REMOTE_ADDR=" + req.RemoteAddr, + "REMOTE_HOST=" + req.RemoteAddr, + "SERVER_PORT=" + port, + } + + if req.TLS != nil { + env = append(env, "HTTPS=on") + } + + if len(req.Cookie) > 0 { + b := new(bytes.Buffer) + for idx, c := range req.Cookie { + if idx > 0 { + b.Write([]byte("; ")) + } + fmt.Fprintf(b, "%s=%s", c.Name, c.Value) + } + env = append(env, "HTTP_COOKIE="+b.String()) + } + + for k, v := range req.Header { + k = strings.Map(upperCaseAndUnderscore, k) + env = append(env, "HTTP_"+k+"="+strings.Join(v, ", ")) + } + + if req.ContentLength > 0 { + env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength)) + } + if ctype := req.Header.Get("Content-Type"); ctype != "" { + env = append(env, "CONTENT_TYPE="+ctype) + } + + if h.Env != nil { + env 
= append(env, h.Env...) + } + + cwd, pathBase := filepath.Split(h.Path) + if cwd == "" { + cwd = "." + } + + args := []string{h.Path} + args = append(args, h.Args...) + + cmd, err := exec.Run( + pathBase, + args, + env, + cwd, + exec.Pipe, // stdin + exec.Pipe, // stdout + exec.PassThrough, // stderr (for now) + ) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("CGI error: %v", err) + return + } + defer func() { + cmd.Stdin.Close() + cmd.Stdout.Close() + cmd.Wait(0) // no zombies + }() + + if req.ContentLength != 0 { + go io.Copy(cmd.Stdin, req.Body) + } + + linebody := line.NewReader(cmd.Stdout, 1024) + headers := rw.Header() + statusCode := http.StatusOK + for { + line, isPrefix, err := linebody.ReadLine() + if isPrefix { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("CGI: long header line from subprocess.") + return + } + if err == os.EOF { + break + } + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("CGI: error reading headers: %v", err) + return + } + if len(line) == 0 { + break + } + parts := strings.Split(string(line), ":", 2) + if len(parts) < 2 { + h.printf("CGI: bogus header line: %s", string(line)) + continue + } + header, val := parts[0], parts[1] + header = strings.TrimSpace(header) + val = strings.TrimSpace(val) + switch { + case header == "Status": + if len(val) < 3 { + h.printf("CGI: bogus status (short): %q", val) + return + } + code, err := strconv.Atoi(val[0:3]) + if err != nil { + h.printf("CGI: bogus status: %q", val) + h.printf("CGI: line was %q", line) + return + } + statusCode = code + default: + headers.Add(header, val) + } + } + rw.WriteHeader(statusCode) + + _, err = io.Copy(rw, linebody) + if err != nil { + h.printf("CGI: copy error: %v", err) + } +} + +func (h *Handler) printf(format string, v ...interface{}) { + if h.Logger != nil { + h.Logger.Printf(format, v...) + } else { + log.Printf(format, v...) 
+ } +} + +func upperCaseAndUnderscore(rune int) int { + switch { + case rune >= 'a' && rune <= 'z': + return rune - ('a' - 'A') + case rune == '-': + return '_' + case rune == '=': + // Maybe not part of the CGI 'spec' but would mess up + // the environment in any case, as Go represents the + // environment as a slice of "key=value" strings. + return '_' + } + // TODO: other transformations in spec or practice? + return rune +} diff --git a/libgo/go/http/cgi/host_test.go b/libgo/go/http/cgi/host_test.go new file mode 100644 index 00000000000..e8084b1134e --- /dev/null +++ b/libgo/go/http/cgi/host_test.go @@ -0,0 +1,273 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for package cgi + +package cgi + +import ( + "bufio" + "exec" + "fmt" + "http" + "http/httptest" + "os" + "strings" + "testing" +) + +var cgiScriptWorks = canRun("./testdata/test.cgi") + +func canRun(s string) bool { + c, err := exec.Run(s, []string{s}, nil, ".", exec.DevNull, exec.DevNull, exec.DevNull) + if err != nil { + return false + } + w, err := c.Wait(0) + if err != nil { + return false + } + return w.Exited() && w.ExitStatus() == 0 +} + +func newRequest(httpreq string) *http.Request { + buf := bufio.NewReader(strings.NewReader(httpreq)) + req, err := http.ReadRequest(buf) + if err != nil { + panic("cgi: bogus http request in test: " + httpreq) + } + req.RemoteAddr = "1.2.3.4" + return req +} + +func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder { + rw := httptest.NewRecorder() + req := newRequest(httpreq) + h.ServeHTTP(rw, req) + + // Make a map to hold the test map that the CGI returns. 
+ m := make(map[string]string) + linesRead := 0 +readlines: + for { + line, err := rw.Body.ReadString('\n') + switch { + case err == os.EOF: + break readlines + case err != nil: + t.Fatalf("unexpected error reading from CGI: %v", err) + } + linesRead++ + trimmedLine := strings.TrimRight(line, "\r\n") + split := strings.Split(trimmedLine, "=", 2) + if len(split) != 2 { + t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v", + len(split), linesRead, line, m) + } + m[split[0]] = split[1] + } + + for key, expected := range expectedMap { + if got := m[key]; got != expected { + t.Errorf("for key %q got %q; expected %q", key, got, expected) + } + } + return rw +} + +func skipTest(t *testing.T) bool { + if !cgiScriptWorks { + // No Perl on Windows, needed by test.cgi + // TODO: make the child process be Go, not Perl. + t.Logf("Skipping test: test.cgi failed.") + return true + } + return false +} + + +func TestCGIBasicGet(t *testing.T) { + if skipTest(t) { + return + } + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got 
a X-Test-Header of %q; expected %q", got, expected) + } +} + +func TestCGIBasicGetAbsPath(t *testing.T) { + if skipTest(t) { + return + } + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("getwd error: %v", err) + } + h := &Handler{ + Path: pwd + "/testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfo(t *testing.T) { + if skipTest(t) { + return + } + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "param-a": "b", + "env-PATH_INFO": "/extrapath", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/test.cgi/extrapath?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfoDirRoot(t *testing.T) { + if skipTest(t) { + return + } + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/myscript/", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/myscript/", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestDupHeaders(t *testing.T) { + if skipTest(t) { + return + } + h := &Handler{ + Path: "testdata/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-HTTP_COOKIE": "nom=NOM; yum=YUM", + "env-HTTP_X_FOO": "val1, val2", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+ + "Cookie: nom=NOM\n"+ + "Cookie: yum=YUM\n"+ + "X-Foo: val1\n"+ + "X-Foo: val2\n"+ + "Host: 
example.com\n\n", + expectedMap) +} + +func TestPathInfoNoRoot(t *testing.T) { + if skipTest(t) { + return + } + h := &Handler{ + Path: "testdata/test.cgi", + Root: "", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "/bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/", + } + runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestCGIBasicPost(t *testing.T) { + if skipTest(t) { + return + } + postReq := `POST /test.cgi?a=b HTTP/1.0 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Content-Length: 15 + +postfoo=postbar` + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-postfoo": "postbar", + "env-REQUEST_METHOD": "POST", + "env-CONTENT_LENGTH": "15", + "env-REQUEST_URI": "/test.cgi?a=b", + } + runCgiTest(t, h, postReq, expectedMap) +} + +func chunk(s string) string { + return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) +} + +// The CGI spec doesn't allow chunked requests. +func TestCGIPostChunked(t *testing.T) { + if skipTest(t) { + return + } + postReq := `POST /test.cgi?a=b HTTP/1.1 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Transfer-Encoding: chunked + +` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("") + + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{} + resp := runCgiTest(t, h, postReq, expectedMap) + if got, expected := resp.Code, http.StatusBadRequest; got != expected { + t.Fatalf("Expected %v response code from chunked request body; got %d", + expected, got) + } +} diff --git a/libgo/go/http/cgi/matryoshka_test.go b/libgo/go/http/cgi/matryoshka_test.go new file mode 100644 index 00000000000..3e4a6addfa5 --- /dev/null +++ b/libgo/go/http/cgi/matryoshka_test.go @@ -0,0 +1,74 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a Go CGI program running under a Go CGI host process. +// Further, the two programs are the same binary, just checking +// their environment to figure out what mode to run in. + +package cgi + +import ( + "fmt" + "http" + "os" + "testing" +) + +// This test is a CGI host (testing host.go) that runs its own binary +// as a child process testing the other half of CGI (child.go). +func TestHostingOurselves(t *testing.T) { + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "test": "Hello CGI-in-CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.go?foo=bar&a=b", + "env-SCRIPT_FILENAME": os.Args[0], + "env-SCRIPT_NAME": "/test.go", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +// Note: not actually a test. +func TestBeChildCGIProcess(t *testing.T) { + if os.Getenv("REQUEST_METHOD") == "" { + // Not in a CGI environment; skipping test. 
+ return + } + Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("X-Test-Header", "X-Test-Value") + fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n") + req.ParseForm() + for k, vv := range req.Form { + for _, v := range vv { + fmt.Fprintf(rw, "param-%s=%s\n", k, v) + } + } + for _, kv := range os.Environ() { + fmt.Fprintf(rw, "env-%s\n", kv) + } + })) + os.Exit(0) +} diff --git a/libgo/go/http/client.go b/libgo/go/http/client.go index b1fe5ec6780..daba3a89b0c 100644 --- a/libgo/go/http/client.go +++ b/libgo/go/http/client.go @@ -11,6 +11,7 @@ import ( "encoding/base64" "fmt" "io" + "io/ioutil" "os" "strconv" "strings" @@ -20,26 +21,28 @@ import ( // that uses DefaultTransport. // Client is not yet very configurable. type Client struct { - Transport ClientTransport // if nil, DefaultTransport is used + Transport RoundTripper // if nil, DefaultTransport is used } // DefaultClient is the default Client and is used by Get, Head, and Post. var DefaultClient = &Client{} -// ClientTransport is an interface representing the ability to execute a +// RoundTripper is an interface representing the ability to execute a // single HTTP transaction, obtaining the Response for a given Request. -type ClientTransport interface { - // Do executes a single HTTP transaction, returning the Response for the - // request req. Do should not attempt to interpret the response. - // In particular, Do must return err == nil if it obtained a response, - // regardless of the response's HTTP status code. A non-nil err should - // be reserved for failure to obtain a response. Similarly, Do should - // not attempt to handle higher-level protocol details such as redirects, +type RoundTripper interface { + // RoundTrip executes a single HTTP transaction, returning + // the Response for the request req. RoundTrip should not + // attempt to interpret the response. 
In particular, + // RoundTrip must return err == nil if it obtained a response, + // regardless of the response's HTTP status code. A non-nil + // err should be reserved for failure to obtain a response. + // Similarly, RoundTrip should not attempt to handle + // higher-level protocol details such as redirects, // authentication, or cookies. // - // Transports may modify the request. The request Headers field is - // guaranteed to be initalized. - Do(req *Request) (resp *Response, err os.Error) + // RoundTrip may modify the request. The request Headers field is + // guaranteed to be initialized. + RoundTrip(req *Request) (resp *Response, err os.Error) } // Given a string of the form "host", "host:port", or "[ipv6::address]:port", @@ -54,40 +57,6 @@ type readClose struct { io.Closer } -// matchNoProxy returns true if requests to addr should not use a proxy, -// according to the NO_PROXY or no_proxy environment variable. -func matchNoProxy(addr string) bool { - if len(addr) == 0 { - return false - } - no_proxy := os.Getenv("NO_PROXY") - if len(no_proxy) == 0 { - no_proxy = os.Getenv("no_proxy") - } - if no_proxy == "*" { - return true - } - - addr = strings.ToLower(strings.TrimSpace(addr)) - if hasPort(addr) { - addr = addr[:strings.LastIndex(addr, ":")] - } - - for _, p := range strings.Split(no_proxy, ",", -1) { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - if hasPort(p) { - p = p[:strings.LastIndex(p, ":")] - } - if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) { - return true - } - } - return false -} - // Do sends an HTTP request and returns an HTTP response, following // policy (e.g. redirects, cookies, auth) as configured on the client. // @@ -100,11 +69,7 @@ func (c *Client) Do(req *Request) (resp *Response, err os.Error) { // send issues an HTTP request. Caller should close resp.Body when done reading from it. 
-// -// TODO: support persistent connections (multiple requests on a single connection). -// send() method is nonpublic because, when we refactor the code for persistent -// connections, it may no longer make sense to have a method with this signature. -func send(req *Request, t ClientTransport) (resp *Response, err os.Error) { +func send(req *Request, t RoundTripper) (resp *Response, err os.Error) { if t == nil { t = DefaultTransport if t == nil { @@ -115,9 +80,9 @@ func send(req *Request, t ClientTransport) (resp *Response, err os.Error) { // Most the callers of send (Get, Post, et al) don't need // Headers, leaving it uninitialized. We guarantee to the - // ClientTransport that this has been initialized, though. + // Transport that this has been initialized, though. if req.Header == nil { - req.Header = Header(make(map[string][]string)) + req.Header = make(Header) } info := req.URL.RawUserinfo @@ -130,7 +95,7 @@ func send(req *Request, t ClientTransport) (resp *Response, err os.Error) { } req.Header.Set("Authorization", "Basic "+string(encoded)) } - return t.Do(req) + return t.RoundTrip(req) } // True if the specified HTTP status code is one for which the Get utility should @@ -237,7 +202,7 @@ func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, req.ProtoMajor = 1 req.ProtoMinor = 1 req.Close = true - req.Body = nopCloser{body} + req.Body = ioutil.NopCloser(body) req.Header = Header{ "Content-Type": {bodyType}, } @@ -272,7 +237,7 @@ func (c *Client) PostForm(url string, data map[string]string) (r *Response, err req.ProtoMinor = 1 req.Close = true body := urlencode(data) - req.Body = nopCloser{body} + req.Body = ioutil.NopCloser(body) req.Header = Header{ "Content-Type": {"application/x-www-form-urlencoded"}, "Content-Length": {strconv.Itoa(body.Len())}, @@ -312,9 +277,3 @@ func (c *Client) Head(url string) (r *Response, err os.Error) { } return send(&req, c.Transport) } - -type nopCloser struct { - io.Reader -} - -func (nopCloser) 
Close() os.Error { return nil } diff --git a/libgo/go/http/client_test.go b/libgo/go/http/client_test.go index c89ecbce2d0..3a6f834253b 100644 --- a/libgo/go/http/client_test.go +++ b/libgo/go/http/client_test.go @@ -4,20 +4,28 @@ // Tests for client.go -package http +package http_test import ( + "fmt" + . "http" + "http/httptest" "io/ioutil" "os" "strings" "testing" ) +var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Last-Modified", "sometime") + fmt.Fprintf(w, "User-agent: go\nDisallow: /something/") +}) + func TestClient(t *testing.T) { - // TODO: add a proper test suite. Current test merely verifies that - // we can retrieve the Google robots.txt file. + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() - r, _, err := Get("http://www.google.com/robots.txt") + r, _, err := Get(ts.URL) var b []byte if err == nil { b, err = ioutil.ReadAll(r.Body) @@ -31,7 +39,10 @@ func TestClient(t *testing.T) { } func TestClientHead(t *testing.T) { - r, err := Head("http://www.google.com/robots.txt") + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() + + r, err := Head(ts.URL) if err != nil { t.Fatal(err) } @@ -44,7 +55,7 @@ type recordingTransport struct { req *Request } -func (t *recordingTransport) Do(req *Request) (resp *Response, err os.Error) { +func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err os.Error) { t.req = req return nil, os.NewError("dummy impl") } diff --git a/libgo/go/http/cookie.go b/libgo/go/http/cookie.go new file mode 100644 index 00000000000..2bb66e58e5c --- /dev/null +++ b/libgo/go/http/cookie.go @@ -0,0 +1,272 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// This implementation is done according to IETF draft-ietf-httpstate-cookie-23, found at +// +// http://tools.ietf.org/html/draft-ietf-httpstate-cookie-23 + +// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an +// HTTP response or the Cookie header of an HTTP request. +type Cookie struct { + Name string + Value string + Path string + Domain string + Expires time.Time + RawExpires string + + // MaxAge=0 means no 'Max-Age' attribute specified. + // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + // MaxAge>0 means Max-Age attribute present and given in seconds + MaxAge int + Secure bool + HttpOnly bool + Raw string + Unparsed []string // Raw text of unparsed attribute-value pairs +} + +// readSetCookies parses all "Set-Cookie" values from +// the header h, removes the successfully parsed values from the +// "Set-Cookie" key in h and returns the parsed Cookies. 
+func readSetCookies(h Header) []*Cookie { + cookies := []*Cookie{} + var unparsedLines []string + for _, line := range h["Set-Cookie"] { + parts := strings.Split(strings.TrimSpace(line), ";", -1) + if len(parts) == 1 && parts[0] == "" { + continue + } + parts[0] = strings.TrimSpace(parts[0]) + j := strings.Index(parts[0], "=") + if j < 0 { + unparsedLines = append(unparsedLines, line) + continue + } + name, value := parts[0][:j], parts[0][j+1:] + if !isCookieNameValid(name) { + unparsedLines = append(unparsedLines, line) + continue + } + value, success := parseCookieValue(value) + if !success { + unparsedLines = append(unparsedLines, line) + continue + } + c := &Cookie{ + Name: name, + Value: value, + Raw: line, + } + for i := 1; i < len(parts); i++ { + parts[i] = strings.TrimSpace(parts[i]) + if len(parts[i]) == 0 { + continue + } + + attr, val := parts[i], "" + if j := strings.Index(attr, "="); j >= 0 { + attr, val = attr[:j], attr[j+1:] + } + val, success = parseCookieValue(val) + if !success { + c.Unparsed = append(c.Unparsed, parts[i]) + continue + } + switch strings.ToLower(attr) { + case "secure": + c.Secure = true + continue + case "httponly": + c.HttpOnly = true + continue + case "domain": + c.Domain = val + // TODO: Add domain parsing + continue + case "max-age": + secs, err := strconv.Atoi(val) + if err != nil || secs < 0 || secs != 0 && val[0] == '0' { + break + } + if secs <= 0 { + c.MaxAge = -1 + } else { + c.MaxAge = secs + } + continue + case "expires": + c.RawExpires = val + exptime, err := time.Parse(time.RFC1123, val) + if err != nil { + c.Expires = time.Time{} + break + } + c.Expires = *exptime + continue + case "path": + c.Path = val + // TODO: Add path parsing + continue + } + c.Unparsed = append(c.Unparsed, parts[i]) + } + cookies = append(cookies, c) + } + h["Set-Cookie"] = unparsedLines, unparsedLines != nil + return cookies +} + +// writeSetCookies writes the wire representation of the set-cookies +// to w. 
Each cookie is written on a separate "Set-Cookie: " line. +// This choice is made because HTTP parsers tend to have a limit on +// line-length, so it seems safer to place cookies on separate lines. +func writeSetCookies(w io.Writer, kk []*Cookie) os.Error { + if kk == nil { + return nil + } + lines := make([]string, 0, len(kk)) + var b bytes.Buffer + for _, c := range kk { + b.Reset() + fmt.Fprintf(&b, "%s=%s", c.Name, c.Value) + if len(c.Path) > 0 { + fmt.Fprintf(&b, "; Path=%s", URLEscape(c.Path)) + } + if len(c.Domain) > 0 { + fmt.Fprintf(&b, "; Domain=%s", URLEscape(c.Domain)) + } + if len(c.Expires.Zone) > 0 { + fmt.Fprintf(&b, "; Expires=%s", c.Expires.Format(time.RFC1123)) + } + if c.MaxAge > 0 { + fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge) + } else if c.MaxAge < 0 { + fmt.Fprintf(&b, "; Max-Age=0") + } + if c.HttpOnly { + fmt.Fprintf(&b, "; HttpOnly") + } + if c.Secure { + fmt.Fprintf(&b, "; Secure") + } + lines = append(lines, "Set-Cookie: "+b.String()+"\r\n") + } + sort.SortStrings(lines) + for _, l := range lines { + if _, err := io.WriteString(w, l); err != nil { + return err + } + } + return nil +} + +// readCookies parses all "Cookie" values from +// the header h, removes the successfully parsed values from the +// "Cookie" key in h and returns the parsed Cookies. 
+func readCookies(h Header) []*Cookie { + cookies := []*Cookie{} + lines, ok := h["Cookie"] + if !ok { + return cookies + } + unparsedLines := []string{} + for _, line := range lines { + parts := strings.Split(strings.TrimSpace(line), ";", -1) + if len(parts) == 1 && parts[0] == "" { + continue + } + // Per-line attributes + parsedPairs := 0 + for i := 0; i < len(parts); i++ { + parts[i] = strings.TrimSpace(parts[i]) + if len(parts[i]) == 0 { + continue + } + attr, val := parts[i], "" + if j := strings.Index(attr, "="); j >= 0 { + attr, val = attr[:j], attr[j+1:] + } + if !isCookieNameValid(attr) { + continue + } + val, success := parseCookieValue(val) + if !success { + continue + } + cookies = append(cookies, &Cookie{Name: attr, Value: val}) + parsedPairs++ + } + if parsedPairs == 0 { + unparsedLines = append(unparsedLines, line) + } + } + h["Cookie"] = unparsedLines, len(unparsedLines) > 0 + return cookies +} + +// writeCookies writes the wire representation of the cookies +// to w. Each cookie is written on a separate "Cookie: " line. +// This choice is made because HTTP parsers tend to have a limit on +// line-length, so it seems safer to place cookies on separate lines. 
+func writeCookies(w io.Writer, kk []*Cookie) os.Error { + lines := make([]string, 0, len(kk)) + for _, c := range kk { + lines = append(lines, fmt.Sprintf("Cookie: %s=%s\r\n", c.Name, c.Value)) + } + sort.SortStrings(lines) + for _, l := range lines { + if _, err := io.WriteString(w, l); err != nil { + return err + } + } + return nil +} + +func unquoteCookieValue(v string) string { + if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' { + return v[1 : len(v)-1] + } + return v +} + +func isCookieByte(c byte) bool { + switch true { + case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a, + 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e: + return true + } + return false +} + +func parseCookieValue(raw string) (string, bool) { + raw = unquoteCookieValue(raw) + for i := 0; i < len(raw); i++ { + if !isCookieByte(raw[i]) { + return "", false + } + } + return raw, true +} + +func isCookieNameValid(raw string) bool { + for _, c := range raw { + if !isToken(byte(c)) { + return false + } + } + return true +} diff --git a/libgo/go/http/cookie_test.go b/libgo/go/http/cookie_test.go new file mode 100644 index 00000000000..db09970406b --- /dev/null +++ b/libgo/go/http/cookie_test.go @@ -0,0 +1,110 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "bytes" + "fmt" + "json" + "reflect" + "testing" +) + + +var writeSetCookiesTests = []struct { + Cookies []*Cookie + Raw string +}{ + { + []*Cookie{ + &Cookie{Name: "cookie-1", Value: "v$1"}, + &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600}, + }, + "Set-Cookie: cookie-1=v$1\r\n" + + "Set-Cookie: cookie-2=two; Max-Age=3600\r\n", + }, +} + +func TestWriteSetCookies(t *testing.T) { + for i, tt := range writeSetCookiesTests { + var w bytes.Buffer + writeSetCookies(&w, tt.Cookies) + seen := string(w.Bytes()) + if seen != tt.Raw { + t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, seen) + continue + } + } +} + +var writeCookiesTests = []struct { + Cookies []*Cookie + Raw string +}{ + { + []*Cookie{&Cookie{Name: "cookie-1", Value: "v$1"}}, + "Cookie: cookie-1=v$1\r\n", + }, +} + +func TestWriteCookies(t *testing.T) { + for i, tt := range writeCookiesTests { + var w bytes.Buffer + writeCookies(&w, tt.Cookies) + seen := string(w.Bytes()) + if seen != tt.Raw { + t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, seen) + continue + } + } +} + +var readSetCookiesTests = []struct { + Header Header + Cookies []*Cookie +}{ + { + Header{"Set-Cookie": {"Cookie-1=v$1"}}, + []*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + }, +} + +func toJSON(v interface{}) string { + b, err := json.Marshal(v) + if err != nil { + return fmt.Sprintf("%#v", v) + } + return string(b) +} + +func TestReadSetCookies(t *testing.T) { + for i, tt := range readSetCookiesTests { + c := readSetCookies(tt.Header) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } +} + +var readCookiesTests = []struct { + Header Header + Cookies []*Cookie +}{ + { + Header{"Cookie": {"Cookie-1=v$1"}}, + []*Cookie{&Cookie{Name: "Cookie-1", Value: "v$1"}}, + }, +} + +func TestReadCookies(t *testing.T) { + for i, tt := range readCookiesTests { + c := 
readCookies(tt.Header) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } +} diff --git a/libgo/go/http/dump.go b/libgo/go/http/dump.go index 73ac9797399..306c45bc2c9 100644 --- a/libgo/go/http/dump.go +++ b/libgo/go/http/dump.go @@ -7,10 +7,10 @@ package http import ( "bytes" "io" + "io/ioutil" "os" ) - // One of the copies, say from b to r2, could be avoided by using a more // elaborate trick where the other copy is made during Request/Response.Write. // This would complicate things too much, given that these functions are for @@ -23,7 +23,7 @@ func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err os.Error) { if err = b.Close(); err != nil { return nil, nil, err } - return nopCloser{&buf}, nopCloser{bytes.NewBuffer(buf.Bytes())}, nil + return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil } // DumpRequest returns the wire representation of req, diff --git a/libgo/go/http/export_test.go b/libgo/go/http/export_test.go new file mode 100644 index 00000000000..a76b70760df --- /dev/null +++ b/libgo/go/http/export_test.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Bridge package to expose http internals to tests in the http_test +// package. 
+ +package http + +func (t *Transport) IdleConnKeysForTesting() (keys []string) { + keys = make([]string, 0) + t.lk.Lock() + defer t.lk.Unlock() + if t.idleConn == nil { + return + } + for key, _ := range t.idleConn { + keys = append(keys, key) + } + return +} + +func (t *Transport) IdleConnCountForTesting(cacheKey string) int { + t.lk.Lock() + defer t.lk.Unlock() + if t.idleConn == nil { + return 0 + } + conns, ok := t.idleConn[cacheKey] + if !ok { + return 0 + } + return len(conns) +} diff --git a/libgo/go/http/fs.go b/libgo/go/http/fs.go index 8e16992e0f0..4ad680ccc31 100644 --- a/libgo/go/http/fs.go +++ b/libgo/go/http/fs.go @@ -11,7 +11,7 @@ import ( "io" "mime" "os" - "path" + "path/filepath" "strconv" "strings" "time" @@ -108,11 +108,11 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) { w.WriteHeader(StatusNotModified) return } - w.SetHeader("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat)) + w.Header().Set("Last-Modified", time.SecondsToUTC(d.Mtime_ns/1e9).Format(TimeFormat)) // use contents of index.html for directory, if present if d.IsDirectory() { - index := name + indexPage + index := name + filepath.FromSlash(indexPage) ff, err := os.Open(index, os.O_RDONLY, 0) if err == nil { defer ff.Close() @@ -135,18 +135,18 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) { code := StatusOK // use extension to find content type. 
- ext := path.Ext(name) + ext := filepath.Ext(name) if ctype := mime.TypeByExtension(ext); ctype != "" { - w.SetHeader("Content-Type", ctype) + w.Header().Set("Content-Type", ctype) } else { // read first chunk to decide between utf-8 text and binary var buf [1024]byte n, _ := io.ReadFull(f, buf[:]) b := buf[:n] if isText(b) { - w.SetHeader("Content-Type", "text-plain; charset=utf-8") + w.Header().Set("Content-Type", "text-plain; charset=utf-8") } else { - w.SetHeader("Content-Type", "application/octet-stream") // generic binary + w.Header().Set("Content-Type", "application/octet-stream") // generic binary } f.Seek(0, 0) // rewind to output whole file } @@ -166,11 +166,11 @@ func serveFile(w ResponseWriter, r *Request, name string, redirect bool) { } size = ra.length code = StatusPartialContent - w.SetHeader("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size)) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, d.Size)) } - w.SetHeader("Accept-Ranges", "bytes") - w.SetHeader("Content-Length", strconv.Itoa64(size)) + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("Content-Length", strconv.Itoa64(size)) w.WriteHeader(code) @@ -202,7 +202,7 @@ func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { return } path = path[len(f.prefix):] - serveFile(w, r, f.root+"/"+path, true) + serveFile(w, r, filepath.Join(f.root, filepath.FromSlash(path)), true) } // httpRange specifies the byte range to be sent to the client. diff --git a/libgo/go/http/fs_test.go b/libgo/go/http/fs_test.go index a8b67e3f08c..a89c76d0bfb 100644 --- a/libgo/go/http/fs_test.go +++ b/libgo/go/http/fs_test.go @@ -2,89 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http +package http_test import ( "fmt" + . 
"http" + "http/httptest" "io/ioutil" - "net" "os" - "sync" "testing" ) -var ParseRangeTests = []struct { - s string - length int64 - r []httpRange -}{ - {"", 0, nil}, - {"foo", 0, nil}, - {"bytes=", 0, nil}, - {"bytes=5-4", 10, nil}, - {"bytes=0-2,5-4", 10, nil}, - {"bytes=0-9", 10, []httpRange{{0, 10}}}, - {"bytes=0-", 10, []httpRange{{0, 10}}}, - {"bytes=5-", 10, []httpRange{{5, 5}}}, - {"bytes=0-20", 10, []httpRange{{0, 10}}}, - {"bytes=15-,0-5", 10, nil}, - {"bytes=-5", 10, []httpRange{{5, 5}}}, - {"bytes=-15", 10, []httpRange{{0, 10}}}, - {"bytes=0-499", 10000, []httpRange{{0, 500}}}, - {"bytes=500-999", 10000, []httpRange{{500, 500}}}, - {"bytes=-500", 10000, []httpRange{{9500, 500}}}, - {"bytes=9500-", 10000, []httpRange{{9500, 500}}}, - {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}}, - {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}}, - {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}}, -} - -func TestParseRange(t *testing.T) { - for _, test := range ParseRangeTests { - r := test.r - ranges, err := parseRange(test.s, test.length) - if err != nil && r != nil { - t.Errorf("parseRange(%q) returned error %q", test.s, err) - } - if len(ranges) != len(r) { - t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r)) - continue - } - for i := range r { - if ranges[i].start != r[i].start { - t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start) - } - if ranges[i].length != r[i].length { - t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length) - } - } - } -} - const ( testFile = "testdata/file" testFileLength = 11 ) -var ( - serverOnce sync.Once - serverAddr string -) - -func startServer(t *testing.T) { - serverOnce.Do(func() { - HandleFunc("/ServeFile", func(w ResponseWriter, r *Request) { - ServeFile(w, r, "testdata/file") - }) - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("listen:", err) - } - 
serverAddr = l.Addr().String() - go Serve(l, nil) - }) -} - var ServeFileRangeTests = []struct { start, end int r string @@ -99,7 +32,11 @@ var ServeFileRangeTests = []struct { } func TestServeFile(t *testing.T) { - startServer(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + var err os.Error file, err := ioutil.ReadFile(testFile) @@ -110,7 +47,7 @@ func TestServeFile(t *testing.T) { // set up the Request (re-used for all tests) var req Request req.Header = make(Header) - if req.URL, err = ParseURL("http://" + serverAddr + "/ServeFile"); err != nil { + if req.URL, err = ParseURL(ts.URL); err != nil { t.Fatal("ParseURL:", err) } req.Method = "GET" @@ -149,7 +86,7 @@ func TestServeFile(t *testing.T) { } func getBody(t *testing.T, req Request) (*Response, []byte) { - r, err := send(&req, DefaultTransport) + r, err := DefaultClient.Do(&req) if err != nil { t.Fatal(req.URL.String(), "send:", err) } diff --git a/libgo/go/http/httptest/recorder.go b/libgo/go/http/httptest/recorder.go new file mode 100644 index 00000000000..0dd19a617cc --- /dev/null +++ b/libgo/go/http/httptest/recorder.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The httptest package provides utilities for HTTP testing. +package httptest + +import ( + "bytes" + "http" + "os" +) + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. 
+func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} diff --git a/libgo/go/http/httptest/server.go b/libgo/go/http/httptest/server.go new file mode 100644 index 00000000000..6e825a890d1 --- /dev/null +++ b/libgo/go/http/httptest/server.go @@ -0,0 +1,70 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Implementation of Server + +package httptest + +import ( + "fmt" + "http" + "os" + "net" +) + +// A Server is an HTTP server listening on a system-chosen port on the +// local loopback interface, for use in end-to-end HTTP tests. +type Server struct { + URL string // base URL of form http://ipaddr:port with no trailing slash + Listener net.Listener +} + +// historyListener keeps track of all connections that it's ever +// accepted. +type historyListener struct { + net.Listener + history []net.Conn +} + +func (hs *historyListener) Accept() (c net.Conn, err os.Error) { + c, err = hs.Listener.Accept() + if err == nil { + hs.history = append(hs.history, c) + } + return +} + +// NewServer starts and returns a new Server. 
+// The caller should call Close when finished, to shut it down. +func NewServer(handler http.Handler) *Server { + ts := new(Server) + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { + panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + } + } + ts.Listener = &historyListener{l, make([]net.Conn, 0)} + ts.URL = "http://" + l.Addr().String() + server := &http.Server{Handler: handler} + go server.Serve(ts.Listener) + return ts +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Listener.Close() +} + +// CloseClientConnections closes any currently open HTTP connections +// to the test Server. +func (s *Server) CloseClientConnections() { + hl, ok := s.Listener.(*historyListener) + if !ok { + return + } + for _, conn := range hl.history { + conn.Close() + } +} diff --git a/libgo/go/http/persist.go b/libgo/go/http/persist.go index 000a4200e59..b93c5fe4855 100644 --- a/libgo/go/http/persist.go +++ b/libgo/go/http/persist.go @@ -25,15 +25,15 @@ var ( // i.e. requests can be read out of sync (but in the same order) while the // respective responses are sent. type ServerConn struct { + lk sync.Mutex // read-write protects the following fields c net.Conn r *bufio.Reader - clsd bool // indicates a graceful close re, we os.Error // read/write errors lastbody io.ReadCloser nread, nwritten int - pipe textproto.Pipeline pipereq map[*Request]uint - lk sync.Mutex // protected read/write to re,we + + pipe textproto.Pipeline } // NewServerConn returns a new ServerConn reading and writing c. 
If r is not @@ -90,15 +90,21 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) { defer sc.lk.Unlock() return nil, sc.re } + if sc.r == nil { // connection closed by user in the meantime + defer sc.lk.Unlock() + return nil, os.EBADF + } + r := sc.r + lastbody := sc.lastbody + sc.lastbody = nil sc.lk.Unlock() // Make sure body is fully consumed, even if user does not call body.Close - if sc.lastbody != nil { + if lastbody != nil { // body.Close is assumed to be idempotent and multiple calls to // it should return the error that its first invokation // returned. - err = sc.lastbody.Close() - sc.lastbody = nil + err = lastbody.Close() if err != nil { sc.lk.Lock() defer sc.lk.Unlock() @@ -107,10 +113,10 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) { } } - req, err = ReadRequest(sc.r) + req, err = ReadRequest(r) + sc.lk.Lock() + defer sc.lk.Unlock() if err != nil { - sc.lk.Lock() - defer sc.lk.Unlock() if err == io.ErrUnexpectedEOF { // A close from the opposing client is treated as a // graceful close, even if there was some unparse-able @@ -119,18 +125,16 @@ func (sc *ServerConn) Read() (req *Request, err os.Error) { return nil, sc.re } else { sc.re = err - return + return req, err } } sc.lastbody = req.Body sc.nread++ if req.Close { - sc.lk.Lock() - defer sc.lk.Unlock() sc.re = ErrPersistEOF return req, sc.re } - return + return req, err } // Pending returns the number of unanswered requests @@ -165,24 +169,27 @@ func (sc *ServerConn) Write(req *Request, resp *Response) os.Error { defer sc.lk.Unlock() return sc.we } - sc.lk.Unlock() + if sc.c == nil { // connection closed by user in the meantime + defer sc.lk.Unlock() + return os.EBADF + } + c := sc.c if sc.nread <= sc.nwritten { + defer sc.lk.Unlock() return os.NewError("persist server pipe count") } - if resp.Close { // After signaling a keep-alive close, any pipelined unread // requests will be lost. It is up to the user to drain them // before signaling. 
- sc.lk.Lock() sc.re = ErrPersistEOF - sc.lk.Unlock() } + sc.lk.Unlock() - err := resp.Write(sc.c) + err := resp.Write(c) + sc.lk.Lock() + defer sc.lk.Unlock() if err != nil { - sc.lk.Lock() - defer sc.lk.Unlock() sc.we = err return err } @@ -196,14 +203,17 @@ func (sc *ServerConn) Write(req *Request, resp *Response) os.Error { // responsible for closing the underlying connection. One must call Close to // regain control of that connection and deal with it as desired. type ClientConn struct { + lk sync.Mutex // read-write protects the following fields c net.Conn r *bufio.Reader re, we os.Error // read/write errors lastbody io.ReadCloser nread, nwritten int - pipe textproto.Pipeline pipereq map[*Request]uint - lk sync.Mutex // protects read/write to re,we,pipereq,etc. + + pipe textproto.Pipeline + writeReq func(*Request, io.Writer) os.Error + readRes func(buf *bufio.Reader, method string) (*Response, os.Error) } // NewClientConn returns a new ClientConn reading and writing c. If r is not @@ -212,7 +222,21 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { if r == nil { r = bufio.NewReader(c) } - return &ClientConn{c: c, r: r, pipereq: make(map[*Request]uint)} + return &ClientConn{ + c: c, + r: r, + pipereq: make(map[*Request]uint), + writeReq: (*Request).Write, + readRes: ReadResponse, + } +} + +// NewProxyClientConn works like NewClientConn but writes Requests +// using Request's WriteProxy method. +func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn { + cc := NewClientConn(c, r) + cc.writeReq = (*Request).WriteProxy + return cc } // Close detaches the ClientConn and returns the underlying connection as well @@ -221,11 +245,11 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { // logic. The user should not call Close while Read or Write is in progress. 
func (cc *ClientConn) Close() (c net.Conn, r *bufio.Reader) { cc.lk.Lock() + defer cc.lk.Unlock() c = cc.c r = cc.r cc.c = nil cc.r = nil - cc.lk.Unlock() return } @@ -261,20 +285,22 @@ func (cc *ClientConn) Write(req *Request) (err os.Error) { defer cc.lk.Unlock() return cc.we } - cc.lk.Unlock() - + if cc.c == nil { // connection closed by user in the meantime + defer cc.lk.Unlock() + return os.EBADF + } + c := cc.c if req.Close { // We write the EOF to the write-side error, because there // still might be some pipelined reads - cc.lk.Lock() cc.we = ErrPersistEOF - cc.lk.Unlock() } + cc.lk.Unlock() - err = req.Write(cc.c) + err = cc.writeReq(req, c) + cc.lk.Lock() + defer cc.lk.Unlock() if err != nil { - cc.lk.Lock() - defer cc.lk.Unlock() cc.we = err return err } @@ -316,15 +342,21 @@ func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) { defer cc.lk.Unlock() return nil, cc.re } + if cc.r == nil { // connection closed by user in the meantime + defer cc.lk.Unlock() + return nil, os.EBADF + } + r := cc.r + lastbody := cc.lastbody + cc.lastbody = nil cc.lk.Unlock() // Make sure body is fully consumed, even if user does not call body.Close - if cc.lastbody != nil { + if lastbody != nil { // body.Close is assumed to be idempotent and multiple calls to // it should return the error that its first invokation // returned. 
- err = cc.lastbody.Close() - cc.lastbody = nil + err = lastbody.Close() if err != nil { cc.lk.Lock() defer cc.lk.Unlock() @@ -333,24 +365,22 @@ func (cc *ClientConn) Read(req *Request) (resp *Response, err os.Error) { } } - resp, err = ReadResponse(cc.r, req.Method) + resp, err = cc.readRes(r, req.Method) + cc.lk.Lock() + defer cc.lk.Unlock() if err != nil { - cc.lk.Lock() - defer cc.lk.Unlock() cc.re = err - return + return resp, err } cc.lastbody = resp.Body cc.nread++ if resp.Close { - cc.lk.Lock() - defer cc.lk.Unlock() cc.re = ErrPersistEOF // don't send any more requests return resp, cc.re } - return + return resp, err } // Do is convenience method that writes a request and reads a response. diff --git a/libgo/go/http/pprof/pprof.go b/libgo/go/http/pprof/pprof.go index f7db9aab93b..0bac26687d7 100644 --- a/libgo/go/http/pprof/pprof.go +++ b/libgo/go/http/pprof/pprof.go @@ -41,14 +41,14 @@ func init() { // command line, with arguments separated by NUL bytes. // The package initialization registers it as /debug/pprof/cmdline. func Cmdline(w http.ResponseWriter, r *http.Request) { - w.SetHeader("content-type", "text/plain; charset=utf-8") + w.Header().Set("content-type", "text/plain; charset=utf-8") fmt.Fprintf(w, strings.Join(os.Args, "\x00")) } // Heap responds with the pprof-formatted heap profile. // The package initialization registers it as /debug/pprof/heap. func Heap(w http.ResponseWriter, r *http.Request) { - w.SetHeader("content-type", "text/plain; charset=utf-8") + w.Header().Set("content-type", "text/plain; charset=utf-8") pprof.WriteHeapProfile(w) } @@ -56,7 +56,7 @@ func Heap(w http.ResponseWriter, r *http.Request) { // responding with a table mapping program counters to function names. // The package initialization registers it as /debug/pprof/symbol. 
func Symbol(w http.ResponseWriter, r *http.Request) { - w.SetHeader("content-type", "text/plain; charset=utf-8") + w.Header().Set("content-type", "text/plain; charset=utf-8") // We don't know how many symbols we have, but we // do have symbol information. Pprof only cares whether diff --git a/libgo/go/http/proxy_test.go b/libgo/go/http/proxy_test.go index 0f2ca458fed..7050ef5ed06 100644 --- a/libgo/go/http/proxy_test.go +++ b/libgo/go/http/proxy_test.go @@ -12,31 +12,33 @@ import ( // TODO(mattn): // test ProxyAuth -var MatchNoProxyTests = []struct { +var UseProxyTests = []struct { host string match bool }{ - {"localhost", true}, // match completely - {"barbaz.net", true}, // match as .barbaz.net - {"foobar.com:443", true}, // have a port but match - {"foofoobar.com", false}, // not match as a part of foobar.com - {"baz.com", false}, // not match as a part of barbaz.com - {"localhost.net", false}, // not match as suffix of address - {"local.localhost", false}, // not match as prefix as address - {"barbarbaz.net", false}, // not match because NO_PROXY have a '.' - {"www.foobar.com", false}, // not match because NO_PROXY is not .foobar.com + {"localhost", false}, // match completely + {"barbaz.net", false}, // match as .barbaz.net + {"foobar.com:443", false}, // have a port but match + {"foofoobar.com", true}, // not match as a part of foobar.com + {"baz.com", true}, // not match as a part of barbaz.com + {"localhost.net", true}, // not match as suffix of address + {"local.localhost", true}, // not match as prefix as address + {"barbarbaz.net", true}, // not match because NO_PROXY have a '.' 
+ {"www.foobar.com", true}, // not match because NO_PROXY is not .foobar.com } -func TestMatchNoProxy(t *testing.T) { +func TestUseProxy(t *testing.T) { oldenv := os.Getenv("NO_PROXY") no_proxy := "foobar.com, .barbaz.net , localhost" os.Setenv("NO_PROXY", no_proxy) defer os.Setenv("NO_PROXY", oldenv) - for _, test := range MatchNoProxyTests { - if matchNoProxy(test.host) != test.match { + tr := &Transport{} + + for _, test := range UseProxyTests { + if tr.useProxy(test.host) != test.match { if test.match { - t.Errorf("matchNoProxy(%v) = %v, want %v", test.host, !test.match, test.match) + t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match) } else { t.Errorf("not expected: '%s' shouldn't match as '%s'", test.host, no_proxy) } diff --git a/libgo/go/http/range_test.go b/libgo/go/http/range_test.go new file mode 100644 index 00000000000..5274a81fa34 --- /dev/null +++ b/libgo/go/http/range_test.go @@ -0,0 +1,57 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "testing" +) + +var ParseRangeTests = []struct { + s string + length int64 + r []httpRange +}{ + {"", 0, nil}, + {"foo", 0, nil}, + {"bytes=", 0, nil}, + {"bytes=5-4", 10, nil}, + {"bytes=0-2,5-4", 10, nil}, + {"bytes=0-9", 10, []httpRange{{0, 10}}}, + {"bytes=0-", 10, []httpRange{{0, 10}}}, + {"bytes=5-", 10, []httpRange{{5, 5}}}, + {"bytes=0-20", 10, []httpRange{{0, 10}}}, + {"bytes=15-,0-5", 10, nil}, + {"bytes=-5", 10, []httpRange{{5, 5}}}, + {"bytes=-15", 10, []httpRange{{0, 10}}}, + {"bytes=0-499", 10000, []httpRange{{0, 500}}}, + {"bytes=500-999", 10000, []httpRange{{500, 500}}}, + {"bytes=-500", 10000, []httpRange{{9500, 500}}}, + {"bytes=9500-", 10000, []httpRange{{9500, 500}}}, + {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}}, + {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}}, + {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}}, +} + +func TestParseRange(t *testing.T) { + for _, test := range ParseRangeTests { + r := test.r + ranges, err := parseRange(test.s, test.length) + if err != nil && r != nil { + t.Errorf("parseRange(%q) returned error %q", test.s, err) + } + if len(ranges) != len(r) { + t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r)) + continue + } + for i := range r { + if ranges[i].start != r[i].start { + t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start) + } + if ranges[i].length != r[i].length { + t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length) + } + } + } +} diff --git a/libgo/go/http/readrequest_test.go b/libgo/go/http/readrequest_test.go index 6ee07bc9148..19e2ff77476 100644 --- a/libgo/go/http/readrequest_test.go +++ b/libgo/go/http/readrequest_test.go @@ -93,7 +93,7 @@ var reqTests = []reqTest{ Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1, - Header: map[string][]string{}, + Header: Header{}, Close: false, ContentLength: -1, Host: "test", diff 
--git a/libgo/go/http/request.go b/libgo/go/http/request.go index a7dc328a007..d82894fab08 100644 --- a/libgo/go/http/request.go +++ b/libgo/go/http/request.go @@ -11,6 +11,7 @@ package http import ( "bufio" + "crypto/tls" "container/vector" "fmt" "io" @@ -92,6 +93,9 @@ type Request struct { // following a hyphen uppercase and the rest lowercase. Header Header + // Cookie records the HTTP cookies sent with the request. + Cookie []*Cookie + // The message body. Body io.ReadCloser @@ -134,6 +138,22 @@ type Request struct { // response has multiple trailer lines with the same key, they will be // concatenated, delimited by commas. Trailer Header + + // RemoteAddr allows HTTP servers and other software to record + // the network address that sent the request, usually for + // logging. This field is not filled in by ReadRequest and + // has no defined format. The HTTP server in this package + // sets RemoteAddr to an "IP:port" address before invoking a + // handler. + RemoteAddr string + + // TLS allows HTTP servers and other software to record + // information about the TLS connection on which the request + // was received. This field is not filled in by ReadRequest. + // The HTTP server in this package sets the field for + // TLS-enabled connections before invoking a handler; + // otherwise it leaves the field nil. + TLS *tls.ConnectionState } // ProtoAtLeast returns whether the HTTP protocol used @@ -190,6 +210,8 @@ func (req *Request) Write(w io.Writer) os.Error { // WriteProxy is like Write but writes the request in the form // expected by an HTTP proxy. It includes the scheme and host // name in the URI instead of using a separate Host: header line. +// If req.RawURL is non-empty, WriteProxy uses it unchanged +// instead of URL but still omits the Host: header. 
func (req *Request) WriteProxy(w io.Writer) os.Error { return req.write(w, true) } @@ -206,13 +228,12 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error { if req.URL.RawQuery != "" { uri += "?" + req.URL.RawQuery } - } - - if usingProxy { - if uri == "" || uri[0] != '/' { - uri = "/" + uri + if usingProxy { + if uri == "" || uri[0] != '/' { + uri = "/" + uri + } + uri = req.URL.Scheme + "://" + host + uri } - uri = req.URL.Scheme + "://" + host + uri } fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), uri) @@ -243,11 +264,15 @@ func (req *Request) write(w io.Writer, usingProxy bool) os.Error { // from Request, and introduce Request methods along the lines of // Response.{GetHeader,AddHeader} and string constants for "Host", // "User-Agent" and "Referer". - err = writeSortedKeyValue(w, req.Header, reqExcludeHeader) + err = writeSortedHeader(w, req.Header, reqExcludeHeader) if err != nil { return err } + if err = writeCookies(w, req.Cookie); err != nil { + return err + } + io.WriteString(w, "\r\n") // Write body and trailer @@ -484,6 +509,8 @@ func ReadRequest(b *bufio.Reader) (req *Request, err os.Error) { return nil, err } + req.Cookie = readCookies(req.Header) + return req, nil } diff --git a/libgo/go/http/request_test.go b/libgo/go/http/request_test.go index ae1c4e98245..19083adf624 100644 --- a/libgo/go/http/request_test.go +++ b/libgo/go/http/request_test.go @@ -2,10 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http +package http_test import ( "bytes" + "fmt" + . 
"http" + "http/httptest" + "io" + "os" "reflect" "regexp" "strings" @@ -141,17 +146,33 @@ func TestMultipartReader(t *testing.T) { } func TestRedirect(t *testing.T) { - const ( - start = "http://google.com/" - endRe = "^http://www\\.google\\.[a-z.]+/$" - ) - var end = regexp.MustCompile(endRe) - r, url, err := Get(start) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + switch r.URL.Path { + case "/": + w.Header().Set("Location", "/foo/") + w.WriteHeader(StatusSeeOther) + case "/foo/": + fmt.Fprintf(w, "foo") + default: + w.WriteHeader(StatusBadRequest) + } + })) + defer ts.Close() + + var end = regexp.MustCompile("/foo/$") + r, url, err := Get(ts.URL) if err != nil { t.Fatal(err) } r.Body.Close() if r.StatusCode != 200 || !end.MatchString(url) { - t.Fatalf("Get(%s) got status %d at %q, want 200 matching %q", start, r.StatusCode, url, endRe) + t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url) } } + +// TODO: stop copy/pasting this around. move to io/ioutil? 
+type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() os.Error { return nil } diff --git a/libgo/go/http/requestwrite_test.go b/libgo/go/http/requestwrite_test.go index 55ca745d58c..726baa26686 100644 --- a/libgo/go/http/requestwrite_test.go +++ b/libgo/go/http/requestwrite_test.go @@ -6,12 +6,15 @@ package http import ( "bytes" + "io/ioutil" "testing" ) type reqWriteTest struct { - Req Request - Raw string + Req Request + Body []byte + Raw string + RawProxy string } var reqWriteTests = []reqWriteTest{ @@ -50,6 +53,8 @@ var reqWriteTests = []reqWriteTest{ Form: map[string][]string{}, }, + nil, + "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + "Host: www.techcrunch.com\r\n" + "User-Agent: Fake\r\n" + @@ -59,6 +64,15 @@ var reqWriteTests = []reqWriteTest{ "Accept-Language: en-us,en;q=0.5\r\n" + "Keep-Alive: 300\r\n" + "Proxy-Connection: keep-alive\r\n\r\n", + + "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Keep-Alive: 300\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n", }, // HTTP/1.1 => chunked coding; body; empty trailer { @@ -71,16 +85,22 @@ var reqWriteTests = []reqWriteTest{ }, ProtoMajor: 1, ProtoMinor: 1, - Header: map[string][]string{}, - Body: nopCloser{bytes.NewBufferString("abcdef")}, + Header: Header{}, TransferEncoding: []string{"chunked"}, }, + []byte("abcdef"), + "GET /search HTTP/1.1\r\n" + "Host: www.google.com\r\n" + "User-Agent: Go http package\r\n" + "Transfer-Encoding: chunked\r\n\r\n" + "6\r\nabcdef\r\n0\r\n\r\n", + + "GET http://www.google.com/search HTTP/1.1\r\n" + + "User-Agent: Go http package\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", }, // HTTP/1.1 POST => chunked coding; body; empty trailer { @@ -93,18 +113,25 @@ var reqWriteTests = 
[]reqWriteTest{ }, ProtoMajor: 1, ProtoMinor: 1, - Header: map[string][]string{}, + Header: Header{}, Close: true, - Body: nopCloser{bytes.NewBufferString("abcdef")}, TransferEncoding: []string{"chunked"}, }, + []byte("abcdef"), + "POST /search HTTP/1.1\r\n" + "Host: www.google.com\r\n" + "User-Agent: Go http package\r\n" + "Connection: close\r\n" + "Transfer-Encoding: chunked\r\n\r\n" + "6\r\nabcdef\r\n0\r\n\r\n", + + "POST http://www.google.com/search HTTP/1.1\r\n" + + "User-Agent: Go http package\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", }, // default to HTTP/1.1 { @@ -114,16 +141,26 @@ var reqWriteTests = []reqWriteTest{ Host: "www.google.com", }, + nil, + "GET /search HTTP/1.1\r\n" + "Host: www.google.com\r\n" + "User-Agent: Go http package\r\n" + "\r\n", + + // Looks weird but RawURL overrides what WriteProxy would choose. + "GET /search HTTP/1.1\r\n" + + "User-Agent: Go http package\r\n" + + "\r\n", }, } func TestRequestWrite(t *testing.T) { for i := range reqWriteTests { tt := &reqWriteTests[i] + if tt.Body != nil { + tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(tt.Body)) + } var braw bytes.Buffer err := tt.Req.Write(&braw) if err != nil { @@ -135,5 +172,20 @@ func TestRequestWrite(t *testing.T) { t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, sraw) continue } + + if tt.Body != nil { + tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(tt.Body)) + } + var praw bytes.Buffer + err = tt.Req.WriteProxy(&praw) + if err != nil { + t.Errorf("error writing #%d: %s", i, err) + continue + } + sraw = praw.String() + if sraw != tt.RawProxy { + t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.RawProxy, sraw) + continue + } } } diff --git a/libgo/go/http/response.go b/libgo/go/http/response.go index 3f919c86a3c..1f725ecdddd 100644 --- a/libgo/go/http/response.go +++ b/libgo/go/http/response.go @@ -46,6 +46,9 @@ type Response struct { // Keys in the map are canonicalized (see 
CanonicalHeaderKey). Header Header + // SetCookie records the Set-Cookie requests sent with the response. + SetCookie []*Cookie + // Body represents the response body. Body io.ReadCloser @@ -64,10 +67,9 @@ type Response struct { // ReadResponse nor Response.Write ever closes a connection. Close bool - // Trailer maps trailer keys to values. Like for Header, if the - // response has multiple trailer lines with the same key, they will be - // concatenated, delimited by commas. - Trailer map[string][]string + // Trailer maps trailer keys to values, in the same + // format as the header. + Trailer Header } // ReadResponse reads and returns an HTTP response from r. The RequestMethod @@ -124,6 +126,8 @@ func ReadResponse(r *bufio.Reader, requestMethod string) (resp *Response, err os return nil, err } + resp.SetCookie = readSetCookies(resp.Header) + return resp, nil } @@ -188,11 +192,15 @@ func (resp *Response) Write(w io.Writer) os.Error { } // Rest of header - err = writeSortedKeyValue(w, resp.Header, respExcludeHeader) + err = writeSortedHeader(w, resp.Header, respExcludeHeader) if err != nil { return err } + if err = writeSetCookies(w, resp.SetCookie); err != nil { + return err + } + // End-of-header io.WriteString(w, "\r\n") @@ -206,16 +214,22 @@ func (resp *Response) Write(w io.Writer) os.Error { return nil } -func writeSortedKeyValue(w io.Writer, kvm map[string][]string, exclude map[string]bool) os.Error { - keys := make([]string, 0, len(kvm)) - for k := range kvm { - if !exclude[k] { +func writeSortedHeader(w io.Writer, h Header, exclude map[string]bool) os.Error { + keys := make([]string, 0, len(h)) + for k := range h { + if exclude == nil || !exclude[k] { keys = append(keys, k) } } sort.SortStrings(keys) for _, k := range keys { - for _, v := range kvm[k] { + for _, v := range h[k] { + v = strings.Replace(v, "\n", " ", -1) + v = strings.Replace(v, "\r", " ", -1) + v = strings.TrimSpace(v) + if v == "" { + continue + } if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, 
v); err != nil { return err } diff --git a/libgo/go/http/responsewrite_test.go b/libgo/go/http/responsewrite_test.go index aabb833f9c8..de0635da516 100644 --- a/libgo/go/http/responsewrite_test.go +++ b/libgo/go/http/responsewrite_test.go @@ -6,6 +6,7 @@ package http import ( "bytes" + "io/ioutil" "testing" ) @@ -22,8 +23,8 @@ var respWriteTests = []respWriteTest{ ProtoMajor: 1, ProtoMinor: 0, RequestMethod: "GET", - Header: map[string][]string{}, - Body: nopCloser{bytes.NewBufferString("abcdef")}, + Header: Header{}, + Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), ContentLength: 6, }, @@ -38,8 +39,8 @@ var respWriteTests = []respWriteTest{ ProtoMajor: 1, ProtoMinor: 0, RequestMethod: "GET", - Header: map[string][]string{}, - Body: nopCloser{bytes.NewBufferString("abcdef")}, + Header: Header{}, + Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), ContentLength: -1, }, "HTTP/1.0 200 OK\r\n" + @@ -53,8 +54,8 @@ var respWriteTests = []respWriteTest{ ProtoMajor: 1, ProtoMinor: 1, RequestMethod: "GET", - Header: map[string][]string{}, - Body: nopCloser{bytes.NewBufferString("abcdef")}, + Header: Header{}, + Body: ioutil.NopCloser(bytes.NewBufferString("abcdef")), ContentLength: 6, TransferEncoding: []string{"chunked"}, Close: true, @@ -65,6 +66,29 @@ var respWriteTests = []respWriteTest{ "Transfer-Encoding: chunked\r\n\r\n" + "6\r\nabcdef\r\n0\r\n\r\n", }, + + // Header value with a newline character (Issue 914). + // Also tests removal of leading and trailing whitespace. 
+ { + Response{ + StatusCode: 204, + ProtoMajor: 1, + ProtoMinor: 1, + RequestMethod: "GET", + Header: Header{ + "Foo": []string{" Bar\nBaz "}, + }, + Body: nil, + ContentLength: 0, + TransferEncoding: []string{"chunked"}, + Close: true, + }, + + "HTTP/1.1 204 No Content\r\n" + + "Connection: close\r\n" + + "Foo: Bar Baz\r\n" + + "\r\n", + }, } func TestResponseWrite(t *testing.T) { @@ -78,7 +102,7 @@ func TestResponseWrite(t *testing.T) { } sraw := braw.String() if sraw != tt.Raw { - t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.Raw, sraw) + t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw) continue } } diff --git a/libgo/go/http/serve_test.go b/libgo/go/http/serve_test.go index 42fe3e5e4d2..683de85b867 100644 --- a/libgo/go/http/serve_test.go +++ b/libgo/go/http/serve_test.go @@ -4,16 +4,18 @@ // End-to-end serving tests -package http +package http_test import ( "bufio" "bytes" "fmt" - "io" + . "http" + "http/httptest" "io/ioutil" "os" "net" + "reflect" "strings" "testing" "time" @@ -143,7 +145,7 @@ func TestConsumingBodyOnNextConn(t *testing.T) { type stringHandler string func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) { - w.SetHeader("Result", string(s)) + w.Header().Set("Result", string(s)) } var handlers = []struct { @@ -170,13 +172,10 @@ func TestHostHandlers(t *testing.T) { for _, h := range handlers { Handle(h.pattern, stringHandler(h.msg)) } - l, err := net.Listen("tcp", "127.0.0.1:0") // any port - if err != nil { - t.Fatal(err) - } - defer l.Close() - go Serve(l, nil) - conn, err := net.Dial("tcp", "", l.Addr().String()) + ts := httptest.NewServer(nil) + defer ts.Close() + + conn, err := net.Dial("tcp", "", ts.Listener.Addr().String()) if err != nil { t.Fatal(err) } @@ -205,46 +204,6 @@ func TestHostHandlers(t *testing.T) { } } -type responseWriterMethodCall struct { - method string - headerKey, headerValue string // if method == "SetHeader" - bytesWritten []byte // if method == "Write" - responseCode int // if 
method == "WriteHeader" -} - -type recordingResponseWriter struct { - log []*responseWriterMethodCall -} - -func (rw *recordingResponseWriter) RemoteAddr() string { - return "1.2.3.4" -} - -func (rw *recordingResponseWriter) UsingTLS() bool { - return false -} - -func (rw *recordingResponseWriter) SetHeader(k, v string) { - rw.log = append(rw.log, &responseWriterMethodCall{method: "SetHeader", headerKey: k, headerValue: v}) -} - -func (rw *recordingResponseWriter) Write(buf []byte) (int, os.Error) { - rw.log = append(rw.log, &responseWriterMethodCall{method: "Write", bytesWritten: buf}) - return len(buf), nil -} - -func (rw *recordingResponseWriter) WriteHeader(code int) { - rw.log = append(rw.log, &responseWriterMethodCall{method: "WriteHeader", responseCode: code}) -} - -func (rw *recordingResponseWriter) Flush() { - rw.log = append(rw.log, &responseWriterMethodCall{method: "Flush"}) -} - -func (rw *recordingResponseWriter) Hijack() (io.ReadWriteCloser, *bufio.ReadWriter, os.Error) { - panic("Not supported") -} - // Tests for http://code.google.com/p/go/issues/detail?id=900 func TestMuxRedirectLeadingSlashes(t *testing.T) { paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"} @@ -254,41 +213,24 @@ func TestMuxRedirectLeadingSlashes(t *testing.T) { t.Errorf("%s", err) } mux := NewServeMux() - resp := new(recordingResponseWriter) - resp.log = make([]*responseWriterMethodCall, 0) + resp := httptest.NewRecorder() mux.ServeHTTP(resp, req) - dumpLog := func() { - t.Logf("For path %q:", path) - for _, call := range resp.log { - t.Logf("Got call: %s, header=%s, value=%s, buf=%q, code=%d", call.method, - call.headerKey, call.headerValue, call.bytesWritten, call.responseCode) - } - } - - if len(resp.log) != 2 { - dumpLog() - t.Errorf("expected 2 calls to response writer; got %d", len(resp.log)) - return - } - - if resp.log[0].method != "SetHeader" || - resp.log[0].headerKey != "Location" || resp.log[0].headerValue != "/foo.txt" { - dumpLog() - t.Errorf("Expected 
SetHeader of Location to /foo.txt") + if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected { + t.Errorf("Expected Location header set to %q; got %q", expected, loc) return } - if resp.log[1].method != "WriteHeader" || resp.log[1].responseCode != StatusMovedPermanently { - dumpLog() - t.Errorf("Expected WriteHeader of StatusMovedPermanently") + if code, expected := resp.Code, StatusMovedPermanently; code != expected { + t.Errorf("Expected response code of StatusMovedPermanently; got %d", code) return } } } func TestServerTimeouts(t *testing.T) { + // TODO(bradfitz): convert this to use httptest.Server l, err := net.ListenTCP("tcp", &net.TCPAddr{Port: 0}) if err != nil { t.Fatalf("listen error: %v", err) @@ -308,7 +250,9 @@ func TestServerTimeouts(t *testing.T) { url := fmt.Sprintf("http://localhost:%d/", addr.Port) // Hit the HTTP server successfully. - r, _, err := Get(url) + tr := &Transport{DisableKeepAlives: true} // they interfere with this test + c := &Client{Transport: tr} + r, _, err := c.Get(url) if err != nil { t.Fatalf("http Get #1: %v", err) } @@ -353,16 +297,9 @@ func TestServerTimeouts(t *testing.T) { // TestIdentityResponse verifies that a handler can unset func TestIdentityResponse(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("failed to listen on a port: %v", err) - } - defer l.Close() - urlBase := "http://" + l.Addr().String() + "/" - handler := HandlerFunc(func(rw ResponseWriter, req *Request) { - rw.SetHeader("Content-Length", "3") - rw.SetHeader("Transfer-Encoding", req.FormValue("te")) + rw.Header().Set("Content-Length", "3") + rw.Header().Set("Transfer-Encoding", req.FormValue("te")) switch { case req.FormValue("overwrite") == "1": _, err := rw.Write([]byte("foo TOO LONG")) @@ -370,22 +307,22 @@ func TestIdentityResponse(t *testing.T) { t.Errorf("expected ErrContentLength; got %v", err) } case req.FormValue("underwrite") == "1": - rw.SetHeader("Content-Length", "500") + 
rw.Header().Set("Content-Length", "500") rw.Write([]byte("too short")) default: rw.Write([]byte("foo")) } }) - server := &Server{Handler: handler} - go server.Serve(l) + ts := httptest.NewServer(handler) + defer ts.Close() // Note: this relies on the assumption (which is true) that // Get sends HTTP/1.1 or greater requests. Otherwise the // server wouldn't have the choice to send back chunked // responses. for _, te := range []string{"", "identity"} { - url := urlBase + "?te=" + te + url := ts.URL + "/?te=" + te res, _, err := Get(url) if err != nil { t.Fatalf("error with Get of %s: %v", url, err) @@ -400,18 +337,18 @@ func TestIdentityResponse(t *testing.T) { t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)", url, expected, tl, res.TransferEncoding) } + res.Body.Close() } // Verify that ErrContentLength is returned - url := urlBase + "?overwrite=1" - _, _, err = Get(url) + url := ts.URL + "/?overwrite=1" + _, _, err := Get(url) if err != nil { t.Fatalf("error with Get of %s: %v", url, err) } - // Verify that the connection is closed when the declared Content-Length // is larger than what the handler wrote. - conn, err := net.Dial("tcp", "", l.Addr().String()) + conn, err := net.Dial("tcp", "", ts.Listener.Addr().String()) if err != nil { t.Fatalf("error dialing: %v", err) } @@ -432,3 +369,141 @@ func TestIdentityResponse(t *testing.T) { expectedSuffix, string(got)) } } + +// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive. 
+func TestServeHTTP10Close(t *testing.T) { + s := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) + defer s.Close() + + conn, err := net.Dial("tcp", "", s.Listener.Addr().String()) + if err != nil { + t.Fatal("dial error:", err) + } + defer conn.Close() + + _, err = fmt.Fprint(conn, "GET / HTTP/1.0\r\n\r\n") + if err != nil { + t.Fatal("print error:", err) + } + + r := bufio.NewReader(conn) + _, err = ReadResponse(r, "GET") + if err != nil { + t.Fatal("ReadResponse error:", err) + } + + success := make(chan bool) + go func() { + select { + case <-time.After(5e9): + t.Fatal("body not closed after 5s") + case <-success: + } + }() + + _, err = ioutil.ReadAll(r) + if err != nil { + t.Fatal("read error:", err) + } + + success <- true +} + +func TestSetsRemoteAddr(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "%s", r.RemoteAddr) + })) + defer ts.Close() + + res, _, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("ReadAll error: %v", err) + } + ip := string(body) + if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") { + t.Fatalf("Expected local addr; got %q", ip) + } +} + +func TestChunkedResponseHeaders(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted + fmt.Fprintf(w, "I am a chunked response.") + })) + defer ts.Close() + + res, _, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + if g, e := res.ContentLength, int64(-1); g != e { + t.Errorf("expected ContentLength of %d; got %d", e, g) + } + if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) { + t.Errorf("expected TransferEncoding of %v; got %v", e, g) + } + if _, haveCL := 
res.Header["Content-Length"]; haveCL { + t.Errorf("Unexpected Content-Length") + } +} + +// Test304Responses verifies that 304s don't declare that they're +// chunking in their response headers and aren't allowed to produce +// output. +func Test304Responses(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(StatusNotModified) + _, err := w.Write([]byte("illegal body")) + if err != ErrBodyNotAllowed { + t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) + } + })) + defer ts.Close() + res, _, err := Get(ts.URL) + if err != nil { + t.Error(err) + } + if len(res.TransferEncoding) > 0 { + t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Error(err) + } + if len(body) > 0 { + t.Errorf("got unexpected body %q", string(body)) + } +} + +// TestHeadResponses verifies that responses to HEAD requests don't +// declare that they're chunking in their response headers and aren't +// allowed to produce output. 
+func TestHeadResponses(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + _, err := w.Write([]byte("Ignored body")) + if err != ErrBodyNotAllowed { + t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) + } + })) + defer ts.Close() + res, err := Head(ts.URL) + if err != nil { + t.Error(err) + } + if len(res.TransferEncoding) > 0 { + t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Error(err) + } + if len(body) > 0 { + t.Errorf("got unexpected body %q", string(body)) + } +} diff --git a/libgo/go/http/server.go b/libgo/go/http/server.go index 977c8c2297a..8e7039371ae 100644 --- a/libgo/go/http/server.go +++ b/libgo/go/http/server.go @@ -6,7 +6,6 @@ // TODO(rsc): // logging -// cgi support // post support package http @@ -49,23 +48,10 @@ type Handler interface { // A ResponseWriter interface is used by an HTTP handler to // construct an HTTP response. type ResponseWriter interface { - // RemoteAddr returns the address of the client that sent the current request - RemoteAddr() string - - // UsingTLS returns true if the client is connected using TLS - UsingTLS() bool - - // SetHeader sets a header line in the eventual response. - // For example, SetHeader("Content-Type", "text/html; charset=utf-8") - // will result in the header line - // - // Content-Type: text/html; charset=utf-8 - // - // being sent. UTF-8 encoded HTML is the default setting for - // Content-Type in this library, so users need not make that - // particular call. Calls to SetHeader after WriteHeader (or Write) - // are ignored. An empty value removes the header if previously set. - SetHeader(string, string) + // Header returns the header map that will be sent by WriteHeader. + // Changing the header after a call to WriteHeader (or Write) has + // no effect. + Header() Header // Write writes the data to the connection as part of an HTTP reply. 
// If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) @@ -78,39 +64,52 @@ type ResponseWriter interface { // Thus explicit calls to WriteHeader are mainly used to // send error codes. WriteHeader(int) +} +// The Flusher interface is implemented by ResponseWriters that allow +// an HTTP handler to flush buffered data to the client. +// +// Note that even for ResponseWriters that support Flush, +// if the client is connected through an HTTP proxy, +// the buffered data may not reach the client until the response +// completes. +type Flusher interface { // Flush sends any buffered data to the client. Flush() +} +// The Hijacker interface is implemented by ResponseWriters that allow +// an HTTP handler to take over the connection. +type Hijacker interface { // Hijack lets the caller take over the connection. // After a call to Hijack(), the HTTP server library // will not do anything else with the connection. // It becomes the caller's responsibility to manage // and close the connection. - Hijack() (io.ReadWriteCloser, *bufio.ReadWriter, os.Error) + Hijack() (net.Conn, *bufio.ReadWriter, os.Error) } // A conn represents the server side of an HTTP connection. type conn struct { - remoteAddr string // network address of remote side - handler Handler // request handler - rwc io.ReadWriteCloser // i/o connection - buf *bufio.ReadWriter // buffered rwc - hijacked bool // connection has been hijacked by handler - usingTLS bool // a flag indicating connection over TLS + remoteAddr string // network address of remote side + handler Handler // request handler + rwc net.Conn // i/o connection + buf *bufio.ReadWriter // buffered rwc + hijacked bool // connection has been hijacked by handler + tlsState *tls.ConnectionState // or nil when not using TLS } // A response represents the server side of an HTTP response. 
type response struct { conn *conn - req *Request // request for this response - chunking bool // using chunked transfer encoding for reply body - wroteHeader bool // reply header has been written - wroteContinue bool // 100 Continue response was written - header map[string]string // reply header parameters - written int64 // number of bytes written in body - contentLength int64 // explicitly-declared Content-Length; or -1 - status int // status code passed to WriteHeader + req *Request // request for this response + chunking bool // using chunked transfer encoding for reply body + wroteHeader bool // reply header has been written + wroteContinue bool // 100 Continue response was written + header Header // reply header parameters + written int64 // number of bytes written in body + contentLength int64 // explicitly-declared Content-Length; or -1 + status int // status code passed to WriteHeader // close connection after this reply. set on request and // updated after response from handler if there's a @@ -125,10 +124,15 @@ func newConn(rwc net.Conn, handler Handler) (c *conn, err os.Error) { c.remoteAddr = rwc.RemoteAddr().String() c.handler = handler c.rwc = rwc - _, c.usingTLS = rwc.(*tls.Conn) br := bufio.NewReader(rwc) bw := bufio.NewWriter(rwc) c.buf = bufio.NewReadWriter(br, bw) + + if tlsConn, ok := rwc.(*tls.Conn); ok { + c.tlsState = new(tls.ConnectionState) + *c.tlsState = tlsConn.ConnectionState() + } + return c, nil } @@ -168,10 +172,13 @@ func (c *conn) readRequest() (w *response, err os.Error) { return nil, err } + req.RemoteAddr = c.remoteAddr + req.TLS = c.tlsState + w = new(response) w.conn = c w.req = req - w.header = make(map[string]string) + w.header = make(Header) w.contentLength = -1 // Expect 100 Continue support @@ -182,21 +189,10 @@ func (c *conn) readRequest() (w *response, err os.Error) { return w, nil } -// UsingTLS implements the ResponseWriter.UsingTLS -func (w *response) UsingTLS() bool { - return w.conn.usingTLS -} - -// RemoteAddr 
implements the ResponseWriter.RemoteAddr method -func (w *response) RemoteAddr() string { return w.conn.remoteAddr } - -// SetHeader implements the ResponseWriter.SetHeader method -// An empty value removes the header from the map. -func (w *response) SetHeader(hdr, val string) { - w.header[CanonicalHeaderKey(hdr)] = val, val != "" +func (w *response) Header() Header { + return w.header } -// WriteHeader implements the ResponseWriter.WriteHeader method func (w *response) WriteHeader(code int) { if w.conn.hijacked { log.Print("http: response.WriteHeader on hijacked connection") @@ -211,55 +207,55 @@ func (w *response) WriteHeader(code int) { if code == StatusNotModified { // Must not have body. for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} { - if w.header[header] != "" { + if w.header.Get(header) != "" { // TODO: return an error if WriteHeader gets a return parameter // or set a flag on w to make future Writes() write an error page? // for now just log and drop the header. log.Printf("http: StatusNotModified response with header %q defined", header) - w.header[header] = "", false + w.header.Del(header) } } } else { // Default output is HTML encoded in UTF-8. - if w.header["Content-Type"] == "" { - w.SetHeader("Content-Type", "text/html; charset=utf-8") + if w.header.Get("Content-Type") == "" { + w.header.Set("Content-Type", "text/html; charset=utf-8") } } - if w.header["Date"] == "" { - w.SetHeader("Date", time.UTC().Format(TimeFormat)) + if w.header.Get("Date") == "" { + w.Header().Set("Date", time.UTC().Format(TimeFormat)) } // Check for a explicit (and valid) Content-Length header. 
var hasCL bool var contentLength int64 - if clenStr, ok := w.header["Content-Length"]; ok { + if clenStr := w.header.Get("Content-Length"); clenStr != "" { var err os.Error contentLength, err = strconv.Atoi64(clenStr) if err == nil { hasCL = true } else { log.Printf("http: invalid Content-Length of %q sent", clenStr) - w.SetHeader("Content-Length", "") + w.header.Del("Content-Length") } } - te, hasTE := w.header["Transfer-Encoding"] + te := w.header.Get("Transfer-Encoding") + hasTE := te != "" if hasCL && hasTE && te != "identity" { // TODO: return an error if WriteHeader gets a return parameter // For now just ignore the Content-Length. log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", te, contentLength) - w.SetHeader("Content-Length", "") + w.header.Del("Content-Length") hasCL = false } - if w.req.Method == "HEAD" { + if w.req.Method == "HEAD" || code == StatusNotModified { // do nothing } else if hasCL { - w.chunking = false w.contentLength = contentLength - w.SetHeader("Transfer-Encoding", "") + w.header.Del("Transfer-Encoding") } else if w.req.ProtoAtLeast(1, 1) { // HTTP/1.1 or greater: use chunked transfer encoding // to avoid closing the connection at EOF. @@ -267,26 +263,28 @@ func (w *response) WriteHeader(code int) { // might have set. Deal with that as need arises once we have a valid // use case. w.chunking = true - w.SetHeader("Transfer-Encoding", "chunked") + w.header.Set("Transfer-Encoding", "chunked") } else { // HTTP version < 1.1: cannot do chunked transfer // encoding and we don't know the Content-Length so // signal EOF by closing connection. 
w.closeAfterReply = true - w.chunking = false // redundant - w.SetHeader("Transfer-Encoding", "") // in case already set + w.header.Del("Transfer-Encoding") // in case already set } if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) { _, connectionHeaderSet := w.header["Connection"] if !connectionHeaderSet { - w.SetHeader("Connection", "keep-alive") + w.header.Set("Connection", "keep-alive") } + } else if !w.req.ProtoAtLeast(1, 1) { + // Client did not ask to keep connection alive. + w.closeAfterReply = true } // Cannot use Content-Length with non-identity Transfer-Encoding. if w.chunking { - w.SetHeader("Content-Length", "") + w.header.Del("Content-Length") } if !w.req.ProtoAtLeast(1, 0) { return @@ -301,13 +299,10 @@ func (w *response) WriteHeader(code int) { text = "status code " + codestring } io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n") - for k, v := range w.header { - io.WriteString(w.conn.buf, k+": "+v+"\r\n") - } + writeSortedHeader(w.conn.buf, w.header, nil) io.WriteString(w.conn.buf, "\r\n") } -// Write implements the ResponseWriter.Write method func (w *response) Write(data []byte) (n int, err os.Error) { if w.conn.hijacked { log.Print("http: response.Write on hijacked connection") @@ -382,7 +377,7 @@ func errorKludge(w *response) { msg += " would ignore this error page if this text weren't here.\n" // Is it text? ("Content-Type" is always in the map) - baseType := strings.Split(w.header["Content-Type"], ";", 2)[0] + baseType := strings.Split(w.header.Get("Content-Type"), ";", 2)[0] switch baseType { case "text/html": io.WriteString(w, "